From d1dd3eae71bb21adf1d688fb7adf947d05cc1d61 Mon Sep 17 00:00:00 2001
From: coanor
Date: Thu, 18 Sep 2025 11:31:26 +0800
Subject: [PATCH 01/10] add demo on grpc dialtesting

---
 dialtesting/grpc.go | 137 + go.mod | 24 +- go.sum | 56 +- .../bufbuild/protocompile/.gitignore | 3 + .../bufbuild/protocompile/.golangci.yml | 105 + .../bufbuild/protocompile/.protoc_version | 1 + .../github.com/bufbuild/protocompile/LICENSE | 201 + .../github.com/bufbuild/protocompile/Makefile | 201 + .../bufbuild/protocompile/README.md | 91 + .../bufbuild/protocompile/ast/doc.go | 75 + .../bufbuild/protocompile/ast/enum.go | 185 + .../bufbuild/protocompile/ast/field.go | 795 ++ .../bufbuild/protocompile/ast/file.go | 358 + .../bufbuild/protocompile/ast/file_info.go | 701 ++ .../bufbuild/protocompile/ast/identifiers.go | 153 + .../bufbuild/protocompile/ast/message.go | 223 + .../bufbuild/protocompile/ast/no_source.go | 142 + .../bufbuild/protocompile/ast/node.go | 139 + .../bufbuild/protocompile/ast/options.go | 413 ++ .../bufbuild/protocompile/ast/ranges.go | 386 + .../bufbuild/protocompile/ast/service.go | 308 + .../bufbuild/protocompile/ast/values.go | 519 ++ .../bufbuild/protocompile/ast/walk.go | 931 +++ .../bufbuild/protocompile/compiler.go | 682 ++ .../github.com/bufbuild/protocompile/doc.go | 82 + .../github.com/bufbuild/protocompile/go.work | 6 + .../bufbuild/protocompile/go.work.sum | 235 + .../internal/editions/editions.go | 420 ++ .../featuresext/cpp_features.protoset | Bin 0 -> 605 bytes .../internal/featuresext/featuresext.go | 84 + .../featuresext/java_features.protoset | Bin 0 -> 856 bytes .../protocompile/internal/message_context.go | 98 + .../internal/messageset/messageset.go | 62 + .../bufbuild/protocompile/internal/norace.go | 19 + .../bufbuild/protocompile/internal/options.go | 71 + .../bufbuild/protocompile/internal/race.go | 19 + .../bufbuild/protocompile/internal/tags.go | 336 + .../bufbuild/protocompile/internal/types.go | 35 + .../bufbuild/protocompile/internal/util.go | 244 + .../protocompile/linker/descriptors.go | 1884 +++++ .../bufbuild/protocompile/linker/doc.go | 48 + .../bufbuild/protocompile/linker/files.go | 366 + .../bufbuild/protocompile/linker/linker.go | 153 + .../protocompile/linker/pathkey_no_unsafe.go | 35 + .../protocompile/linker/pathkey_unsafe.go | 40 + .../bufbuild/protocompile/linker/pool.go | 131 + .../bufbuild/protocompile/linker/resolve.go | 835 +++ .../bufbuild/protocompile/linker/symbols.go | 635 ++ .../bufbuild/protocompile/linker/validate.go | 1153 +++ .../bufbuild/protocompile/options/options.go | 2267 ++++++ .../options/source_retention_options.go | 539 ++ .../protocompile/options/target_types.go | 152 + .../bufbuild/protocompile/parser/.gitignore | 1 + .../bufbuild/protocompile/parser/ast.go | 144 + .../bufbuild/protocompile/parser/clone.go | 183 + .../bufbuild/protocompile/parser/doc.go | 25 + .../bufbuild/protocompile/parser/errors.go | 22 + .../bufbuild/protocompile/parser/lexer.go | 771 ++ .../bufbuild/protocompile/parser/parser.go | 201 + .../bufbuild/protocompile/parser/proto.y | 1498 ++++ .../bufbuild/protocompile/parser/proto.y.go | 2659 +++++++ .../bufbuild/protocompile/parser/result.go | 1012 +++ .../bufbuild/protocompile/parser/validate.go | 568 ++ .../protocompile/protoutil/editions.go | 140 + .../bufbuild/protocompile/protoutil/protos.go | 262 + .../bufbuild/protocompile/reporter/errors.go | 74 + .../protocompile/reporter/reporter.go | 219 + .../bufbuild/protocompile/resolver.go | 215 + .../sourceinfo/source_code_info.go | 962 +++ 
.../bufbuild/protocompile/std_imports.go | 96 + .../protocompile/supported_editions.go | 30 + .../bufbuild/protocompile/walk/walk.go | 446 ++ .../golang/protobuf/jsonpb/decode.go | 531 ++ .../golang/protobuf/jsonpb/encode.go | 560 ++ .../github.com/golang/protobuf/jsonpb/json.go | 69 + .../github.com/golang/protobuf/ptypes/any.go | 180 + .../golang/protobuf/ptypes/any/any.pb.go | 62 + .../github.com/golang/protobuf/ptypes/doc.go | 10 + .../golang/protobuf/ptypes/duration.go | 76 + .../protobuf/ptypes/duration/duration.pb.go | 63 + .../golang/protobuf/ptypes/timestamp.go | 112 + .../protobuf/ptypes/timestamp/timestamp.pb.go | 64 + vendor/github.com/google/uuid/.travis.yml | 9 - vendor/github.com/google/uuid/CHANGELOG.md | 21 + vendor/github.com/google/uuid/CONTRIBUTING.md | 16 + vendor/github.com/google/uuid/README.md | 10 +- vendor/github.com/google/uuid/node_js.go | 2 +- vendor/github.com/google/uuid/uuid.go | 36 +- vendor/github.com/jhump/protoreflect/LICENSE | 202 + .../jhump/protoreflect/codec/codec.go | 218 + .../jhump/protoreflect/codec/decode_fields.go | 318 + .../jhump/protoreflect/codec/doc.go | 7 + .../jhump/protoreflect/codec/encode_fields.go | 288 + .../jhump/protoreflect/desc/cache.go | 48 + .../jhump/protoreflect/desc/convert.go | 294 + .../jhump/protoreflect/desc/descriptor.go | 1847 +++++ .../protoreflect/desc/descriptor_no_unsafe.go | 30 + .../protoreflect/desc/descriptor_unsafe.go | 59 + .../github.com/jhump/protoreflect/desc/doc.go | 62 + .../jhump/protoreflect/desc/imports.go | 360 + .../desc/internal/proto3_optional.go | 75 + .../protoreflect/desc/internal/registry.go | 67 + .../protoreflect/desc/internal/source_info.go | 107 + .../jhump/protoreflect/desc/internal/util.go | 296 + .../jhump/protoreflect/desc/load.go | 257 + .../jhump/protoreflect/desc/protoparse/ast.go | 716 ++ .../protoreflect/desc/protoparse/ast/doc.go | 27 + .../protoreflect/desc/protoparse/ast/enum.go | 154 + .../protoreflect/desc/protoparse/ast/field.go | 659 ++ .../protoreflect/desc/protoparse/ast/file.go | 236 + .../desc/protoparse/ast/identifiers.go | 134 + .../desc/protoparse/ast/message.go | 199 + .../desc/protoparse/ast/no_source.go | 103 + .../protoreflect/desc/protoparse/ast/node.go | 200 + .../desc/protoparse/ast/options.go | 361 + .../protoreflect/desc/protoparse/ast/print.go | 86 + .../desc/protoparse/ast/ranges.go | 305 + .../desc/protoparse/ast/service.go | 273 + .../desc/protoparse/ast/source_pos.go | 29 + .../desc/protoparse/ast/values.go | 575 ++ .../protoreflect/desc/protoparse/ast/walk.go | 497 ++ .../jhump/protoreflect/desc/protoparse/doc.go | 10 + .../protoreflect/desc/protoparse/errors.go | 122 + .../protoreflect/desc/protoparse/parser.go | 804 +++ .../desc/protoparse/resolve_files.go | 175 + .../desc/protoparse/test-source-info.txt | 6401 +++++++++++++++++ .../protoreflect/desc/sourceinfo/locations.go | 207 + .../protoreflect/desc/sourceinfo/registry.go | 269 + .../protoreflect/desc/sourceinfo/wrappers.go | 636 ++ .../jhump/protoreflect/desc/wrap.go | 211 + .../jhump/protoreflect/dynamic/binary.go | 193 + .../jhump/protoreflect/dynamic/doc.go | 159 + .../protoreflect/dynamic/dynamic_message.go | 2830 ++++++++ .../jhump/protoreflect/dynamic/equal.go | 157 + .../jhump/protoreflect/dynamic/extension.go | 46 + .../dynamic/extension_registry.go | 241 + .../protoreflect/dynamic/grpcdynamic/stub.go | 310 + .../jhump/protoreflect/dynamic/indent.go | 76 + .../jhump/protoreflect/dynamic/json.go | 1256 ++++ .../jhump/protoreflect/dynamic/maps_1.11.go | 131 + 
.../jhump/protoreflect/dynamic/maps_1.12.go | 139 + .../jhump/protoreflect/dynamic/merge.go | 100 + .../protoreflect/dynamic/message_factory.go | 207 + .../jhump/protoreflect/dynamic/text.go | 1177 +++ .../protoreflect/internal/codec/buffer.go | 118 + .../protoreflect/internal/codec/decode.go | 346 + .../protoreflect/internal/codec/encode.go | 147 + .../protoreflect/internal/standard_files.go | 127 + .../protoreflect/internal/unrecognized.go | 20 + .../x/crypto/sha3/hashes_generic.go | 1 - vendor/golang.org/x/crypto/sha3/keccakf.go | 1 - .../golang.org/x/crypto/sha3/keccakf_amd64.go | 1 - .../golang.org/x/crypto/sha3/keccakf_amd64.s | 5 +- vendor/golang.org/x/crypto/sha3/register.go | 1 - vendor/golang.org/x/crypto/sha3/sha3_s390x.go | 20 +- vendor/golang.org/x/crypto/sha3/sha3_s390x.s | 1 - .../golang.org/x/crypto/sha3/shake_generic.go | 1 - vendor/golang.org/x/crypto/sha3/xor.go | 1 - .../golang.org/x/crypto/sha3/xor_unaligned.go | 2 - vendor/golang.org/x/net/html/doc.go | 2 +- vendor/golang.org/x/net/html/token.go | 12 +- .../golang.org/x/net/http/httpguts/httplex.go | 13 +- vendor/golang.org/x/net/http2/databuffer.go | 59 +- vendor/golang.org/x/net/http2/frame.go | 51 +- vendor/golang.org/x/net/http2/go111.go | 30 - vendor/golang.org/x/net/http2/go115.go | 27 - vendor/golang.org/x/net/http2/go118.go | 17 - vendor/golang.org/x/net/http2/not_go111.go | 21 - vendor/golang.org/x/net/http2/not_go115.go | 31 - vendor/golang.org/x/net/http2/not_go118.go | 17 - vendor/golang.org/x/net/http2/pipe.go | 11 +- vendor/golang.org/x/net/http2/server.go | 114 +- vendor/golang.org/x/net/http2/testsync.go | 331 + vendor/golang.org/x/net/http2/transport.go | 355 +- vendor/golang.org/x/net/icmp/helper_posix.go | 1 - vendor/golang.org/x/net/icmp/listen_posix.go | 1 - vendor/golang.org/x/net/icmp/listen_stub.go | 1 - vendor/golang.org/x/net/idna/go118.go | 1 - vendor/golang.org/x/net/idna/idna10.0.0.go | 1 - vendor/golang.org/x/net/idna/idna9.0.0.go | 1 - vendor/golang.org/x/net/idna/pre_go118.go | 1 - vendor/golang.org/x/net/idna/tables10.0.0.go | 1 - vendor/golang.org/x/net/idna/tables11.0.0.go | 1 - vendor/golang.org/x/net/idna/tables12.0.0.go | 1 - vendor/golang.org/x/net/idna/tables13.0.0.go | 1 - vendor/golang.org/x/net/idna/tables15.0.0.go | 1 - vendor/golang.org/x/net/idna/tables9.0.0.go | 1 - vendor/golang.org/x/net/idna/trie12.0.0.go | 1 - vendor/golang.org/x/net/idna/trie13.0.0.go | 1 - .../x/net/internal/socket/cmsghdr.go | 1 - .../x/net/internal/socket/cmsghdr_bsd.go | 1 - .../internal/socket/cmsghdr_linux_32bit.go | 2 - .../internal/socket/cmsghdr_linux_64bit.go | 2 - .../internal/socket/cmsghdr_solaris_64bit.go | 1 - .../x/net/internal/socket/cmsghdr_stub.go | 1 - .../x/net/internal/socket/cmsghdr_unix.go | 1 - .../net/internal/socket/complete_dontwait.go | 1 - .../internal/socket/complete_nodontwait.go | 1 - .../golang.org/x/net/internal/socket/empty.s | 1 - .../x/net/internal/socket/error_unix.go | 1 - .../x/net/internal/socket/iovec_32bit.go | 2 - .../x/net/internal/socket/iovec_64bit.go | 2 - .../internal/socket/iovec_solaris_64bit.go | 1 - .../x/net/internal/socket/iovec_stub.go | 1 - .../x/net/internal/socket/mmsghdr_stub.go | 1 - .../x/net/internal/socket/mmsghdr_unix.go | 1 - .../x/net/internal/socket/msghdr_bsd.go | 1 - .../x/net/internal/socket/msghdr_bsdvar.go | 1 - .../net/internal/socket/msghdr_linux_32bit.go | 2 - .../net/internal/socket/msghdr_linux_64bit.go | 2 - .../internal/socket/msghdr_solaris_64bit.go | 1 - .../x/net/internal/socket/msghdr_stub.go | 1 - 
.../x/net/internal/socket/msghdr_zos_s390x.go | 1 - .../x/net/internal/socket/norace.go | 1 - .../golang.org/x/net/internal/socket/race.go | 1 - .../x/net/internal/socket/rawconn_mmsg.go | 1 - .../x/net/internal/socket/rawconn_msg.go | 1 - .../x/net/internal/socket/rawconn_nommsg.go | 1 - .../x/net/internal/socket/rawconn_nomsg.go | 1 - .../x/net/internal/socket/sys_bsd.go | 1 - .../x/net/internal/socket/sys_const_unix.go | 1 - .../x/net/internal/socket/sys_linux.go | 1 - .../net/internal/socket/sys_linux_loong64.go | 1 - .../net/internal/socket/sys_linux_riscv64.go | 1 - .../x/net/internal/socket/sys_posix.go | 1 - .../x/net/internal/socket/sys_stub.go | 1 - .../x/net/internal/socket/sys_unix.go | 1 - .../x/net/internal/socket/zsys_aix_ppc64.go | 1 - .../net/internal/socket/zsys_linux_loong64.go | 1 - .../net/internal/socket/zsys_linux_riscv64.go | 1 - .../x/net/internal/timeseries/timeseries.go | 525 ++ vendor/golang.org/x/net/ipv4/control_bsd.go | 1 - .../golang.org/x/net/ipv4/control_pktinfo.go | 1 - vendor/golang.org/x/net/ipv4/control_stub.go | 1 - vendor/golang.org/x/net/ipv4/control_unix.go | 1 - vendor/golang.org/x/net/ipv4/icmp_stub.go | 1 - vendor/golang.org/x/net/ipv4/payload_cmsg.go | 1 - .../golang.org/x/net/ipv4/payload_nocmsg.go | 1 - vendor/golang.org/x/net/ipv4/sockopt_posix.go | 1 - vendor/golang.org/x/net/ipv4/sockopt_stub.go | 1 - vendor/golang.org/x/net/ipv4/sys_aix.go | 1 - vendor/golang.org/x/net/ipv4/sys_asmreq.go | 1 - .../golang.org/x/net/ipv4/sys_asmreq_stub.go | 1 - vendor/golang.org/x/net/ipv4/sys_asmreqn.go | 1 - .../golang.org/x/net/ipv4/sys_asmreqn_stub.go | 1 - vendor/golang.org/x/net/ipv4/sys_bpf.go | 1 - vendor/golang.org/x/net/ipv4/sys_bpf_stub.go | 1 - vendor/golang.org/x/net/ipv4/sys_bsd.go | 1 - vendor/golang.org/x/net/ipv4/sys_ssmreq.go | 1 - .../golang.org/x/net/ipv4/sys_ssmreq_stub.go | 1 - vendor/golang.org/x/net/ipv4/sys_stub.go | 1 - .../golang.org/x/net/ipv4/zsys_aix_ppc64.go | 1 - .../x/net/ipv4/zsys_linux_loong64.go | 1 - .../x/net/ipv4/zsys_linux_riscv64.go | 1 - .../x/net/ipv6/control_rfc2292_unix.go | 1 - .../x/net/ipv6/control_rfc3542_unix.go | 1 - vendor/golang.org/x/net/ipv6/control_stub.go | 1 - vendor/golang.org/x/net/ipv6/control_unix.go | 1 - vendor/golang.org/x/net/ipv6/icmp_bsd.go | 1 - vendor/golang.org/x/net/ipv6/icmp_stub.go | 1 - vendor/golang.org/x/net/ipv6/payload_cmsg.go | 1 - .../golang.org/x/net/ipv6/payload_nocmsg.go | 1 - vendor/golang.org/x/net/ipv6/sockopt_posix.go | 1 - vendor/golang.org/x/net/ipv6/sockopt_stub.go | 1 - vendor/golang.org/x/net/ipv6/sys_aix.go | 1 - vendor/golang.org/x/net/ipv6/sys_asmreq.go | 1 - .../golang.org/x/net/ipv6/sys_asmreq_stub.go | 1 - vendor/golang.org/x/net/ipv6/sys_bpf.go | 1 - vendor/golang.org/x/net/ipv6/sys_bpf_stub.go | 1 - vendor/golang.org/x/net/ipv6/sys_bsd.go | 1 - vendor/golang.org/x/net/ipv6/sys_ssmreq.go | 1 - .../golang.org/x/net/ipv6/sys_ssmreq_stub.go | 1 - vendor/golang.org/x/net/ipv6/sys_stub.go | 1 - .../golang.org/x/net/ipv6/zsys_aix_ppc64.go | 1 - .../x/net/ipv6/zsys_linux_loong64.go | 1 - .../x/net/ipv6/zsys_linux_riscv64.go | 1 - vendor/golang.org/x/net/trace/events.go | 532 ++ vendor/golang.org/x/net/trace/histogram.go | 365 + vendor/golang.org/x/net/trace/trace.go | 1130 +++ vendor/golang.org/x/sync/LICENSE | 4 +- vendor/golang.org/x/sync/errgroup/errgroup.go | 3 + vendor/golang.org/x/sync/errgroup/go120.go | 1 - .../golang.org/x/sync/errgroup/pre_go120.go | 1 - .../golang.org/x/sync/semaphore/semaphore.go | 160 + vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s | 1 - 
vendor/golang.org/x/sys/cpu/cpu.go | 1 + vendor/golang.org/x/sys/cpu/cpu_aix.go | 1 - vendor/golang.org/x/sys/cpu/cpu_arm64.go | 10 + vendor/golang.org/x/sys/cpu/cpu_arm64.s | 9 +- vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go | 2 +- vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go | 1 - vendor/golang.org/x/sys/cpu/cpu_gc_x86.go | 2 - .../golang.org/x/sys/cpu/cpu_gccgo_arm64.go | 1 - .../golang.org/x/sys/cpu/cpu_gccgo_s390x.go | 1 - vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c | 2 - vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go | 2 - vendor/golang.org/x/sys/cpu/cpu_linux.go | 1 - .../golang.org/x/sys/cpu/cpu_linux_arm64.go | 5 + .../golang.org/x/sys/cpu/cpu_linux_mips64x.go | 2 - .../golang.org/x/sys/cpu/cpu_linux_noinit.go | 1 - .../golang.org/x/sys/cpu/cpu_linux_ppc64x.go | 2 - vendor/golang.org/x/sys/cpu/cpu_loong64.go | 1 - vendor/golang.org/x/sys/cpu/cpu_mips64x.go | 1 - vendor/golang.org/x/sys/cpu/cpu_mipsx.go | 1 - vendor/golang.org/x/sys/cpu/cpu_other_arm.go | 1 - .../golang.org/x/sys/cpu/cpu_other_arm64.go | 1 - .../golang.org/x/sys/cpu/cpu_other_mips64x.go | 2 - .../golang.org/x/sys/cpu/cpu_other_ppc64x.go | 3 - .../golang.org/x/sys/cpu/cpu_other_riscv64.go | 1 - vendor/golang.org/x/sys/cpu/cpu_ppc64x.go | 1 - vendor/golang.org/x/sys/cpu/cpu_riscv64.go | 1 - vendor/golang.org/x/sys/cpu/cpu_s390x.s | 1 - vendor/golang.org/x/sys/cpu/cpu_wasm.go | 1 - vendor/golang.org/x/sys/cpu/cpu_x86.go | 1 - vendor/golang.org/x/sys/cpu/cpu_x86.s | 2 - vendor/golang.org/x/sys/cpu/endian_big.go | 1 - vendor/golang.org/x/sys/cpu/endian_little.go | 1 - .../x/sys/cpu/proc_cpuinfo_linux.go | 1 - .../x/sys/cpu/runtime_auxv_go121.go | 1 - .../golang.org/x/sys/cpu/syscall_aix_gccgo.go | 1 - .../x/sys/cpu/syscall_aix_ppc64_gc.go | 1 - .../golang.org/x/sys/execabs/execabs_go118.go | 1 - .../golang.org/x/sys/execabs/execabs_go119.go | 1 - vendor/golang.org/x/sys/unix/aliases.go | 4 +- vendor/golang.org/x/sys/unix/asm_aix_ppc64.s | 1 - vendor/golang.org/x/sys/unix/asm_bsd_386.s | 2 - vendor/golang.org/x/sys/unix/asm_bsd_amd64.s | 2 - vendor/golang.org/x/sys/unix/asm_bsd_arm.s | 2 - vendor/golang.org/x/sys/unix/asm_bsd_arm64.s | 2 - vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s | 2 - .../golang.org/x/sys/unix/asm_bsd_riscv64.s | 2 - vendor/golang.org/x/sys/unix/asm_linux_386.s | 1 - .../golang.org/x/sys/unix/asm_linux_amd64.s | 1 - vendor/golang.org/x/sys/unix/asm_linux_arm.s | 1 - .../golang.org/x/sys/unix/asm_linux_arm64.s | 3 - .../golang.org/x/sys/unix/asm_linux_loong64.s | 3 - .../golang.org/x/sys/unix/asm_linux_mips64x.s | 3 - .../golang.org/x/sys/unix/asm_linux_mipsx.s | 3 - .../golang.org/x/sys/unix/asm_linux_ppc64x.s | 3 - .../golang.org/x/sys/unix/asm_linux_riscv64.s | 2 - .../golang.org/x/sys/unix/asm_linux_s390x.s | 3 - .../x/sys/unix/asm_openbsd_mips64.s | 1 - .../golang.org/x/sys/unix/asm_solaris_amd64.s | 1 - vendor/golang.org/x/sys/unix/asm_zos_s390x.s | 668 +- vendor/golang.org/x/sys/unix/bpxsvc_zos.go | 657 ++ vendor/golang.org/x/sys/unix/bpxsvc_zos.s | 192 + vendor/golang.org/x/sys/unix/cap_freebsd.go | 1 - vendor/golang.org/x/sys/unix/constants.go | 1 - vendor/golang.org/x/sys/unix/dev_aix_ppc.go | 1 - vendor/golang.org/x/sys/unix/dev_aix_ppc64.go | 1 - vendor/golang.org/x/sys/unix/dev_zos.go | 1 - vendor/golang.org/x/sys/unix/dirent.go | 1 - vendor/golang.org/x/sys/unix/endian_big.go | 1 - vendor/golang.org/x/sys/unix/endian_little.go | 1 - vendor/golang.org/x/sys/unix/env_unix.go | 1 - vendor/golang.org/x/sys/unix/epoll_zos.go | 221 - vendor/golang.org/x/sys/unix/fcntl.go | 3 +- 
.../x/sys/unix/fcntl_linux_32bit.go | 1 - vendor/golang.org/x/sys/unix/fdset.go | 1 - vendor/golang.org/x/sys/unix/fstatfs_zos.go | 164 - vendor/golang.org/x/sys/unix/gccgo.go | 1 - vendor/golang.org/x/sys/unix/gccgo_c.c | 1 - .../x/sys/unix/gccgo_linux_amd64.go | 1 - vendor/golang.org/x/sys/unix/ifreq_linux.go | 1 - vendor/golang.org/x/sys/unix/ioctl_linux.go | 5 + vendor/golang.org/x/sys/unix/ioctl_signed.go | 1 - .../golang.org/x/sys/unix/ioctl_unsigned.go | 1 - vendor/golang.org/x/sys/unix/ioctl_zos.go | 1 - vendor/golang.org/x/sys/unix/mkerrors.sh | 43 +- vendor/golang.org/x/sys/unix/mmap_nomremap.go | 3 +- vendor/golang.org/x/sys/unix/mremap.go | 1 - vendor/golang.org/x/sys/unix/pagesize_unix.go | 3 +- .../golang.org/x/sys/unix/pledge_openbsd.go | 92 +- vendor/golang.org/x/sys/unix/ptrace_darwin.go | 1 - vendor/golang.org/x/sys/unix/ptrace_ios.go | 1 - vendor/golang.org/x/sys/unix/race.go | 1 - vendor/golang.org/x/sys/unix/race0.go | 1 - .../x/sys/unix/readdirent_getdents.go | 1 - .../x/sys/unix/readdirent_getdirentries.go | 3 +- vendor/golang.org/x/sys/unix/sockcmsg_unix.go | 1 - .../x/sys/unix/sockcmsg_unix_other.go | 1 - vendor/golang.org/x/sys/unix/sockcmsg_zos.go | 58 + .../golang.org/x/sys/unix/symaddr_zos_s390x.s | 75 + vendor/golang.org/x/sys/unix/syscall.go | 1 - vendor/golang.org/x/sys/unix/syscall_aix.go | 4 +- .../golang.org/x/sys/unix/syscall_aix_ppc.go | 1 - .../x/sys/unix/syscall_aix_ppc64.go | 1 - vendor/golang.org/x/sys/unix/syscall_bsd.go | 3 +- .../x/sys/unix/syscall_darwin_amd64.go | 1 - .../x/sys/unix/syscall_darwin_arm64.go | 1 - .../x/sys/unix/syscall_darwin_libSystem.go | 3 +- .../x/sys/unix/syscall_dragonfly_amd64.go | 1 - .../golang.org/x/sys/unix/syscall_freebsd.go | 12 +- .../x/sys/unix/syscall_freebsd_386.go | 1 - .../x/sys/unix/syscall_freebsd_amd64.go | 1 - .../x/sys/unix/syscall_freebsd_arm.go | 1 - .../x/sys/unix/syscall_freebsd_arm64.go | 1 - .../x/sys/unix/syscall_freebsd_riscv64.go | 1 - vendor/golang.org/x/sys/unix/syscall_hurd.go | 1 - .../golang.org/x/sys/unix/syscall_hurd_386.go | 1 - .../golang.org/x/sys/unix/syscall_illumos.go | 1 - vendor/golang.org/x/sys/unix/syscall_linux.go | 132 +- .../x/sys/unix/syscall_linux_386.go | 1 - .../x/sys/unix/syscall_linux_alarm.go | 2 - .../x/sys/unix/syscall_linux_amd64.go | 1 - .../x/sys/unix/syscall_linux_amd64_gc.go | 1 - .../x/sys/unix/syscall_linux_arm.go | 1 - .../x/sys/unix/syscall_linux_arm64.go | 1 - .../golang.org/x/sys/unix/syscall_linux_gc.go | 1 - .../x/sys/unix/syscall_linux_gc_386.go | 1 - .../x/sys/unix/syscall_linux_gc_arm.go | 1 - .../x/sys/unix/syscall_linux_gccgo_386.go | 1 - .../x/sys/unix/syscall_linux_gccgo_arm.go | 1 - .../x/sys/unix/syscall_linux_loong64.go | 1 - .../x/sys/unix/syscall_linux_mips64x.go | 2 - .../x/sys/unix/syscall_linux_mipsx.go | 2 - .../x/sys/unix/syscall_linux_ppc.go | 1 - .../x/sys/unix/syscall_linux_ppc64x.go | 2 - .../x/sys/unix/syscall_linux_riscv64.go | 1 - .../x/sys/unix/syscall_linux_s390x.go | 1 - .../x/sys/unix/syscall_linux_sparc64.go | 1 - .../x/sys/unix/syscall_netbsd_386.go | 1 - .../x/sys/unix/syscall_netbsd_amd64.go | 1 - .../x/sys/unix/syscall_netbsd_arm.go | 1 - .../x/sys/unix/syscall_netbsd_arm64.go | 1 - .../golang.org/x/sys/unix/syscall_openbsd.go | 28 +- .../x/sys/unix/syscall_openbsd_386.go | 1 - .../x/sys/unix/syscall_openbsd_amd64.go | 1 - .../x/sys/unix/syscall_openbsd_arm.go | 1 - .../x/sys/unix/syscall_openbsd_arm64.go | 1 - .../x/sys/unix/syscall_openbsd_libc.go | 1 - .../x/sys/unix/syscall_openbsd_ppc64.go | 1 - 
.../x/sys/unix/syscall_openbsd_riscv64.go | 1 - .../golang.org/x/sys/unix/syscall_solaris.go | 5 +- .../x/sys/unix/syscall_solaris_amd64.go | 1 - vendor/golang.org/x/sys/unix/syscall_unix.go | 1 - .../golang.org/x/sys/unix/syscall_unix_gc.go | 2 - .../x/sys/unix/syscall_unix_gc_ppc64x.go | 3 - .../x/sys/unix/syscall_zos_s390x.go | 1512 +++- vendor/golang.org/x/sys/unix/sysvshm_linux.go | 1 - vendor/golang.org/x/sys/unix/sysvshm_unix.go | 3 +- .../x/sys/unix/sysvshm_unix_other.go | 3 +- vendor/golang.org/x/sys/unix/timestruct.go | 1 - .../golang.org/x/sys/unix/unveil_openbsd.go | 41 +- vendor/golang.org/x/sys/unix/xattr_bsd.go | 1 - .../golang.org/x/sys/unix/zerrors_aix_ppc.go | 1 - .../x/sys/unix/zerrors_aix_ppc64.go | 1 - .../x/sys/unix/zerrors_darwin_amd64.go | 1 - .../x/sys/unix/zerrors_darwin_arm64.go | 1 - .../x/sys/unix/zerrors_dragonfly_amd64.go | 1 - .../x/sys/unix/zerrors_freebsd_386.go | 1 - .../x/sys/unix/zerrors_freebsd_amd64.go | 1 - .../x/sys/unix/zerrors_freebsd_arm.go | 1 - .../x/sys/unix/zerrors_freebsd_arm64.go | 1 - .../x/sys/unix/zerrors_freebsd_riscv64.go | 1 - vendor/golang.org/x/sys/unix/zerrors_linux.go | 113 +- .../x/sys/unix/zerrors_linux_386.go | 4 +- .../x/sys/unix/zerrors_linux_amd64.go | 4 +- .../x/sys/unix/zerrors_linux_arm.go | 4 +- .../x/sys/unix/zerrors_linux_arm64.go | 4 +- .../x/sys/unix/zerrors_linux_loong64.go | 5 +- .../x/sys/unix/zerrors_linux_mips.go | 4 +- .../x/sys/unix/zerrors_linux_mips64.go | 4 +- .../x/sys/unix/zerrors_linux_mips64le.go | 4 +- .../x/sys/unix/zerrors_linux_mipsle.go | 4 +- .../x/sys/unix/zerrors_linux_ppc.go | 4 +- .../x/sys/unix/zerrors_linux_ppc64.go | 4 +- .../x/sys/unix/zerrors_linux_ppc64le.go | 4 +- .../x/sys/unix/zerrors_linux_riscv64.go | 7 +- .../x/sys/unix/zerrors_linux_s390x.go | 4 +- .../x/sys/unix/zerrors_linux_sparc64.go | 4 +- .../x/sys/unix/zerrors_netbsd_386.go | 1 - .../x/sys/unix/zerrors_netbsd_amd64.go | 1 - .../x/sys/unix/zerrors_netbsd_arm.go | 1 - .../x/sys/unix/zerrors_netbsd_arm64.go | 1 - .../x/sys/unix/zerrors_openbsd_386.go | 1 - .../x/sys/unix/zerrors_openbsd_amd64.go | 1 - .../x/sys/unix/zerrors_openbsd_arm.go | 1 - .../x/sys/unix/zerrors_openbsd_arm64.go | 1 - .../x/sys/unix/zerrors_openbsd_mips64.go | 1 - .../x/sys/unix/zerrors_openbsd_ppc64.go | 1 - .../x/sys/unix/zerrors_openbsd_riscv64.go | 1 - .../x/sys/unix/zerrors_solaris_amd64.go | 1 - .../x/sys/unix/zerrors_zos_s390x.go | 234 +- .../x/sys/unix/zptrace_armnn_linux.go | 2 - .../x/sys/unix/zptrace_mipsnn_linux.go | 2 - .../x/sys/unix/zptrace_mipsnnle_linux.go | 2 - .../x/sys/unix/zptrace_x86_linux.go | 2 - .../x/sys/unix/zsymaddr_zos_s390x.s | 364 + .../golang.org/x/sys/unix/zsyscall_aix_ppc.go | 1 - .../x/sys/unix/zsyscall_aix_ppc64.go | 1 - .../x/sys/unix/zsyscall_aix_ppc64_gc.go | 1 - .../x/sys/unix/zsyscall_aix_ppc64_gccgo.go | 1 - .../x/sys/unix/zsyscall_darwin_amd64.go | 1 - .../x/sys/unix/zsyscall_darwin_arm64.go | 1 - .../x/sys/unix/zsyscall_dragonfly_amd64.go | 1 - .../x/sys/unix/zsyscall_freebsd_386.go | 1 - .../x/sys/unix/zsyscall_freebsd_amd64.go | 1 - .../x/sys/unix/zsyscall_freebsd_arm.go | 1 - .../x/sys/unix/zsyscall_freebsd_arm64.go | 1 - .../x/sys/unix/zsyscall_freebsd_riscv64.go | 1 - .../x/sys/unix/zsyscall_illumos_amd64.go | 1 - .../golang.org/x/sys/unix/zsyscall_linux.go | 36 +- .../x/sys/unix/zsyscall_linux_386.go | 1 - .../x/sys/unix/zsyscall_linux_amd64.go | 1 - .../x/sys/unix/zsyscall_linux_arm.go | 1 - .../x/sys/unix/zsyscall_linux_arm64.go | 1 - .../x/sys/unix/zsyscall_linux_loong64.go | 1 - 
.../x/sys/unix/zsyscall_linux_mips.go | 1 - .../x/sys/unix/zsyscall_linux_mips64.go | 1 - .../x/sys/unix/zsyscall_linux_mips64le.go | 1 - .../x/sys/unix/zsyscall_linux_mipsle.go | 1 - .../x/sys/unix/zsyscall_linux_ppc.go | 1 - .../x/sys/unix/zsyscall_linux_ppc64.go | 1 - .../x/sys/unix/zsyscall_linux_ppc64le.go | 1 - .../x/sys/unix/zsyscall_linux_riscv64.go | 1 - .../x/sys/unix/zsyscall_linux_s390x.go | 1 - .../x/sys/unix/zsyscall_linux_sparc64.go | 1 - .../x/sys/unix/zsyscall_netbsd_386.go | 1 - .../x/sys/unix/zsyscall_netbsd_amd64.go | 1 - .../x/sys/unix/zsyscall_netbsd_arm.go | 1 - .../x/sys/unix/zsyscall_netbsd_arm64.go | 1 - .../x/sys/unix/zsyscall_openbsd_386.go | 70 +- .../x/sys/unix/zsyscall_openbsd_386.s | 20 + .../x/sys/unix/zsyscall_openbsd_amd64.go | 70 +- .../x/sys/unix/zsyscall_openbsd_amd64.s | 20 + .../x/sys/unix/zsyscall_openbsd_arm.go | 70 +- .../x/sys/unix/zsyscall_openbsd_arm.s | 20 + .../x/sys/unix/zsyscall_openbsd_arm64.go | 70 +- .../x/sys/unix/zsyscall_openbsd_arm64.s | 20 + .../x/sys/unix/zsyscall_openbsd_mips64.go | 70 +- .../x/sys/unix/zsyscall_openbsd_mips64.s | 20 + .../x/sys/unix/zsyscall_openbsd_ppc64.go | 70 +- .../x/sys/unix/zsyscall_openbsd_ppc64.s | 24 + .../x/sys/unix/zsyscall_openbsd_riscv64.go | 70 +- .../x/sys/unix/zsyscall_openbsd_riscv64.s | 20 + .../x/sys/unix/zsyscall_solaris_amd64.go | 1 - .../x/sys/unix/zsyscall_zos_s390x.go | 3114 ++++++-- .../x/sys/unix/zsysctl_openbsd_386.go | 1 - .../x/sys/unix/zsysctl_openbsd_amd64.go | 1 - .../x/sys/unix/zsysctl_openbsd_arm.go | 1 - .../x/sys/unix/zsysctl_openbsd_arm64.go | 1 - .../x/sys/unix/zsysctl_openbsd_mips64.go | 1 - .../x/sys/unix/zsysctl_openbsd_ppc64.go | 1 - .../x/sys/unix/zsysctl_openbsd_riscv64.go | 1 - .../x/sys/unix/zsysnum_darwin_amd64.go | 1 - .../x/sys/unix/zsysnum_darwin_arm64.go | 1 - .../x/sys/unix/zsysnum_dragonfly_amd64.go | 1 - .../x/sys/unix/zsysnum_freebsd_386.go | 1 - .../x/sys/unix/zsysnum_freebsd_amd64.go | 1 - .../x/sys/unix/zsysnum_freebsd_arm.go | 1 - .../x/sys/unix/zsysnum_freebsd_arm64.go | 1 - .../x/sys/unix/zsysnum_freebsd_riscv64.go | 1 - .../x/sys/unix/zsysnum_linux_386.go | 11 +- .../x/sys/unix/zsysnum_linux_amd64.go | 11 +- .../x/sys/unix/zsysnum_linux_arm.go | 11 +- .../x/sys/unix/zsysnum_linux_arm64.go | 11 +- .../x/sys/unix/zsysnum_linux_loong64.go | 11 +- .../x/sys/unix/zsysnum_linux_mips.go | 11 +- .../x/sys/unix/zsysnum_linux_mips64.go | 11 +- .../x/sys/unix/zsysnum_linux_mips64le.go | 11 +- .../x/sys/unix/zsysnum_linux_mipsle.go | 11 +- .../x/sys/unix/zsysnum_linux_ppc.go | 11 +- .../x/sys/unix/zsysnum_linux_ppc64.go | 11 +- .../x/sys/unix/zsysnum_linux_ppc64le.go | 11 +- .../x/sys/unix/zsysnum_linux_riscv64.go | 11 +- .../x/sys/unix/zsysnum_linux_s390x.go | 11 +- .../x/sys/unix/zsysnum_linux_sparc64.go | 11 +- .../x/sys/unix/zsysnum_netbsd_386.go | 1 - .../x/sys/unix/zsysnum_netbsd_amd64.go | 1 - .../x/sys/unix/zsysnum_netbsd_arm.go | 1 - .../x/sys/unix/zsysnum_netbsd_arm64.go | 1 - .../x/sys/unix/zsysnum_openbsd_386.go | 1 - .../x/sys/unix/zsysnum_openbsd_amd64.go | 1 - .../x/sys/unix/zsysnum_openbsd_arm.go | 1 - .../x/sys/unix/zsysnum_openbsd_arm64.go | 1 - .../x/sys/unix/zsysnum_openbsd_mips64.go | 1 - .../x/sys/unix/zsysnum_openbsd_ppc64.go | 1 - .../x/sys/unix/zsysnum_openbsd_riscv64.go | 1 - .../x/sys/unix/zsysnum_zos_s390x.go | 5508 +++++++------- .../golang.org/x/sys/unix/ztypes_aix_ppc.go | 1 - .../golang.org/x/sys/unix/ztypes_aix_ppc64.go | 1 - .../x/sys/unix/ztypes_darwin_amd64.go | 1 - .../x/sys/unix/ztypes_darwin_arm64.go | 1 - 
.../x/sys/unix/ztypes_dragonfly_amd64.go | 1 - .../x/sys/unix/ztypes_freebsd_386.go | 1 - .../x/sys/unix/ztypes_freebsd_amd64.go | 1 - .../x/sys/unix/ztypes_freebsd_arm.go | 1 - .../x/sys/unix/ztypes_freebsd_arm64.go | 1 - .../x/sys/unix/ztypes_freebsd_riscv64.go | 1 - vendor/golang.org/x/sys/unix/ztypes_linux.go | 254 +- .../golang.org/x/sys/unix/ztypes_linux_386.go | 9 - .../x/sys/unix/ztypes_linux_amd64.go | 10 - .../golang.org/x/sys/unix/ztypes_linux_arm.go | 10 - .../x/sys/unix/ztypes_linux_arm64.go | 10 - .../x/sys/unix/ztypes_linux_loong64.go | 10 - .../x/sys/unix/ztypes_linux_mips.go | 10 - .../x/sys/unix/ztypes_linux_mips64.go | 10 - .../x/sys/unix/ztypes_linux_mips64le.go | 10 - .../x/sys/unix/ztypes_linux_mipsle.go | 10 - .../golang.org/x/sys/unix/ztypes_linux_ppc.go | 10 - .../x/sys/unix/ztypes_linux_ppc64.go | 10 - .../x/sys/unix/ztypes_linux_ppc64le.go | 10 - .../x/sys/unix/ztypes_linux_riscv64.go | 10 - .../x/sys/unix/ztypes_linux_s390x.go | 10 - .../x/sys/unix/ztypes_linux_sparc64.go | 10 - .../x/sys/unix/ztypes_netbsd_386.go | 1 - .../x/sys/unix/ztypes_netbsd_amd64.go | 1 - .../x/sys/unix/ztypes_netbsd_arm.go | 1 - .../x/sys/unix/ztypes_netbsd_arm64.go | 1 - .../x/sys/unix/ztypes_openbsd_386.go | 1 - .../x/sys/unix/ztypes_openbsd_amd64.go | 1 - .../x/sys/unix/ztypes_openbsd_arm.go | 1 - .../x/sys/unix/ztypes_openbsd_arm64.go | 1 - .../x/sys/unix/ztypes_openbsd_mips64.go | 1 - .../x/sys/unix/ztypes_openbsd_ppc64.go | 1 - .../x/sys/unix/ztypes_openbsd_riscv64.go | 1 - .../x/sys/unix/ztypes_solaris_amd64.go | 1 - .../golang.org/x/sys/unix/ztypes_zos_s390x.go | 147 +- vendor/golang.org/x/sys/windows/aliases.go | 3 +- vendor/golang.org/x/sys/windows/empty.s | 9 - .../golang.org/x/sys/windows/env_windows.go | 17 +- vendor/golang.org/x/sys/windows/eventlog.go | 1 - vendor/golang.org/x/sys/windows/mksyscall.go | 1 - vendor/golang.org/x/sys/windows/race.go | 1 - vendor/golang.org/x/sys/windows/race0.go | 1 - .../golang.org/x/sys/windows/registry/key.go | 1 - .../x/sys/windows/registry/mksyscall.go | 1 - .../x/sys/windows/registry/syscall.go | 1 - .../x/sys/windows/registry/value.go | 1 - vendor/golang.org/x/sys/windows/service.go | 1 - vendor/golang.org/x/sys/windows/str.go | 1 - vendor/golang.org/x/sys/windows/syscall.go | 1 - .../x/sys/windows/syscall_windows.go | 92 +- .../golang.org/x/sys/windows/types_windows.go | 52 +- .../x/sys/windows/zsyscall_windows.go | 145 + .../x/text/secure/bidirule/bidirule10.0.0.go | 1 - .../x/text/secure/bidirule/bidirule9.0.0.go | 1 - .../x/text/unicode/bidi/tables10.0.0.go | 1 - .../x/text/unicode/bidi/tables11.0.0.go | 1 - .../x/text/unicode/bidi/tables12.0.0.go | 1 - .../x/text/unicode/bidi/tables13.0.0.go | 1 - .../x/text/unicode/bidi/tables15.0.0.go | 1 - .../x/text/unicode/bidi/tables9.0.0.go | 1 - .../x/text/unicode/norm/tables10.0.0.go | 1 - .../x/text/unicode/norm/tables11.0.0.go | 1 - .../x/text/unicode/norm/tables12.0.0.go | 1 - .../x/text/unicode/norm/tables13.0.0.go | 1 - .../x/text/unicode/norm/tables15.0.0.go | 1 - .../x/text/unicode/norm/tables9.0.0.go | 1 - .../genproto/googleapis/rpc/LICENSE | 202 + .../googleapis/rpc/status/status.pb.go | 203 + vendor/google.golang.org/grpc/AUTHORS | 1 + .../google.golang.org/grpc/CODE-OF-CONDUCT.md | 3 + vendor/google.golang.org/grpc/CONTRIBUTING.md | 73 + vendor/google.golang.org/grpc/GOVERNANCE.md | 1 + vendor/google.golang.org/grpc/LICENSE | 202 + vendor/google.golang.org/grpc/MAINTAINERS.md | 28 + vendor/google.golang.org/grpc/Makefile | 46 + vendor/google.golang.org/grpc/NOTICE.txt | 13 + 
vendor/google.golang.org/grpc/README.md | 107 + vendor/google.golang.org/grpc/SECURITY.md | 3 + .../grpc/attributes/attributes.go | 141 + vendor/google.golang.org/grpc/backoff.go | 61 + .../google.golang.org/grpc/backoff/backoff.go | 52 + .../grpc/balancer/balancer.go | 442 ++ .../grpc/balancer/base/balancer.go | 264 + .../grpc/balancer/base/base.go | 71 + .../grpc/balancer/conn_state_evaluator.go | 74 + .../grpc/balancer/grpclb/state/state.go | 51 + .../grpc/balancer/roundrobin/roundrobin.go | 81 + .../grpc/balancer_wrapper.go | 380 + .../grpc_binarylog_v1/binarylog.pb.go | 1183 +++ vendor/google.golang.org/grpc/call.go | 74 + .../grpc/channelz/channelz.go | 36 + vendor/google.golang.org/grpc/clientconn.go | 1876 +++++ vendor/google.golang.org/grpc/codec.go | 50 + vendor/google.golang.org/grpc/codegen.sh | 17 + .../grpc/codes/code_string.go | 111 + vendor/google.golang.org/grpc/codes/codes.go | 250 + .../grpc/connectivity/connectivity.go | 94 + .../grpc/credentials/credentials.go | 291 + .../grpc/credentials/insecure/insecure.go | 98 + .../google.golang.org/grpc/credentials/tls.go | 251 + vendor/google.golang.org/grpc/dialoptions.go | 718 ++ vendor/google.golang.org/grpc/doc.go | 26 + .../grpc/encoding/encoding.go | 130 + .../grpc/encoding/proto/proto.go | 58 + .../grpc/grpclog/component.go | 117 + .../google.golang.org/grpc/grpclog/grpclog.go | 132 + .../google.golang.org/grpc/grpclog/logger.go | 87 + .../grpc/grpclog/loggerv2.go | 258 + vendor/google.golang.org/grpc/interceptor.go | 104 + .../grpc/internal/backoff/backoff.go | 109 + .../balancer/gracefulswitch/gracefulswitch.go | 385 + .../grpc/internal/balancerload/load.go | 46 + .../grpc/internal/binarylog/binarylog.go | 192 + .../internal/binarylog/binarylog_testutil.go | 42 + .../grpc/internal/binarylog/env_config.go | 208 + .../grpc/internal/binarylog/method_logger.go | 445 ++ .../grpc/internal/binarylog/sink.go | 170 + .../grpc/internal/buffer/unbounded.go | 116 + .../grpc/internal/channelz/funcs.go | 763 ++ .../grpc/internal/channelz/id.go | 75 + .../grpc/internal/channelz/logging.go | 79 + .../grpc/internal/channelz/types.go | 727 ++ .../grpc/internal/channelz/types_linux.go | 51 + .../grpc/internal/channelz/types_nonlinux.go | 43 + .../grpc/internal/channelz/util_linux.go | 37 + .../grpc/internal/channelz/util_nonlinux.go | 27 + .../grpc/internal/credentials/credentials.go | 49 + .../grpc/internal/credentials/spiffe.go | 75 + .../grpc/internal/credentials/syscallconn.go | 58 + .../grpc/internal/credentials/util.go | 52 + .../grpc/internal/envconfig/envconfig.go | 69 + .../grpc/internal/envconfig/observability.go | 42 + .../grpc/internal/envconfig/xds.go | 56 + .../grpc/internal/experimental.go | 28 + .../grpc/internal/grpclog/grpclog.go | 126 + .../grpc/internal/grpclog/prefixLogger.go | 93 + .../grpc/internal/grpcrand/grpcrand.go | 95 + .../internal/grpcsync/callback_serializer.go | 100 + .../grpc/internal/grpcsync/event.go | 61 + .../grpc/internal/grpcsync/oncefunc.go | 32 + .../grpc/internal/grpcsync/pubsub.go | 121 + .../grpc/internal/grpcutil/compressor.go | 47 + .../grpc/internal/grpcutil/encode_duration.go | 63 + .../grpc/internal/grpcutil/grpcutil.go | 20 + .../grpc/internal/grpcutil/metadata.go | 40 + .../grpc/internal/grpcutil/method.go | 88 + .../grpc/internal/grpcutil/regex.go | 31 + .../grpc/internal/idle/idle.go | 278 + .../grpc/internal/internal.go | 226 + .../grpc/internal/metadata/metadata.go | 132 + .../grpc/internal/pretty/pretty.go | 82 + .../grpc/internal/resolver/config_selector.go | 167 + 
.../internal/resolver/dns/dns_resolver.go | 441 ++ .../resolver/dns/internal/internal.go | 70 + .../resolver/passthrough/passthrough.go | 64 + .../grpc/internal/resolver/unix/unix.go | 78 + .../grpc/internal/serviceconfig/duration.go | 130 + .../internal/serviceconfig/serviceconfig.go | 180 + .../grpc/internal/status/status.go | 204 + .../grpc/internal/syscall/syscall_linux.go | 112 + .../grpc/internal/syscall/syscall_nonlinux.go | 77 + .../grpc/internal/tcp_keepalive_others.go | 29 + .../grpc/internal/tcp_keepalive_unix.go | 54 + .../grpc/internal/tcp_keepalive_windows.go | 54 + .../grpc/internal/transport/bdp_estimator.go | 141 + .../grpc/internal/transport/controlbuf.go | 1007 +++ .../grpc/internal/transport/defaults.go | 55 + .../grpc/internal/transport/flowcontrol.go | 215 + .../grpc/internal/transport/handler_server.go | 488 ++ .../grpc/internal/transport/http2_client.go | 1790 +++++ .../grpc/internal/transport/http2_server.go | 1446 ++++ .../grpc/internal/transport/http_util.go | 465 ++ .../grpc/internal/transport/logging.go | 40 + .../transport/networktype/networktype.go | 46 + .../grpc/internal/transport/proxy.go | 144 + .../grpc/internal/transport/transport.go | 851 +++ .../grpc/internal/xds_handshake_cluster.go | 40 + .../grpc/keepalive/keepalive.go | 85 + .../grpc/metadata/metadata.go | 300 + vendor/google.golang.org/grpc/peer/peer.go | 53 + .../google.golang.org/grpc/picker_wrapper.go | 223 + vendor/google.golang.org/grpc/pickfirst.go | 249 + vendor/google.golang.org/grpc/preloader.go | 67 + vendor/google.golang.org/grpc/regenerate.sh | 123 + .../grpc/resolver/dns/dns_resolver.go | 36 + vendor/google.golang.org/grpc/resolver/map.go | 251 + .../grpc/resolver/resolver.go | 326 + .../grpc/resolver_wrapper.go | 197 + vendor/google.golang.org/grpc/rpc_util.go | 963 +++ vendor/google.golang.org/grpc/server.go | 2208 ++++++ .../google.golang.org/grpc/service_config.go | 347 + .../grpc/serviceconfig/serviceconfig.go | 44 + .../grpc/shared_buffer_pool.go | 154 + .../google.golang.org/grpc/stats/handlers.go | 63 + vendor/google.golang.org/grpc/stats/stats.go | 343 + .../google.golang.org/grpc/status/status.go | 162 + vendor/google.golang.org/grpc/stream.go | 1782 +++++ vendor/google.golang.org/grpc/tap/tap.go | 62 + vendor/google.golang.org/grpc/trace.go | 123 + vendor/google.golang.org/grpc/version.go | 22 + vendor/google.golang.org/grpc/vet.sh | 190 + .../protobuf/encoding/protojson/decode.go | 685 ++ .../protobuf/encoding/protojson/doc.go | 11 + .../protobuf/encoding/protojson/encode.go | 382 + .../encoding/protojson/well_known_types.go | 876 +++ .../protobuf/encoding/prototext/decode.go | 12 +- .../protobuf/encoding/prototext/encode.go | 24 +- .../protobuf/encoding/protowire/wire.go | 28 +- .../protobuf/internal/descfmt/stringer.go | 184 +- .../internal/editiondefaults/defaults.go | 12 + .../editiondefaults/editions_defaults.binpb | Bin 0 -> 93 bytes .../internal/editionssupport/editions.go | 13 + .../protobuf/internal/encoding/json/decode.go | 340 + .../internal/encoding/json/decode_number.go | 254 + .../internal/encoding/json/decode_string.go | 91 + .../internal/encoding/json/decode_token.go | 192 + .../protobuf/internal/encoding/json/encode.go | 278 + .../protobuf/internal/encoding/tag/tag.go | 4 +- .../protobuf/internal/encoding/text/decode.go | 2 +- .../protobuf/internal/errors/errors.go | 21 +- .../protobuf/internal/filedesc/desc.go | 166 +- .../protobuf/internal/filedesc/desc_init.go | 89 +- .../protobuf/internal/filedesc/desc_lazy.go | 45 +- 
.../internal/filedesc/desc_list_gen.go | 11 + .../protobuf/internal/filedesc/editions.go | 156 + .../protobuf/internal/filedesc/placeholder.go | 1 + .../protobuf/internal/filetype/build.go | 4 +- .../protobuf/internal/genid/descriptor_gen.go | 401 +- .../internal/genid/go_features_gen.go | 31 + .../protobuf/internal/genid/struct_gen.go | 5 + .../protobuf/internal/genid/type_gen.go | 38 + .../protobuf/internal/impl/api_export.go | 6 +- .../protobuf/internal/impl/checkinit.go | 2 +- .../protobuf/internal/impl/codec_extension.go | 44 +- .../protobuf/internal/impl/codec_field.go | 64 +- .../protobuf/internal/impl/codec_gen.go | 113 +- .../protobuf/internal/impl/codec_map.go | 15 +- .../internal/impl/codec_messageset.go | 22 + .../protobuf/internal/impl/codec_tables.go | 2 +- .../protobuf/internal/impl/convert.go | 2 +- .../protobuf/internal/impl/convert_list.go | 2 +- .../protobuf/internal/impl/convert_map.go | 2 +- .../protobuf/internal/impl/encode.go | 48 +- .../protobuf/internal/impl/extension.go | 8 +- .../protobuf/internal/impl/legacy_enum.go | 3 +- .../internal/impl/legacy_extension.go | 2 +- .../protobuf/internal/impl/legacy_file.go | 4 +- .../protobuf/internal/impl/legacy_message.go | 31 +- .../protobuf/internal/impl/message.go | 23 +- .../protobuf/internal/impl/message_reflect.go | 45 +- .../internal/impl/message_reflect_field.go | 2 +- .../internal/impl/message_reflect_gen.go | 146 +- .../protobuf/internal/impl/pointer_reflect.go | 42 +- .../protobuf/internal/impl/pointer_unsafe.go | 44 +- .../protobuf/internal/order/range.go | 4 +- .../protobuf/internal/strs/strings.go | 2 +- ...ings_unsafe.go => strings_unsafe_go120.go} | 4 +- .../internal/strs/strings_unsafe_go121.go | 74 + .../protobuf/internal/version/version.go | 4 +- .../protobuf/proto/decode.go | 4 +- .../google.golang.org/protobuf/proto/doc.go | 58 +- .../protobuf/proto/encode.go | 46 +- .../protobuf/proto/extension.go | 19 +- .../google.golang.org/protobuf/proto/merge.go | 2 +- .../protobuf/proto/messageset.go | 7 +- .../google.golang.org/protobuf/proto/proto.go | 18 +- .../google.golang.org/protobuf/proto/size.go | 2 + .../protobuf/reflect/protodesc/desc.go | 34 +- .../protobuf/reflect/protodesc/desc_init.go | 43 +- .../reflect/protodesc/desc_resolve.go | 9 +- .../reflect/protodesc/desc_validate.go | 75 +- .../protobuf/reflect/protodesc/editions.go | 145 + .../protobuf/reflect/protodesc/proto.go | 40 +- .../protobuf/reflect/protoreflect/proto.go | 87 +- .../reflect/protoreflect/source_gen.go | 85 +- .../protobuf/reflect/protoreflect/type.go | 56 +- .../protobuf/reflect/protoreflect/value.go | 24 +- .../reflect/protoreflect/value_equal.go | 8 +- .../reflect/protoreflect/value_pure.go | 14 +- .../reflect/protoreflect/value_union.go | 58 +- ...{value_unsafe.go => value_unsafe_go120.go} | 10 +- .../protoreflect/value_unsafe_go121.go | 87 + .../reflect/protoregistry/registry.go | 38 +- .../types/descriptorpb/descriptor.pb.go | 2594 +++++-- .../protobuf/types/dynamicpb/dynamic.go | 718 ++ .../protobuf/types/dynamicpb/types.go | 184 + .../types/gofeaturespb/go_features.pb.go | 181 + .../protobuf/types/known/anypb/any.pb.go | 7 +- .../protobuf/types/known/apipb/api.pb.go | 575 ++ .../types/known/durationpb/duration.pb.go | 374 + .../protobuf/types/known/emptypb/empty.pb.go | 166 + .../types/known/fieldmaskpb/field_mask.pb.go | 588 ++ .../sourcecontextpb/source_context.pb.go | 176 + .../types/known/structpb/struct.pb.go | 810 +++ .../types/known/timestamppb/timestamp.pb.go | 4 +- .../protobuf/types/known/typepb/type.pb.go | 990 +++ 
.../types/known/wrapperspb/wrappers.pb.go | 760 ++ .../protobuf/types/pluginpb/plugin.pb.go | 690 ++ vendor/modules.txt | 144 +- 883 files changed, 112333 insertions(+), 6662 deletions(-) create mode 100644 dialtesting/grpc.go create mode 100644 vendor/github.com/bufbuild/protocompile/.gitignore create mode 100644 vendor/github.com/bufbuild/protocompile/.golangci.yml create mode 100644 vendor/github.com/bufbuild/protocompile/.protoc_version create mode 100644 vendor/github.com/bufbuild/protocompile/LICENSE create mode 100644 vendor/github.com/bufbuild/protocompile/Makefile create mode 100644 vendor/github.com/bufbuild/protocompile/README.md create mode 100644 vendor/github.com/bufbuild/protocompile/ast/doc.go create mode 100644 vendor/github.com/bufbuild/protocompile/ast/enum.go create mode 100644 vendor/github.com/bufbuild/protocompile/ast/field.go create mode 100644 vendor/github.com/bufbuild/protocompile/ast/file.go create mode 100644 vendor/github.com/bufbuild/protocompile/ast/file_info.go create mode 100644 vendor/github.com/bufbuild/protocompile/ast/identifiers.go create mode 100644 vendor/github.com/bufbuild/protocompile/ast/message.go create mode 100644 vendor/github.com/bufbuild/protocompile/ast/no_source.go create mode 100644 vendor/github.com/bufbuild/protocompile/ast/node.go create mode 100644 vendor/github.com/bufbuild/protocompile/ast/options.go create mode 100644 vendor/github.com/bufbuild/protocompile/ast/ranges.go create mode 100644 vendor/github.com/bufbuild/protocompile/ast/service.go create mode 100644 vendor/github.com/bufbuild/protocompile/ast/values.go create mode 100644 vendor/github.com/bufbuild/protocompile/ast/walk.go create mode 100644 vendor/github.com/bufbuild/protocompile/compiler.go create mode 100644 vendor/github.com/bufbuild/protocompile/doc.go create mode 100644 vendor/github.com/bufbuild/protocompile/go.work create mode 100644 vendor/github.com/bufbuild/protocompile/go.work.sum create mode 100644 vendor/github.com/bufbuild/protocompile/internal/editions/editions.go create mode 100644 vendor/github.com/bufbuild/protocompile/internal/featuresext/cpp_features.protoset create mode 100644 vendor/github.com/bufbuild/protocompile/internal/featuresext/featuresext.go create mode 100644 vendor/github.com/bufbuild/protocompile/internal/featuresext/java_features.protoset create mode 100644 vendor/github.com/bufbuild/protocompile/internal/message_context.go create mode 100644 vendor/github.com/bufbuild/protocompile/internal/messageset/messageset.go create mode 100644 vendor/github.com/bufbuild/protocompile/internal/norace.go create mode 100644 vendor/github.com/bufbuild/protocompile/internal/options.go create mode 100644 vendor/github.com/bufbuild/protocompile/internal/race.go create mode 100644 vendor/github.com/bufbuild/protocompile/internal/tags.go create mode 100644 vendor/github.com/bufbuild/protocompile/internal/types.go create mode 100644 vendor/github.com/bufbuild/protocompile/internal/util.go create mode 100644 vendor/github.com/bufbuild/protocompile/linker/descriptors.go create mode 100644 vendor/github.com/bufbuild/protocompile/linker/doc.go create mode 100644 vendor/github.com/bufbuild/protocompile/linker/files.go create mode 100644 vendor/github.com/bufbuild/protocompile/linker/linker.go create mode 100644 vendor/github.com/bufbuild/protocompile/linker/pathkey_no_unsafe.go create mode 100644 vendor/github.com/bufbuild/protocompile/linker/pathkey_unsafe.go create mode 100644 vendor/github.com/bufbuild/protocompile/linker/pool.go create mode 100644 
vendor/github.com/bufbuild/protocompile/linker/resolve.go create mode 100644 vendor/github.com/bufbuild/protocompile/linker/symbols.go create mode 100644 vendor/github.com/bufbuild/protocompile/linker/validate.go create mode 100644 vendor/github.com/bufbuild/protocompile/options/options.go create mode 100644 vendor/github.com/bufbuild/protocompile/options/source_retention_options.go create mode 100644 vendor/github.com/bufbuild/protocompile/options/target_types.go create mode 100644 vendor/github.com/bufbuild/protocompile/parser/.gitignore create mode 100644 vendor/github.com/bufbuild/protocompile/parser/ast.go create mode 100644 vendor/github.com/bufbuild/protocompile/parser/clone.go create mode 100644 vendor/github.com/bufbuild/protocompile/parser/doc.go create mode 100644 vendor/github.com/bufbuild/protocompile/parser/errors.go create mode 100644 vendor/github.com/bufbuild/protocompile/parser/lexer.go create mode 100644 vendor/github.com/bufbuild/protocompile/parser/parser.go create mode 100644 vendor/github.com/bufbuild/protocompile/parser/proto.y create mode 100644 vendor/github.com/bufbuild/protocompile/parser/proto.y.go create mode 100644 vendor/github.com/bufbuild/protocompile/parser/result.go create mode 100644 vendor/github.com/bufbuild/protocompile/parser/validate.go create mode 100644 vendor/github.com/bufbuild/protocompile/protoutil/editions.go create mode 100644 vendor/github.com/bufbuild/protocompile/protoutil/protos.go create mode 100644 vendor/github.com/bufbuild/protocompile/reporter/errors.go create mode 100644 vendor/github.com/bufbuild/protocompile/reporter/reporter.go create mode 100644 vendor/github.com/bufbuild/protocompile/resolver.go create mode 100644 vendor/github.com/bufbuild/protocompile/sourceinfo/source_code_info.go create mode 100644 vendor/github.com/bufbuild/protocompile/std_imports.go create mode 100644 vendor/github.com/bufbuild/protocompile/supported_editions.go create mode 100644 vendor/github.com/bufbuild/protocompile/walk/walk.go create mode 100644 vendor/github.com/golang/protobuf/jsonpb/decode.go create mode 100644 vendor/github.com/golang/protobuf/jsonpb/encode.go create mode 100644 vendor/github.com/golang/protobuf/jsonpb/json.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/any.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/any/any.pb.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/doc.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/duration.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go delete mode 100644 vendor/github.com/google/uuid/.travis.yml create mode 100644 vendor/github.com/google/uuid/CHANGELOG.md create mode 100644 vendor/github.com/jhump/protoreflect/LICENSE create mode 100644 vendor/github.com/jhump/protoreflect/codec/codec.go create mode 100644 vendor/github.com/jhump/protoreflect/codec/decode_fields.go create mode 100644 vendor/github.com/jhump/protoreflect/codec/doc.go create mode 100644 vendor/github.com/jhump/protoreflect/codec/encode_fields.go create mode 100644 vendor/github.com/jhump/protoreflect/desc/cache.go create mode 100644 vendor/github.com/jhump/protoreflect/desc/convert.go create mode 100644 vendor/github.com/jhump/protoreflect/desc/descriptor.go create mode 100644 vendor/github.com/jhump/protoreflect/desc/descriptor_no_unsafe.go create mode 100644 
vendor/github.com/jhump/protoreflect/desc/descriptor_unsafe.go create mode 100644 vendor/github.com/jhump/protoreflect/desc/doc.go create mode 100644 vendor/github.com/jhump/protoreflect/desc/imports.go create mode 100644 vendor/github.com/jhump/protoreflect/desc/internal/proto3_optional.go create mode 100644 vendor/github.com/jhump/protoreflect/desc/internal/registry.go create mode 100644 vendor/github.com/jhump/protoreflect/desc/internal/source_info.go create mode 100644 vendor/github.com/jhump/protoreflect/desc/internal/util.go create mode 100644 vendor/github.com/jhump/protoreflect/desc/load.go create mode 100644 vendor/github.com/jhump/protoreflect/desc/protoparse/ast.go create mode 100644 vendor/github.com/jhump/protoreflect/desc/protoparse/ast/doc.go create mode 100644 vendor/github.com/jhump/protoreflect/desc/protoparse/ast/enum.go create mode 100644 vendor/github.com/jhump/protoreflect/desc/protoparse/ast/field.go create mode 100644 vendor/github.com/jhump/protoreflect/desc/protoparse/ast/file.go create mode 100644 vendor/github.com/jhump/protoreflect/desc/protoparse/ast/identifiers.go create mode 100644 vendor/github.com/jhump/protoreflect/desc/protoparse/ast/message.go create mode 100644 vendor/github.com/jhump/protoreflect/desc/protoparse/ast/no_source.go create mode 100644 vendor/github.com/jhump/protoreflect/desc/protoparse/ast/node.go create mode 100644 vendor/github.com/jhump/protoreflect/desc/protoparse/ast/options.go create mode 100644 vendor/github.com/jhump/protoreflect/desc/protoparse/ast/print.go create mode 100644 vendor/github.com/jhump/protoreflect/desc/protoparse/ast/ranges.go create mode 100644 vendor/github.com/jhump/protoreflect/desc/protoparse/ast/service.go create mode 100644 vendor/github.com/jhump/protoreflect/desc/protoparse/ast/source_pos.go create mode 100644 vendor/github.com/jhump/protoreflect/desc/protoparse/ast/values.go create mode 100644 vendor/github.com/jhump/protoreflect/desc/protoparse/ast/walk.go create mode 100644 vendor/github.com/jhump/protoreflect/desc/protoparse/doc.go create mode 100644 vendor/github.com/jhump/protoreflect/desc/protoparse/errors.go create mode 100644 vendor/github.com/jhump/protoreflect/desc/protoparse/parser.go create mode 100644 vendor/github.com/jhump/protoreflect/desc/protoparse/resolve_files.go create mode 100644 vendor/github.com/jhump/protoreflect/desc/protoparse/test-source-info.txt create mode 100644 vendor/github.com/jhump/protoreflect/desc/sourceinfo/locations.go create mode 100644 vendor/github.com/jhump/protoreflect/desc/sourceinfo/registry.go create mode 100644 vendor/github.com/jhump/protoreflect/desc/sourceinfo/wrappers.go create mode 100644 vendor/github.com/jhump/protoreflect/desc/wrap.go create mode 100644 vendor/github.com/jhump/protoreflect/dynamic/binary.go create mode 100644 vendor/github.com/jhump/protoreflect/dynamic/doc.go create mode 100644 vendor/github.com/jhump/protoreflect/dynamic/dynamic_message.go create mode 100644 vendor/github.com/jhump/protoreflect/dynamic/equal.go create mode 100644 vendor/github.com/jhump/protoreflect/dynamic/extension.go create mode 100644 vendor/github.com/jhump/protoreflect/dynamic/extension_registry.go create mode 100644 vendor/github.com/jhump/protoreflect/dynamic/grpcdynamic/stub.go create mode 100644 vendor/github.com/jhump/protoreflect/dynamic/indent.go create mode 100644 vendor/github.com/jhump/protoreflect/dynamic/json.go create mode 100644 vendor/github.com/jhump/protoreflect/dynamic/maps_1.11.go create mode 100644 
vendor/github.com/jhump/protoreflect/dynamic/maps_1.12.go create mode 100644 vendor/github.com/jhump/protoreflect/dynamic/merge.go create mode 100644 vendor/github.com/jhump/protoreflect/dynamic/message_factory.go create mode 100644 vendor/github.com/jhump/protoreflect/dynamic/text.go create mode 100644 vendor/github.com/jhump/protoreflect/internal/codec/buffer.go create mode 100644 vendor/github.com/jhump/protoreflect/internal/codec/decode.go create mode 100644 vendor/github.com/jhump/protoreflect/internal/codec/encode.go create mode 100644 vendor/github.com/jhump/protoreflect/internal/standard_files.go create mode 100644 vendor/github.com/jhump/protoreflect/internal/unrecognized.go delete mode 100644 vendor/golang.org/x/net/http2/go111.go delete mode 100644 vendor/golang.org/x/net/http2/go115.go delete mode 100644 vendor/golang.org/x/net/http2/go118.go delete mode 100644 vendor/golang.org/x/net/http2/not_go111.go delete mode 100644 vendor/golang.org/x/net/http2/not_go115.go delete mode 100644 vendor/golang.org/x/net/http2/not_go118.go create mode 100644 vendor/golang.org/x/net/http2/testsync.go create mode 100644 vendor/golang.org/x/net/internal/timeseries/timeseries.go create mode 100644 vendor/golang.org/x/net/trace/events.go create mode 100644 vendor/golang.org/x/net/trace/histogram.go create mode 100644 vendor/golang.org/x/net/trace/trace.go create mode 100644 vendor/golang.org/x/sync/semaphore/semaphore.go create mode 100644 vendor/golang.org/x/sys/unix/bpxsvc_zos.go create mode 100644 vendor/golang.org/x/sys/unix/bpxsvc_zos.s delete mode 100644 vendor/golang.org/x/sys/unix/epoll_zos.go delete mode 100644 vendor/golang.org/x/sys/unix/fstatfs_zos.go create mode 100644 vendor/golang.org/x/sys/unix/sockcmsg_zos.go create mode 100644 vendor/golang.org/x/sys/unix/symaddr_zos_s390x.s create mode 100644 vendor/golang.org/x/sys/unix/zsymaddr_zos_s390x.s delete mode 100644 vendor/golang.org/x/sys/windows/empty.s create mode 100644 vendor/google.golang.org/genproto/googleapis/rpc/LICENSE create mode 100644 vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go create mode 100644 vendor/google.golang.org/grpc/AUTHORS create mode 100644 vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md create mode 100644 vendor/google.golang.org/grpc/CONTRIBUTING.md create mode 100644 vendor/google.golang.org/grpc/GOVERNANCE.md create mode 100644 vendor/google.golang.org/grpc/LICENSE create mode 100644 vendor/google.golang.org/grpc/MAINTAINERS.md create mode 100644 vendor/google.golang.org/grpc/Makefile create mode 100644 vendor/google.golang.org/grpc/NOTICE.txt create mode 100644 vendor/google.golang.org/grpc/README.md create mode 100644 vendor/google.golang.org/grpc/SECURITY.md create mode 100644 vendor/google.golang.org/grpc/attributes/attributes.go create mode 100644 vendor/google.golang.org/grpc/backoff.go create mode 100644 vendor/google.golang.org/grpc/backoff/backoff.go create mode 100644 vendor/google.golang.org/grpc/balancer/balancer.go create mode 100644 vendor/google.golang.org/grpc/balancer/base/balancer.go create mode 100644 vendor/google.golang.org/grpc/balancer/base/base.go create mode 100644 vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go create mode 100644 vendor/google.golang.org/grpc/balancer/grpclb/state/state.go create mode 100644 vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go create mode 100644 vendor/google.golang.org/grpc/balancer_wrapper.go create mode 100644 vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go create 
mode 100644 vendor/google.golang.org/grpc/call.go create mode 100644 vendor/google.golang.org/grpc/channelz/channelz.go create mode 100644 vendor/google.golang.org/grpc/clientconn.go create mode 100644 vendor/google.golang.org/grpc/codec.go create mode 100644 vendor/google.golang.org/grpc/codegen.sh create mode 100644 vendor/google.golang.org/grpc/codes/code_string.go create mode 100644 vendor/google.golang.org/grpc/codes/codes.go create mode 100644 vendor/google.golang.org/grpc/connectivity/connectivity.go create mode 100644 vendor/google.golang.org/grpc/credentials/credentials.go create mode 100644 vendor/google.golang.org/grpc/credentials/insecure/insecure.go create mode 100644 vendor/google.golang.org/grpc/credentials/tls.go create mode 100644 vendor/google.golang.org/grpc/dialoptions.go create mode 100644 vendor/google.golang.org/grpc/doc.go create mode 100644 vendor/google.golang.org/grpc/encoding/encoding.go create mode 100644 vendor/google.golang.org/grpc/encoding/proto/proto.go create mode 100644 vendor/google.golang.org/grpc/grpclog/component.go create mode 100644 vendor/google.golang.org/grpc/grpclog/grpclog.go create mode 100644 vendor/google.golang.org/grpc/grpclog/logger.go create mode 100644 vendor/google.golang.org/grpc/grpclog/loggerv2.go create mode 100644 vendor/google.golang.org/grpc/interceptor.go create mode 100644 vendor/google.golang.org/grpc/internal/backoff/backoff.go create mode 100644 vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go create mode 100644 vendor/google.golang.org/grpc/internal/balancerload/load.go create mode 100644 vendor/google.golang.org/grpc/internal/binarylog/binarylog.go create mode 100644 vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go create mode 100644 vendor/google.golang.org/grpc/internal/binarylog/env_config.go create mode 100644 vendor/google.golang.org/grpc/internal/binarylog/method_logger.go create mode 100644 vendor/google.golang.org/grpc/internal/binarylog/sink.go create mode 100644 vendor/google.golang.org/grpc/internal/buffer/unbounded.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/funcs.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/id.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/logging.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/types.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/types_linux.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/util_linux.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go create mode 100644 vendor/google.golang.org/grpc/internal/credentials/credentials.go create mode 100644 vendor/google.golang.org/grpc/internal/credentials/spiffe.go create mode 100644 vendor/google.golang.org/grpc/internal/credentials/syscallconn.go create mode 100644 vendor/google.golang.org/grpc/internal/credentials/util.go create mode 100644 vendor/google.golang.org/grpc/internal/envconfig/envconfig.go create mode 100644 vendor/google.golang.org/grpc/internal/envconfig/observability.go create mode 100644 vendor/google.golang.org/grpc/internal/envconfig/xds.go create mode 100644 vendor/google.golang.org/grpc/internal/experimental.go create mode 100644 vendor/google.golang.org/grpc/internal/grpclog/grpclog.go create mode 100644 vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go create mode 100644 
vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcsync/event.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/compressor.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/metadata.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/method.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/regex.go create mode 100644 vendor/google.golang.org/grpc/internal/idle/idle.go create mode 100644 vendor/google.golang.org/grpc/internal/internal.go create mode 100644 vendor/google.golang.org/grpc/internal/metadata/metadata.go create mode 100644 vendor/google.golang.org/grpc/internal/pretty/pretty.go create mode 100644 vendor/google.golang.org/grpc/internal/resolver/config_selector.go create mode 100644 vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go create mode 100644 vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go create mode 100644 vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go create mode 100644 vendor/google.golang.org/grpc/internal/resolver/unix/unix.go create mode 100644 vendor/google.golang.org/grpc/internal/serviceconfig/duration.go create mode 100644 vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go create mode 100644 vendor/google.golang.org/grpc/internal/status/status.go create mode 100644 vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go create mode 100644 vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go create mode 100644 vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go create mode 100644 vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go create mode 100644 vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/controlbuf.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/defaults.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/flowcontrol.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/handler_server.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/http2_client.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/http2_server.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/http_util.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/logging.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/proxy.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/transport.go create mode 100644 vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go create mode 100644 vendor/google.golang.org/grpc/keepalive/keepalive.go create mode 100644 vendor/google.golang.org/grpc/metadata/metadata.go create mode 100644 vendor/google.golang.org/grpc/peer/peer.go create mode 
100644 vendor/google.golang.org/grpc/picker_wrapper.go create mode 100644 vendor/google.golang.org/grpc/pickfirst.go create mode 100644 vendor/google.golang.org/grpc/preloader.go create mode 100644 vendor/google.golang.org/grpc/regenerate.sh create mode 100644 vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go create mode 100644 vendor/google.golang.org/grpc/resolver/map.go create mode 100644 vendor/google.golang.org/grpc/resolver/resolver.go create mode 100644 vendor/google.golang.org/grpc/resolver_wrapper.go create mode 100644 vendor/google.golang.org/grpc/rpc_util.go create mode 100644 vendor/google.golang.org/grpc/server.go create mode 100644 vendor/google.golang.org/grpc/service_config.go create mode 100644 vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go create mode 100644 vendor/google.golang.org/grpc/shared_buffer_pool.go create mode 100644 vendor/google.golang.org/grpc/stats/handlers.go create mode 100644 vendor/google.golang.org/grpc/stats/stats.go create mode 100644 vendor/google.golang.org/grpc/status/status.go create mode 100644 vendor/google.golang.org/grpc/stream.go create mode 100644 vendor/google.golang.org/grpc/tap/tap.go create mode 100644 vendor/google.golang.org/grpc/trace.go create mode 100644 vendor/google.golang.org/grpc/version.go create mode 100644 vendor/google.golang.org/grpc/vet.sh create mode 100644 vendor/google.golang.org/protobuf/encoding/protojson/decode.go create mode 100644 vendor/google.golang.org/protobuf/encoding/protojson/doc.go create mode 100644 vendor/google.golang.org/protobuf/encoding/protojson/encode.go create mode 100644 vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go create mode 100644 vendor/google.golang.org/protobuf/internal/editiondefaults/defaults.go create mode 100644 vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb create mode 100644 vendor/google.golang.org/protobuf/internal/editionssupport/editions.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/json/decode.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/json/encode.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/editions.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go rename vendor/google.golang.org/protobuf/internal/strs/{strings_unsafe.go => strings_unsafe_go120.go} (96%) create mode 100644 vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/editions.go rename vendor/google.golang.org/protobuf/reflect/protoreflect/{value_unsafe.go => value_unsafe_go120.go} (93%) create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go create mode 100644 vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go create mode 100644 vendor/google.golang.org/protobuf/types/dynamicpb/types.go create mode 100644 vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go create mode 100644 vendor/google.golang.org/protobuf/types/known/apipb/api.pb.go create mode 100644 vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go create mode 100644 
vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go create mode 100644 vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go create mode 100644 vendor/google.golang.org/protobuf/types/known/sourcecontextpb/source_context.pb.go create mode 100644 vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go create mode 100644 vendor/google.golang.org/protobuf/types/known/typepb/type.pb.go create mode 100644 vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go create mode 100644 vendor/google.golang.org/protobuf/types/pluginpb/plugin.pb.go
diff --git a/dialtesting/grpc.go b/dialtesting/grpc.go
new file mode 100644
index 00000000..16ffeccb
--- /dev/null
+++ b/dialtesting/grpc.go
@@ -0,0 +1,137 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the MIT License.
+// This product includes software developed at Guance Cloud (https://www.guance.com/).
+// Copyright 2021-present Guance, Inc.
+
+package dialtesting
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"strings"
+
+	pdesc "github.com/jhump/protoreflect/desc"
+	"github.com/jhump/protoreflect/desc/protoparse"
+	"github.com/jhump/protoreflect/dynamic"
+	"github.com/jhump/protoreflect/dynamic/grpcdynamic"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials/insecure"
+)
+
+type GRPCTask struct {
+	*Task
+	Server      string            `json:"server"`
+	FullMethod  string            `json:"full_method"`
+	ProtoFiles  map[string][]byte `json:"protofiles"` // user's multiple .proto files
+	JSONRequest []byte            `json:"request"`    // user's gRPC request as JSON bytes
+
+	conn   *grpc.ClientConn
+	method *pdesc.MethodDescriptor
+
+	result []byte
+}
+
+func (t *GRPCTask) stop() error {
+	if err := t.conn.Close(); err != nil {
+		return fmt.Errorf("gRPC connection close: %w", err)
+	}
+	return nil
+}
+
+func (t *GRPCTask) init() error {
+	conn, err := grpc.Dial(t.Server, grpc.WithTransportCredentials(insecure.NewCredentials()))
+	if err != nil {
+		return err
+	}
+	t.conn = conn
+
+	if err := t.findMethod(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// findMethod resolves FullMethod to a method descriptor: prefer the
+// user-supplied proto files, and fall back to server reflection.
+func (t *GRPCTask) findMethod() error {
+	if len(t.ProtoFiles) == 0 {
+		return t.findMethodByReflection()
+	}
+
+	if err := t.findMethodAmongProtofiles(); err != nil {
+		if err := t.findMethodByReflection(); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (t *GRPCTask) findMethodByReflection() error {
+	// TODO
+	return fmt.Errorf("TODO")
+}
+
+func (t *GRPCTask) findMethodAmongProtofiles() error {
+	p := protoparse.Parser{
+		Accessor: protoparse.FileContentsFromMap(t.ProtoFiles),
+	}
+
+	desc, err := p.ParseFiles(getFileNames(t.ProtoFiles)...)
+	if err != nil {
+		return err
+	}
+
+	sepIdx := strings.LastIndex(t.FullMethod, ".")
+	if sepIdx == -1 {
+		return fmt.Errorf("invalid FullMethod: %q", t.FullMethod)
+	}
+
+	service := t.FullMethod[:sepIdx]
+	method := t.FullMethod[sepIdx+1:]
+
+	for _, fd := range desc {
+		if sd := fd.FindService(service); sd != nil {
+			if md := sd.FindMethodByName(method); md != nil {
+				t.method = md
+			}
+		}
+	}
+
+	if t.method == nil {
+		return fmt.Errorf("method %s not found among proto files", method)
+	}
+
+	return nil
+}
+
+func getFileNames(files map[string][]byte) []string {
+	arr := make([]string, 0, len(files))
+	for k := range files {
+		arr = append(arr, k)
+	}
+	return arr
+}
+
+func (t *GRPCTask) run() error {
+	// create dynamic gRPC request from the user-supplied JSON
+	msg := dynamic.NewMessage(t.method.GetInputType())
+	if err := msg.UnmarshalJSON(t.JSONRequest); err != nil {
+		return fmt.Errorf("invalid message for method %q: %w", t.method.GetName(), err)
+	}
+
+	stub := grpcdynamic.NewStub(t.conn)
+	resp, err := stub.InvokeRpc(context.Background(), t.method, msg)
+	if err != nil {
+		// dialtest failed
+		return err
+	}
+
+	// dial test message
+	if j, err := json.Marshal(resp); err != nil {
+		return err
+	} else {
+		t.result = j
+	}
+
+	return nil
+}
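The `findMethodByReflection` stub above is left as a TODO in this patch. For illustration only, a minimal sketch of how it might later be filled in with the server-reflection client from the same protoreflect module (`github.com/jhump/protoreflect/grpcreflect`, which is not vendored by this patch) could look like the following; the function name, timeout, and extra imports (`time`, `grpcreflect`) are assumptions for the sketch, not part of the change:

// Sketch only: assumes github.com/jhump/protoreflect/grpcreflect is available
// and that the target server enables the gRPC reflection service.
func (t *GRPCTask) findMethodByReflectionSketch() error {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// NewClientAuto negotiates v1 vs. v1alpha of the reflection service.
	client := grpcreflect.NewClientAuto(ctx, t.conn)
	defer client.Reset()

	sepIdx := strings.LastIndex(t.FullMethod, ".")
	if sepIdx == -1 {
		return fmt.Errorf("invalid FullMethod: %q", t.FullMethod)
	}

	// Resolve the service descriptor from the server, then look up the method.
	sd, err := client.ResolveService(t.FullMethod[:sepIdx])
	if err != nil {
		return fmt.Errorf("resolve service via reflection: %w", err)
	}

	md := sd.FindMethodByName(t.FullMethod[sepIdx+1:])
	if md == nil {
		return fmt.Errorf("method %q not found via reflection", t.FullMethod)
	}

	t.method = md
	return nil
}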
diff --git a/go.mod b/go.mod index 615c8b23..89583c8b 100644 --- a/go.mod +++ b/go.mod @@ -16,6 +16,7 @@ require ( github.com/gorilla/websocket v1.5.0 github.com/influxdata/influxdb1-client v0.0.0-20220302092344-a9ab5670611c github.com/influxdata/line-protocol/v2 v2.2.1 + github.com/jhump/protoreflect v1.16.0 github.com/klauspost/compress v1.16.7 github.com/pierrec/lz4/v4 v4.1.18 github.com/prometheus/client_golang v1.16.0 @@ -28,9 +29,10 @@ require ( github.com/tidwall/wal v1.1.7 go.uber.org/zap v1.23.0 golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 - golang.org/x/net v0.16.0 - golang.org/x/sys v0.13.0 - google.golang.org/protobuf v1.31.0 + golang.org/x/net v0.25.0 + golang.org/x/sys v0.20.0 + google.golang.org/grpc v1.61.0 + google.golang.org/protobuf v1.34.2 gopkg.in/CodapeWild/dd-trace-go.v1 v1.35.17 gopkg.in/natefinch/lumberjack.v2 v2.0.0 gopkg.in/yaml.v2 v2.4.0 @@ -47,9 +49,9 @@ require ( github.com/antchfx/xmlquery v1.3.18 // indirect github.com/antchfx/xpath v1.2.4 // indirect github.com/araddon/dateparse v0.0.0-20201001162425-8aadafed4dc4 // indirect - github.com/avast/retry-go v3.0.0+incompatible // indirect github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f // indirect github.com/beorn7/perks v1.0.1 // indirect + github.com/bufbuild/protocompile v0.14.1 // indirect github.com/bytedance/sonic v1.8.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect @@ -64,12 +66,11 @@ require ( github.com/gobwas/pool v0.2.1 // indirect github.com/goccy/go-json v0.10.3 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.3 // indirect - github.com/google/uuid v1.3.0 // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/uuid v1.4.0 // indirect github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-retryablehttp v0.7.4 // indirect - github.com/ip2location/ip2location-go v8.3.0+incompatible // indirect github.com/itchyny/timefmt-go v0.1.5 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/kballard/go-shellquote
v0.0.0-20180428030007-95032a82bc51 // indirect @@ -80,8 +81,6 @@ require ( github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/mssola/user_agent v0.6.0 // indirect - github.com/oschwald/geoip2-golang v1.9.0 // indirect - github.com/oschwald/maxminddb-golang v1.11.0 // indirect github.com/outcaste-io/ristretto v0.2.1 // indirect github.com/pelletier/go-toml/v2 v2.0.6 // indirect github.com/philhofer/fwd v1.1.1 // indirect @@ -100,13 +99,14 @@ require ( go.uber.org/atomic v1.11.0 // indirect go.uber.org/multierr v1.6.0 // indirect golang.org/x/arch v0.0.0-20210923205945-b76863e36670 // indirect - golang.org/x/crypto v0.14.0 // indirect + golang.org/x/crypto v0.23.0 // indirect golang.org/x/mod v0.13.0 // indirect - golang.org/x/sync v0.4.0 // indirect - golang.org/x/text v0.13.0 // indirect + golang.org/x/sync v0.8.0 // indirect + golang.org/x/text v0.15.0 // indirect golang.org/x/time v0.3.0 // indirect golang.org/x/tools v0.14.0 // indirect golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect lukechampine.com/uint128 v1.2.0 // indirect modernc.org/cc/v3 v3.40.0 // indirect diff --git a/go.sum b/go.sum index e4c9e00f..ee282956 100644 --- a/go.sum +++ b/go.sum @@ -68,18 +68,12 @@ github.com/DataDog/gostackparse v0.5.0/go.mod h1:lTfqcJKqS9KnXQGnyQMCugq3u1FP6UZ github.com/DataDog/sketches-go v1.0.0/go.mod h1:O+XkJHWk9w4hDwY2ZUDU31ZC9sNYlYo8DiFsxjYeo1k= github.com/DataDog/sketches-go v1.2.1 h1:qTBzWLnZ3kM2kw39ymh6rMcnN+5VULwFs++lEYUUsro= github.com/DataDog/sketches-go v1.2.1/go.mod h1:1xYmPLY1So10AwxV6MJV0J53XVH+WL9Ad1KetxVivVI= -github.com/GuanceCloud/grok v1.1.4 h1:+w/U5a54cgY0O+dvfcKc2qD3JuhmaS8Hi29BM4QMYts= -github.com/GuanceCloud/grok v1.1.4/go.mod h1:AHkJZYf7Qbo1FTZT6htdyScpICpgnkQ5+Hc0EmA88vM= github.com/GuanceCloud/grok v1.1.5-0.20250416104424-34917bd63e69 h1:LRwIH9nsCsVXTNVe6G/27n14+YWA9+Xg5DVWFNtMHcc= github.com/GuanceCloud/grok v1.1.5-0.20250416104424-34917bd63e69/go.mod h1:AHkJZYf7Qbo1FTZT6htdyScpICpgnkQ5+Hc0EmA88vM= github.com/GuanceCloud/influxdb1-client v0.1.9 h1:7RyOYJMbj+NOPlKoq/Zwh4Lo/jbQgqgs+sKwo/5/6ao= github.com/GuanceCloud/influxdb1-client v0.1.9/go.mod h1:4HC4b/O653/ezBiHMPBnHYnHCCfsNT2LvCr7wNLngw4= -github.com/GuanceCloud/pipeline-go v1.0.1 h1:f1fK2I6HZvvgYwMV54YNEoKMZY4qhOzlGHVys6N6+Fw= -github.com/GuanceCloud/pipeline-go v1.0.1/go.mod h1:gZG7SQzQNHHss91WIb3mo4Xp+E+TmB+3nP4SBkUCTlM= github.com/GuanceCloud/pipeline-go v1.0.9-0.20250804083758-0b4dd0f48771 h1:70uR05fe7QevdIc6LFGh3hgwfJoiGLwAVuIGoGhnNW4= github.com/GuanceCloud/pipeline-go v1.0.9-0.20250804083758-0b4dd0f48771/go.mod h1:ImLVtod1YFBrmFIdhsyf/OWwnwBbXcPBldCjqB93020= -github.com/GuanceCloud/platypus v0.3.1 h1:AL41baV38eL9daadQ3ZP17HnBRuu7m4cfJSNuIGaKP8= -github.com/GuanceCloud/platypus v0.3.1/go.mod h1:H9Sol/SI+A9ppJUohdn9m/UA0aiNvh+G0/GnY6IVDnI= github.com/GuanceCloud/platypus v0.3.3-0.20250528074826-e3130ff5a05c h1:DE7qQ8Vw3+/sbIiRZ+43m9X0cNtfS+NrmVU0htylAao= github.com/GuanceCloud/platypus v0.3.3-0.20250528074826-e3130ff5a05c/go.mod h1:H9Sol/SI+A9ppJUohdn9m/UA0aiNvh+G0/GnY6IVDnI= github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= @@ -120,8 +114,6 @@ github.com/armon/go-metrics v0.3.3/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4 github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod 
h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/avast/retry-go v3.0.0+incompatible h1:4SOWQ7Qs+oroOTQOYnAHqelpCO0biHSxpiH9JdtuBj0= -github.com/avast/retry-go v3.0.0+incompatible/go.mod h1:XtSnn+n/sHqQIpZ10K1qAevBhOOCWBLXXy3hyiqqBrY= github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.29.11/go.mod h1:1KvfttTE3SPKMpo8g2c6jL3ZKfXtFvKscTgahTma5Xg= github.com/aws/aws-sdk-go v1.30.27/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= @@ -149,6 +141,8 @@ github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dR github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b/go.mod h1:H0wQNHz2YrLsuXOZozoeDmnHXkNCRmMW0gwFWDfEZDA= github.com/brianvoe/gofakeit/v6 v6.28.0 h1:Xib46XXuQfmlLS2EXRuJpqcw8St6qSZz75OUo0tgAW4= github.com/brianvoe/gofakeit/v6 v6.28.0/go.mod h1:Xj58BMSnFqcn/fAQeSK+/PLtC5kSb7FJIq4JyGa8vEs= +github.com/bufbuild/protocompile v0.14.1 h1:iA73zAf/fyljNjQKwYzUHD6AD4R8KMasmwa/FBatYVw= +github.com/bufbuild/protocompile v0.14.1/go.mod h1:ppVdAIhbr2H8asPk6k4pY7t9zB1OU5DoEw9xY/FUi1c= github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM= github.com/bytedance/sonic v1.8.0 h1:ea0Xadu+sHlu7x5O3gKhRpQ1IKiMrSiHttPF0ybECuA= github.com/bytedance/sonic v1.8.0/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U= @@ -365,8 +359,8 @@ github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -386,7 +380,7 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= @@ -415,8 +409,8 @@ github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm4 github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= +github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= @@ -495,8 +489,6 @@ github.com/influxdata/line-protocol/v2 v2.0.0-20210312151457-c52fdecb625a/go.mod github.com/influxdata/line-protocol/v2 v2.1.0/go.mod h1:QKw43hdUBg3GTk2iC3iyCxksNj7PX9aUSeYOYE/ceHY= github.com/influxdata/line-protocol/v2 v2.2.1 h1:EAPkqJ9Km4uAxtMRgUubJyqAr6zgWM0dznKMLRauQRE= github.com/influxdata/line-protocol/v2 v2.2.1/go.mod h1:DmB3Cnh+3oxmG6LOBIxce4oaL4CPj3OmMPgvauXh+tM= -github.com/ip2location/ip2location-go v8.3.0+incompatible h1:QwUE+FlSbo6bjOWZpv2Grb57vJhWYFNPyBj2KCvfWaM= -github.com/ip2location/ip2location-go v8.3.0+incompatible/go.mod h1:3JUY1TBjTx1GdA7oRT7Zeqfc0bg3lMMuU5lXmzdpuME= github.com/itchyny/timefmt-go v0.1.5 h1:G0INE2la8S6ru/ZI5JecgyzbbJNs5lG1RcBqa7Jm6GE= github.com/itchyny/timefmt-go v0.1.5/go.mod h1:nEP7L+2YmAbT2kZ2HfSs1d8Xtw9LY8D2stDBckWakZ8= github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= @@ -541,6 +533,8 @@ github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/U github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= github.com/jcmturner/gokrb5/v8 v8.4.2/go.mod h1:sb+Xq/fTY5yktf/VxLsE3wlfPqQjp0aWNYyvBVK62bc= github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= +github.com/jhump/protoreflect v1.16.0 h1:54fZg+49widqXYQ0b+usAFHbMkBGR4PpXrsHc8+TBDg= +github.com/jhump/protoreflect v1.16.0/go.mod h1:oYPd7nPvcBw/5wlDfm/AVmU9zH9BgqGCI469pGxfj/8= github.com/jinzhu/gorm v1.9.16/go.mod h1:G3LB3wezTOWM2ITLzPxEXgSkOXAntiLHS7UdBefADcs= github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= github.com/jinzhu/now v1.0.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= @@ -689,10 +683,6 @@ github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.m github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= -github.com/oschwald/geoip2-golang v1.9.0 h1:uvD3O6fXAXs+usU+UGExshpdP13GAqp4GBrzN7IgKZc= -github.com/oschwald/geoip2-golang v1.9.0/go.mod h1:BHK6TvDyATVQhKNbQBdrj9eAvuwOMi2zSFXizL3K81Y= -github.com/oschwald/maxminddb-golang v1.11.0 h1:aSXMqYR/EPNjGE8epgqwDay+P30hCBZIveY0WZbAWh0= -github.com/oschwald/maxminddb-golang v1.11.0/go.mod h1:YmVI+H0zh3ySFR3w+oz8PCfglAFj3PuCmui13+P9zDg= github.com/outcaste-io/ristretto v0.2.1 h1:KCItuNIGJZcursqHr3ghO7fc5ddZLEHspL9UR0cQM64= github.com/outcaste-io/ristretto v0.2.1/go.mod h1:W8HywhmtlopSB1jeMg3JtdIhf+DYkLAr0VN/s4+MHac= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod 
h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= @@ -923,8 +913,8 @@ golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= -golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1020,8 +1010,8 @@ golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20210913180222-943fd674d43e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.16.0 h1:7eBu7KsSvFDtSXUIDbh3aqlK4DPsZ1rByC8PFfBThos= -golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1050,8 +1040,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ= -golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1136,8 +1126,8 @@ golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -1153,8 +1143,8 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= -golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1332,6 +1322,8 @@ google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEc google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210921142501-181ce0d877f6/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17 h1:Jyp0Hsi0bmHXG6k9eATXoYtjd6e2UzZ1SCn/wIupY14= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:oQ5rr10WTTMvP4A36n8JpR1OrO1BEiV4f78CneXZxkA= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -1360,6 +1352,8 @@ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.61.0 h1:TOvOcuXn30kRao+gfcvsebNEa5iZIiLkisYEkf7R7o0= +google.golang.org/grpc v1.61.0/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -1374,8 +1368,8 @@ google.golang.org/protobuf 
v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/CodapeWild/dd-trace-go.v1 v1.35.17 h1:+FLF3KjV2Syb0WlJ60dYTww7aPOJmOA5l0/Kg4AtkSo= gopkg.in/CodapeWild/dd-trace-go.v1 v1.35.17/go.mod h1:sxFF5v+R56bO5bE/mN0K39GXaODxNiMrUM9K9xSjs+Q= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= diff --git a/vendor/github.com/bufbuild/protocompile/.gitignore b/vendor/github.com/bufbuild/protocompile/.gitignore new file mode 100644 index 00000000..65b3b16c --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/.gitignore @@ -0,0 +1,3 @@ +*.iml +.idea/ +/.tmp/ diff --git a/vendor/github.com/bufbuild/protocompile/.golangci.yml b/vendor/github.com/bufbuild/protocompile/.golangci.yml new file mode 100644 index 00000000..0f00b31d --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/.golangci.yml @@ -0,0 +1,105 @@ +linters-settings: + errcheck: + check-type-assertions: true + forbidigo: + forbid: + - '^fmt\.Print' + - '^log\.' + - '^print$' + - '^println$' + - '^panic$' + gci: + # Section configuration to compare against. + # Section names are case-insensitive and may contain parameters in (). + # The default order of sections is `standard > default > custom > blank > dot`, + # If `custom-order` is `true`, it follows the order of `sections` option. + # Default: ["standard", "default"] + sections: + - standard # Standard section: captures all standard packages. + - default # Default section: contains all imports that could not be matched to another section type. + - prefix(github.com/bufbuild/protocompile) # Custom section: groups all imports with the specified Prefix. + godox: + # TODO, OPT, etc. comments are fine to commit. Use FIXME comments for + # temporary hacks, and use godox to prevent committing them. + keywords: [FIXME] + govet: + enable: + - fieldalignment + varnamelen: + ignore-decls: + - T any + - i int + - wg sync.WaitGroup +linters: + enable-all: true + disable: + # TODO: TCN-350 - initial exclusions for failing linters. + # Should enable all of these? 
+ - depguard + - dupl + - errname + - errorlint + - exhaustive + - exhaustruct + - forbidigo + - forcetypeassert + - gochecknoglobals + - gochecknoinits + - goconst + - gocyclo + - err113 + - interfacebloat + - nestif + - nilerr + - nilnil + - nonamedreturns + - thelper + - varnamelen + # Other disabled linters + - cyclop # covered by gocyclo + - execinquery # deprecated in golangci v1.58.0 + - funlen # rely on code review to limit function length + - gocognit # dubious "cognitive overhead" quantification + - gofumpt # prefer standard gofmt + - gomnd # some unnamed constants are okay + - inamedparam # named params in interface signatures are not always necessary + - ireturn # "accept interfaces, return structs" isn't ironclad + - lll # don't want hard limits for line length + - maintidx # covered by gocyclo + - mnd # some unnamed constants are okay + - nlreturn # generous whitespace violates house style + - protogetter # lots of false positives: can't use getter to check if field is present + - rowserrcheck # no SQL code in protocompile + - sqlclosecheck # no SQL code in protocompile + - testpackage # internal tests are fine + - wastedassign # not supported with generics + - wrapcheck # don't _always_ need to wrap errors + - wsl # generous whitespace violates house style +issues: + exclude-dirs-use-default: false + exclude-files: + - ".*\\.y\\.go$" + exclude: + # Don't ban use of fmt.Errorf to create new errors, but the remaining + # checks from err113 are useful. + - "do not define dynamic errors.*" + exclude-rules: + # Benchmarks can't be run in parallel + - path: benchmark_test\.go + linters: + - paralleltest + # dupword reports several errors in .proto test fixtures + # gosec reports a few minor issues in tests + - path: _test\.go + linters: + - dupword + - gosec + # exclude field alignment linter in tests + - path: _test\.go + text: "fieldalignment:" + linters: + - govet + # exclude fieldalignment "pointer bytes" failures + - text: "pointer bytes" + linters: + - govet diff --git a/vendor/github.com/bufbuild/protocompile/.protoc_version b/vendor/github.com/bufbuild/protocompile/.protoc_version new file mode 100644 index 00000000..a0d6856d --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/.protoc_version @@ -0,0 +1 @@ +27.0 diff --git a/vendor/github.com/bufbuild/protocompile/LICENSE b/vendor/github.com/bufbuild/protocompile/LICENSE new file mode 100644 index 00000000..553cbbf1 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2020-2024 Buf Technologies, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/bufbuild/protocompile/Makefile b/vendor/github.com/bufbuild/protocompile/Makefile new file mode 100644 index 00000000..6ec2e78e --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/Makefile @@ -0,0 +1,201 @@ +# See https://tech.davis-hansson.com/p/make/ +SHELL := bash +.DELETE_ON_ERROR: +.SHELLFLAGS := -eu -o pipefail -c +.DEFAULT_GOAL := all +MAKEFLAGS += --warn-undefined-variables +MAKEFLAGS += --no-builtin-rules +MAKEFLAGS += --no-print-directory +BIN ?= $(abspath .tmp/bin) +CACHE := $(abspath .tmp/cache) +COPYRIGHT_YEARS := 2020-2024 +LICENSE_IGNORE := -e /testdata/ +# Set to use a different compiler. For example, `GO=go1.18rc1 make test`. +GO ?= go +TOOLS_MOD_DIR := ./internal/tools +UNAME_OS := $(shell uname -s) +UNAME_ARCH := $(shell uname -m) +PATH_SEP ?= ":" + +PROTOC_VERSION := $(shell cat ./.protoc_version) +# For release candidates, the download artifact has a dash between "rc" and the number even +# though the version tag does not :( +PROTOC_ARTIFACT_VERSION := $(shell echo $(PROTOC_VERSION) | sed -E 's/-rc([0-9]+)$$/-rc-\1/g') +PROTOC_DIR := $(abspath $(CACHE)/protoc/$(PROTOC_VERSION)) +PROTOC := $(PROTOC_DIR)/bin/protoc + +LOWER_UNAME_OS := $(shell echo $(UNAME_OS) | tr A-Z a-z) +ifeq ($(LOWER_UNAME_OS),darwin) + PROTOC_OS := osx + ifeq ($(UNAME_ARCH),arm64) + PROTOC_ARCH := aarch_64 + else + PROTOC_ARCH := x86_64 + endif +else + PROTOC_OS := $(LOWER_UNAME_OS) + PROTOC_ARCH := $(UNAME_ARCH) +endif +PROTOC_ARTIFACT_SUFFIX ?= $(PROTOC_OS)-$(PROTOC_ARCH) + +.PHONY: help +help: ## Describe useful make targets + @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "%-30s %s\n", $$1, $$2}' + +.PHONY: all +all: ## Build, test, and lint (default) + $(MAKE) test + $(MAKE) lint + +.PHONY: clean +clean: ## Delete intermediate build artifacts + @# -X only removes untracked files, -d recurses into directories, -f actually removes files/dirs + git clean -Xdf + +.PHONY: test +test: build ## Run unit tests + $(GO) test -race -cover ./... + $(GO) test -tags protolegacy ./... + $(GO) test -tags purego ./... + cd internal/benchmarks && SKIP_DOWNLOAD_GOOGLEAPIS=true $(GO) test -race -cover ./... + +.PHONY: benchmarks +benchmarks: build ## Run benchmarks + cd internal/benchmarks && $(GO) test -bench=. -benchmem -v ./... + +.PHONY: build +build: generate ## Build all packages + $(GO) build ./... + +.PHONY: install +install: ## Install all binaries + $(GO) install ./... + +.PHONY: lint +lint: $(BIN)/golangci-lint ## Lint Go + $(GO) vet ./... ./internal/benchmarks/... + $(BIN)/golangci-lint run + cd internal/benchmarks && $(BIN)/golangci-lint run + +.PHONY: lintfix +lintfix: $(BIN)/golangci-lint ## Automatically fix some lint errors + $(BIN)/golangci-lint run --fix + cd internal/benchmarks && $(BIN)/golangci-lint run --fix + +.PHONY: generate +generate: $(BIN)/license-header $(BIN)/goyacc test-descriptors ext-features-descriptors ## Regenerate code and licenses + PATH="$(BIN)$(PATH_SEP)$(PATH)" $(GO) generate ./... + @# We want to operate on a list of modified and new files, excluding + @# deleted and ignored files. git-ls-files can't do this alone. comm -23 takes + @# two files and prints the union, dropping lines common to both (-3) and + @# those only in the second file (-2). We make one git-ls-files call for + @# the modified, cached, and new (--others) files, and a second for the + @# deleted files. 
+ comm -23 \ + <(git ls-files --cached --modified --others --no-empty-directory --exclude-standard | sort -u | grep -v $(LICENSE_IGNORE) ) \ + <(git ls-files --deleted | sort -u) | \ + xargs $(BIN)/license-header \ + --license-type apache \ + --copyright-holder "Buf Technologies, Inc." \ + --year-range "$(COPYRIGHT_YEARS)" + +.PHONY: upgrade +upgrade: ## Upgrade dependencies + go get -u -t ./... && go mod tidy -v + +.PHONY: checkgenerate +checkgenerate: + @# Used in CI to verify that `make generate` doesn't produce a diff. + test -z "$$(git status --porcelain | tee /dev/stderr)" + +$(BIN)/license-header: internal/tools/go.mod internal/tools/go.sum + @mkdir -p $(@D) + cd $(TOOLS_MOD_DIR) && \ + GOWORK=off $(GO) build -o $@ github.com/bufbuild/buf/private/pkg/licenseheader/cmd/license-header + +$(BIN)/golangci-lint: internal/tools/go.mod internal/tools/go.sum + @mkdir -p $(@D) + cd $(TOOLS_MOD_DIR) && \ + GOWORK=off $(GO) build -o $@ github.com/golangci/golangci-lint/cmd/golangci-lint + +$(BIN)/goyacc: internal/tools/go.mod internal/tools/go.sum + @mkdir -p $(@D) + cd $(TOOLS_MOD_DIR) && \ + GOWORK=off $(GO) build -o $@ golang.org/x/tools/cmd/goyacc + +$(CACHE)/protoc-$(PROTOC_VERSION).zip: + @mkdir -p $(@D) + curl -o $@ -fsSL https://github.com/protocolbuffers/protobuf/releases/download/v$(PROTOC_VERSION)/protoc-$(PROTOC_ARTIFACT_VERSION)-$(PROTOC_ARTIFACT_SUFFIX).zip + +.PHONY: protoc +protoc: $(PROTOC) + +$(PROTOC): $(CACHE)/protoc-$(PROTOC_VERSION).zip + @mkdir -p $(@D) + unzip -o -q $< -d $(PROTOC_DIR) && \ + touch $@ + +.PHONY: wellknownimports +wellknownimports: $(PROTOC) $(sort $(wildcard $(PROTOC_DIR)/include/google/protobuf/*.proto)) $(sort $(wildcard $(PROTOC_DIR)/include/google/protobuf/*/*.proto)) + @rm -rf wellknownimports/google 2>/dev/null && true + @mkdir -p wellknownimports/google/protobuf/compiler + cp -R $(PROTOC_DIR)/include/google/protobuf/*.proto wellknownimports/google/protobuf + cp -R $(PROTOC_DIR)/include/google/protobuf/compiler/*.proto wellknownimports/google/protobuf/compiler + +internal/testdata/all.protoset: $(PROTOC) $(sort $(wildcard internal/testdata/*.proto)) + cd $(@D) && $(PROTOC) --descriptor_set_out=$(@F) --include_imports -I. $(filter-out protoc,$(^F)) + +internal/testdata/desc_test_complex.protoset: $(PROTOC) internal/testdata/desc_test_complex.proto + cd $(@D) && $(PROTOC) --descriptor_set_out=$(@F) --include_imports -I. $(filter-out protoc,$(^F)) + +internal/testdata/desc_test_defaults.protoset: $(PROTOC) internal/testdata/desc_test_defaults.proto + cd $(@D) && $(PROTOC) --descriptor_set_out=$(@F) --include_imports -I. $(filter-out protoc,$(^F)) + +internal/testdata/desc_test_proto3_optional.protoset: $(PROTOC) internal/testdata/desc_test_proto3_optional.proto + cd $(@D) && $(PROTOC) --descriptor_set_out=$(@F) --include_imports -I. $(filter-out protoc,$(^F)) + +internal/testdata/descriptor_impl_tests.protoset: $(PROTOC) internal/testdata/desc_test2.proto internal/testdata/desc_test_complex.proto internal/testdata/desc_test_defaults.proto internal/testdata/desc_test_proto3.proto internal/testdata/desc_test_proto3_optional.proto + cd $(@D) && $(PROTOC) --descriptor_set_out=$(@F) --include_imports -I. $(filter-out protoc,$(^F)) + +internal/testdata/descriptor_editions_impl_tests.protoset: $(PROTOC) internal/testdata/editions/all_default_features.proto internal/testdata/editions/features_with_overrides.proto internal/testdata/editions/file_default_delimited.proto + cd $(@D)/editions && $(PROTOC) --descriptor_set_out=../$(@F) --include_imports -I. 
$(filter-out protoc,$(^F)) + +internal/testdata/editions/all.protoset: $(PROTOC) $(sort $(wildcard internal/testdata/editions/*.proto)) + cd $(@D) && $(PROTOC) --descriptor_set_out=$(@F) --include_imports -I. $(filter-out protoc,$(^F)) + +internal/testdata/source_info.protoset: $(PROTOC) internal/testdata/desc_test_options.proto internal/testdata/desc_test_comments.proto internal/testdata/desc_test_complex.proto + cd $(@D) && $(PROTOC) --descriptor_set_out=$(@F) --include_source_info -I. $(filter-out protoc,$(^F)) + +internal/testdata/options/options.protoset: $(PROTOC) internal/testdata/options/options.proto + cd $(@D) && $(PROTOC) --descriptor_set_out=$(@F) -I. $(filter-out protoc,$(^F)) + +internal/testdata/options/test.protoset: $(PROTOC) internal/testdata/options/test.proto + cd $(@D) && $(PROTOC) --descriptor_set_out=$(@F) -I. $(filter-out protoc,$(^F)) + +internal/testdata/options/test_proto3.protoset: $(PROTOC) internal/testdata/options/test_proto3.proto + cd $(@D) && $(PROTOC) --descriptor_set_out=$(@F) -I. $(filter-out protoc,$(^F)) + +internal/testdata/options/test_editions.protoset: $(PROTOC) internal/testdata/options/test_editions.proto + cd $(@D) && $(PROTOC) --descriptor_set_out=$(@F) -I. $(filter-out protoc,$(^F)) + +.PHONY: test-descriptors +test-descriptors: internal/testdata/all.protoset +test-descriptors: internal/testdata/desc_test_complex.protoset +test-descriptors: internal/testdata/desc_test_defaults.protoset +test-descriptors: internal/testdata/desc_test_proto3_optional.protoset +test-descriptors: internal/testdata/descriptor_impl_tests.protoset +test-descriptors: internal/testdata/descriptor_editions_impl_tests.protoset +test-descriptors: internal/testdata/editions/all.protoset +test-descriptors: internal/testdata/source_info.protoset +test-descriptors: internal/testdata/options/options.protoset +test-descriptors: internal/testdata/options/test.protoset +test-descriptors: internal/testdata/options/test_proto3.protoset +test-descriptors: internal/testdata/options/test_editions.protoset + +internal/featuresext/cpp_features.protoset: $(PROTOC) + cd $(@D) && $(PROTOC) --descriptor_set_out=$(@F) google/protobuf/cpp_features.proto +internal/featuresext/java_features.protoset: $(PROTOC) + cd $(@D) && $(PROTOC) --descriptor_set_out=$(@F) google/protobuf/java_features.proto + +.PHONY: ext-features-descriptors +ext-features-descriptors: internal/featuresext/cpp_features.protoset internal/featuresext/java_features.protoset diff --git a/vendor/github.com/bufbuild/protocompile/README.md b/vendor/github.com/bufbuild/protocompile/README.md new file mode 100644 index 00000000..9d873330 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/README.md @@ -0,0 +1,91 @@ +![The Buf logo](./.github/buf-logo.svg) + +# Protocompile + +[![Build](https://github.com/bufbuild/protocompile/actions/workflows/ci.yaml/badge.svg?branch=main)](https://github.com/bufbuild/protocompile/actions/workflows/ci.yaml) +[![Report Card](https://goreportcard.com/badge/github.com/bufbuild/protocompile)](https://goreportcard.com/report/github.com/bufbuild/protocompile) +[![GoDoc](https://pkg.go.dev/badge/github.com/bufbuild/protocompile.svg)](https://pkg.go.dev/github.com/bufbuild/protocompile) + +This repo contains a parsing/linking engine for Protocol Buffers, written in pure Go. It is suitable as an alternative +to `protoc` (Google's official reference compiler for Protocol Buffers). This is the compiler that powers [Buf](https://buf.build) +and its bevy of tools. 
+ +This repo is also the spiritual successor to the [`github.com/jhump/protoreflect/desc/protoparse`](https://godoc.org/github.com/jhump/protoreflect/desc/protoparse) +package. If you are looking for a newer version of `protoparse` that natively works with the newer Protobuf runtime +API for Go (`google.golang.org/protobuf`), you have found it! + +## Protocol Buffers + +If you've come across this repo but don't know what Protocol Buffers are, you might acquaint yourself with the [official +documentation](https://developers.google.com/protocol-buffers). Protocol Buffers, or Protobuf for short, is an IDL for +describing APIs and data structures and also a binary encoding format for efficiently transmitting and storing that +data. + +If you want to know more about the language itself, which is what this repo implements, take a look at Buf's +[Protobuf Guide](https://protobuf.com), which includes a very detailed language specification. + +### Descriptors + +Descriptors are the "lingua franca" for describing Protobuf data schemas. They are the basis of runtime features like +reflection and dynamic messages. They are also the output of a Protobuf compiler: a compiler can produce them and write +them to a file (whose contents are the binary-encoded form of a [`FileDescriptorSet`](https://github.com/protocolbuffers/protobuf/blob/v21.7/src/google/protobuf/descriptor.proto#L55-L59)) +or send them to a [plugin](https://docs.buf.build/reference/images#plugins) to generate code for a particular +programming language. + +Descriptors are similar to nodes in a syntax tree: the contents of a file descriptor correspond closely to the elements +in the source file from which it was generated. Also, the descriptor model's data structures are themselves defined in +[Protobuf](https://github.com/protocolbuffers/protobuf/blob/v21.7/src/google/protobuf/descriptor.proto). + +## Using This Repo + +The primary API of this repo is in this root package: `github.com/bufbuild/protocompile`. This is the suggested entry +point and provides a type named `Compiler`, for compiling Protobuf source files into descriptors. There are also +numerous sub-packages, most of which implement various stages of the compiler. Here's an overview (_not_ in alphabetical +order): + + * [`protocompile`](https://pkg.go.dev/github.com/bufbuild/protocompile): + This is the entry point, used to configure and initiate a compilation operation. + * [`parser`](https://pkg.go.dev/github.com/bufbuild/protocompile/parser): + This is the first stage of the compiler. It parses Protobuf source code and produces an AST. This package can also + generate a file descriptor proto from an AST. + * [`ast`](https://pkg.go.dev/github.com/bufbuild/protocompile/ast): + This package models an Abstract Syntax Tree (AST) for the Protobuf language. + * [`linker`](https://pkg.go.dev/github.com/bufbuild/protocompile/linker): + This is the second stage of the compiler. The descriptor proto (generated from an AST) is linked, producing a more + useful data structure than simple descriptor protos. This step also performs numerous validations on the source, + like making sure that all type references are correct and that sources don't try to define two elements with the same + name. + * [`options`](https://pkg.go.dev/github.com/bufbuild/protocompile/options): + This is the next stage of the compiler: interpreting options. The linked data structures that come from the previous + stage are used to validate and interpret all options. 
+ * [`sourceinfo`](https://pkg.go.dev/github.com/bufbuild/protocompile/sourceinfo): + This is the last stage of the compiler: generating source code info. Source code info contains metadata that maps + elements in the descriptor to the location in the original source file from which it came. This includes access to + comments. In order to provide correct source info for options, it must happen last, after options have been + interpreted. + * [`reporter`](https://pkg.go.dev/github.com/bufbuild/protocompile/reporter): This package provides error types + generated by the compiler and interfaces used by the compiler to report errors and warnings to the calling code. + * [`walk`](https://pkg.go.dev/github.com/bufbuild/protocompile/walk): + This package provides functions for walking through all of the elements in a descriptor (or descriptor proto) + hierarchy. + * [`protoutil`](https://pkg.go.dev/github.com/bufbuild/protocompile/protoutil): + This package contains some other useful functions for interacting with Protobuf descriptors. + +### Migrating from `protoparse` + +There are a few differences between this repo and its predecessor, `github.com/jhump/protoreflect/desc/protoparse`. + +* If you want to include "standard imports", for the well-known files that are included with `protoc`, you have to do + so explicitly. To do this, wrap your resolver using `protocompile.WithStandardImports`. +* If you used `protoparse.FileContentsFromMap`, in this new repo you'll use a `protocompile.SourceResolver` and then use + `protocompile.SourceAccessorFromMap` as its accessor function. +* If you used `Parser.ParseToAST`, you won't use the `protocompile` package but instead directly use `parser.Parse` in + this repo's `parser` sub-package. This returns an AST for the given file contents. +* If you used `Parser.ParseFilesButDoNotLink`, that is still possible in this repo, but not provided directly via a + single function. Instead, you need to take a few steps: + 1. Parse the source using `parser.Parse`. Then use `parser.ResultFromAST` to construct a result that contains a file + descriptor proto. + 2. Interpret whatever options can be interpreted without linking using `options.InterpretUnlinkedOptions`. This may + leave some options in the descriptor proto uninterpreted (including all custom options). + 3. If you want source code info for the file, finally call `sourceinfo.GenerateSourceInfo` using the index returned + from the previous step and store that in the file descriptor proto. diff --git a/vendor/github.com/bufbuild/protocompile/ast/doc.go b/vendor/github.com/bufbuild/protocompile/ast/doc.go new file mode 100644 index 00000000..cda4068c --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/ast/doc.go @@ -0,0 +1,75 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package ast defines types for modeling the AST (Abstract Syntax +// Tree) for the Protocol Buffers interface definition language. 
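+//
+// A *[FileNode] for a source file is typically obtained from the parser
+// sub-package. Roughly (a sketch; the file name and src variable are
+// placeholders, and a nil reporter selects default error handling):
+//
+//	handler := reporter.NewHandler(nil)
+//	root, err := parser.Parse("example.proto", strings.NewReader(src), handler)
+//	if err != nil {
+//		// the source contained syntax errors
+//	}
+//	_ = root // *ast.FileNode, the root of the AST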
+// +// # Nodes +// +// All nodes of the tree implement the [Node] interface. Leaf nodes in the +// tree implement [TerminalNode], and all others implement [CompositeNode]. +// The root of the tree for a proto source file is a *[FileNode]. +// +// A [TerminalNode] represents a single lexical element, or [Token]. A +// [CompositeNode] represents a sub-tree of the AST and range of tokens. +// +// Position information is tracked using a *[FileInfo]. The lexer invokes its +// various Add* methods to add details as the file is tokenized. Storing +// the position information in the *[FileInfo], instead of in each AST node, +// allows the AST to have a much more compact representation. To extract +// detailed position information, you must use the NodeInfo method, available +// on either the *[FileInfo] which produced the node's items or the *[FileNode] +// root of the tree that contains the node. +// +// # Items, Tokens, and Comments +// +// An [Item] represents a lexical item, excluding whitespace. This can be +// either a [Token] or a [Comment]. +// +// Comments are not represented as nodes in the tree. Instead, they are +// attributed to terminal nodes in the tree. So, when lexing, comments +// are accumulated until the next non-comment token is found. The AST +// model in this package thus provides access to all comments in the +// file, regardless of location (unlike the SourceCodeInfo present in +// descriptor protos, which is lossy). The comments associated with a +// non-leaf/non-token node (i.e. a CompositeNode) come from the first +// and last nodes in its sub-tree, for leading and trailing comments +// respectively. +// +// A [Comment] value corresponds to a line ("//") or block ("/*") style +// comment in the source. These have no bearing on the grammar and are +// effectively ignored as the parser is determining the shape of the +// syntax tree. +// +// A [Token] value corresponds to a component of the grammar, that is +// used to produce an AST. They correspond to leaves in the AST (i.e. +// [TerminalNode]). +// +// The *[FileInfo] and *[FileNode] types provide methods for querying +// and iterating through all the items or tokens in the file. They also +// include a method for resolving an [Item] into a [Token] or [Comment]. +// +// # Factory Functions +// +// Creation of AST nodes should use the factory functions in this +// package instead of struct literals. Some factory functions accept +// optional arguments, which means the arguments can be nil. If nil +// values are provided for other (non-optional) arguments, the resulting +// node may be invalid and cause panics later in the program. +// +// This package defines numerous interfaces. However, user code should +// not attempt to implement any of them. Most consumers of an AST will +// not work correctly if they encounter concrete implementations other +// than the ones defined in this package. +package ast diff --git a/vendor/github.com/bufbuild/protocompile/ast/enum.go b/vendor/github.com/bufbuild/protocompile/ast/enum.go new file mode 100644 index 00000000..55a62292 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/ast/enum.go @@ -0,0 +1,185 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ast + +import "fmt" + +// EnumNode represents an enum declaration. Example: +// +// enum Foo { BAR = 0; BAZ = 1 } +type EnumNode struct { + compositeNode + Keyword *KeywordNode + Name *IdentNode + OpenBrace *RuneNode + Decls []EnumElement + CloseBrace *RuneNode +} + +func (*EnumNode) fileElement() {} +func (*EnumNode) msgElement() {} + +// NewEnumNode creates a new *EnumNode. All arguments must be non-nil. While +// it is technically allowed for decls to be nil or empty, the resulting node +// will not be a valid enum, which must have at least one value. +// - keyword: The token corresponding to the "enum" keyword. +// - name: The token corresponding to the enum's name. +// - openBrace: The token corresponding to the "{" rune that starts the body. +// - decls: All declarations inside the enum body. +// - closeBrace: The token corresponding to the "}" rune that ends the body. +func NewEnumNode(keyword *KeywordNode, name *IdentNode, openBrace *RuneNode, decls []EnumElement, closeBrace *RuneNode) *EnumNode { + if keyword == nil { + panic("keyword is nil") + } + if name == nil { + panic("name is nil") + } + if openBrace == nil { + panic("openBrace is nil") + } + if closeBrace == nil { + panic("closeBrace is nil") + } + children := make([]Node, 0, 4+len(decls)) + children = append(children, keyword, name, openBrace) + for _, decl := range decls { + switch decl.(type) { + case *OptionNode, *EnumValueNode, *ReservedNode, *EmptyDeclNode: + default: + panic(fmt.Sprintf("invalid EnumElement type: %T", decl)) + } + children = append(children, decl) + } + children = append(children, closeBrace) + + return &EnumNode{ + compositeNode: compositeNode{ + children: children, + }, + Keyword: keyword, + Name: name, + OpenBrace: openBrace, + CloseBrace: closeBrace, + Decls: decls, + } +} + +func (n *EnumNode) RangeOptions(fn func(*OptionNode) bool) { + for _, decl := range n.Decls { + if opt, ok := decl.(*OptionNode); ok { + if !fn(opt) { + return + } + } + } +} + +// EnumElement is an interface implemented by all AST nodes that can +// appear in the body of an enum declaration. +type EnumElement interface { + Node + enumElement() +} + +var _ EnumElement = (*OptionNode)(nil) +var _ EnumElement = (*EnumValueNode)(nil) +var _ EnumElement = (*ReservedNode)(nil) +var _ EnumElement = (*EmptyDeclNode)(nil) + +// EnumValueDeclNode is a placeholder interface for AST nodes that represent +// enum values. This allows NoSourceNode to be used in place of *EnumValueNode +// for some usages. +type EnumValueDeclNode interface { + NodeWithOptions + GetName() Node + GetNumber() Node +} + +var _ EnumValueDeclNode = (*EnumValueNode)(nil) +var _ EnumValueDeclNode = (*NoSourceNode)(nil) + +// EnumValueNode represents an enum declaration. Example: +// +// UNSET = 0 [deprecated = true]; +type EnumValueNode struct { + compositeNode + Name *IdentNode + Equals *RuneNode + Number IntValueNode + Options *CompactOptionsNode + Semicolon *RuneNode +} + +func (*EnumValueNode) enumElement() {} + +// NewEnumValueNode creates a new *EnumValueNode. 
All arguments must be non-nil +// except opts which is only non-nil if the declaration included options. +// - name: The token corresponding to the enum value's name. +// - equals: The token corresponding to the '=' rune after the name. +// - number: The token corresponding to the enum value's number. +// - opts: Optional set of enum value options. +// - semicolon: The token corresponding to the ";" rune that ends the declaration. +func NewEnumValueNode(name *IdentNode, equals *RuneNode, number IntValueNode, opts *CompactOptionsNode, semicolon *RuneNode) *EnumValueNode { + if name == nil { + panic("name is nil") + } + if equals == nil { + panic("equals is nil") + } + if number == nil { + panic("number is nil") + } + numChildren := 3 + if semicolon != nil { + numChildren++ + } + if opts != nil { + numChildren++ + } + children := make([]Node, 0, numChildren) + children = append(children, name, equals, number) + if opts != nil { + children = append(children, opts) + } + if semicolon != nil { + children = append(children, semicolon) + } + return &EnumValueNode{ + compositeNode: compositeNode{ + children: children, + }, + Name: name, + Equals: equals, + Number: number, + Options: opts, + Semicolon: semicolon, + } +} + +func (e *EnumValueNode) GetName() Node { + return e.Name +} + +func (e *EnumValueNode) GetNumber() Node { + return e.Number +} + +func (e *EnumValueNode) RangeOptions(fn func(*OptionNode) bool) { + for _, opt := range e.Options.Options { + if !fn(opt) { + return + } + } +} diff --git a/vendor/github.com/bufbuild/protocompile/ast/field.go b/vendor/github.com/bufbuild/protocompile/ast/field.go new file mode 100644 index 00000000..63d65b3a --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/ast/field.go @@ -0,0 +1,795 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ast + +import "fmt" + +// FieldDeclNode is a node in the AST that defines a field. This includes +// normal message fields as well as extensions. There are multiple types +// of AST nodes that declare fields: +// - *FieldNode +// - *GroupNode +// - *MapFieldNode +// - *SyntheticMapField +// +// This also allows NoSourceNode and SyntheticMapField to be used in place of +// one of the above for some usages. +type FieldDeclNode interface { + NodeWithOptions + FieldLabel() Node + FieldName() Node + FieldType() Node + FieldTag() Node + FieldExtendee() Node + GetGroupKeyword() Node + GetOptions() *CompactOptionsNode +} + +var _ FieldDeclNode = (*FieldNode)(nil) +var _ FieldDeclNode = (*GroupNode)(nil) +var _ FieldDeclNode = (*MapFieldNode)(nil) +var _ FieldDeclNode = (*SyntheticMapField)(nil) +var _ FieldDeclNode = (*NoSourceNode)(nil) + +// FieldNode represents a normal field declaration (not groups or maps). It +// can represent extension fields as well as non-extension fields (both inside +// of messages and inside of one-ofs). 
Example: +// +// optional string foo = 1; +type FieldNode struct { + compositeNode + Label FieldLabel + FldType IdentValueNode + Name *IdentNode + Equals *RuneNode + Tag *UintLiteralNode + Options *CompactOptionsNode + Semicolon *RuneNode + + // This is an up-link to the containing *ExtendNode for fields + // that are defined inside of "extend" blocks. + Extendee *ExtendNode +} + +func (*FieldNode) msgElement() {} +func (*FieldNode) oneofElement() {} +func (*FieldNode) extendElement() {} + +// NewFieldNode creates a new *FieldNode. The label and options arguments may be +// nil but the others must be non-nil. +// - label: The token corresponding to the label keyword if present ("optional", +// "required", or "repeated"). +// - fieldType: The token corresponding to the field's type. +// - name: The token corresponding to the field's name. +// - equals: The token corresponding to the '=' rune after the name. +// - tag: The token corresponding to the field's tag number. +// - opts: Optional set of field options. +// - semicolon: The token corresponding to the ";" rune that ends the declaration. +func NewFieldNode(label *KeywordNode, fieldType IdentValueNode, name *IdentNode, equals *RuneNode, tag *UintLiteralNode, opts *CompactOptionsNode, semicolon *RuneNode) *FieldNode { + if fieldType == nil { + panic("fieldType is nil") + } + if name == nil { + panic("name is nil") + } + numChildren := 2 + if equals != nil { + numChildren++ + } + if tag != nil { + numChildren++ + } + if semicolon != nil { + numChildren++ + } + if label != nil { + numChildren++ + } + if opts != nil { + numChildren++ + } + children := make([]Node, 0, numChildren) + if label != nil { + children = append(children, label) + } + children = append(children, fieldType, name) + if equals != nil { + children = append(children, equals) + } + if tag != nil { + children = append(children, tag) + } + if opts != nil { + children = append(children, opts) + } + if semicolon != nil { + children = append(children, semicolon) + } + + return &FieldNode{ + compositeNode: compositeNode{ + children: children, + }, + Label: newFieldLabel(label), + FldType: fieldType, + Name: name, + Equals: equals, + Tag: tag, + Options: opts, + Semicolon: semicolon, + } +} + +func (n *FieldNode) FieldLabel() Node { + // proto3 fields and fields inside one-ofs will not have a label and we need + // this check in order to return a nil node -- otherwise we'd return a + // non-nil node that has a nil pointer value in it :/ + if n.Label.KeywordNode == nil { + return nil + } + return n.Label.KeywordNode +} + +func (n *FieldNode) FieldName() Node { + return n.Name +} + +func (n *FieldNode) FieldType() Node { + return n.FldType +} + +func (n *FieldNode) FieldTag() Node { + if n.Tag == nil { + return n + } + return n.Tag +} + +func (n *FieldNode) FieldExtendee() Node { + if n.Extendee != nil { + return n.Extendee.Extendee + } + return nil +} + +func (n *FieldNode) GetGroupKeyword() Node { + return nil +} + +func (n *FieldNode) GetOptions() *CompactOptionsNode { + return n.Options +} + +func (n *FieldNode) RangeOptions(fn func(*OptionNode) bool) { + for _, opt := range n.Options.Options { + if !fn(opt) { + return + } + } +} + +// FieldLabel represents the label of a field, which indicates its cardinality +// (i.e. whether it is optional, required, or repeated). 
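+//
+// For example, given a parsed *FieldNode fld, a rough check for a repeated
+// field looks like:
+//
+//	if fld.Label.IsPresent() && fld.Label.Repeated {
+//		// the field was declared with the "repeated" label
+//	}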
+type FieldLabel struct { + *KeywordNode + Repeated bool + Required bool +} + +func newFieldLabel(lbl *KeywordNode) FieldLabel { + repeated, required := false, false + if lbl != nil { + repeated = lbl.Val == "repeated" + required = lbl.Val == "required" + } + return FieldLabel{ + KeywordNode: lbl, + Repeated: repeated, + Required: required, + } +} + +// IsPresent returns true if a label keyword was present in the declaration +// and false if it was absent. +func (f *FieldLabel) IsPresent() bool { + return f.KeywordNode != nil +} + +// GroupNode represents a group declaration, which doubles as a field and inline +// message declaration. It can represent extension fields as well as +// non-extension fields (both inside of messages and inside of one-ofs). +// Example: +// +// optional group Key = 4 { +// optional uint64 id = 1; +// optional string name = 2; +// } +type GroupNode struct { + compositeNode + Label FieldLabel + Keyword *KeywordNode + Name *IdentNode + Equals *RuneNode + Tag *UintLiteralNode + Options *CompactOptionsNode + MessageBody + + // This is an up-link to the containing *ExtendNode for groups + // that are defined inside of "extend" blocks. + Extendee *ExtendNode +} + +func (*GroupNode) msgElement() {} +func (*GroupNode) oneofElement() {} +func (*GroupNode) extendElement() {} + +// NewGroupNode creates a new *GroupNode. The label and options arguments may be +// nil but the others must be non-nil. +// - label: The token corresponding to the label keyword if present ("optional", +// "required", or "repeated"). +// - keyword: The token corresponding to the "group" keyword. +// - name: The token corresponding to the field's name. +// - equals: The token corresponding to the '=' rune after the name. +// - tag: The token corresponding to the field's tag number. +// - opts: Optional set of field options. +// - openBrace: The token corresponding to the "{" rune that starts the body. +// - decls: All declarations inside the group body. +// - closeBrace: The token corresponding to the "}" rune that ends the body. 
+func NewGroupNode(label *KeywordNode, keyword *KeywordNode, name *IdentNode, equals *RuneNode, tag *UintLiteralNode, opts *CompactOptionsNode, openBrace *RuneNode, decls []MessageElement, closeBrace *RuneNode) *GroupNode { + if keyword == nil { + panic("fieldType is nil") + } + if name == nil { + panic("name is nil") + } + if openBrace == nil { + panic("openBrace is nil") + } + if closeBrace == nil { + panic("closeBrace is nil") + } + numChildren := 4 + len(decls) + if label != nil { + numChildren++ + } + if equals != nil { + numChildren++ + } + if tag != nil { + numChildren++ + } + if opts != nil { + numChildren++ + } + children := make([]Node, 0, numChildren) + if label != nil { + children = append(children, label) + } + children = append(children, keyword, name) + if equals != nil { + children = append(children, equals) + } + if tag != nil { + children = append(children, tag) + } + if opts != nil { + children = append(children, opts) + } + children = append(children, openBrace) + for _, decl := range decls { + children = append(children, decl) + } + children = append(children, closeBrace) + + ret := &GroupNode{ + compositeNode: compositeNode{ + children: children, + }, + Label: newFieldLabel(label), + Keyword: keyword, + Name: name, + Equals: equals, + Tag: tag, + Options: opts, + } + populateMessageBody(&ret.MessageBody, openBrace, decls, closeBrace) + return ret +} + +func (n *GroupNode) FieldLabel() Node { + if n.Label.KeywordNode == nil { + // return nil interface to indicate absence, not a typed nil + return nil + } + return n.Label.KeywordNode +} + +func (n *GroupNode) FieldName() Node { + return n.Name +} + +func (n *GroupNode) FieldType() Node { + return n.Keyword +} + +func (n *GroupNode) FieldTag() Node { + if n.Tag == nil { + return n + } + return n.Tag +} + +func (n *GroupNode) FieldExtendee() Node { + if n.Extendee != nil { + return n.Extendee.Extendee + } + return nil +} + +func (n *GroupNode) GetGroupKeyword() Node { + return n.Keyword +} + +func (n *GroupNode) GetOptions() *CompactOptionsNode { + return n.Options +} + +func (n *GroupNode) RangeOptions(fn func(*OptionNode) bool) { + for _, opt := range n.Options.Options { + if !fn(opt) { + return + } + } +} + +func (n *GroupNode) AsMessage() *SyntheticGroupMessageNode { + return (*SyntheticGroupMessageNode)(n) +} + +// SyntheticGroupMessageNode is a view of a GroupNode that implements MessageDeclNode. +// Since a group field implicitly defines a message type, this node represents +// that message type while the corresponding GroupNode represents the field. +// +// This type is considered synthetic since it never appears in a file's AST, but +// is only returned from other accessors (e.g. GroupNode.AsMessage). +type SyntheticGroupMessageNode GroupNode + +func (n *SyntheticGroupMessageNode) MessageName() Node { + return n.Name +} + +func (n *SyntheticGroupMessageNode) RangeOptions(fn func(*OptionNode) bool) { + for _, decl := range n.Decls { + if opt, ok := decl.(*OptionNode); ok { + if !fn(opt) { + return + } + } + } +} + +// OneofDeclNode is a node in the AST that defines a oneof. There are +// multiple types of AST nodes that declare oneofs: +// - *OneofNode +// - *SyntheticOneof +// +// This also allows NoSourceNode to be used in place of one of the above +// for some usages. 
+type OneofDeclNode interface { + NodeWithOptions + OneofName() Node +} + +var _ OneofDeclNode = (*OneofNode)(nil) +var _ OneofDeclNode = (*SyntheticOneof)(nil) +var _ OneofDeclNode = (*NoSourceNode)(nil) + +// OneofNode represents a one-of declaration. Example: +// +// oneof query { +// string by_name = 2; +// Type by_type = 3; +// Address by_address = 4; +// Labels by_label = 5; +// } +type OneofNode struct { + compositeNode + Keyword *KeywordNode + Name *IdentNode + OpenBrace *RuneNode + Decls []OneofElement + CloseBrace *RuneNode +} + +func (*OneofNode) msgElement() {} + +// NewOneofNode creates a new *OneofNode. All arguments must be non-nil. While +// it is technically allowed for decls to be nil or empty, the resulting node +// will not be a valid oneof, which must have at least one field. +// - keyword: The token corresponding to the "oneof" keyword. +// - name: The token corresponding to the oneof's name. +// - openBrace: The token corresponding to the "{" rune that starts the body. +// - decls: All declarations inside the oneof body. +// - closeBrace: The token corresponding to the "}" rune that ends the body. +func NewOneofNode(keyword *KeywordNode, name *IdentNode, openBrace *RuneNode, decls []OneofElement, closeBrace *RuneNode) *OneofNode { + if keyword == nil { + panic("keyword is nil") + } + if name == nil { + panic("name is nil") + } + if openBrace == nil { + panic("openBrace is nil") + } + if closeBrace == nil { + panic("closeBrace is nil") + } + children := make([]Node, 0, 4+len(decls)) + children = append(children, keyword, name, openBrace) + for _, decl := range decls { + children = append(children, decl) + } + children = append(children, closeBrace) + + for _, decl := range decls { + switch decl := decl.(type) { + case *OptionNode, *FieldNode, *GroupNode, *EmptyDeclNode: + default: + panic(fmt.Sprintf("invalid OneofElement type: %T", decl)) + } + } + + return &OneofNode{ + compositeNode: compositeNode{ + children: children, + }, + Keyword: keyword, + Name: name, + OpenBrace: openBrace, + Decls: decls, + CloseBrace: closeBrace, + } +} + +func (n *OneofNode) OneofName() Node { + return n.Name +} + +func (n *OneofNode) RangeOptions(fn func(*OptionNode) bool) { + for _, decl := range n.Decls { + if opt, ok := decl.(*OptionNode); ok { + if !fn(opt) { + return + } + } + } +} + +// OneofElement is an interface implemented by all AST nodes that can +// appear in the body of a oneof declaration. +type OneofElement interface { + Node + oneofElement() +} + +var _ OneofElement = (*OptionNode)(nil) +var _ OneofElement = (*FieldNode)(nil) +var _ OneofElement = (*GroupNode)(nil) +var _ OneofElement = (*EmptyDeclNode)(nil) + +// SyntheticOneof is not an actual node in the AST but a synthetic node +// that represents the oneof implied by a proto3 optional field. +// +// This type is considered synthetic since it never appears in a file's AST, +// but is only returned from other functions (e.g. NewSyntheticOneof). +type SyntheticOneof struct { + // The proto3 optional field that implies the presence of this oneof. + Field *FieldNode +} + +var _ Node = (*SyntheticOneof)(nil) + +// NewSyntheticOneof creates a new *SyntheticOneof that corresponds to the +// given proto3 optional field. 
+func NewSyntheticOneof(field *FieldNode) *SyntheticOneof { + return &SyntheticOneof{Field: field} +} + +func (n *SyntheticOneof) Start() Token { + return n.Field.Start() +} + +func (n *SyntheticOneof) End() Token { + return n.Field.End() +} + +func (n *SyntheticOneof) LeadingComments() []Comment { + return nil +} + +func (n *SyntheticOneof) TrailingComments() []Comment { + return nil +} + +func (n *SyntheticOneof) OneofName() Node { + return n.Field.FieldName() +} + +func (n *SyntheticOneof) RangeOptions(_ func(*OptionNode) bool) { +} + +// MapTypeNode represents the type declaration for a map field. It defines +// both the key and value types for the map. Example: +// +// map +type MapTypeNode struct { + compositeNode + Keyword *KeywordNode + OpenAngle *RuneNode + KeyType *IdentNode + Comma *RuneNode + ValueType IdentValueNode + CloseAngle *RuneNode +} + +// NewMapTypeNode creates a new *MapTypeNode. All arguments must be non-nil. +// - keyword: The token corresponding to the "map" keyword. +// - openAngle: The token corresponding to the "<" rune after the keyword. +// - keyType: The token corresponding to the key type for the map. +// - comma: The token corresponding to the "," rune between key and value types. +// - valType: The token corresponding to the value type for the map. +// - closeAngle: The token corresponding to the ">" rune that ends the declaration. +func NewMapTypeNode(keyword *KeywordNode, openAngle *RuneNode, keyType *IdentNode, comma *RuneNode, valType IdentValueNode, closeAngle *RuneNode) *MapTypeNode { + if keyword == nil { + panic("keyword is nil") + } + if openAngle == nil { + panic("openAngle is nil") + } + if keyType == nil { + panic("keyType is nil") + } + if comma == nil { + panic("comma is nil") + } + if valType == nil { + panic("valType is nil") + } + if closeAngle == nil { + panic("closeAngle is nil") + } + children := []Node{keyword, openAngle, keyType, comma, valType, closeAngle} + return &MapTypeNode{ + compositeNode: compositeNode{ + children: children, + }, + Keyword: keyword, + OpenAngle: openAngle, + KeyType: keyType, + Comma: comma, + ValueType: valType, + CloseAngle: closeAngle, + } +} + +// MapFieldNode represents a map field declaration. Example: +// +// map replacements = 3 [deprecated = true]; +type MapFieldNode struct { + compositeNode + MapType *MapTypeNode + Name *IdentNode + Equals *RuneNode + Tag *UintLiteralNode + Options *CompactOptionsNode + Semicolon *RuneNode +} + +func (*MapFieldNode) msgElement() {} + +// NewMapFieldNode creates a new *MapFieldNode. All arguments must be non-nil +// except opts, which may be nil. +// - mapType: The token corresponding to the map type. +// - name: The token corresponding to the field's name. +// - equals: The token corresponding to the '=' rune after the name. +// - tag: The token corresponding to the field's tag number. +// - opts: Optional set of field options. +// - semicolon: The token corresponding to the ";" rune that ends the declaration. 
+func NewMapFieldNode(mapType *MapTypeNode, name *IdentNode, equals *RuneNode, tag *UintLiteralNode, opts *CompactOptionsNode, semicolon *RuneNode) *MapFieldNode { + if mapType == nil { + panic("mapType is nil") + } + if name == nil { + panic("name is nil") + } + numChildren := 2 + if equals != nil { + numChildren++ + } + if tag != nil { + numChildren++ + } + if opts != nil { + numChildren++ + } + if semicolon != nil { + numChildren++ + } + children := make([]Node, 0, numChildren) + children = append(children, mapType, name) + if equals != nil { + children = append(children, equals) + } + if tag != nil { + children = append(children, tag) + } + if opts != nil { + children = append(children, opts) + } + if semicolon != nil { + children = append(children, semicolon) + } + + return &MapFieldNode{ + compositeNode: compositeNode{ + children: children, + }, + MapType: mapType, + Name: name, + Equals: equals, + Tag: tag, + Options: opts, + Semicolon: semicolon, + } +} + +func (n *MapFieldNode) FieldLabel() Node { + return nil +} + +func (n *MapFieldNode) FieldName() Node { + return n.Name +} + +func (n *MapFieldNode) FieldType() Node { + return n.MapType +} + +func (n *MapFieldNode) FieldTag() Node { + if n.Tag == nil { + return n + } + return n.Tag +} + +func (n *MapFieldNode) FieldExtendee() Node { + return nil +} + +func (n *MapFieldNode) GetGroupKeyword() Node { + return nil +} + +func (n *MapFieldNode) GetOptions() *CompactOptionsNode { + return n.Options +} + +func (n *MapFieldNode) RangeOptions(fn func(*OptionNode) bool) { + for _, opt := range n.Options.Options { + if !fn(opt) { + return + } + } +} + +func (n *MapFieldNode) AsMessage() *SyntheticMapEntryNode { + return (*SyntheticMapEntryNode)(n) +} + +func (n *MapFieldNode) KeyField() *SyntheticMapField { + return NewSyntheticMapField(n.MapType.KeyType, 1) +} + +func (n *MapFieldNode) ValueField() *SyntheticMapField { + return NewSyntheticMapField(n.MapType.ValueType, 2) +} + +// SyntheticMapEntryNode is a view of a MapFieldNode that implements MessageDeclNode. +// Since a map field implicitly defines a message type for the map entry, +// this node represents that message type. +// +// This type is considered synthetic since it never appears in a file's AST, but +// is only returned from other accessors (e.g. MapFieldNode.AsMessage). +type SyntheticMapEntryNode MapFieldNode + +func (n *SyntheticMapEntryNode) MessageName() Node { + return n.Name +} + +func (n *SyntheticMapEntryNode) RangeOptions(_ func(*OptionNode) bool) { +} + +// SyntheticMapField is not an actual node in the AST but a synthetic node +// that implements FieldDeclNode. These are used to represent the implicit +// field declarations of the "key" and "value" fields in a map entry. +// +// This type is considered synthetic since it never appears in a file's AST, +// but is only returned from other accessors and functions (e.g. +// MapFieldNode.KeyField, MapFieldNode.ValueField, and NewSyntheticMapField). +type SyntheticMapField struct { + Ident IdentValueNode + Tag *UintLiteralNode +} + +// NewSyntheticMapField creates a new *SyntheticMapField for the given +// identifier (either a key or value type in a map declaration) and tag +// number (1 for key, 2 for value). 
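+//
+// As an alternative to calling this directly, for a parsed *MapFieldNode
+// mapFld the implicit entry fields can be obtained via (rough sketch):
+//
+//	keyFld := mapFld.KeyField()   // synthetic field with tag 1
+//	valFld := mapFld.ValueField() // synthetic field with tag 2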
+func NewSyntheticMapField(ident IdentValueNode, tagNum uint64) *SyntheticMapField { + tag := &UintLiteralNode{ + terminalNode: ident.Start().asTerminalNode(), + Val: tagNum, + } + return &SyntheticMapField{Ident: ident, Tag: tag} +} + +func (n *SyntheticMapField) Start() Token { + return n.Ident.Start() +} + +func (n *SyntheticMapField) End() Token { + return n.Ident.End() +} + +func (n *SyntheticMapField) LeadingComments() []Comment { + return nil +} + +func (n *SyntheticMapField) TrailingComments() []Comment { + return nil +} + +func (n *SyntheticMapField) FieldLabel() Node { + return n.Ident +} + +func (n *SyntheticMapField) FieldName() Node { + return n.Ident +} + +func (n *SyntheticMapField) FieldType() Node { + return n.Ident +} + +func (n *SyntheticMapField) FieldTag() Node { + if n.Tag == nil { + return n + } + return n.Tag +} + +func (n *SyntheticMapField) FieldExtendee() Node { + return nil +} + +func (n *SyntheticMapField) GetGroupKeyword() Node { + return nil +} + +func (n *SyntheticMapField) GetOptions() *CompactOptionsNode { + return nil +} + +func (n *SyntheticMapField) RangeOptions(_ func(*OptionNode) bool) { +} diff --git a/vendor/github.com/bufbuild/protocompile/ast/file.go b/vendor/github.com/bufbuild/protocompile/ast/file.go new file mode 100644 index 00000000..50d4ca92 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/ast/file.go @@ -0,0 +1,358 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ast + +import "fmt" + +// FileDeclNode is a placeholder interface for AST nodes that represent files. +// This allows NoSourceNode to be used in place of *FileNode for some usages. +type FileDeclNode interface { + NodeWithOptions + Name() string + NodeInfo(n Node) NodeInfo +} + +var _ FileDeclNode = (*FileNode)(nil) +var _ FileDeclNode = (*NoSourceNode)(nil) + +// FileNode is the root of the AST hierarchy. It represents an entire +// protobuf source file. +type FileNode struct { + compositeNode + fileInfo *FileInfo + + // A file has either a Syntax or Edition node, never both. + // If both are nil, neither declaration is present and the + // file is assumed to use "proto2" syntax. + Syntax *SyntaxNode + Edition *EditionNode + + Decls []FileElement + + // This synthetic node allows access to final comments and whitespace + EOF *RuneNode +} + +// NewFileNode creates a new *FileNode. The syntax parameter is optional. If it +// is absent, it means the file had no syntax declaration. +// +// This function panics if the concrete type of any element of decls is not +// from this package. +func NewFileNode(info *FileInfo, syntax *SyntaxNode, decls []FileElement, eof Token) *FileNode { + return newFileNode(info, syntax, nil, decls, eof) +} + +// NewFileNodeWithEdition creates a new *FileNode. The edition parameter is required. If a file +// has no edition declaration, use NewFileNode instead. +// +// This function panics if the concrete type of any element of decls is not +// from this package. 
+func NewFileNodeWithEdition(info *FileInfo, edition *EditionNode, decls []FileElement, eof Token) *FileNode { + if edition == nil { + panic("edition is nil") + } + return newFileNode(info, nil, edition, decls, eof) +} + +func newFileNode(info *FileInfo, syntax *SyntaxNode, edition *EditionNode, decls []FileElement, eof Token) *FileNode { + numChildren := len(decls) + 1 + if syntax != nil || edition != nil { + numChildren++ + } + children := make([]Node, 0, numChildren) + if syntax != nil { + children = append(children, syntax) + } else if edition != nil { + children = append(children, edition) + } + for _, decl := range decls { + switch decl := decl.(type) { + case *PackageNode, *ImportNode, *OptionNode, *MessageNode, + *EnumNode, *ExtendNode, *ServiceNode, *EmptyDeclNode: + default: + panic(fmt.Sprintf("invalid FileElement type: %T", decl)) + } + children = append(children, decl) + } + + eofNode := NewRuneNode(0, eof) + children = append(children, eofNode) + + return &FileNode{ + compositeNode: compositeNode{ + children: children, + }, + fileInfo: info, + Syntax: syntax, + Edition: edition, + Decls: decls, + EOF: eofNode, + } +} + +// NewEmptyFileNode returns an empty AST for a file with the given name. +func NewEmptyFileNode(filename string) *FileNode { + fileInfo := NewFileInfo(filename, []byte{}) + return NewFileNode(fileInfo, nil, nil, fileInfo.AddToken(0, 0)) +} + +func (f *FileNode) Name() string { + return f.fileInfo.Name() +} + +func (f *FileNode) NodeInfo(n Node) NodeInfo { + return f.fileInfo.NodeInfo(n) +} + +func (f *FileNode) TokenInfo(t Token) NodeInfo { + return f.fileInfo.TokenInfo(t) +} + +func (f *FileNode) ItemInfo(i Item) ItemInfo { + return f.fileInfo.ItemInfo(i) +} + +func (f *FileNode) GetItem(i Item) (Token, Comment) { + return f.fileInfo.GetItem(i) +} + +func (f *FileNode) Items() Sequence[Item] { + return f.fileInfo.Items() +} + +func (f *FileNode) Tokens() Sequence[Token] { + return f.fileInfo.Tokens() +} + +func (f *FileNode) RangeOptions(fn func(*OptionNode) bool) { + for _, decl := range f.Decls { + if opt, ok := decl.(*OptionNode); ok { + if !fn(opt) { + return + } + } + } +} + +// FileElement is an interface implemented by all AST nodes that are +// allowed as top-level declarations in the file. +type FileElement interface { + Node + fileElement() +} + +var _ FileElement = (*ImportNode)(nil) +var _ FileElement = (*PackageNode)(nil) +var _ FileElement = (*OptionNode)(nil) +var _ FileElement = (*MessageNode)(nil) +var _ FileElement = (*EnumNode)(nil) +var _ FileElement = (*ExtendNode)(nil) +var _ FileElement = (*ServiceNode)(nil) +var _ FileElement = (*EmptyDeclNode)(nil) + +// SyntaxNode represents a syntax declaration, which if present must be +// the first non-comment content. Example: +// +// syntax = "proto2"; +// +// Files that don't have a syntax node are assumed to use proto2 syntax. +type SyntaxNode struct { + compositeNode + Keyword *KeywordNode + Equals *RuneNode + Syntax StringValueNode + Semicolon *RuneNode +} + +// NewSyntaxNode creates a new *SyntaxNode. All four arguments must be non-nil: +// - keyword: The token corresponding to the "syntax" keyword. +// - equals: The token corresponding to the "=" rune. +// - syntax: The actual syntax value, e.g. "proto2" or "proto3". +// - semicolon: The token corresponding to the ";" rune that ends the declaration. 
+func NewSyntaxNode(keyword *KeywordNode, equals *RuneNode, syntax StringValueNode, semicolon *RuneNode) *SyntaxNode { + if keyword == nil { + panic("keyword is nil") + } + if equals == nil { + panic("equals is nil") + } + if syntax == nil { + panic("syntax is nil") + } + var children []Node + if semicolon == nil { + children = []Node{keyword, equals, syntax} + } else { + children = []Node{keyword, equals, syntax, semicolon} + } + return &SyntaxNode{ + compositeNode: compositeNode{ + children: children, + }, + Keyword: keyword, + Equals: equals, + Syntax: syntax, + Semicolon: semicolon, + } +} + +// EditionNode represents an edition declaration, which if present must be +// the first non-comment content. Example: +// +// edition = "2023"; +// +// Files may include either an edition node or a syntax node, but not both. +// If neither are present, the file is assumed to use proto2 syntax. +type EditionNode struct { + compositeNode + Keyword *KeywordNode + Equals *RuneNode + Edition StringValueNode + Semicolon *RuneNode +} + +// NewEditionNode creates a new *EditionNode. All four arguments must be non-nil: +// - keyword: The token corresponding to the "edition" keyword. +// - equals: The token corresponding to the "=" rune. +// - edition: The actual edition value, e.g. "2023". +// - semicolon: The token corresponding to the ";" rune that ends the declaration. +func NewEditionNode(keyword *KeywordNode, equals *RuneNode, edition StringValueNode, semicolon *RuneNode) *EditionNode { + if keyword == nil { + panic("keyword is nil") + } + if equals == nil { + panic("equals is nil") + } + if edition == nil { + panic("edition is nil") + } + if semicolon == nil { + panic("semicolon is nil") + } + children := []Node{keyword, equals, edition, semicolon} + return &EditionNode{ + compositeNode: compositeNode{ + children: children, + }, + Keyword: keyword, + Equals: equals, + Edition: edition, + Semicolon: semicolon, + } +} + +// ImportNode represents an import statement. Example: +// +// import "google/protobuf/empty.proto"; +type ImportNode struct { + compositeNode + Keyword *KeywordNode + // Optional; if present indicates this is a public import + Public *KeywordNode + // Optional; if present indicates this is a weak import + Weak *KeywordNode + Name StringValueNode + Semicolon *RuneNode +} + +// NewImportNode creates a new *ImportNode. The public and weak arguments are optional +// and only one or the other (or neither) may be specified, not both. When public is +// non-nil, it indicates the "public" keyword in the import statement and means this is +// a public import. When weak is non-nil, it indicates the "weak" keyword in the import +// statement and means this is a weak import. When both are nil, this is a normal import. +// The other arguments must be non-nil: +// - keyword: The token corresponding to the "import" keyword. +// - public: The token corresponding to the optional "public" keyword. +// - weak: The token corresponding to the optional "weak" keyword. +// - name: The actual imported file name. +// - semicolon: The token corresponding to the ";" rune that ends the declaration. 
+func NewImportNode(keyword *KeywordNode, public *KeywordNode, weak *KeywordNode, name StringValueNode, semicolon *RuneNode) *ImportNode { + if keyword == nil { + panic("keyword is nil") + } + if name == nil { + panic("name is nil") + } + numChildren := 2 + if semicolon == nil { + numChildren++ + } + if public != nil || weak != nil { + numChildren++ + } + children := make([]Node, 0, numChildren) + children = append(children, keyword) + if public != nil { + children = append(children, public) + } else if weak != nil { + children = append(children, weak) + } + children = append(children, name) + if semicolon != nil { + children = append(children, semicolon) + } + + return &ImportNode{ + compositeNode: compositeNode{ + children: children, + }, + Keyword: keyword, + Public: public, + Weak: weak, + Name: name, + Semicolon: semicolon, + } +} + +func (*ImportNode) fileElement() {} + +// PackageNode represents a package declaration. Example: +// +// package foobar.com; +type PackageNode struct { + compositeNode + Keyword *KeywordNode + Name IdentValueNode + Semicolon *RuneNode +} + +func (*PackageNode) fileElement() {} + +// NewPackageNode creates a new *PackageNode. All three arguments must be non-nil: +// - keyword: The token corresponding to the "package" keyword. +// - name: The package name declared for the file. +// - semicolon: The token corresponding to the ";" rune that ends the declaration. +func NewPackageNode(keyword *KeywordNode, name IdentValueNode, semicolon *RuneNode) *PackageNode { + if keyword == nil { + panic("keyword is nil") + } + if name == nil { + panic("name is nil") + } + var children []Node + if semicolon == nil { + children = []Node{keyword, name} + } else { + children = []Node{keyword, name, semicolon} + } + return &PackageNode{ + compositeNode: compositeNode{ + children: children, + }, + Keyword: keyword, + Name: name, + Semicolon: semicolon, + } +} diff --git a/vendor/github.com/bufbuild/protocompile/ast/file_info.go b/vendor/github.com/bufbuild/protocompile/ast/file_info.go new file mode 100644 index 00000000..7c2d9049 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/ast/file_info.go @@ -0,0 +1,701 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ast + +import ( + "fmt" + "sort" + "unicode/utf8" +) + +// FileInfo contains information about the contents of a source file, including +// details about comments and items. A lexer accumulates these details as it +// scans the file contents. This allows efficient representation of things like +// source positions. +type FileInfo struct { + // The name of the source file. + name string + // The raw contents of the source file. + data []byte + // The offsets for each line in the file. The value is the zero-based byte + // offset for a given line. The line is given by its index. So the value at + // index 0 is the offset for the first line (which is always zero). The + // value at index 1 is the offset at which the second line begins. Etc. 
+ lines []int + // The info for every comment in the file. This is empty if the file has no + // comments. The first entry corresponds to the first comment in the file, + // and so on. + comments []commentInfo + // The info for every lexed item in the file. The last item in the slice + // corresponds to the EOF, so every file (even an empty one) should have at + // least one entry. This includes all terminal symbols (tokens) in the AST + // as well as all comments. + items []itemSpan +} + +type commentInfo struct { + // the index of the item, in the file's items slice, that represents this + // comment + index int + // the index of the token to which this comment is attributed. + attributedToIndex int +} + +type itemSpan struct { + // the offset into the file of the first character of an item. + offset int + // the length of the item + length int +} + +// NewFileInfo creates a new instance for the given file. +func NewFileInfo(filename string, contents []byte) *FileInfo { + return &FileInfo{ + name: filename, + data: contents, + lines: []int{0}, + } +} + +func (f *FileInfo) Name() string { + return f.name +} + +// AddLine adds the offset representing the beginning of the "next" line in the file. +// The first line always starts at offset 0, the second line starts at offset-of-newline-char+1. +func (f *FileInfo) AddLine(offset int) { + if offset < 0 { + panic(fmt.Sprintf("invalid offset: %d must not be negative", offset)) + } + if offset > len(f.data) { + panic(fmt.Sprintf("invalid offset: %d is greater than file size %d", offset, len(f.data))) + } + + if len(f.lines) > 0 { + lastOffset := f.lines[len(f.lines)-1] + if offset <= lastOffset { + panic(fmt.Sprintf("invalid offset: %d is not greater than previously observed line offset %d", offset, lastOffset)) + } + } + + f.lines = append(f.lines, offset) +} + +// AddToken adds info about a token at the given location to this file. It +// returns a value that allows access to all of the token's details. +func (f *FileInfo) AddToken(offset, length int) Token { + if offset < 0 { + panic(fmt.Sprintf("invalid offset: %d must not be negative", offset)) + } + if length < 0 { + panic(fmt.Sprintf("invalid length: %d must not be negative", length)) + } + if offset+length > len(f.data) { + panic(fmt.Sprintf("invalid offset+length: %d is greater than file size %d", offset+length, len(f.data))) + } + + tokenID := len(f.items) + if len(f.items) > 0 { + lastToken := f.items[tokenID-1] + lastEnd := lastToken.offset + lastToken.length - 1 + if offset <= lastEnd { + panic(fmt.Sprintf("invalid offset: %d is not greater than previously observed token end %d", offset, lastEnd)) + } + } + + f.items = append(f.items, itemSpan{offset: offset, length: length}) + return Token(tokenID) +} + +// AddComment adds info about a comment to this file. Comments must first be +// added as items via f.AddToken(). The given comment argument is the Token +// from that step. The given attributedTo argument indicates another token in the +// file with which the comment is associated. If comment's offset is before that +// of attributedTo, then this is a leading comment. Otherwise, it is a trailing +// comment. 
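+//
+// For illustration, a lexer attributing a leading comment to the token that
+// follows it would do so roughly like this (offsets and lengths are
+// placeholders):
+//
+//	cmt := f.AddToken(cmtOffset, cmtLen) // the comment's own item
+//	tok := f.AddToken(tokOffset, tokLen) // the token it is attributed to
+//	f.AddComment(cmt, tok)               // cmt precedes tok: leading comment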
+func (f *FileInfo) AddComment(comment, attributedTo Token) Comment { + if len(f.comments) > 0 { + lastComment := f.comments[len(f.comments)-1] + if int(comment) <= lastComment.index { + panic(fmt.Sprintf("invalid index: %d is not greater than previously observed comment index %d", comment, lastComment.index)) + } + if int(attributedTo) < lastComment.attributedToIndex { + panic(fmt.Sprintf("invalid attribution: %d is not greater than previously observed comment attribution index %d", attributedTo, lastComment.attributedToIndex)) + } + } + + f.comments = append(f.comments, commentInfo{index: int(comment), attributedToIndex: int(attributedTo)}) + return Comment{ + fileInfo: f, + index: len(f.comments) - 1, + } +} + +// NodeInfo returns details from the original source for the given AST node. +// +// If the given n is out of range, this returns an invalid NodeInfo (i.e. +// nodeInfo.IsValid() returns false). If the given n is not out of range but +// also from a different file than f, then the result is undefined. +func (f *FileInfo) NodeInfo(n Node) NodeInfo { + return f.nodeInfo(int(n.Start()), int(n.End())) +} + +// TokenInfo returns details from the original source for the given token. +// +// If the given t is out of range, this returns an invalid NodeInfo (i.e. +// nodeInfo.IsValid() returns false). If the given t is not out of range but +// also from a different file than f, then the result is undefined. +func (f *FileInfo) TokenInfo(t Token) NodeInfo { + return f.nodeInfo(int(t), int(t)) +} + +func (f *FileInfo) nodeInfo(start, end int) NodeInfo { + if start < 0 || start >= len(f.items) { + return NodeInfo{fileInfo: f} + } + if end < 0 || end >= len(f.items) { + return NodeInfo{fileInfo: f} + } + return NodeInfo{fileInfo: f, startIndex: start, endIndex: end} +} + +// ItemInfo returns details from the original source for the given item. +// +// If the given i is out of range, this returns nil. If the given i is not +// out of range but also from a different file than f, then the result is +// undefined. +func (f *FileInfo) ItemInfo(i Item) ItemInfo { + tok, cmt := f.GetItem(i) + if tok != TokenError { + return f.TokenInfo(tok) + } + if cmt.IsValid() { + return cmt + } + return nil +} + +// GetItem returns the token or comment represented by the given item. Only one +// of the return values will be valid. If the item is a token then the returned +// comment will be a zero value and thus invalid (i.e. comment.IsValid() returns +// false). If the item is a comment then the returned token will be TokenError. +// +// If the given i is out of range, this returns (TokenError, Comment{}). If the +// given i is not out of range but also from a different file than f, then +// the result is undefined. +func (f *FileInfo) GetItem(i Item) (Token, Comment) { + if i < 0 || int(i) >= len(f.items) { + return TokenError, Comment{} + } + if !f.isComment(i) { + return Token(i), Comment{} + } + // It's a comment, so find its location in f.comments + c := sort.Search(len(f.comments), func(c int) bool { + return f.comments[c].index >= int(i) + }) + if c < len(f.comments) && f.comments[c].index == int(i) { + return TokenError, Comment{fileInfo: f, index: c} + } + // f.isComment(i) returned true, but we couldn't find it + // in f.comments? Uh oh... that shouldn't be possible. + return TokenError, Comment{} +} + +func (f *FileInfo) isDummyFile() bool { + return f == nil || f.lines == nil +} + +// Sequence represents a navigable sequence of elements. 
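+//
+// For example, all tokens in a file can be visited in order with a sketch
+// like the following (f is the *FileInfo for the file):
+//
+//	seq := f.Tokens()
+//	for tok, ok := seq.First(); ok; tok, ok = seq.Next(tok) {
+//		info := f.TokenInfo(tok)
+//		fmt.Println(info.Start(), info.RawText())
+//	}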
+type Sequence[T any] interface { + // First returns the first element in the sequence. The bool return + // is false if this sequence contains no elements. For example, an + // empty file has no items or tokens. + First() (T, bool) + // Next returns the next element in the sequence that comes after + // the given element. The bool returns is false if there is no next + // item (i.e. the given element is the last one). It also returns + // false if the given element is invalid. + Next(T) (T, bool) + // Last returns the last element in the sequence. The bool return + // is false if this sequence contains no elements. For example, an + // empty file has no items or tokens. + Last() (T, bool) + // Previous returns the previous element in the sequence that comes + // before the given element. The bool returns is false if there is no + // previous item (i.e. the given element is the first one). It also + // returns false if the given element is invalid. + Previous(T) (T, bool) +} + +func (f *FileInfo) Items() Sequence[Item] { + return items{fileInfo: f} +} + +func (f *FileInfo) Tokens() Sequence[Token] { + return tokens{fileInfo: f} +} + +type items struct { + fileInfo *FileInfo +} + +func (i items) First() (Item, bool) { + if len(i.fileInfo.items) == 0 { + return 0, false + } + return 0, true +} + +func (i items) Next(item Item) (Item, bool) { + if item < 0 || int(item) >= len(i.fileInfo.items)-1 { + return 0, false + } + return i.fileInfo.itemForward(item+1, true) +} + +func (i items) Last() (Item, bool) { + if len(i.fileInfo.items) == 0 { + return 0, false + } + return Item(len(i.fileInfo.items) - 1), true +} + +func (i items) Previous(item Item) (Item, bool) { + if item <= 0 || int(item) >= len(i.fileInfo.items) { + return 0, false + } + return i.fileInfo.itemBackward(item-1, true) +} + +type tokens struct { + fileInfo *FileInfo +} + +func (t tokens) First() (Token, bool) { + i, ok := t.fileInfo.itemForward(0, false) + return Token(i), ok +} + +func (t tokens) Next(tok Token) (Token, bool) { + if tok < 0 || int(tok) >= len(t.fileInfo.items)-1 { + return 0, false + } + i, ok := t.fileInfo.itemForward(Item(tok+1), false) + return Token(i), ok +} + +func (t tokens) Last() (Token, bool) { + i, ok := t.fileInfo.itemBackward(Item(len(t.fileInfo.items))-1, false) + return Token(i), ok +} + +func (t tokens) Previous(tok Token) (Token, bool) { + if tok <= 0 || int(tok) >= len(t.fileInfo.items) { + return 0, false + } + i, ok := t.fileInfo.itemBackward(Item(tok-1), false) + return Token(i), ok +} + +func (f *FileInfo) itemForward(i Item, allowComment bool) (Item, bool) { + end := Item(len(f.items)) + for i < end { + if allowComment || !f.isComment(i) { + return i, true + } + i++ + } + return 0, false +} + +func (f *FileInfo) itemBackward(i Item, allowComment bool) (Item, bool) { + for i >= 0 { + if allowComment || !f.isComment(i) { + return i, true + } + i-- + } + return 0, false +} + +// isComment is comment returns true if i refers to a comment. +// (If it returns false, i refers to a token.) 
+func (f *FileInfo) isComment(i Item) bool { + item := f.items[i] + if item.length < 2 { + return false + } + // see if item text starts with "//" or "/*" + if f.data[item.offset] != '/' { + return false + } + c := f.data[item.offset+1] + return c == '/' || c == '*' +} + +func (f *FileInfo) SourcePos(offset int) SourcePos { + lineNumber := sort.Search(len(f.lines), func(n int) bool { + return f.lines[n] > offset + }) + + // If it weren't for tabs and multibyte unicode characters, we + // could trivially compute the column just based on offset and the + // starting offset of lineNumber :( + // Wish this were more efficient... that would require also storing + // computed line+column information, which would triple the size of + // f's items slice... + col := 0 + for i := f.lines[lineNumber-1]; i < offset; i++ { + if f.data[i] == '\t' { + nextTabStop := 8 - (col % 8) + col += nextTabStop + } else if utf8.RuneStart(f.data[i]) { + col++ + } + } + + return SourcePos{ + Filename: f.name, + Offset: offset, + Line: lineNumber, + // Columns are 1-indexed in this AST + Col: col + 1, + } +} + +// Token represents a single lexed token. +type Token int + +// TokenError indicates an invalid token. It is returned from query +// functions when no valid token satisfies the request. +const TokenError = Token(-1) + +// AsItem returns the Item that corresponds to t. +func (t Token) AsItem() Item { + return Item(t) +} + +func (t Token) asTerminalNode() terminalNode { + return terminalNode(t) +} + +// Item represents an item lexed from source. It represents either +// a Token or a Comment. +type Item int + +// ItemInfo provides details about an item's location in the source file and +// its contents. +type ItemInfo interface { + SourceSpan + LeadingWhitespace() string + RawText() string +} + +// NodeInfo represents the details for a node or token in the source file's AST. +// It provides access to information about the node's location in the source +// file. It also provides access to the original text in the source file (with +// all the original formatting intact) and also provides access to surrounding +// comments. +type NodeInfo struct { + fileInfo *FileInfo + startIndex, endIndex int +} + +var _ ItemInfo = NodeInfo{} + +// IsValid returns true if this node info is valid. If n is a zero-value struct, +// it is not valid. +func (n NodeInfo) IsValid() bool { + return n.fileInfo != nil +} + +// Start returns the starting position of the element. This is the first +// character of the node or token. +func (n NodeInfo) Start() SourcePos { + if n.fileInfo.isDummyFile() { + return UnknownPos(n.fileInfo.name) + } + + tok := n.fileInfo.items[n.startIndex] + return n.fileInfo.SourcePos(tok.offset) +} + +// End returns the ending position of the element, exclusive. This is the +// location after the last character of the node or token. If n returns +// the same position for Start() and End(), the element in source had a +// length of zero (which should only happen for the special EOF token +// that designates the end of the file). +func (n NodeInfo) End() SourcePos { + if n.fileInfo.isDummyFile() { + return UnknownPos(n.fileInfo.name) + } + + tok := n.fileInfo.items[n.endIndex] + // find offset of last character in the span + offset := tok.offset + if tok.length > 0 { + offset += tok.length - 1 + } + pos := n.fileInfo.SourcePos(offset) + if tok.length > 0 { + // We return "open range", so end is the position *after* the + // last character in the span. 
So we adjust + pos.Col++ + } + return pos +} + +// LeadingWhitespace returns any whitespace prior to the element. If there +// were comments in between this element and the previous one, this will +// return the whitespace between the last such comment in the element. If +// there were no such comments, this returns the whitespace between the +// previous element and the current one. +func (n NodeInfo) LeadingWhitespace() string { + if n.fileInfo.isDummyFile() { + return "" + } + + tok := n.fileInfo.items[n.startIndex] + var prevEnd int + if n.startIndex > 0 { + prevTok := n.fileInfo.items[n.startIndex-1] + prevEnd = prevTok.offset + prevTok.length + } + return string(n.fileInfo.data[prevEnd:tok.offset]) +} + +// LeadingComments returns all comments in the source that exist between the +// element and the previous element, except for any trailing comment on the +// previous element. +func (n NodeInfo) LeadingComments() Comments { + if n.fileInfo.isDummyFile() { + return EmptyComments + } + + start := sort.Search(len(n.fileInfo.comments), func(i int) bool { + return n.fileInfo.comments[i].attributedToIndex >= n.startIndex + }) + + if start == len(n.fileInfo.comments) || n.fileInfo.comments[start].attributedToIndex != n.startIndex { + // no comments associated with this token + return EmptyComments + } + + numComments := 0 + for i := start; i < len(n.fileInfo.comments); i++ { + comment := n.fileInfo.comments[i] + if comment.attributedToIndex == n.startIndex && + comment.index < n.startIndex { + numComments++ + } else { + break + } + } + + return Comments{ + fileInfo: n.fileInfo, + first: start, + num: numComments, + } +} + +// TrailingComments returns the trailing comment for the element, if any. +// An element will have a trailing comment only if it is the last token +// on a line and is followed by a comment on the same line. Typically, the +// following comment is a line-style comment (starting with "//"). +// +// If the following comment is a block-style comment that spans multiple +// lines, and the next token is on the same line as the end of the comment, +// the comment is NOT considered a trailing comment. +// +// Examples: +// +// foo // this is a trailing comment for foo +// +// bar /* this is a trailing comment for bar */ +// +// baz /* this is a trailing +// comment for baz */ +// +// fizz /* this is NOT a trailing +// comment for fizz because +// its on the same line as the +// following token buzz */ buzz +func (n NodeInfo) TrailingComments() Comments { + if n.fileInfo.isDummyFile() { + return EmptyComments + } + + start := sort.Search(len(n.fileInfo.comments), func(i int) bool { + comment := n.fileInfo.comments[i] + return comment.attributedToIndex >= n.endIndex && + comment.index > n.endIndex + }) + + if start == len(n.fileInfo.comments) || n.fileInfo.comments[start].attributedToIndex != n.endIndex { + // no comments associated with this token + return EmptyComments + } + + numComments := 0 + for i := start; i < len(n.fileInfo.comments); i++ { + comment := n.fileInfo.comments[i] + if comment.attributedToIndex == n.endIndex { + numComments++ + } else { + break + } + } + + return Comments{ + fileInfo: n.fileInfo, + first: start, + num: numComments, + } +} + +// RawText returns the actual text in the source file that corresponds to the +// element. If the element is a node in the AST that encompasses multiple +// items (like an entire declaration), the full text of all items is returned +// including any interior whitespace and comments. 
+func (n NodeInfo) RawText() string { + startTok := n.fileInfo.items[n.startIndex] + endTok := n.fileInfo.items[n.endIndex] + return string(n.fileInfo.data[startTok.offset : endTok.offset+endTok.length]) +} + +// SourcePos identifies a location in a proto source file. +type SourcePos struct { + Filename string + // The line and column numbers for this position. These are + // one-based, so the first line and column is 1 (not zero). If + // either is zero, then the line and column are unknown and + // only the file name is known. + Line, Col int + // The offset, in bytes, from the beginning of the file. This + // is zero-based: the first character in the file is offset zero. + Offset int +} + +func (pos SourcePos) String() string { + if pos.Line <= 0 || pos.Col <= 0 { + return pos.Filename + } + return fmt.Sprintf("%s:%d:%d", pos.Filename, pos.Line, pos.Col) +} + +// SourceSpan represents a range of source positions. +type SourceSpan interface { + Start() SourcePos + End() SourcePos +} + +// NewSourceSpan creates a new span that covers the given range. +func NewSourceSpan(start SourcePos, end SourcePos) SourceSpan { + return sourceSpan{StartPos: start, EndPos: end} +} + +type sourceSpan struct { + StartPos SourcePos + EndPos SourcePos +} + +func (p sourceSpan) Start() SourcePos { + return p.StartPos +} + +func (p sourceSpan) End() SourcePos { + return p.EndPos +} + +var _ SourceSpan = sourceSpan{} + +// Comments represents a range of sequential comments in a source file +// (e.g. no interleaving items or AST nodes). +type Comments struct { + fileInfo *FileInfo + first, num int +} + +// EmptyComments is an empty set of comments. +var EmptyComments = Comments{} + +// Len returns the number of comments in c. +func (c Comments) Len() int { + return c.num +} + +func (c Comments) Index(i int) Comment { + if i < 0 || i >= c.num { + panic(fmt.Sprintf("index %d out of range (len = %d)", i, c.num)) + } + return Comment{ + fileInfo: c.fileInfo, + index: c.first + i, + } +} + +// Comment represents a single comment in a source file. It indicates +// the position of the comment and its contents. A single comment means +// one line-style comment ("//" to end of line) or one block comment +// ("/*" through "*/"). If a longer comment uses multiple line comments, +// each line is considered to be a separate comment. For example: +// +// // This is a single comment, and +// // this is a separate comment. +type Comment struct { + fileInfo *FileInfo + index int +} + +var _ ItemInfo = Comment{} + +// IsValid returns true if this comment is valid. If this comment is +// a zero-value struct, it is not valid. +func (c Comment) IsValid() bool { + return c.fileInfo != nil && c.index >= 0 +} + +// AsItem returns the Item that corresponds to c. 
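Taken together, NodeInfo, Comments, and SourcePos provide everything needed to report where an AST element came from and what surrounded it. A minimal sketch (describeNode is a hypothetical helper, not part of the library; it only uses the accessors defined above):

package astexample

import (
	"fmt"

	"github.com/bufbuild/protocompile/ast"
)

// describeNode prints a node's span, its raw source text, and any
// leading comments attributed to it.
func describeNode(fi *ast.FileInfo, n ast.Node) {
	info := fi.NodeInfo(n)
	if !info.IsValid() {
		return // n was out of range for this file
	}
	// SourcePos implements String, so %s renders file:line:col.
	fmt.Printf("%s to %s: %q\n", info.Start(), info.End(), info.RawText())
	lead := info.LeadingComments()
	for i := 0; i < lead.Len(); i++ {
		fmt.Println("  leading comment:", lead.Index(i).RawText())
	}
}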
+func (c Comment) AsItem() Item { + return Item(c.fileInfo.comments[c.index].index) +} + +func (c Comment) Start() SourcePos { + span := c.fileInfo.items[c.AsItem()] + return c.fileInfo.SourcePos(span.offset) +} + +func (c Comment) End() SourcePos { + span := c.fileInfo.items[c.AsItem()] + return c.fileInfo.SourcePos(span.offset + span.length - 1) +} + +func (c Comment) LeadingWhitespace() string { + item := c.AsItem() + span := c.fileInfo.items[item] + var prevEnd int + if item > 0 { + prevItem := c.fileInfo.items[item-1] + prevEnd = prevItem.offset + prevItem.length + } + return string(c.fileInfo.data[prevEnd:span.offset]) +} + +func (c Comment) RawText() string { + span := c.fileInfo.items[c.AsItem()] + return string(c.fileInfo.data[span.offset : span.offset+span.length]) +} diff --git a/vendor/github.com/bufbuild/protocompile/ast/identifiers.go b/vendor/github.com/bufbuild/protocompile/ast/identifiers.go new file mode 100644 index 00000000..511389d7 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/ast/identifiers.go @@ -0,0 +1,153 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ast + +import ( + "fmt" + "strings" +) + +// Identifier is a possibly-qualified name. This is used to distinguish +// ValueNode values that are references/identifiers vs. those that are +// string literals. +type Identifier string + +// IdentValueNode is an AST node that represents an identifier. +type IdentValueNode interface { + ValueNode + AsIdentifier() Identifier +} + +var _ IdentValueNode = (*IdentNode)(nil) +var _ IdentValueNode = (*CompoundIdentNode)(nil) + +// IdentNode represents a simple, unqualified identifier. These are used to name +// elements declared in a protobuf file or to refer to elements. Example: +// +// foobar +type IdentNode struct { + terminalNode + Val string +} + +// NewIdentNode creates a new *IdentNode. The given val is the identifier text. +func NewIdentNode(val string, tok Token) *IdentNode { + return &IdentNode{ + terminalNode: tok.asTerminalNode(), + Val: val, + } +} + +func (n *IdentNode) Value() interface{} { + return n.AsIdentifier() +} + +func (n *IdentNode) AsIdentifier() Identifier { + return Identifier(n.Val) +} + +// ToKeyword is used to convert identifiers to keywords. Since keywords are not +// reserved in the protobuf language, they are initially lexed as identifiers +// and then converted to keywords based on context. +func (n *IdentNode) ToKeyword() *KeywordNode { + return (*KeywordNode)(n) +} + +// CompoundIdentNode represents a qualified identifier. A qualified identifier +// has at least one dot and possibly multiple identifier names (all separated by +// dots). If the identifier has a leading dot, then it is a *fully* qualified +// identifier. Example: +// +// .com.foobar.Baz +type CompoundIdentNode struct { + compositeNode + // Optional leading dot, indicating that the identifier is fully qualified. 
+ LeadingDot *RuneNode + Components []*IdentNode + // Dots[0] is the dot after Components[0]. The length of Dots is always + // one less than the length of Components. + Dots []*RuneNode + // The text value of the identifier, with all components and dots + // concatenated. + Val string +} + +// NewCompoundIdentNode creates a *CompoundIdentNode. The leadingDot may be nil. +// The dots arg must have a length that is one less than the length of +// components. The components arg must not be empty. +func NewCompoundIdentNode(leadingDot *RuneNode, components []*IdentNode, dots []*RuneNode) *CompoundIdentNode { + if len(components) == 0 { + panic("must have at least one component") + } + if len(dots) != len(components)-1 && len(dots) != len(components) { + panic(fmt.Sprintf("%d components requires %d dots, not %d", len(components), len(components)-1, len(dots))) + } + numChildren := len(components) + len(dots) + if leadingDot != nil { + numChildren++ + } + children := make([]Node, 0, numChildren) + var b strings.Builder + if leadingDot != nil { + children = append(children, leadingDot) + b.WriteRune(leadingDot.Rune) + } + for i, comp := range components { + if i > 0 { + dot := dots[i-1] + children = append(children, dot) + b.WriteRune(dot.Rune) + } + children = append(children, comp) + b.WriteString(comp.Val) + } + if len(dots) == len(components) { + dot := dots[len(dots)-1] + children = append(children, dot) + b.WriteRune(dot.Rune) + } + return &CompoundIdentNode{ + compositeNode: compositeNode{ + children: children, + }, + LeadingDot: leadingDot, + Components: components, + Dots: dots, + Val: b.String(), + } +} + +func (n *CompoundIdentNode) Value() interface{} { + return n.AsIdentifier() +} + +func (n *CompoundIdentNode) AsIdentifier() Identifier { + return Identifier(n.Val) +} + +// KeywordNode is an AST node that represents a keyword. Keywords are +// like identifiers, but they have special meaning in particular contexts. +// Example: +// +// message +type KeywordNode IdentNode + +// NewKeywordNode creates a new *KeywordNode. The given val is the keyword. +func NewKeywordNode(val string, tok Token) *KeywordNode { + return &KeywordNode{ + terminalNode: tok.asTerminalNode(), + Val: val, + } +} diff --git a/vendor/github.com/bufbuild/protocompile/ast/message.go b/vendor/github.com/bufbuild/protocompile/ast/message.go new file mode 100644 index 00000000..eede28ed --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/ast/message.go @@ -0,0 +1,223 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ast + +import "fmt" + +// MessageDeclNode is a node in the AST that defines a message type. 
This +// includes normal message fields as well as implicit messages: +// - *MessageNode +// - *SyntheticGroupMessageNode (the group is a field and inline message type) +// - *SyntheticMapEntryNode (map fields implicitly define a MapEntry message type) +// +// This also allows NoSourceNode to be used in place of one of the above +// for some usages. +type MessageDeclNode interface { + NodeWithOptions + MessageName() Node +} + +var _ MessageDeclNode = (*MessageNode)(nil) +var _ MessageDeclNode = (*SyntheticGroupMessageNode)(nil) +var _ MessageDeclNode = (*SyntheticMapEntryNode)(nil) +var _ MessageDeclNode = (*NoSourceNode)(nil) + +// MessageNode represents a message declaration. Example: +// +// message Foo { +// string name = 1; +// repeated string labels = 2; +// bytes extra = 3; +// } +type MessageNode struct { + compositeNode + Keyword *KeywordNode + Name *IdentNode + MessageBody +} + +func (*MessageNode) fileElement() {} +func (*MessageNode) msgElement() {} + +// NewMessageNode creates a new *MessageNode. All arguments must be non-nil. +// - keyword: The token corresponding to the "message" keyword. +// - name: The token corresponding to the field's name. +// - openBrace: The token corresponding to the "{" rune that starts the body. +// - decls: All declarations inside the message body. +// - closeBrace: The token corresponding to the "}" rune that ends the body. +func NewMessageNode(keyword *KeywordNode, name *IdentNode, openBrace *RuneNode, decls []MessageElement, closeBrace *RuneNode) *MessageNode { + if keyword == nil { + panic("keyword is nil") + } + if name == nil { + panic("name is nil") + } + if openBrace == nil { + panic("openBrace is nil") + } + if closeBrace == nil { + panic("closeBrace is nil") + } + children := make([]Node, 0, 4+len(decls)) + children = append(children, keyword, name, openBrace) + for _, decl := range decls { + children = append(children, decl) + } + children = append(children, closeBrace) + + ret := &MessageNode{ + compositeNode: compositeNode{ + children: children, + }, + Keyword: keyword, + Name: name, + } + populateMessageBody(&ret.MessageBody, openBrace, decls, closeBrace) + return ret +} + +func (n *MessageNode) MessageName() Node { + return n.Name +} + +func (n *MessageNode) RangeOptions(fn func(*OptionNode) bool) { + for _, decl := range n.Decls { + if opt, ok := decl.(*OptionNode); ok { + if !fn(opt) { + return + } + } + } +} + +// MessageBody represents the body of a message. It is used by both +// MessageNodes and GroupNodes. +type MessageBody struct { + OpenBrace *RuneNode + Decls []MessageElement + CloseBrace *RuneNode +} + +func populateMessageBody(m *MessageBody, openBrace *RuneNode, decls []MessageElement, closeBrace *RuneNode) { + m.OpenBrace = openBrace + m.Decls = decls + for _, decl := range decls { + switch decl.(type) { + case *OptionNode, *FieldNode, *MapFieldNode, *GroupNode, *OneofNode, + *MessageNode, *EnumNode, *ExtendNode, *ExtensionRangeNode, + *ReservedNode, *EmptyDeclNode: + default: + panic(fmt.Sprintf("invalid MessageElement type: %T", decl)) + } + } + m.CloseBrace = closeBrace +} + +// MessageElement is an interface implemented by all AST nodes that can +// appear in a message body. 
+type MessageElement interface { + Node + msgElement() +} + +var _ MessageElement = (*OptionNode)(nil) +var _ MessageElement = (*FieldNode)(nil) +var _ MessageElement = (*MapFieldNode)(nil) +var _ MessageElement = (*OneofNode)(nil) +var _ MessageElement = (*GroupNode)(nil) +var _ MessageElement = (*MessageNode)(nil) +var _ MessageElement = (*EnumNode)(nil) +var _ MessageElement = (*ExtendNode)(nil) +var _ MessageElement = (*ExtensionRangeNode)(nil) +var _ MessageElement = (*ReservedNode)(nil) +var _ MessageElement = (*EmptyDeclNode)(nil) + +// ExtendNode represents a declaration of extension fields. Example: +// +// extend google.protobuf.FieldOptions { +// bool redacted = 33333; +// } +type ExtendNode struct { + compositeNode + Keyword *KeywordNode + Extendee IdentValueNode + OpenBrace *RuneNode + Decls []ExtendElement + CloseBrace *RuneNode +} + +func (*ExtendNode) fileElement() {} +func (*ExtendNode) msgElement() {} + +// NewExtendNode creates a new *ExtendNode. All arguments must be non-nil. +// - keyword: The token corresponding to the "extend" keyword. +// - extendee: The token corresponding to the name of the extended message. +// - openBrace: The token corresponding to the "{" rune that starts the body. +// - decls: All declarations inside the message body. +// - closeBrace: The token corresponding to the "}" rune that ends the body. +func NewExtendNode(keyword *KeywordNode, extendee IdentValueNode, openBrace *RuneNode, decls []ExtendElement, closeBrace *RuneNode) *ExtendNode { + if keyword == nil { + panic("keyword is nil") + } + if extendee == nil { + panic("extendee is nil") + } + if openBrace == nil { + panic("openBrace is nil") + } + if closeBrace == nil { + panic("closeBrace is nil") + } + children := make([]Node, 0, 4+len(decls)) + children = append(children, keyword, extendee, openBrace) + for _, decl := range decls { + children = append(children, decl) + } + children = append(children, closeBrace) + + ret := &ExtendNode{ + compositeNode: compositeNode{ + children: children, + }, + Keyword: keyword, + Extendee: extendee, + OpenBrace: openBrace, + Decls: decls, + CloseBrace: closeBrace, + } + for _, decl := range decls { + switch decl := decl.(type) { + case *FieldNode: + decl.Extendee = ret + case *GroupNode: + decl.Extendee = ret + case *EmptyDeclNode: + default: + panic(fmt.Sprintf("invalid ExtendElement type: %T", decl)) + } + } + return ret +} + +// ExtendElement is an interface implemented by all AST nodes that can +// appear in the body of an extends declaration. +type ExtendElement interface { + Node + extendElement() +} + +var _ ExtendElement = (*FieldNode)(nil) +var _ ExtendElement = (*GroupNode)(nil) +var _ ExtendElement = (*EmptyDeclNode)(nil) diff --git a/vendor/github.com/bufbuild/protocompile/ast/no_source.go b/vendor/github.com/bufbuild/protocompile/ast/no_source.go new file mode 100644 index 00000000..44dbb714 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/ast/no_source.go @@ -0,0 +1,142 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package ast + +// UnknownPos is a placeholder position when only the source file +// name is known. +func UnknownPos(filename string) SourcePos { + return SourcePos{Filename: filename} +} + +// UnknownSpan is a placeholder span when only the source file +// name is known. +func UnknownSpan(filename string) SourceSpan { + return unknownSpan{filename: filename} +} + +type unknownSpan struct { + filename string +} + +func (s unknownSpan) Start() SourcePos { + return UnknownPos(s.filename) +} + +func (s unknownSpan) End() SourcePos { + return UnknownPos(s.filename) +} + +// NoSourceNode is a placeholder AST node that implements numerous +// interfaces in this package. It can be used to represent an AST +// element for a file whose source is not available. +type NoSourceNode FileInfo + +// NewNoSourceNode creates a new NoSourceNode for the given filename. +func NewNoSourceNode(filename string) *NoSourceNode { + return &NoSourceNode{name: filename} +} + +func (n *NoSourceNode) Name() string { + return n.name +} + +func (n *NoSourceNode) Start() Token { + return 0 +} + +func (n *NoSourceNode) End() Token { + return 0 +} + +func (n *NoSourceNode) NodeInfo(Node) NodeInfo { + return NodeInfo{ + fileInfo: (*FileInfo)(n), + } +} + +func (n *NoSourceNode) GetSyntax() Node { + return n +} + +func (n *NoSourceNode) GetName() Node { + return n +} + +func (n *NoSourceNode) GetValue() ValueNode { + return n +} + +func (n *NoSourceNode) FieldLabel() Node { + return n +} + +func (n *NoSourceNode) FieldName() Node { + return n +} + +func (n *NoSourceNode) FieldType() Node { + return n +} + +func (n *NoSourceNode) FieldTag() Node { + return n +} + +func (n *NoSourceNode) FieldExtendee() Node { + return n +} + +func (n *NoSourceNode) GetGroupKeyword() Node { + return n +} + +func (n *NoSourceNode) GetOptions() *CompactOptionsNode { + return nil +} + +func (n *NoSourceNode) RangeStart() Node { + return n +} + +func (n *NoSourceNode) RangeEnd() Node { + return n +} + +func (n *NoSourceNode) GetNumber() Node { + return n +} + +func (n *NoSourceNode) MessageName() Node { + return n +} + +func (n *NoSourceNode) OneofName() Node { + return n +} + +func (n *NoSourceNode) GetInputType() Node { + return n +} + +func (n *NoSourceNode) GetOutputType() Node { + return n +} + +func (n *NoSourceNode) Value() interface{} { + return nil +} + +func (n *NoSourceNode) RangeOptions(func(*OptionNode) bool) { +} diff --git a/vendor/github.com/bufbuild/protocompile/ast/node.go b/vendor/github.com/bufbuild/protocompile/ast/node.go new file mode 100644 index 00000000..abb76430 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/ast/node.go @@ -0,0 +1,139 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ast + +// Node is the interface implemented by all nodes in the AST. 
It +// provides information about the span of this AST node in terms +// of location in the source file. It also provides information +// about all prior comments (attached as leading comments) and +// optional subsequent comments (attached as trailing comments). +type Node interface { + Start() Token + End() Token +} + +// TerminalNode represents a leaf in the AST. These represent +// the items/lexemes in the protobuf language. Comments and +// whitespace are accumulated by the lexer and associated with +// the following lexed token. +type TerminalNode interface { + Node + Token() Token +} + +var _ TerminalNode = (*StringLiteralNode)(nil) +var _ TerminalNode = (*UintLiteralNode)(nil) +var _ TerminalNode = (*FloatLiteralNode)(nil) +var _ TerminalNode = (*IdentNode)(nil) +var _ TerminalNode = (*SpecialFloatLiteralNode)(nil) +var _ TerminalNode = (*KeywordNode)(nil) +var _ TerminalNode = (*RuneNode)(nil) + +// CompositeNode represents any non-terminal node in the tree. These +// are interior or root nodes and have child nodes. +type CompositeNode interface { + Node + // Children contains all AST nodes that are immediate children of this one. + Children() []Node +} + +// terminalNode contains bookkeeping shared by all TerminalNode +// implementations. It is embedded in all such node types in this +// package. It provides the implementation of the TerminalNode +// interface. +type terminalNode Token + +func (n terminalNode) Start() Token { + return Token(n) +} + +func (n terminalNode) End() Token { + return Token(n) +} + +func (n terminalNode) Token() Token { + return Token(n) +} + +// compositeNode contains bookkeeping shared by all CompositeNode +// implementations. It is embedded in all such node types in this +// package. It provides the implementation of the CompositeNode +// interface. +type compositeNode struct { + children []Node +} + +func (n *compositeNode) Children() []Node { + return n.children +} + +func (n *compositeNode) Start() Token { + return n.children[0].Start() +} + +func (n *compositeNode) End() Token { + return n.children[len(n.children)-1].End() +} + +// RuneNode represents a single rune in protobuf source. Runes +// are typically collected into items, but some runes stand on +// their own, such as punctuation/symbols like commas, semicolons, +// equals signs, open and close symbols (braces, brackets, angles, +// and parentheses), and periods/dots. +// TODO: make this more compact; if runes don't have attributed comments +// then we don't need a Token to represent them and only need an offset +// into the file's contents. +type RuneNode struct { + terminalNode + Rune rune +} + +// NewRuneNode creates a new *RuneNode with the given properties. +func NewRuneNode(r rune, tok Token) *RuneNode { + return &RuneNode{ + terminalNode: tok.asTerminalNode(), + Rune: r, + } +} + +// EmptyDeclNode represents an empty declaration in protobuf source. +// These amount to extra semicolons, with no actual content preceding +// the semicolon. +type EmptyDeclNode struct { + compositeNode + Semicolon *RuneNode +} + +// NewEmptyDeclNode creates a new *EmptyDeclNode. The one argument must +// be non-nil. 
+func NewEmptyDeclNode(semicolon *RuneNode) *EmptyDeclNode { + if semicolon == nil { + panic("semicolon is nil") + } + return &EmptyDeclNode{ + compositeNode: compositeNode{ + children: []Node{semicolon}, + }, + Semicolon: semicolon, + } +} + +func (e *EmptyDeclNode) fileElement() {} +func (e *EmptyDeclNode) msgElement() {} +func (e *EmptyDeclNode) extendElement() {} +func (e *EmptyDeclNode) oneofElement() {} +func (e *EmptyDeclNode) enumElement() {} +func (e *EmptyDeclNode) serviceElement() {} +func (e *EmptyDeclNode) methodElement() {} diff --git a/vendor/github.com/bufbuild/protocompile/ast/options.go b/vendor/github.com/bufbuild/protocompile/ast/options.go new file mode 100644 index 00000000..be31f0b4 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/ast/options.go @@ -0,0 +1,413 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ast + +import "fmt" + +// OptionDeclNode is a placeholder interface for AST nodes that represent +// options. This allows NoSourceNode to be used in place of *OptionNode +// for some usages. +type OptionDeclNode interface { + Node + GetName() Node + GetValue() ValueNode +} + +var _ OptionDeclNode = (*OptionNode)(nil) +var _ OptionDeclNode = (*NoSourceNode)(nil) + +// OptionNode represents the declaration of a single option for an element. +// It is used both for normal option declarations (start with "option" keyword +// and end with semicolon) and for compact options found in fields, enum values, +// and extension ranges. Example: +// +// option (custom.option) = "foo"; +type OptionNode struct { + compositeNode + Keyword *KeywordNode // absent for compact options + Name *OptionNameNode + Equals *RuneNode + Val ValueNode + Semicolon *RuneNode // absent for compact options +} + +func (*OptionNode) fileElement() {} +func (*OptionNode) msgElement() {} +func (*OptionNode) oneofElement() {} +func (*OptionNode) enumElement() {} +func (*OptionNode) serviceElement() {} +func (*OptionNode) methodElement() {} + +// NewOptionNode creates a new *OptionNode for a full option declaration (as +// used in files, messages, oneofs, enums, services, and methods). All arguments +// must be non-nil. (Also see NewCompactOptionNode.) +// - keyword: The token corresponding to the "option" keyword. +// - name: The token corresponding to the name of the option. +// - equals: The token corresponding to the "=" rune after the name. +// - val: The token corresponding to the option value. +// - semicolon: The token corresponding to the ";" rune that ends the declaration. 
+func NewOptionNode(keyword *KeywordNode, name *OptionNameNode, equals *RuneNode, val ValueNode, semicolon *RuneNode) *OptionNode { + if keyword == nil { + panic("keyword is nil") + } + if name == nil { + panic("name is nil") + } + if equals == nil { + panic("equals is nil") + } + if val == nil { + panic("val is nil") + } + var children []Node + if semicolon == nil { + children = []Node{keyword, name, equals, val} + } else { + children = []Node{keyword, name, equals, val, semicolon} + } + + return &OptionNode{ + compositeNode: compositeNode{ + children: children, + }, + Keyword: keyword, + Name: name, + Equals: equals, + Val: val, + Semicolon: semicolon, + } +} + +// NewCompactOptionNode creates a new *OptionNode for a full compact declaration +// (as used in fields, enum values, and extension ranges). All arguments must be +// non-nil. +// - name: The token corresponding to the name of the option. +// - equals: The token corresponding to the "=" rune after the name. +// - val: The token corresponding to the option value. +func NewCompactOptionNode(name *OptionNameNode, equals *RuneNode, val ValueNode) *OptionNode { + if name == nil { + panic("name is nil") + } + if equals == nil && val != nil { + panic("equals is nil but val is not") + } + if val == nil && equals != nil { + panic("val is nil but equals is not") + } + var children []Node + if equals == nil && val == nil { + children = []Node{name} + } else { + children = []Node{name, equals, val} + } + return &OptionNode{ + compositeNode: compositeNode{ + children: children, + }, + Name: name, + Equals: equals, + Val: val, + } +} + +func (n *OptionNode) GetName() Node { + return n.Name +} + +func (n *OptionNode) GetValue() ValueNode { + return n.Val +} + +// OptionNameNode represents an option name or even a traversal through message +// types to name a nested option field. Example: +// +// (foo.bar).baz.(bob) +type OptionNameNode struct { + compositeNode + Parts []*FieldReferenceNode + // Dots represent the separating '.' characters between name parts. The + // length of this slice must be exactly len(Parts)-1, each item in Parts + // having a corresponding item in this slice *except the last* (since a + // trailing dot is not allowed). + // + // These do *not* include dots that are inside of an extension name. For + // example: (foo.bar).baz.(bob) has three parts: + // 1. (foo.bar) - an extension name + // 2. baz - a regular field in foo.bar + // 3. (bob) - an extension field in baz + // Note that the dot in foo.bar will thus not be present in Dots but is + // instead in Parts[0]. + Dots []*RuneNode +} + +// NewOptionNameNode creates a new *OptionNameNode. The dots arg must have a +// length that is one less than the length of parts. The parts arg must not be +// empty. +func NewOptionNameNode(parts []*FieldReferenceNode, dots []*RuneNode) *OptionNameNode { + if len(parts) == 0 { + panic("must have at least one part") + } + if len(dots) != len(parts)-1 && len(dots) != len(parts) { + panic(fmt.Sprintf("%d parts requires %d dots, not %d", len(parts), len(parts)-1, len(dots))) + } + children := make([]Node, 0, len(parts)+len(dots)) + for i, part := range parts { + if part == nil { + panic(fmt.Sprintf("parts[%d] is nil", i)) + } + if i > 0 { + if dots[i-1] == nil { + panic(fmt.Sprintf("dots[%d] is nil", i-1)) + } + children = append(children, dots[i-1]) + } + children = append(children, part) + } + if len(dots) == len(parts) { // Add the erroneous, but tolerated trailing dot. 
+ if dots[len(dots)-1] == nil { + panic(fmt.Sprintf("dots[%d] is nil", len(dots)-1)) + } + children = append(children, dots[len(dots)-1]) + } + return &OptionNameNode{ + compositeNode: compositeNode{ + children: children, + }, + Parts: parts, + Dots: dots, + } +} + +// FieldReferenceNode is a reference to a field name. It can indicate a regular +// field (simple unqualified name), an extension field (possibly-qualified name +// that is enclosed either in brackets or parentheses), or an "any" type +// reference (a type URL in the form "server.host/fully.qualified.Name" that is +// enclosed in brackets). +// +// Extension names are used in options to refer to custom options (which are +// actually extensions), in which case the name is enclosed in parentheses "(" +// and ")". They can also be used to refer to extension fields of options. +// +// Extension names are also used in message literals to set extension fields, +// in which case the name is enclosed in square brackets "[" and "]". +// +// "Any" type references can only be used in message literals, and are not +// allowed in option names. They are always enclosed in square brackets. An +// "any" type reference is distinguished from an extension name by the presence +// of a slash, which must be present in an "any" type reference and must be +// absent in an extension name. +// +// Examples: +// +// foobar +// (foo.bar) +// [foo.bar] +// [type.googleapis.com/foo.bar] +type FieldReferenceNode struct { + compositeNode + Open *RuneNode // only present for extension names and "any" type references + + // only present for "any" type references + URLPrefix IdentValueNode + Slash *RuneNode + + Name IdentValueNode + + Close *RuneNode // only present for extension names and "any" type references +} + +// NewFieldReferenceNode creates a new *FieldReferenceNode for a regular field. +// The name arg must not be nil. +func NewFieldReferenceNode(name *IdentNode) *FieldReferenceNode { + if name == nil { + panic("name is nil") + } + children := []Node{name} + return &FieldReferenceNode{ + compositeNode: compositeNode{ + children: children, + }, + Name: name, + } +} + +// NewExtensionFieldReferenceNode creates a new *FieldReferenceNode for an +// extension field. All args must be non-nil. The openSym and closeSym runes +// should be "(" and ")" or "[" and "]". +func NewExtensionFieldReferenceNode(openSym *RuneNode, name IdentValueNode, closeSym *RuneNode) *FieldReferenceNode { + if name == nil { + panic("name is nil") + } + if openSym == nil { + panic("openSym is nil") + } + if closeSym == nil { + panic("closeSym is nil") + } + children := []Node{openSym, name, closeSym} + return &FieldReferenceNode{ + compositeNode: compositeNode{ + children: children, + }, + Open: openSym, + Name: name, + Close: closeSym, + } +} + +// NewAnyTypeReferenceNode creates a new *FieldReferenceNode for an "any" +// type reference. All args must be non-nil. The openSym and closeSym runes +// should be "[" and "]". The slashSym run should be "/". 
+func NewAnyTypeReferenceNode(openSym *RuneNode, urlPrefix IdentValueNode, slashSym *RuneNode, name IdentValueNode, closeSym *RuneNode) *FieldReferenceNode { + if name == nil { + panic("name is nil") + } + if openSym == nil { + panic("openSym is nil") + } + if closeSym == nil { + panic("closeSym is nil") + } + if urlPrefix == nil { + panic("urlPrefix is nil") + } + if slashSym == nil { + panic("slashSym is nil") + } + children := []Node{openSym, urlPrefix, slashSym, name, closeSym} + return &FieldReferenceNode{ + compositeNode: compositeNode{ + children: children, + }, + Open: openSym, + URLPrefix: urlPrefix, + Slash: slashSym, + Name: name, + Close: closeSym, + } +} + +// IsExtension reports if this is an extension name or not (e.g. enclosed in +// punctuation, such as parentheses or brackets). +func (a *FieldReferenceNode) IsExtension() bool { + return a.Open != nil && a.Slash == nil +} + +// IsAnyTypeReference reports if this is an Any type reference. +func (a *FieldReferenceNode) IsAnyTypeReference() bool { + return a.Slash != nil +} + +func (a *FieldReferenceNode) Value() string { + if a.Open != nil { + if a.Slash != nil { + return string(a.Open.Rune) + string(a.URLPrefix.AsIdentifier()) + string(a.Slash.Rune) + string(a.Name.AsIdentifier()) + string(a.Close.Rune) + } + return string(a.Open.Rune) + string(a.Name.AsIdentifier()) + string(a.Close.Rune) + } + return string(a.Name.AsIdentifier()) +} + +// CompactOptionsNode represents a compact options declaration, as used with +// fields, enum values, and extension ranges. Example: +// +// [deprecated = true, json_name = "foo_bar"] +type CompactOptionsNode struct { + compositeNode + OpenBracket *RuneNode + Options []*OptionNode + // Commas represent the separating ',' characters between options. The + // length of this slice must be exactly len(Options)-1, with each item + // in Options having a corresponding item in this slice *except the last* + // (since a trailing comma is not allowed). + Commas []*RuneNode + CloseBracket *RuneNode +} + +// NewCompactOptionsNode creates a *CompactOptionsNode. All args must be +// non-nil. The commas arg must have a length that is one less than the +// length of opts. The opts arg must not be empty. +func NewCompactOptionsNode(openBracket *RuneNode, opts []*OptionNode, commas []*RuneNode, closeBracket *RuneNode) *CompactOptionsNode { + if openBracket == nil { + panic("openBracket is nil") + } + if closeBracket == nil { + panic("closeBracket is nil") + } + if len(opts) == 0 && len(commas) != 0 { + panic("opts is empty but commas is not") + } + if len(opts) != len(commas) && len(opts) != len(commas)+1 { + panic(fmt.Sprintf("%d opts requires %d commas, not %d", len(opts), len(opts)-1, len(commas))) + } + children := make([]Node, 0, len(opts)+len(commas)+2) + children = append(children, openBracket) + if len(opts) > 0 { + for i, opt := range opts { + if i > 0 { + if commas[i-1] == nil { + panic(fmt.Sprintf("commas[%d] is nil", i-1)) + } + children = append(children, commas[i-1]) + } + if opt == nil { + panic(fmt.Sprintf("opts[%d] is nil", i)) + } + children = append(children, opt) + } + if len(opts) == len(commas) { // Add the erroneous, but tolerated trailing comma. 
+ if commas[len(commas)-1] == nil { + panic(fmt.Sprintf("commas[%d] is nil", len(commas)-1)) + } + children = append(children, commas[len(commas)-1]) + } + } + children = append(children, closeBracket) + + return &CompactOptionsNode{ + compositeNode: compositeNode{ + children: children, + }, + OpenBracket: openBracket, + Options: opts, + Commas: commas, + CloseBracket: closeBracket, + } +} + +func (e *CompactOptionsNode) GetElements() []*OptionNode { + if e == nil { + return nil + } + return e.Options +} + +// NodeWithOptions represents a node in the AST that contains +// option statements. +type NodeWithOptions interface { + Node + RangeOptions(func(*OptionNode) bool) +} + +var _ NodeWithOptions = FileDeclNode(nil) +var _ NodeWithOptions = MessageDeclNode(nil) +var _ NodeWithOptions = OneofDeclNode(nil) +var _ NodeWithOptions = (*EnumNode)(nil) +var _ NodeWithOptions = (*ServiceNode)(nil) +var _ NodeWithOptions = RPCDeclNode(nil) +var _ NodeWithOptions = FieldDeclNode(nil) +var _ NodeWithOptions = EnumValueDeclNode(nil) +var _ NodeWithOptions = (*ExtensionRangeNode)(nil) +var _ NodeWithOptions = (*NoSourceNode)(nil) diff --git a/vendor/github.com/bufbuild/protocompile/ast/ranges.go b/vendor/github.com/bufbuild/protocompile/ast/ranges.go new file mode 100644 index 00000000..c42908e1 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/ast/ranges.go @@ -0,0 +1,386 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ast + +import "fmt" + +// ExtensionRangeNode represents an extension range declaration in an extendable +// message. Example: +// +// extensions 100 to max; +type ExtensionRangeNode struct { + compositeNode + Keyword *KeywordNode + Ranges []*RangeNode + // Commas represent the separating ',' characters between ranges. The + // length of this slice must be exactly len(Ranges)-1, each item in Ranges + // having a corresponding item in this slice *except the last* (since a + // trailing comma is not allowed). + Commas []*RuneNode + Options *CompactOptionsNode + Semicolon *RuneNode +} + +func (*ExtensionRangeNode) msgElement() {} + +// NewExtensionRangeNode creates a new *ExtensionRangeNode. All args must be +// non-nil except opts, which may be nil. +// - keyword: The token corresponding to the "extends" keyword. +// - ranges: One or more range expressions. +// - commas: Tokens that represent the "," runes that delimit the range expressions. +// The length of commas must be one less than the length of ranges. +// - opts: The node corresponding to options that apply to each of the ranges. +// - semicolon The token corresponding to the ";" rune that ends the declaration. 
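Since every AST node that can carry options implements NodeWithOptions, option inspection can be written once against that interface instead of per node type. An illustrative helper (printOptionNames is not part of the vendored code; it combines RangeOptions with the FileInfo accessors shown earlier):

package astexample

import (
	"fmt"

	"github.com/bufbuild/protocompile/ast"
)

// printOptionNames prints the raw source text of each option name
// attached to n (a file, message, field, service, and so on).
func printOptionNames(fi *ast.FileInfo, n ast.NodeWithOptions) {
	n.RangeOptions(func(opt *ast.OptionNode) bool {
		fmt.Println(fi.NodeInfo(opt.Name).RawText())
		return true // returning false would stop the iteration early
	})
}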
+func NewExtensionRangeNode(keyword *KeywordNode, ranges []*RangeNode, commas []*RuneNode, opts *CompactOptionsNode, semicolon *RuneNode) *ExtensionRangeNode { + if keyword == nil { + panic("keyword is nil") + } + if semicolon == nil { + panic("semicolon is nil") + } + if len(ranges) == 0 { + panic("must have at least one range") + } + if len(commas) != len(ranges)-1 { + panic(fmt.Sprintf("%d ranges requires %d commas, not %d", len(ranges), len(ranges)-1, len(commas))) + } + numChildren := len(ranges)*2 + 1 + if opts != nil { + numChildren++ + } + children := make([]Node, 0, numChildren) + children = append(children, keyword) + for i, rng := range ranges { + if i > 0 { + if commas[i-1] == nil { + panic(fmt.Sprintf("commas[%d] is nil", i-1)) + } + children = append(children, commas[i-1]) + } + if rng == nil { + panic(fmt.Sprintf("ranges[%d] is nil", i)) + } + children = append(children, rng) + } + if opts != nil { + children = append(children, opts) + } + children = append(children, semicolon) + return &ExtensionRangeNode{ + compositeNode: compositeNode{ + children: children, + }, + Keyword: keyword, + Ranges: ranges, + Commas: commas, + Options: opts, + Semicolon: semicolon, + } +} + +func (e *ExtensionRangeNode) RangeOptions(fn func(*OptionNode) bool) { + for _, opt := range e.Options.Options { + if !fn(opt) { + return + } + } +} + +// RangeDeclNode is a placeholder interface for AST nodes that represent +// numeric values. This allows NoSourceNode to be used in place of *RangeNode +// for some usages. +type RangeDeclNode interface { + Node + RangeStart() Node + RangeEnd() Node +} + +var _ RangeDeclNode = (*RangeNode)(nil) +var _ RangeDeclNode = (*NoSourceNode)(nil) + +// RangeNode represents a range expression, used in both extension ranges and +// reserved ranges. Example: +// +// 1000 to max +type RangeNode struct { + compositeNode + StartVal IntValueNode + // if To is non-nil, then exactly one of EndVal or Max must also be non-nil + To *KeywordNode + // EndVal and Max are mutually exclusive + EndVal IntValueNode + Max *KeywordNode +} + +// NewRangeNode creates a new *RangeNode. The start argument must be non-nil. +// The to argument represents the "to" keyword. If present (i.e. if it is non-nil), +// then so must be exactly one of end or max. If max is non-nil, it indicates a +// "100 to max" style range. But if end is non-nil, the end of the range is a +// literal, such as "100 to 200". 
+func NewRangeNode(start IntValueNode, to *KeywordNode, end IntValueNode, maxEnd *KeywordNode) *RangeNode { + if start == nil { + panic("start is nil") + } + numChildren := 1 + if to != nil { + if end == nil && maxEnd == nil { + panic("to is not nil, but end and max both are") + } + if end != nil && maxEnd != nil { + panic("end and max cannot be both non-nil") + } + numChildren = 3 + } else { + if end != nil { + panic("to is nil, but end is not") + } + if maxEnd != nil { + panic("to is nil, but max is not") + } + } + children := make([]Node, 0, numChildren) + children = append(children, start) + if to != nil { + children = append(children, to) + if end != nil { + children = append(children, end) + } else { + children = append(children, maxEnd) + } + } + return &RangeNode{ + compositeNode: compositeNode{ + children: children, + }, + StartVal: start, + To: to, + EndVal: end, + Max: maxEnd, + } +} + +func (n *RangeNode) RangeStart() Node { + return n.StartVal +} + +func (n *RangeNode) RangeEnd() Node { + if n.Max != nil { + return n.Max + } + if n.EndVal != nil { + return n.EndVal + } + return n.StartVal +} + +func (n *RangeNode) StartValue() interface{} { + return n.StartVal.Value() +} + +func (n *RangeNode) StartValueAsInt32(minVal, maxVal int32) (int32, bool) { + return AsInt32(n.StartVal, minVal, maxVal) +} + +func (n *RangeNode) EndValue() interface{} { + if n.EndVal == nil { + return nil + } + return n.EndVal.Value() +} + +func (n *RangeNode) EndValueAsInt32(minVal, maxVal int32) (int32, bool) { + if n.Max != nil { + return maxVal, true + } + if n.EndVal == nil { + return n.StartValueAsInt32(minVal, maxVal) + } + return AsInt32(n.EndVal, minVal, maxVal) +} + +// ReservedNode represents reserved declaration, which can be used to reserve +// either names or numbers. Examples: +// +// reserved 1, 10-12, 15; +// reserved "foo", "bar", "baz"; +// reserved foo, bar, baz; +type ReservedNode struct { + compositeNode + Keyword *KeywordNode + // If non-empty, this node represents reserved ranges, and Names and Identifiers + // will be empty. + Ranges []*RangeNode + // If non-empty, this node represents reserved names as string literals, and + // Ranges and Identifiers will be empty. String literals are used for reserved + // names in proto2 and proto3 syntax. + Names []StringValueNode + // If non-empty, this node represents reserved names as identifiers, and Ranges + // and Names will be empty. Identifiers are used for reserved names in editions. + Identifiers []*IdentNode + // Commas represent the separating ',' characters between options. The + // length of this slice must be exactly len(Ranges)-1 or len(Names)-1, depending + // on whether this node represents reserved ranges or reserved names. Each item + // in Ranges or Names has a corresponding item in this slice *except the last* + // (since a trailing comma is not allowed). + Commas []*RuneNode + Semicolon *RuneNode +} + +func (*ReservedNode) msgElement() {} +func (*ReservedNode) enumElement() {} + +// NewReservedRangesNode creates a new *ReservedNode that represents reserved +// numeric ranges. All args must be non-nil. +// - keyword: The token corresponding to the "reserved" keyword. +// - ranges: One or more range expressions. +// - commas: Tokens that represent the "," runes that delimit the range expressions. +// The length of commas must be one less than the length of ranges. +// - semicolon The token corresponding to the ";" rune that ends the declaration. 
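The StartValueAsInt32 and EndValueAsInt32 accessors above already handle both the "to max" form and the single-value form, so resolving a RangeNode to concrete bounds takes only two calls. A sketch under stated assumptions (resolveRange is a hypothetical helper; the 536870911 field-number ceiling mentioned in the comment is a caller's choice, not something this code enforces):

package astexample

import "github.com/bufbuild/protocompile/ast"

// resolveRange turns a RangeNode into concrete int32 bounds, with the
// "max" keyword mapped to maxVal. For message field tags a caller would
// typically pass 1 and 536870911 as the limits.
func resolveRange(r *ast.RangeNode, minVal, maxVal int32) (start, end int32, ok bool) {
	if start, ok = r.StartValueAsInt32(minVal, maxVal); !ok {
		return 0, 0, false
	}
	end, ok = r.EndValueAsInt32(minVal, maxVal)
	return start, end, ok
}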
+func NewReservedRangesNode(keyword *KeywordNode, ranges []*RangeNode, commas []*RuneNode, semicolon *RuneNode) *ReservedNode { + if keyword == nil { + panic("keyword is nil") + } + if semicolon == nil { + panic("semicolon is nil") + } + if len(ranges) == 0 { + panic("must have at least one range") + } + if len(commas) != len(ranges)-1 { + panic(fmt.Sprintf("%d ranges requires %d commas, not %d", len(ranges), len(ranges)-1, len(commas))) + } + children := make([]Node, 0, len(ranges)*2+1) + children = append(children, keyword) + for i, rng := range ranges { + if i > 0 { + if commas[i-1] == nil { + panic(fmt.Sprintf("commas[%d] is nil", i-1)) + } + children = append(children, commas[i-1]) + } + if rng == nil { + panic(fmt.Sprintf("ranges[%d] is nil", i)) + } + children = append(children, rng) + } + children = append(children, semicolon) + return &ReservedNode{ + compositeNode: compositeNode{ + children: children, + }, + Keyword: keyword, + Ranges: ranges, + Commas: commas, + Semicolon: semicolon, + } +} + +// NewReservedNamesNode creates a new *ReservedNode that represents reserved +// names. All args must be non-nil. +// - keyword: The token corresponding to the "reserved" keyword. +// - names: One or more names. +// - commas: Tokens that represent the "," runes that delimit the names. +// The length of commas must be one less than the length of names. +// - semicolon The token corresponding to the ";" rune that ends the declaration. +func NewReservedNamesNode(keyword *KeywordNode, names []StringValueNode, commas []*RuneNode, semicolon *RuneNode) *ReservedNode { + if keyword == nil { + panic("keyword is nil") + } + if len(names) == 0 { + panic("must have at least one name") + } + if len(commas) != len(names)-1 { + panic(fmt.Sprintf("%d names requires %d commas, not %d", len(names), len(names)-1, len(commas))) + } + numChildren := len(names) * 2 + if semicolon != nil { + numChildren++ + } + children := make([]Node, 0, numChildren) + children = append(children, keyword) + for i, name := range names { + if i > 0 { + if commas[i-1] == nil { + panic(fmt.Sprintf("commas[%d] is nil", i-1)) + } + children = append(children, commas[i-1]) + } + if name == nil { + panic(fmt.Sprintf("names[%d] is nil", i)) + } + children = append(children, name) + } + if semicolon != nil { + children = append(children, semicolon) + } + return &ReservedNode{ + compositeNode: compositeNode{ + children: children, + }, + Keyword: keyword, + Names: names, + Commas: commas, + Semicolon: semicolon, + } +} + +// NewReservedIdentifiersNode creates a new *ReservedNode that represents reserved +// names. All args must be non-nil. +// - keyword: The token corresponding to the "reserved" keyword. +// - names: One or more names. +// - commas: Tokens that represent the "," runes that delimit the names. +// The length of commas must be one less than the length of names. +// - semicolon The token corresponding to the ";" rune that ends the declaration. 
+func NewReservedIdentifiersNode(keyword *KeywordNode, names []*IdentNode, commas []*RuneNode, semicolon *RuneNode) *ReservedNode { + if keyword == nil { + panic("keyword is nil") + } + if len(names) == 0 { + panic("must have at least one name") + } + if len(commas) != len(names)-1 { + panic(fmt.Sprintf("%d names requires %d commas, not %d", len(names), len(names)-1, len(commas))) + } + numChildren := len(names) * 2 + if semicolon != nil { + numChildren++ + } + children := make([]Node, 0, numChildren) + children = append(children, keyword) + for i, name := range names { + if i > 0 { + if commas[i-1] == nil { + panic(fmt.Sprintf("commas[%d] is nil", i-1)) + } + children = append(children, commas[i-1]) + } + if name == nil { + panic(fmt.Sprintf("names[%d] is nil", i)) + } + children = append(children, name) + } + if semicolon != nil { + children = append(children, semicolon) + } + return &ReservedNode{ + compositeNode: compositeNode{ + children: children, + }, + Keyword: keyword, + Identifiers: names, + Commas: commas, + Semicolon: semicolon, + } +} diff --git a/vendor/github.com/bufbuild/protocompile/ast/service.go b/vendor/github.com/bufbuild/protocompile/ast/service.go new file mode 100644 index 00000000..eba22fd2 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/ast/service.go @@ -0,0 +1,308 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ast + +import "fmt" + +// ServiceNode represents a service declaration. Example: +// +// service Foo { +// rpc Bar (Baz) returns (Bob); +// rpc Frobnitz (stream Parts) returns (Gyzmeaux); +// } +type ServiceNode struct { + compositeNode + Keyword *KeywordNode + Name *IdentNode + OpenBrace *RuneNode + Decls []ServiceElement + CloseBrace *RuneNode +} + +func (*ServiceNode) fileElement() {} + +// NewServiceNode creates a new *ServiceNode. All arguments must be non-nil. +// - keyword: The token corresponding to the "service" keyword. +// - name: The token corresponding to the service's name. +// - openBrace: The token corresponding to the "{" rune that starts the body. +// - decls: All declarations inside the service body. +// - closeBrace: The token corresponding to the "}" rune that ends the body. 
+func NewServiceNode(keyword *KeywordNode, name *IdentNode, openBrace *RuneNode, decls []ServiceElement, closeBrace *RuneNode) *ServiceNode { + if keyword == nil { + panic("keyword is nil") + } + if name == nil { + panic("name is nil") + } + if openBrace == nil { + panic("openBrace is nil") + } + if closeBrace == nil { + panic("closeBrace is nil") + } + children := make([]Node, 0, 4+len(decls)) + children = append(children, keyword, name, openBrace) + for _, decl := range decls { + switch decl := decl.(type) { + case *OptionNode, *RPCNode, *EmptyDeclNode: + default: + panic(fmt.Sprintf("invalid ServiceElement type: %T", decl)) + } + children = append(children, decl) + } + children = append(children, closeBrace) + + return &ServiceNode{ + compositeNode: compositeNode{ + children: children, + }, + Keyword: keyword, + Name: name, + OpenBrace: openBrace, + Decls: decls, + CloseBrace: closeBrace, + } +} + +func (n *ServiceNode) RangeOptions(fn func(*OptionNode) bool) { + for _, decl := range n.Decls { + if opt, ok := decl.(*OptionNode); ok { + if !fn(opt) { + return + } + } + } +} + +// ServiceElement is an interface implemented by all AST nodes that can +// appear in the body of a service declaration. +type ServiceElement interface { + Node + serviceElement() +} + +var _ ServiceElement = (*OptionNode)(nil) +var _ ServiceElement = (*RPCNode)(nil) +var _ ServiceElement = (*EmptyDeclNode)(nil) + +// RPCDeclNode is a placeholder interface for AST nodes that represent RPC +// declarations. This allows NoSourceNode to be used in place of *RPCNode +// for some usages. +type RPCDeclNode interface { + NodeWithOptions + GetName() Node + GetInputType() Node + GetOutputType() Node +} + +var _ RPCDeclNode = (*RPCNode)(nil) +var _ RPCDeclNode = (*NoSourceNode)(nil) + +// RPCNode represents an RPC declaration. Example: +// +// rpc Foo (Bar) returns (Baz); +type RPCNode struct { + compositeNode + Keyword *KeywordNode + Name *IdentNode + Input *RPCTypeNode + Returns *KeywordNode + Output *RPCTypeNode + Semicolon *RuneNode + OpenBrace *RuneNode + Decls []RPCElement + CloseBrace *RuneNode +} + +func (n *RPCNode) serviceElement() {} + +// NewRPCNode creates a new *RPCNode with no body. All arguments must be non-nil. +// - keyword: The token corresponding to the "rpc" keyword. +// - name: The token corresponding to the RPC's name. +// - input: The token corresponding to the RPC input message type. +// - returns: The token corresponding to the "returns" keyword that precedes the output type. +// - output: The token corresponding to the RPC output message type. +// - semicolon: The token corresponding to the ";" rune that ends the declaration. +func NewRPCNode(keyword *KeywordNode, name *IdentNode, input *RPCTypeNode, returns *KeywordNode, output *RPCTypeNode, semicolon *RuneNode) *RPCNode { + if keyword == nil { + panic("keyword is nil") + } + if name == nil { + panic("name is nil") + } + if input == nil { + panic("input is nil") + } + if returns == nil { + panic("returns is nil") + } + if output == nil { + panic("output is nil") + } + var children []Node + if semicolon == nil { + children = []Node{keyword, name, input, returns, output} + } else { + children = []Node{keyword, name, input, returns, output, semicolon} + } + return &RPCNode{ + compositeNode: compositeNode{ + children: children, + }, + Keyword: keyword, + Name: name, + Input: input, + Returns: returns, + Output: output, + Semicolon: semicolon, + } +} + +// NewRPCNodeWithBody creates a new *RPCNode that includes a body (and possibly +// options). 
All arguments must be non-nil. +// - keyword: The token corresponding to the "rpc" keyword. +// - name: The token corresponding to the RPC's name. +// - input: The token corresponding to the RPC input message type. +// - returns: The token corresponding to the "returns" keyword that precedes the output type. +// - output: The token corresponding to the RPC output message type. +// - openBrace: The token corresponding to the "{" rune that starts the body. +// - decls: All declarations inside the RPC body. +// - closeBrace: The token corresponding to the "}" rune that ends the body. +func NewRPCNodeWithBody(keyword *KeywordNode, name *IdentNode, input *RPCTypeNode, returns *KeywordNode, output *RPCTypeNode, openBrace *RuneNode, decls []RPCElement, closeBrace *RuneNode) *RPCNode { + if keyword == nil { + panic("keyword is nil") + } + if name == nil { + panic("name is nil") + } + if input == nil { + panic("input is nil") + } + if returns == nil { + panic("returns is nil") + } + if output == nil { + panic("output is nil") + } + if openBrace == nil { + panic("openBrace is nil") + } + if closeBrace == nil { + panic("closeBrace is nil") + } + children := make([]Node, 0, 7+len(decls)) + children = append(children, keyword, name, input, returns, output, openBrace) + for _, decl := range decls { + switch decl := decl.(type) { + case *OptionNode, *EmptyDeclNode: + default: + panic(fmt.Sprintf("invalid RPCElement type: %T", decl)) + } + children = append(children, decl) + } + children = append(children, closeBrace) + + return &RPCNode{ + compositeNode: compositeNode{ + children: children, + }, + Keyword: keyword, + Name: name, + Input: input, + Returns: returns, + Output: output, + OpenBrace: openBrace, + Decls: decls, + CloseBrace: closeBrace, + } +} + +func (n *RPCNode) GetName() Node { + return n.Name +} + +func (n *RPCNode) GetInputType() Node { + return n.Input.MessageType +} + +func (n *RPCNode) GetOutputType() Node { + return n.Output.MessageType +} + +func (n *RPCNode) RangeOptions(fn func(*OptionNode) bool) { + for _, decl := range n.Decls { + if opt, ok := decl.(*OptionNode); ok { + if !fn(opt) { + return + } + } + } +} + +// RPCElement is an interface implemented by all AST nodes that can +// appear in the body of an rpc declaration (aka method). +type RPCElement interface { + Node + methodElement() +} + +var _ RPCElement = (*OptionNode)(nil) +var _ RPCElement = (*EmptyDeclNode)(nil) + +// RPCTypeNode represents the declaration of a request or response type for an +// RPC. Example: +// +// (stream foo.Bar) +type RPCTypeNode struct { + compositeNode + OpenParen *RuneNode + Stream *KeywordNode + MessageType IdentValueNode + CloseParen *RuneNode +} + +// NewRPCTypeNode creates a new *RPCTypeNode. All arguments must be non-nil +// except stream, which may be nil. +// - openParen: The token corresponding to the "(" rune that starts the declaration. +// - stream: The token corresponding to the "stream" keyword or nil if not present. +// - msgType: The token corresponding to the message type's name. +// - closeParen: The token corresponding to the ")" rune that ends the declaration. 
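+//
+// For a unary type such as "(Baz)" the stream argument is nil; for a streaming
+// type such as "(stream Parts)" it is the KeywordNode for "stream". A sketch,
+// where openParen, streamKw, msgType, and closeParen are hypothetical nodes:
+//
+//	unary := NewRPCTypeNode(openParen, nil, msgType, closeParen)
+//	streaming := NewRPCTypeNode(openParen, streamKw, msgType, closeParen)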
+func NewRPCTypeNode(openParen *RuneNode, stream *KeywordNode, msgType IdentValueNode, closeParen *RuneNode) *RPCTypeNode { + if openParen == nil { + panic("openParen is nil") + } + if msgType == nil { + panic("msgType is nil") + } + if closeParen == nil { + panic("closeParen is nil") + } + var children []Node + if stream != nil { + children = []Node{openParen, stream, msgType, closeParen} + } else { + children = []Node{openParen, msgType, closeParen} + } + + return &RPCTypeNode{ + compositeNode: compositeNode{ + children: children, + }, + OpenParen: openParen, + Stream: stream, + MessageType: msgType, + CloseParen: closeParen, + } +} diff --git a/vendor/github.com/bufbuild/protocompile/ast/values.go b/vendor/github.com/bufbuild/protocompile/ast/values.go new file mode 100644 index 00000000..22bd208d --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/ast/values.go @@ -0,0 +1,519 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ast + +import ( + "fmt" + "math" + "strings" +) + +// ValueNode is an AST node that represents a literal value. +// +// It also includes references (e.g. IdentifierValueNode), which can be +// used as values in some contexts, such as describing the default value +// for a field, which can refer to an enum value. +// +// This also allows NoSourceNode to be used in place of a real value node +// for some usages. +type ValueNode interface { + Node + // Value returns a Go representation of the value. For scalars, this + // will be a string, int64, uint64, float64, or bool. This could also + // be an Identifier (e.g. IdentValueNodes). It can also be a composite + // literal: + // * For array literals, the type returned will be []ValueNode + // * For message literals, the type returned will be []*MessageFieldNode + // + // If the ValueNode is a NoSourceNode, indicating that there is no actual + // source code (and thus not AST information), then this method always + // returns nil. + Value() interface{} +} + +var _ ValueNode = (*IdentNode)(nil) +var _ ValueNode = (*CompoundIdentNode)(nil) +var _ ValueNode = (*StringLiteralNode)(nil) +var _ ValueNode = (*CompoundStringLiteralNode)(nil) +var _ ValueNode = (*UintLiteralNode)(nil) +var _ ValueNode = (*NegativeIntLiteralNode)(nil) +var _ ValueNode = (*FloatLiteralNode)(nil) +var _ ValueNode = (*SpecialFloatLiteralNode)(nil) +var _ ValueNode = (*SignedFloatLiteralNode)(nil) +var _ ValueNode = (*ArrayLiteralNode)(nil) +var _ ValueNode = (*MessageLiteralNode)(nil) +var _ ValueNode = (*NoSourceNode)(nil) + +// StringValueNode is an AST node that represents a string literal. +// Such a node can be a single literal (*StringLiteralNode) or a +// concatenation of multiple literals (*CompoundStringLiteralNode). +type StringValueNode interface { + ValueNode + AsString() string +} + +var _ StringValueNode = (*StringLiteralNode)(nil) +var _ StringValueNode = (*CompoundStringLiteralNode)(nil) + +// StringLiteralNode represents a simple string literal. 
Example: +// +// "proto2" +type StringLiteralNode struct { + terminalNode + // Val is the actual string value that the literal indicates. + Val string +} + +// NewStringLiteralNode creates a new *StringLiteralNode with the given val. +func NewStringLiteralNode(val string, tok Token) *StringLiteralNode { + return &StringLiteralNode{ + terminalNode: tok.asTerminalNode(), + Val: val, + } +} + +func (n *StringLiteralNode) Value() interface{} { + return n.AsString() +} + +func (n *StringLiteralNode) AsString() string { + return n.Val +} + +// CompoundStringLiteralNode represents a compound string literal, which is +// the concatenaton of adjacent string literals. Example: +// +// "this " "is" " all one " "string" +type CompoundStringLiteralNode struct { + compositeNode + Val string +} + +// NewCompoundLiteralStringNode creates a new *CompoundStringLiteralNode that +// consists of the given string components. The components argument may not be +// empty. +func NewCompoundLiteralStringNode(components ...*StringLiteralNode) *CompoundStringLiteralNode { + if len(components) == 0 { + panic("must have at least one component") + } + children := make([]Node, len(components)) + var b strings.Builder + for i, comp := range components { + children[i] = comp + b.WriteString(comp.Val) + } + return &CompoundStringLiteralNode{ + compositeNode: compositeNode{ + children: children, + }, + Val: b.String(), + } +} + +func (n *CompoundStringLiteralNode) Value() interface{} { + return n.AsString() +} + +func (n *CompoundStringLiteralNode) AsString() string { + return n.Val +} + +// IntValueNode is an AST node that represents an integer literal. If +// an integer literal is too large for an int64 (or uint64 for +// positive literals), it is represented instead by a FloatValueNode. +type IntValueNode interface { + ValueNode + AsInt64() (int64, bool) + AsUint64() (uint64, bool) +} + +// AsInt32 range checks the given int value and returns its value is +// in the range or 0, false if it is outside the range. +func AsInt32(n IntValueNode, minVal, maxVal int32) (int32, bool) { + i, ok := n.AsInt64() + if !ok { + return 0, false + } + if i < int64(minVal) || i > int64(maxVal) { + return 0, false + } + return int32(i), true +} + +var _ IntValueNode = (*UintLiteralNode)(nil) +var _ IntValueNode = (*NegativeIntLiteralNode)(nil) + +// UintLiteralNode represents a simple integer literal with no sign character. +type UintLiteralNode struct { + terminalNode + // Val is the numeric value indicated by the literal + Val uint64 +} + +// NewUintLiteralNode creates a new *UintLiteralNode with the given val. +func NewUintLiteralNode(val uint64, tok Token) *UintLiteralNode { + return &UintLiteralNode{ + terminalNode: tok.asTerminalNode(), + Val: val, + } +} + +func (n *UintLiteralNode) Value() interface{} { + return n.Val +} + +func (n *UintLiteralNode) AsInt64() (int64, bool) { + if n.Val > math.MaxInt64 { + return 0, false + } + return int64(n.Val), true +} + +func (n *UintLiteralNode) AsUint64() (uint64, bool) { + return n.Val, true +} + +func (n *UintLiteralNode) AsFloat() float64 { + return float64(n.Val) +} + +// NegativeIntLiteralNode represents an integer literal with a negative (-) sign. +type NegativeIntLiteralNode struct { + compositeNode + Minus *RuneNode + Uint *UintLiteralNode + Val int64 +} + +// NewNegativeIntLiteralNode creates a new *NegativeIntLiteralNode. Both +// arguments must be non-nil. 
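+//
+// A sketch of how the resulting value relates to its parts; minus is a
+// hypothetical *RuneNode for "-" and tok a hypothetical Token for "42":
+//
+//	n := NewNegativeIntLiteralNode(minus, NewUintLiteralNode(42, tok))
+//	v, ok := n.AsInt64()                                 // v == -42, ok == true
+//	i32, ok2 := AsInt32(n, math.MinInt32, math.MaxInt32) // i32 == -42, ok2 == true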
+func NewNegativeIntLiteralNode(sign *RuneNode, i *UintLiteralNode) *NegativeIntLiteralNode { + if sign == nil { + panic("sign is nil") + } + if i == nil { + panic("i is nil") + } + children := []Node{sign, i} + return &NegativeIntLiteralNode{ + compositeNode: compositeNode{ + children: children, + }, + Minus: sign, + Uint: i, + Val: -int64(i.Val), + } +} + +func (n *NegativeIntLiteralNode) Value() interface{} { + return n.Val +} + +func (n *NegativeIntLiteralNode) AsInt64() (int64, bool) { + return n.Val, true +} + +func (n *NegativeIntLiteralNode) AsUint64() (uint64, bool) { + if n.Val < 0 { + return 0, false + } + return uint64(n.Val), true +} + +// FloatValueNode is an AST node that represents a numeric literal with +// a floating point, in scientific notation, or too large to fit in an +// int64 or uint64. +type FloatValueNode interface { + ValueNode + AsFloat() float64 +} + +var _ FloatValueNode = (*FloatLiteralNode)(nil) +var _ FloatValueNode = (*SpecialFloatLiteralNode)(nil) +var _ FloatValueNode = (*UintLiteralNode)(nil) + +// FloatLiteralNode represents a floating point numeric literal. +type FloatLiteralNode struct { + terminalNode + // Val is the numeric value indicated by the literal + Val float64 +} + +// NewFloatLiteralNode creates a new *FloatLiteralNode with the given val. +func NewFloatLiteralNode(val float64, tok Token) *FloatLiteralNode { + return &FloatLiteralNode{ + terminalNode: tok.asTerminalNode(), + Val: val, + } +} + +func (n *FloatLiteralNode) Value() interface{} { + return n.AsFloat() +} + +func (n *FloatLiteralNode) AsFloat() float64 { + return n.Val +} + +// SpecialFloatLiteralNode represents a special floating point numeric literal +// for "inf" and "nan" values. +type SpecialFloatLiteralNode struct { + *KeywordNode + Val float64 +} + +// NewSpecialFloatLiteralNode returns a new *SpecialFloatLiteralNode for the +// given keyword. The given keyword should be "inf", "infinity", or "nan" +// in any case. +func NewSpecialFloatLiteralNode(name *KeywordNode) *SpecialFloatLiteralNode { + var f float64 + switch strings.ToLower(name.Val) { + case "inf", "infinity": + f = math.Inf(1) + default: + f = math.NaN() + } + return &SpecialFloatLiteralNode{ + KeywordNode: name, + Val: f, + } +} + +func (n *SpecialFloatLiteralNode) Value() interface{} { + return n.AsFloat() +} + +func (n *SpecialFloatLiteralNode) AsFloat() float64 { + return n.Val +} + +// SignedFloatLiteralNode represents a signed floating point number. +type SignedFloatLiteralNode struct { + compositeNode + Sign *RuneNode + Float FloatValueNode + Val float64 +} + +// NewSignedFloatLiteralNode creates a new *SignedFloatLiteralNode. Both +// arguments must be non-nil. +func NewSignedFloatLiteralNode(sign *RuneNode, f FloatValueNode) *SignedFloatLiteralNode { + if sign == nil { + panic("sign is nil") + } + if f == nil { + panic("f is nil") + } + children := []Node{sign, f} + val := f.AsFloat() + if sign.Rune == '-' { + val = -val + } + return &SignedFloatLiteralNode{ + compositeNode: compositeNode{ + children: children, + }, + Sign: sign, + Float: f, + Val: val, + } +} + +func (n *SignedFloatLiteralNode) Value() interface{} { + return n.Val +} + +func (n *SignedFloatLiteralNode) AsFloat() float64 { + return n.Val +} + +// ArrayLiteralNode represents an array literal, which is only allowed inside of +// a MessageLiteralNode, to indicate values for a repeated field. 
Example: +// +// ["foo", "bar", "baz"] +type ArrayLiteralNode struct { + compositeNode + OpenBracket *RuneNode + Elements []ValueNode + // Commas represent the separating ',' characters between elements. The + // length of this slice must be exactly len(Elements)-1, with each item + // in Elements having a corresponding item in this slice *except the last* + // (since a trailing comma is not allowed). + Commas []*RuneNode + CloseBracket *RuneNode +} + +// NewArrayLiteralNode creates a new *ArrayLiteralNode. The openBracket and +// closeBracket args must be non-nil and represent the "[" and "]" runes that +// surround the array values. The given commas arg must have a length that is +// one less than the length of the vals arg. However, vals may be empty, in +// which case commas must also be empty. +func NewArrayLiteralNode(openBracket *RuneNode, vals []ValueNode, commas []*RuneNode, closeBracket *RuneNode) *ArrayLiteralNode { + if openBracket == nil { + panic("openBracket is nil") + } + if closeBracket == nil { + panic("closeBracket is nil") + } + if len(vals) == 0 && len(commas) != 0 { + panic("vals is empty but commas is not") + } + if len(vals) > 0 && len(commas) != len(vals)-1 { + panic(fmt.Sprintf("%d vals requires %d commas, not %d", len(vals), len(vals)-1, len(commas))) + } + children := make([]Node, 0, len(vals)*2+1) + children = append(children, openBracket) + for i, val := range vals { + if i > 0 { + if commas[i-1] == nil { + panic(fmt.Sprintf("commas[%d] is nil", i-1)) + } + children = append(children, commas[i-1]) + } + if val == nil { + panic(fmt.Sprintf("vals[%d] is nil", i)) + } + children = append(children, val) + } + children = append(children, closeBracket) + + return &ArrayLiteralNode{ + compositeNode: compositeNode{ + children: children, + }, + OpenBracket: openBracket, + Elements: vals, + Commas: commas, + CloseBracket: closeBracket, + } +} + +func (n *ArrayLiteralNode) Value() interface{} { + return n.Elements +} + +// MessageLiteralNode represents a message literal, which is compatible with the +// protobuf text format and can be used for custom options with message types. +// Example: +// +// { foo:1 foo:2 foo:3 bar: } +type MessageLiteralNode struct { + compositeNode + Open *RuneNode // should be '{' or '<' + Elements []*MessageFieldNode + // Separator characters between elements, which can be either ',' + // or ';' if present. This slice must be exactly len(Elements) in + // length, with each item in Elements having one corresponding item + // in Seps. Separators in message literals are optional, so a given + // item in this slice may be nil to indicate absence of a separator. + Seps []*RuneNode + Close *RuneNode // should be '}' or '>', depending on Open +} + +// NewMessageLiteralNode creates a new *MessageLiteralNode. The openSym and +// closeSym runes must not be nil and should be "{" and "}" or "<" and ">". +// +// Unlike separators (dots and commas) used for other AST nodes that represent +// a list of elements, the seps arg must be the SAME length as vals, and it may +// contain nil values to indicate absence of a separator (in fact, it could be +// all nils). 
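+//
+// For example, for the message literal
+//
+//	{ foo:1, bar:2 }
+//
+// vals has two elements and seps must also have two elements: seps[0] is the
+// RuneNode for the "," following the first field, and seps[1] is nil because
+// the second field has no trailing separator.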
+func NewMessageLiteralNode(openSym *RuneNode, vals []*MessageFieldNode, seps []*RuneNode, closeSym *RuneNode) *MessageLiteralNode { + if openSym == nil { + panic("openSym is nil") + } + if closeSym == nil { + panic("closeSym is nil") + } + if len(seps) != len(vals) { + panic(fmt.Sprintf("%d vals requires %d commas, not %d", len(vals), len(vals), len(seps))) + } + numChildren := len(vals) + 2 + for _, sep := range seps { + if sep != nil { + numChildren++ + } + } + children := make([]Node, 0, numChildren) + children = append(children, openSym) + for i, val := range vals { + if val == nil { + panic(fmt.Sprintf("vals[%d] is nil", i)) + } + children = append(children, val) + if seps[i] != nil { + children = append(children, seps[i]) + } + } + children = append(children, closeSym) + + return &MessageLiteralNode{ + compositeNode: compositeNode{ + children: children, + }, + Open: openSym, + Elements: vals, + Seps: seps, + Close: closeSym, + } +} + +func (n *MessageLiteralNode) Value() interface{} { + return n.Elements +} + +// MessageFieldNode represents a single field (name and value) inside of a +// message literal. Example: +// +// foo:"bar" +type MessageFieldNode struct { + compositeNode + Name *FieldReferenceNode + // Sep represents the ':' separator between the name and value. If + // the value is a message or list literal (and thus starts with '<', + // '{', or '['), then the separator may be omitted and this field may + // be nil. + Sep *RuneNode + Val ValueNode +} + +// NewMessageFieldNode creates a new *MessageFieldNode. All args except sep +// must be non-nil. +func NewMessageFieldNode(name *FieldReferenceNode, sep *RuneNode, val ValueNode) *MessageFieldNode { + if name == nil { + panic("name is nil") + } + if val == nil { + panic("val is nil") + } + numChildren := 2 + if sep != nil { + numChildren++ + } + children := make([]Node, 0, numChildren) + children = append(children, name) + if sep != nil { + children = append(children, sep) + } + children = append(children, val) + + return &MessageFieldNode{ + compositeNode: compositeNode{ + children: children, + }, + Name: name, + Sep: sep, + Val: val, + } +} diff --git a/vendor/github.com/bufbuild/protocompile/ast/walk.go b/vendor/github.com/bufbuild/protocompile/ast/walk.go new file mode 100644 index 00000000..00e71ab7 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/ast/walk.go @@ -0,0 +1,931 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ast + +import "fmt" + +// Walk conducts a walk of the AST rooted at the given root using the +// given visitor. It performs a "pre-order traversal", visiting a +// given AST node before it visits that node's descendants. +// +// If a visitor returns an error while walking the tree, the entire +// operation is aborted and that error is returned. 
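+//
+// A usage sketch that counts RPC declarations beneath root, which is assumed
+// to be an AST node such as a *FileNode produced by the parser:
+//
+//	var count int
+//	v := &SimpleVisitor{
+//		DoVisitRPCNode: func(*RPCNode) error {
+//			count++
+//			return nil
+//		},
+//	}
+//	if err := Walk(root, v); err != nil {
+//		// a visitor or hook returned an error and the walk was aborted
+//	}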
+func Walk(root Node, v Visitor, opts ...WalkOption) error { + var wOpts walkOptions + for _, opt := range opts { + opt(&wOpts) + } + return walk(root, v, wOpts) +} + +// WalkOption represents an option used with the Walk function. These +// allow optional before and after hooks to be invoked as each node in +// the tree is visited. +type WalkOption func(*walkOptions) + +type walkOptions struct { + before, after func(Node) error +} + +// WithBefore returns a WalkOption that will cause the given function to be +// invoked before a node is visited during a walk operation. If this hook +// returns an error, the node is not visited and the walk operation is aborted. +func WithBefore(fn func(Node) error) WalkOption { + return func(options *walkOptions) { + options.before = fn + } +} + +// WithAfter returns a WalkOption that will cause the given function to be +// invoked after a node (as well as any descendants) is visited during a walk +// operation. If this hook returns an error, the node is not visited and the +// walk operation is aborted. +// +// If the walk is aborted due to some other visitor or before hook returning an +// error, the after hook is still called for all nodes that have been visited. +// However, the walk operation fails with the first error it encountered, so any +// error returned from an after hook is effectively ignored. +func WithAfter(fn func(Node) error) WalkOption { + return func(options *walkOptions) { + options.after = fn + } +} + +func walk(root Node, v Visitor, opts walkOptions) (err error) { + if opts.before != nil { + if err := opts.before(root); err != nil { + return err + } + } + if opts.after != nil { + defer func() { + if afterErr := opts.after(root); afterErr != nil { + // if another call already returned an error then we + // have to ignore the error from the after hook + if err == nil { + err = afterErr + } + } + }() + } + + if err := Visit(root, v); err != nil { + return err + } + + if comp, ok := root.(CompositeNode); ok { + for _, child := range comp.Children() { + if err := walk(child, v, opts); err != nil { + return err + } + } + } + return nil +} + +// Visit implements the double-dispatch idiom and visits the given node by +// calling the appropriate method of the given visitor. 
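+//
+// Unlike Walk, Visit dispatches only on the given node and does not descend
+// into its children. A sketch using one of SimpleVisitor's interface-based
+// hooks, where n is any Node value:
+//
+//	err := Visit(n, &SimpleVisitor{
+//		DoVisitValueNode: func(val ValueNode) error {
+//			// n is a literal or identifier used as a value
+//			return nil
+//		},
+//	})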
+func Visit(n Node, v Visitor) error { + switch n := n.(type) { + case *FileNode: + return v.VisitFileNode(n) + case *SyntaxNode: + return v.VisitSyntaxNode(n) + case *EditionNode: + return v.VisitEditionNode(n) + case *PackageNode: + return v.VisitPackageNode(n) + case *ImportNode: + return v.VisitImportNode(n) + case *OptionNode: + return v.VisitOptionNode(n) + case *OptionNameNode: + return v.VisitOptionNameNode(n) + case *FieldReferenceNode: + return v.VisitFieldReferenceNode(n) + case *CompactOptionsNode: + return v.VisitCompactOptionsNode(n) + case *MessageNode: + return v.VisitMessageNode(n) + case *ExtendNode: + return v.VisitExtendNode(n) + case *ExtensionRangeNode: + return v.VisitExtensionRangeNode(n) + case *ReservedNode: + return v.VisitReservedNode(n) + case *RangeNode: + return v.VisitRangeNode(n) + case *FieldNode: + return v.VisitFieldNode(n) + case *GroupNode: + return v.VisitGroupNode(n) + case *MapFieldNode: + return v.VisitMapFieldNode(n) + case *MapTypeNode: + return v.VisitMapTypeNode(n) + case *OneofNode: + return v.VisitOneofNode(n) + case *EnumNode: + return v.VisitEnumNode(n) + case *EnumValueNode: + return v.VisitEnumValueNode(n) + case *ServiceNode: + return v.VisitServiceNode(n) + case *RPCNode: + return v.VisitRPCNode(n) + case *RPCTypeNode: + return v.VisitRPCTypeNode(n) + case *IdentNode: + return v.VisitIdentNode(n) + case *CompoundIdentNode: + return v.VisitCompoundIdentNode(n) + case *StringLiteralNode: + return v.VisitStringLiteralNode(n) + case *CompoundStringLiteralNode: + return v.VisitCompoundStringLiteralNode(n) + case *UintLiteralNode: + return v.VisitUintLiteralNode(n) + case *NegativeIntLiteralNode: + return v.VisitNegativeIntLiteralNode(n) + case *FloatLiteralNode: + return v.VisitFloatLiteralNode(n) + case *SpecialFloatLiteralNode: + return v.VisitSpecialFloatLiteralNode(n) + case *SignedFloatLiteralNode: + return v.VisitSignedFloatLiteralNode(n) + case *ArrayLiteralNode: + return v.VisitArrayLiteralNode(n) + case *MessageLiteralNode: + return v.VisitMessageLiteralNode(n) + case *MessageFieldNode: + return v.VisitMessageFieldNode(n) + case *KeywordNode: + return v.VisitKeywordNode(n) + case *RuneNode: + return v.VisitRuneNode(n) + case *EmptyDeclNode: + return v.VisitEmptyDeclNode(n) + default: + panic(fmt.Sprintf("unexpected type of node: %T", n)) + } +} + +// AncestorTracker is used to track the path of nodes during a walk operation. +// By passing AsWalkOptions to a call to Walk, a visitor can inspect the path to +// the node being visited using this tracker. +type AncestorTracker struct { + ancestors []Node +} + +// AsWalkOptions returns WalkOption values that will cause this ancestor tracker +// to track the path through the AST during the walk operation. +func (t *AncestorTracker) AsWalkOptions() []WalkOption { + return []WalkOption{ + WithBefore(func(n Node) error { + t.ancestors = append(t.ancestors, n) + return nil + }), + WithAfter(func(_ Node) error { + t.ancestors = t.ancestors[:len(t.ancestors)-1] + return nil + }), + } +} + +// Path returns a slice of nodes that represents the path from the root of the +// walk operaiton to the currently visited node. The first element in the path +// is the root supplied to Walk. The last element in the path is the currently +// visited node. +// +// The returned slice is not a defensive copy; so callers should NOT mutate it. +func (t *AncestorTracker) Path() []Node { + return t.ancestors +} + +// Parent returns the parent node of the currently visited node. 
If the node +// currently being visited is the root supplied to Walk then nil is returned. +func (t *AncestorTracker) Parent() Node { + if len(t.ancestors) <= 1 { + return nil + } + return t.ancestors[len(t.ancestors)-2] +} + +// VisitChildren visits all direct children of the given node using the given +// visitor. If visiting a child returns an error, that error is immediately +// returned, and other children will not be visited. +func VisitChildren(n CompositeNode, v Visitor) error { + for _, ch := range n.Children() { + if err := Visit(ch, v); err != nil { + return err + } + } + return nil +} + +// Visitor provides a technique for walking the AST that allows for +// dynamic dispatch, where a particular function is invoked based on +// the runtime type of the argument. +// +// It consists of a number of functions, each of which matches a +// concrete Node type. +// +// NOTE: As the language evolves, new methods may be added to this +// interface to correspond to new grammar elements. That is why it +// cannot be directly implemented outside this package. Visitor +// implementations must embed NoOpVisitor and then implement the +// subset of methods of interest. If such an implementation is used +// with an AST that has newer elements, the visitor will not do +// anything in response to the new node types. +// +// An alternative to embedding NoOpVisitor is to use an instance of +// SimpleVisitor. +// +// Visitors can be supplied to a Walk operation or passed to a call +// to Visit or VisitChildren. +// +// Note that there are some AST node types defined in this package +// that do not have corresponding visit methods. These are synthetic +// node types, that have specialized use from the parser, but never +// appear in an actual AST (which is always rooted at FileNode). +// These include SyntheticMapField, SyntheticOneof, +// SyntheticGroupMessageNode, and SyntheticMapEntryNode. +type Visitor interface { + // VisitFileNode is invoked when visiting a *FileNode in the AST. + VisitFileNode(*FileNode) error + // VisitSyntaxNode is invoked when visiting a *SyntaxNode in the AST. + VisitSyntaxNode(*SyntaxNode) error + // VisitEditionNode is invoked when visiting an *EditionNode in the AST. + VisitEditionNode(*EditionNode) error + // VisitPackageNode is invoked when visiting a *PackageNode in the AST. + VisitPackageNode(*PackageNode) error + // VisitImportNode is invoked when visiting an *ImportNode in the AST. + VisitImportNode(*ImportNode) error + // VisitOptionNode is invoked when visiting an *OptionNode in the AST. + VisitOptionNode(*OptionNode) error + // VisitOptionNameNode is invoked when visiting an *OptionNameNode in the AST. + VisitOptionNameNode(*OptionNameNode) error + // VisitFieldReferenceNode is invoked when visiting a *FieldReferenceNode in the AST. + VisitFieldReferenceNode(*FieldReferenceNode) error + // VisitCompactOptionsNode is invoked when visiting a *CompactOptionsNode in the AST. + VisitCompactOptionsNode(*CompactOptionsNode) error + // VisitMessageNode is invoked when visiting a *MessageNode in the AST. + VisitMessageNode(*MessageNode) error + // VisitExtendNode is invoked when visiting an *ExtendNode in the AST. + VisitExtendNode(*ExtendNode) error + // VisitExtensionRangeNode is invoked when visiting an *ExtensionRangeNode in the AST. + VisitExtensionRangeNode(*ExtensionRangeNode) error + // VisitReservedNode is invoked when visiting a *ReservedNode in the AST. 
+ VisitReservedNode(*ReservedNode) error + // VisitRangeNode is invoked when visiting a *RangeNode in the AST. + VisitRangeNode(*RangeNode) error + // VisitFieldNode is invoked when visiting a *FieldNode in the AST. + VisitFieldNode(*FieldNode) error + // VisitGroupNode is invoked when visiting a *GroupNode in the AST. + VisitGroupNode(*GroupNode) error + // VisitMapFieldNode is invoked when visiting a *MapFieldNode in the AST. + VisitMapFieldNode(*MapFieldNode) error + // VisitMapTypeNode is invoked when visiting a *MapTypeNode in the AST. + VisitMapTypeNode(*MapTypeNode) error + // VisitOneofNode is invoked when visiting a *OneofNode in the AST. + VisitOneofNode(*OneofNode) error + // VisitEnumNode is invoked when visiting an *EnumNode in the AST. + VisitEnumNode(*EnumNode) error + // VisitEnumValueNode is invoked when visiting an *EnumValueNode in the AST. + VisitEnumValueNode(*EnumValueNode) error + // VisitServiceNode is invoked when visiting a *ServiceNode in the AST. + VisitServiceNode(*ServiceNode) error + // VisitRPCNode is invoked when visiting an *RPCNode in the AST. + VisitRPCNode(*RPCNode) error + // VisitRPCTypeNode is invoked when visiting an *RPCTypeNode in the AST. + VisitRPCTypeNode(*RPCTypeNode) error + // VisitIdentNode is invoked when visiting an *IdentNode in the AST. + VisitIdentNode(*IdentNode) error + // VisitCompoundIdentNode is invoked when visiting a *CompoundIdentNode in the AST. + VisitCompoundIdentNode(*CompoundIdentNode) error + // VisitStringLiteralNode is invoked when visiting a *StringLiteralNode in the AST. + VisitStringLiteralNode(*StringLiteralNode) error + // VisitCompoundStringLiteralNode is invoked when visiting a *CompoundStringLiteralNode in the AST. + VisitCompoundStringLiteralNode(*CompoundStringLiteralNode) error + // VisitUintLiteralNode is invoked when visiting a *UintLiteralNode in the AST. + VisitUintLiteralNode(*UintLiteralNode) error + // VisitNegativeIntLiteralNode is invoked when visiting a *NegativeIntLiteralNode in the AST. + VisitNegativeIntLiteralNode(*NegativeIntLiteralNode) error + // VisitFloatLiteralNode is invoked when visiting a *FloatLiteralNode in the AST. + VisitFloatLiteralNode(*FloatLiteralNode) error + // VisitSpecialFloatLiteralNode is invoked when visiting a *SpecialFloatLiteralNode in the AST. + VisitSpecialFloatLiteralNode(*SpecialFloatLiteralNode) error + // VisitSignedFloatLiteralNode is invoked when visiting a *SignedFloatLiteralNode in the AST. + VisitSignedFloatLiteralNode(*SignedFloatLiteralNode) error + // VisitArrayLiteralNode is invoked when visiting an *ArrayLiteralNode in the AST. + VisitArrayLiteralNode(*ArrayLiteralNode) error + // VisitMessageLiteralNode is invoked when visiting a *MessageLiteralNode in the AST. + VisitMessageLiteralNode(*MessageLiteralNode) error + // VisitMessageFieldNode is invoked when visiting a *MessageFieldNode in the AST. + VisitMessageFieldNode(*MessageFieldNode) error + // VisitKeywordNode is invoked when visiting a *KeywordNode in the AST. + VisitKeywordNode(*KeywordNode) error + // VisitRuneNode is invoked when visiting a *RuneNode in the AST. + VisitRuneNode(*RuneNode) error + // VisitEmptyDeclNode is invoked when visiting a *EmptyDeclNode in the AST. + VisitEmptyDeclNode(*EmptyDeclNode) error + + // Unexported method prevents callers from directly implementing. + isVisitor() +} + +// NoOpVisitor is a visitor implementation that does nothing. All methods +// unconditionally return nil. 
This can be embedded into a struct to make that +// struct implement the Visitor interface, and only the relevant visit methods +// then need to be implemented on the struct. +type NoOpVisitor struct{} + +var _ Visitor = NoOpVisitor{} + +func (n NoOpVisitor) isVisitor() {} + +func (n NoOpVisitor) VisitFileNode(_ *FileNode) error { + return nil +} + +func (n NoOpVisitor) VisitSyntaxNode(_ *SyntaxNode) error { + return nil +} + +func (n NoOpVisitor) VisitEditionNode(_ *EditionNode) error { + return nil +} + +func (n NoOpVisitor) VisitPackageNode(_ *PackageNode) error { + return nil +} + +func (n NoOpVisitor) VisitImportNode(_ *ImportNode) error { + return nil +} + +func (n NoOpVisitor) VisitOptionNode(_ *OptionNode) error { + return nil +} + +func (n NoOpVisitor) VisitOptionNameNode(_ *OptionNameNode) error { + return nil +} + +func (n NoOpVisitor) VisitFieldReferenceNode(_ *FieldReferenceNode) error { + return nil +} + +func (n NoOpVisitor) VisitCompactOptionsNode(_ *CompactOptionsNode) error { + return nil +} + +func (n NoOpVisitor) VisitMessageNode(_ *MessageNode) error { + return nil +} + +func (n NoOpVisitor) VisitExtendNode(_ *ExtendNode) error { + return nil +} + +func (n NoOpVisitor) VisitExtensionRangeNode(_ *ExtensionRangeNode) error { + return nil +} + +func (n NoOpVisitor) VisitReservedNode(_ *ReservedNode) error { + return nil +} + +func (n NoOpVisitor) VisitRangeNode(_ *RangeNode) error { + return nil +} + +func (n NoOpVisitor) VisitFieldNode(_ *FieldNode) error { + return nil +} + +func (n NoOpVisitor) VisitGroupNode(_ *GroupNode) error { + return nil +} + +func (n NoOpVisitor) VisitMapFieldNode(_ *MapFieldNode) error { + return nil +} + +func (n NoOpVisitor) VisitMapTypeNode(_ *MapTypeNode) error { + return nil +} + +func (n NoOpVisitor) VisitOneofNode(_ *OneofNode) error { + return nil +} + +func (n NoOpVisitor) VisitEnumNode(_ *EnumNode) error { + return nil +} + +func (n NoOpVisitor) VisitEnumValueNode(_ *EnumValueNode) error { + return nil +} + +func (n NoOpVisitor) VisitServiceNode(_ *ServiceNode) error { + return nil +} + +func (n NoOpVisitor) VisitRPCNode(_ *RPCNode) error { + return nil +} + +func (n NoOpVisitor) VisitRPCTypeNode(_ *RPCTypeNode) error { + return nil +} + +func (n NoOpVisitor) VisitIdentNode(_ *IdentNode) error { + return nil +} + +func (n NoOpVisitor) VisitCompoundIdentNode(_ *CompoundIdentNode) error { + return nil +} + +func (n NoOpVisitor) VisitStringLiteralNode(_ *StringLiteralNode) error { + return nil +} + +func (n NoOpVisitor) VisitCompoundStringLiteralNode(_ *CompoundStringLiteralNode) error { + return nil +} + +func (n NoOpVisitor) VisitUintLiteralNode(_ *UintLiteralNode) error { + return nil +} + +func (n NoOpVisitor) VisitNegativeIntLiteralNode(_ *NegativeIntLiteralNode) error { + return nil +} + +func (n NoOpVisitor) VisitFloatLiteralNode(_ *FloatLiteralNode) error { + return nil +} + +func (n NoOpVisitor) VisitSpecialFloatLiteralNode(_ *SpecialFloatLiteralNode) error { + return nil +} + +func (n NoOpVisitor) VisitSignedFloatLiteralNode(_ *SignedFloatLiteralNode) error { + return nil +} + +func (n NoOpVisitor) VisitArrayLiteralNode(_ *ArrayLiteralNode) error { + return nil +} + +func (n NoOpVisitor) VisitMessageLiteralNode(_ *MessageLiteralNode) error { + return nil +} + +func (n NoOpVisitor) VisitMessageFieldNode(_ *MessageFieldNode) error { + return nil +} + +func (n NoOpVisitor) VisitKeywordNode(_ *KeywordNode) error { + return nil +} + +func (n NoOpVisitor) VisitRuneNode(_ *RuneNode) error { + return nil +} + +func (n NoOpVisitor) 
VisitEmptyDeclNode(_ *EmptyDeclNode) error { + return nil +} + +// SimpleVisitor is a visitor implementation that uses numerous function fields. +// If a relevant function field is not nil, then it will be invoked when a node +// is visited. +// +// In addition to a function for each concrete node type (and thus for each +// Visit* method of the Visitor interface), it also has function fields that +// accept interface types. So a visitor can, for example, easily treat all +// ValueNodes uniformly by providing a non-nil value for DoVisitValueNode +// instead of having to supply values for the various DoVisit*Node methods +// corresponding to all types that implement ValueNode. +// +// The most specific function provided that matches a given node is the one that +// will be invoked. For example, DoVisitStringValueNode will be called if +// present and applicable before DoVisitValueNode. Similarly, DoVisitValueNode +// would be called before DoVisitTerminalNode or DoVisitCompositeNode. The +// DoVisitNode is the most generic function and is called only if no more +// specific function is present for a given node type. +// +// The *UintLiteralNode type implements both IntValueNode and FloatValueNode. +// In this case, the DoVisitIntValueNode function is considered more specific +// than DoVisitFloatValueNode, so will be preferred if present. +// +// Similarly, *MapFieldNode and *GroupNode implement both FieldDeclNode and +// MessageDeclNode. In this case, the DoVisitFieldDeclNode function is +// treated as more specific than DoVisitMessageDeclNode, so will be preferred +// if both are present. +type SimpleVisitor struct { + DoVisitFileNode func(*FileNode) error + DoVisitSyntaxNode func(*SyntaxNode) error + DoVisitEditionNode func(*EditionNode) error + DoVisitPackageNode func(*PackageNode) error + DoVisitImportNode func(*ImportNode) error + DoVisitOptionNode func(*OptionNode) error + DoVisitOptionNameNode func(*OptionNameNode) error + DoVisitFieldReferenceNode func(*FieldReferenceNode) error + DoVisitCompactOptionsNode func(*CompactOptionsNode) error + DoVisitMessageNode func(*MessageNode) error + DoVisitExtendNode func(*ExtendNode) error + DoVisitExtensionRangeNode func(*ExtensionRangeNode) error + DoVisitReservedNode func(*ReservedNode) error + DoVisitRangeNode func(*RangeNode) error + DoVisitFieldNode func(*FieldNode) error + DoVisitGroupNode func(*GroupNode) error + DoVisitMapFieldNode func(*MapFieldNode) error + DoVisitMapTypeNode func(*MapTypeNode) error + DoVisitOneofNode func(*OneofNode) error + DoVisitEnumNode func(*EnumNode) error + DoVisitEnumValueNode func(*EnumValueNode) error + DoVisitServiceNode func(*ServiceNode) error + DoVisitRPCNode func(*RPCNode) error + DoVisitRPCTypeNode func(*RPCTypeNode) error + DoVisitIdentNode func(*IdentNode) error + DoVisitCompoundIdentNode func(*CompoundIdentNode) error + DoVisitStringLiteralNode func(*StringLiteralNode) error + DoVisitCompoundStringLiteralNode func(*CompoundStringLiteralNode) error + DoVisitUintLiteralNode func(*UintLiteralNode) error + DoVisitNegativeIntLiteralNode func(*NegativeIntLiteralNode) error + DoVisitFloatLiteralNode func(*FloatLiteralNode) error + DoVisitSpecialFloatLiteralNode func(*SpecialFloatLiteralNode) error + DoVisitSignedFloatLiteralNode func(*SignedFloatLiteralNode) error + DoVisitArrayLiteralNode func(*ArrayLiteralNode) error + DoVisitMessageLiteralNode func(*MessageLiteralNode) error + DoVisitMessageFieldNode func(*MessageFieldNode) error + DoVisitKeywordNode func(*KeywordNode) error + DoVisitRuneNode 
func(*RuneNode) error + DoVisitEmptyDeclNode func(*EmptyDeclNode) error + + DoVisitFieldDeclNode func(FieldDeclNode) error + DoVisitMessageDeclNode func(MessageDeclNode) error + + DoVisitIdentValueNode func(IdentValueNode) error + DoVisitStringValueNode func(StringValueNode) error + DoVisitIntValueNode func(IntValueNode) error + DoVisitFloatValueNode func(FloatValueNode) error + DoVisitValueNode func(ValueNode) error + + DoVisitTerminalNode func(TerminalNode) error + DoVisitCompositeNode func(CompositeNode) error + DoVisitNode func(Node) error +} + +var _ Visitor = (*SimpleVisitor)(nil) + +func (v *SimpleVisitor) isVisitor() {} + +func (v *SimpleVisitor) visitInterface(node Node) error { + switch n := node.(type) { + case FieldDeclNode: + if v.DoVisitFieldDeclNode != nil { + return v.DoVisitFieldDeclNode(n) + } + // *MapFieldNode and *GroupNode both implement both FieldDeclNode and + // MessageDeclNode, so handle other case here + if fn, ok := n.(MessageDeclNode); ok && v.DoVisitMessageDeclNode != nil { + return v.DoVisitMessageDeclNode(fn) + } + case MessageDeclNode: + if v.DoVisitMessageDeclNode != nil { + return v.DoVisitMessageDeclNode(n) + } + case IdentValueNode: + if v.DoVisitIdentValueNode != nil { + return v.DoVisitIdentValueNode(n) + } + case StringValueNode: + if v.DoVisitStringValueNode != nil { + return v.DoVisitStringValueNode(n) + } + case IntValueNode: + if v.DoVisitIntValueNode != nil { + return v.DoVisitIntValueNode(n) + } + // *UintLiteralNode implements both IntValueNode and FloatValueNode, + // so handle other case here + if fn, ok := n.(FloatValueNode); ok && v.DoVisitFloatValueNode != nil { + return v.DoVisitFloatValueNode(fn) + } + case FloatValueNode: + if v.DoVisitFloatValueNode != nil { + return v.DoVisitFloatValueNode(n) + } + } + + if n, ok := node.(ValueNode); ok && v.DoVisitValueNode != nil { + return v.DoVisitValueNode(n) + } + + switch n := node.(type) { + case TerminalNode: + if v.DoVisitTerminalNode != nil { + return v.DoVisitTerminalNode(n) + } + case CompositeNode: + if v.DoVisitCompositeNode != nil { + return v.DoVisitCompositeNode(n) + } + } + + if v.DoVisitNode != nil { + return v.DoVisitNode(node) + } + + return nil +} + +func (v *SimpleVisitor) VisitFileNode(node *FileNode) error { + if v.DoVisitFileNode != nil { + return v.DoVisitFileNode(node) + } + return v.visitInterface(node) +} + +func (v *SimpleVisitor) VisitSyntaxNode(node *SyntaxNode) error { + if v.DoVisitSyntaxNode != nil { + return v.DoVisitSyntaxNode(node) + } + return v.visitInterface(node) +} + +func (v *SimpleVisitor) VisitEditionNode(node *EditionNode) error { + if v.DoVisitEditionNode != nil { + return v.DoVisitEditionNode(node) + } + return v.visitInterface(node) +} + +func (v *SimpleVisitor) VisitPackageNode(node *PackageNode) error { + if v.DoVisitPackageNode != nil { + return v.DoVisitPackageNode(node) + } + return v.visitInterface(node) +} + +func (v *SimpleVisitor) VisitImportNode(node *ImportNode) error { + if v.DoVisitImportNode != nil { + return v.DoVisitImportNode(node) + } + return v.visitInterface(node) +} + +func (v *SimpleVisitor) VisitOptionNode(node *OptionNode) error { + if v.DoVisitOptionNode != nil { + return v.DoVisitOptionNode(node) + } + return v.visitInterface(node) +} + +func (v *SimpleVisitor) VisitOptionNameNode(node *OptionNameNode) error { + if v.DoVisitOptionNameNode != nil { + return v.DoVisitOptionNameNode(node) + } + return v.visitInterface(node) +} + +func (v *SimpleVisitor) VisitFieldReferenceNode(node *FieldReferenceNode) error { + if 
v.DoVisitFieldReferenceNode != nil { + return v.DoVisitFieldReferenceNode(node) + } + return v.visitInterface(node) +} + +func (v *SimpleVisitor) VisitCompactOptionsNode(node *CompactOptionsNode) error { + if v.DoVisitCompactOptionsNode != nil { + return v.DoVisitCompactOptionsNode(node) + } + return v.visitInterface(node) +} + +func (v *SimpleVisitor) VisitMessageNode(node *MessageNode) error { + if v.DoVisitMessageNode != nil { + return v.DoVisitMessageNode(node) + } + return v.visitInterface(node) +} + +func (v *SimpleVisitor) VisitExtendNode(node *ExtendNode) error { + if v.DoVisitExtendNode != nil { + return v.DoVisitExtendNode(node) + } + return v.visitInterface(node) +} + +func (v *SimpleVisitor) VisitExtensionRangeNode(node *ExtensionRangeNode) error { + if v.DoVisitExtensionRangeNode != nil { + return v.DoVisitExtensionRangeNode(node) + } + return v.visitInterface(node) +} + +func (v *SimpleVisitor) VisitReservedNode(node *ReservedNode) error { + if v.DoVisitReservedNode != nil { + return v.DoVisitReservedNode(node) + } + return v.visitInterface(node) +} + +func (v *SimpleVisitor) VisitRangeNode(node *RangeNode) error { + if v.DoVisitRangeNode != nil { + return v.DoVisitRangeNode(node) + } + return v.visitInterface(node) +} + +func (v *SimpleVisitor) VisitFieldNode(node *FieldNode) error { + if v.DoVisitFieldNode != nil { + return v.DoVisitFieldNode(node) + } + return v.visitInterface(node) +} + +func (v *SimpleVisitor) VisitGroupNode(node *GroupNode) error { + if v.DoVisitGroupNode != nil { + return v.DoVisitGroupNode(node) + } + return v.visitInterface(node) +} + +func (v *SimpleVisitor) VisitMapFieldNode(node *MapFieldNode) error { + if v.DoVisitMapFieldNode != nil { + return v.DoVisitMapFieldNode(node) + } + return v.visitInterface(node) +} + +func (v *SimpleVisitor) VisitMapTypeNode(node *MapTypeNode) error { + if v.DoVisitMapTypeNode != nil { + return v.DoVisitMapTypeNode(node) + } + return v.visitInterface(node) +} + +func (v *SimpleVisitor) VisitOneofNode(node *OneofNode) error { + if v.DoVisitOneofNode != nil { + return v.DoVisitOneofNode(node) + } + return v.visitInterface(node) +} + +func (v *SimpleVisitor) VisitEnumNode(node *EnumNode) error { + if v.DoVisitEnumNode != nil { + return v.DoVisitEnumNode(node) + } + return v.visitInterface(node) +} + +func (v *SimpleVisitor) VisitEnumValueNode(node *EnumValueNode) error { + if v.DoVisitEnumValueNode != nil { + return v.DoVisitEnumValueNode(node) + } + return v.visitInterface(node) +} + +func (v *SimpleVisitor) VisitServiceNode(node *ServiceNode) error { + if v.DoVisitServiceNode != nil { + return v.DoVisitServiceNode(node) + } + return v.visitInterface(node) +} + +func (v *SimpleVisitor) VisitRPCNode(node *RPCNode) error { + if v.DoVisitRPCNode != nil { + return v.DoVisitRPCNode(node) + } + return v.visitInterface(node) +} + +func (v *SimpleVisitor) VisitRPCTypeNode(node *RPCTypeNode) error { + if v.DoVisitRPCTypeNode != nil { + return v.DoVisitRPCTypeNode(node) + } + return v.visitInterface(node) +} + +func (v *SimpleVisitor) VisitIdentNode(node *IdentNode) error { + if v.DoVisitIdentNode != nil { + return v.DoVisitIdentNode(node) + } + return v.visitInterface(node) +} + +func (v *SimpleVisitor) VisitCompoundIdentNode(node *CompoundIdentNode) error { + if v.DoVisitCompoundIdentNode != nil { + return v.DoVisitCompoundIdentNode(node) + } + return v.visitInterface(node) +} + +func (v *SimpleVisitor) VisitStringLiteralNode(node *StringLiteralNode) error { + if v.DoVisitStringLiteralNode != nil { + return 
v.DoVisitStringLiteralNode(node) + } + return v.visitInterface(node) +} + +func (v *SimpleVisitor) VisitCompoundStringLiteralNode(node *CompoundStringLiteralNode) error { + if v.DoVisitCompoundStringLiteralNode != nil { + return v.DoVisitCompoundStringLiteralNode(node) + } + return v.visitInterface(node) +} + +func (v *SimpleVisitor) VisitUintLiteralNode(node *UintLiteralNode) error { + if v.DoVisitUintLiteralNode != nil { + return v.DoVisitUintLiteralNode(node) + } + return v.visitInterface(node) +} + +func (v *SimpleVisitor) VisitNegativeIntLiteralNode(node *NegativeIntLiteralNode) error { + if v.DoVisitNegativeIntLiteralNode != nil { + return v.DoVisitNegativeIntLiteralNode(node) + } + return v.visitInterface(node) +} + +func (v *SimpleVisitor) VisitFloatLiteralNode(node *FloatLiteralNode) error { + if v.DoVisitFloatLiteralNode != nil { + return v.DoVisitFloatLiteralNode(node) + } + return v.visitInterface(node) +} + +func (v *SimpleVisitor) VisitSpecialFloatLiteralNode(node *SpecialFloatLiteralNode) error { + if v.DoVisitSpecialFloatLiteralNode != nil { + return v.DoVisitSpecialFloatLiteralNode(node) + } + return v.visitInterface(node) +} + +func (v *SimpleVisitor) VisitSignedFloatLiteralNode(node *SignedFloatLiteralNode) error { + if v.DoVisitSignedFloatLiteralNode != nil { + return v.DoVisitSignedFloatLiteralNode(node) + } + return v.visitInterface(node) +} + +func (v *SimpleVisitor) VisitArrayLiteralNode(node *ArrayLiteralNode) error { + if v.DoVisitArrayLiteralNode != nil { + return v.DoVisitArrayLiteralNode(node) + } + return v.visitInterface(node) +} + +func (v *SimpleVisitor) VisitMessageLiteralNode(node *MessageLiteralNode) error { + if v.DoVisitMessageLiteralNode != nil { + return v.DoVisitMessageLiteralNode(node) + } + return v.visitInterface(node) +} + +func (v *SimpleVisitor) VisitMessageFieldNode(node *MessageFieldNode) error { + if v.DoVisitMessageFieldNode != nil { + return v.DoVisitMessageFieldNode(node) + } + return v.visitInterface(node) +} + +func (v *SimpleVisitor) VisitKeywordNode(node *KeywordNode) error { + if v.DoVisitKeywordNode != nil { + return v.DoVisitKeywordNode(node) + } + return v.visitInterface(node) +} + +func (v *SimpleVisitor) VisitRuneNode(node *RuneNode) error { + if v.DoVisitRuneNode != nil { + return v.DoVisitRuneNode(node) + } + return v.visitInterface(node) +} + +func (v *SimpleVisitor) VisitEmptyDeclNode(node *EmptyDeclNode) error { + if v.DoVisitEmptyDeclNode != nil { + return v.DoVisitEmptyDeclNode(node) + } + return v.visitInterface(node) +} diff --git a/vendor/github.com/bufbuild/protocompile/compiler.go b/vendor/github.com/bufbuild/protocompile/compiler.go new file mode 100644 index 00000000..b9a6d15e --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/compiler.go @@ -0,0 +1,682 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
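+
+// A usage sketch of the Compiler type declared in this file, assuming res is
+// a Resolver implementation supplied by the caller:
+//
+//	func compileFiles(ctx context.Context, res Resolver, names ...string) (linker.Files, error) {
+//		c := &Compiler{
+//			Resolver:       res,
+//			SourceInfoMode: SourceInfoStandard,
+//		}
+//		return c.Compile(ctx, names...)
+//	}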
+ +package protocompile + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "runtime" + "runtime/debug" + "strings" + "sync" + + "golang.org/x/sync/semaphore" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/descriptorpb" + + "github.com/bufbuild/protocompile/ast" + "github.com/bufbuild/protocompile/linker" + "github.com/bufbuild/protocompile/options" + "github.com/bufbuild/protocompile/parser" + "github.com/bufbuild/protocompile/reporter" + "github.com/bufbuild/protocompile/sourceinfo" +) + +// Compiler handles compilation tasks, to turn protobuf source files, or other +// intermediate representations, into fully linked descriptors. +// +// The compilation process involves five steps for each protobuf source file: +// 1. Parsing the source into an AST (abstract syntax tree). +// 2. Converting the AST into descriptor protos. +// 3. Linking descriptor protos into fully linked descriptors. +// 4. Interpreting options. +// 5. Computing source code information. +// +// With fully linked descriptors, code generators and protoc plugins could be +// invoked (though that step is not implemented by this package and not a +// responsibility of this type). +type Compiler struct { + // Resolves path/file names into source code or intermediate representations + // for protobuf source files. This is how the compiler loads the files to + // be compiled as well as all dependencies. This field is the only required + // field. + Resolver Resolver + // The maximum parallelism to use when compiling. If unspecified or set to + // a non-positive value, then min(runtime.NumCPU(), runtime.GOMAXPROCS(-1)) + // will be used. + MaxParallelism int + // A custom error and warning reporter. If unspecified a default reporter + // is used. A default reporter fails the compilation after encountering any + // errors and ignores all warnings. + Reporter reporter.Reporter + + // If unspecified or set to SourceInfoNone, source code information will not + // be included in the resulting descriptors. Source code information is + // metadata in the file descriptor that provides position information (i.e. + // the line and column where file elements were defined) as well as comments. + // + // If set to SourceInfoStandard, normal source code information will be + // included in the resulting descriptors. This matches the output of protoc + // (the reference compiler for Protocol Buffers). If set to + // SourceInfoMoreComments, the resulting descriptor will attempt to preserve + // as many comments as possible, for all elements in the file, not just for + // complete declarations. + // + // If Resolver returns descriptors or descriptor protos for a file, then + // those descriptors will not be modified. If they do not already include + // source code info, they will be left that way when the compile operation + // concludes. Similarly, if they already have source code info but this flag + // is false, existing info will be left in place. + SourceInfoMode SourceInfoMode + + // If true, ASTs are retained in compilation results for which an AST was + // constructed. So any linker.Result value in the resulting compiled files + // will have an AST, in addition to descriptors. If left false, the AST + // will be removed as soon as it's no longer needed. This can help reduce + // total memory usage for operations involving a large number of files. + RetainASTs bool + + // If non-nil, the set of symbols already known. Any symbols in the current + // compilation will be added to it. 
If the compilation tries to redefine any + // of these symbols, it will be reported as a collision. + // + // This allows a large compilation to be split up into multiple, smaller + // operations and still be able to identify naming collisions and extension + // number collisions across all operations. + Symbols *linker.Symbols +} + +// SourceInfoMode indicates how source code info is generated by a Compiler. +type SourceInfoMode int + +const ( + // SourceInfoNone indicates that no source code info is generated. + SourceInfoNone = SourceInfoMode(0) + // SourceInfoStandard indicates that the standard source code info is + // generated, which includes comments only for complete declarations. + SourceInfoStandard = SourceInfoMode(1) + // SourceInfoExtraComments indicates that source code info is generated + // and will include comments for all elements (more comments than would + // be found in a descriptor produced by protoc). + SourceInfoExtraComments = SourceInfoMode(2) + // SourceInfoExtraOptionLocations indicates that source code info is + // generated with additional locations for elements inside of message + // literals in option values. This can be combined with the above by + // bitwise-OR'ing it with SourceInfoExtraComments. + SourceInfoExtraOptionLocations = SourceInfoMode(4) +) + +// Compile compiles the given file names into fully-linked descriptors. The +// compiler's resolver is used to locate source code (or intermediate artifacts +// such as parsed ASTs or descriptor protos) and then do what is necessary to +// transform that into descriptors (parsing, linking, etc). +// +// Elements in the given returned files will implement [linker.Result] if the +// compiler had to link it (i.e. the resolver provided either a descriptor proto +// or source code). That result will contain a full AST for the file if the +// compiler had to parse it (i.e. the resolver provided source code for that +// file). +func (c *Compiler) Compile(ctx context.Context, files ...string) (linker.Files, error) { + if len(files) == 0 { + return nil, nil + } + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + par := c.MaxParallelism + if par <= 0 { + par = runtime.GOMAXPROCS(-1) + cpus := runtime.NumCPU() + if par > cpus { + par = cpus + } + } + + h := reporter.NewHandler(c.Reporter) + + sym := c.Symbols + if sym == nil { + sym = &linker.Symbols{} + } + e := executor{ + c: c, + h: h, + s: semaphore.NewWeighted(int64(par)), + cancel: cancel, + sym: sym, + results: map[string]*result{}, + } + + // We lock now and create all tasks under lock to make sure that no + // async task can create a duplicate result. For example, if files + // contains both "foo.proto" and "bar.proto", then there is a race + // after we start compiling "foo.proto" between this loop and the + // async compilation task to create the result for "bar.proto". But + // we need to know if the file is directly requested for compilation, + // so we need this loop to define the result. So this loop holds the + // lock the whole time so async tasks can't create a result first. 
+ results := make([]*result, len(files)) + func() { + e.mu.Lock() + defer e.mu.Unlock() + for i, f := range files { + results[i] = e.compileLocked(ctx, f, true) + } + }() + + descs := make([]linker.File, len(files)) + var firstError error + for i, r := range results { + select { + case <-r.ready: + case <-ctx.Done(): + return nil, ctx.Err() + } + if r.err != nil { + if firstError == nil { + firstError = r.err + } + } + descs[i] = r.res + } + + if err := h.Error(); err != nil { + return descs, err + } + // this should probably never happen; if any task returned an + // error, h.Error() should be non-nil + return descs, firstError +} + +type result struct { + name string + ready chan struct{} + + // true if this file was explicitly provided to the compiler; otherwise + // this file is an import that is implicitly included + explicitFile bool + + // produces a linker.File or error, only available when ready is closed + res linker.File + err error + + mu sync.Mutex + // the results that are dependencies of this result; this result is + // blocked, waiting on these dependencies to complete + blockedOn []string +} + +func (r *result) fail(err error) { + r.err = err + close(r.ready) +} + +func (r *result) complete(f linker.File) { + r.res = f + close(r.ready) +} + +func (r *result) setBlockedOn(deps []string) { + r.mu.Lock() + defer r.mu.Unlock() + r.blockedOn = deps +} + +func (r *result) getBlockedOn() []string { + r.mu.Lock() + defer r.mu.Unlock() + return r.blockedOn +} + +type executor struct { + c *Compiler + h *reporter.Handler + s *semaphore.Weighted + cancel context.CancelFunc + sym *linker.Symbols + + descriptorProtoCheck sync.Once + descriptorProtoIsCustom bool + + mu sync.Mutex + results map[string]*result +} + +func (e *executor) compile(ctx context.Context, file string) *result { + e.mu.Lock() + defer e.mu.Unlock() + + return e.compileLocked(ctx, file, false) +} + +func (e *executor) compileLocked(ctx context.Context, file string, explicitFile bool) *result { + r := e.results[file] + if r != nil { + return r + } + + r = &result{ + name: file, + ready: make(chan struct{}), + explicitFile: explicitFile, + } + e.results[file] = r + go func() { + defer func() { + if p := recover(); p != nil { + if r.err == nil { + // TODO: strip top frames from stack trace so that the panic is + // the top of the trace? + panicErr := PanicError{File: file, Value: p, Stack: string(debug.Stack())} + r.fail(panicErr) + } + // TODO: if r.err != nil, then this task has already + // failed and there's nothing we can really do to + // communicate this panic to parent goroutine. This + // means the panic must have happened *after* the + // failure was already recorded (or during?) + // It would be nice to do something else here, like + // send the compiler an out-of-band error? Or log? + } + }() + e.doCompile(ctx, file, r) + }() + return r +} + +// PanicError is an error value that represents a recovered panic. It includes +// the value returned by recover() as well as the stack trace. +// +// This should generally only be seen if a Resolver implementation panics. +// +// An error returned by a Compiler may wrap a PanicError, so you may need to +// use errors.As(...) to access panic details. +type PanicError struct { + // The file that was being processed when the panic occurred + File string + // The value returned by recover() + Value interface{} + // A formatted stack trace + Stack string +} + +// Error implements the error interface. It does NOT include the stack trace. 
+// Use a type assertion and query the Stack field directly to access that. +func (p PanicError) Error() string { + return fmt.Sprintf("panic handling %q: %v", p.File, p.Value) +} + +type errFailedToResolve struct { + err error + path string +} + +func (e errFailedToResolve) Error() string { + errMsg := e.err.Error() + if strings.Contains(errMsg, e.path) { + // underlying error already refers to path in question, so we don't need to add more context + return errMsg + } + return fmt.Sprintf("could not resolve path %q: %s", e.path, e.err.Error()) +} + +func (e errFailedToResolve) Unwrap() error { + return e.err +} + +func (e *executor) hasOverrideDescriptorProto() bool { + e.descriptorProtoCheck.Do(func() { + defer func() { + // ignore a panic here; just assume no custom descriptor.proto + _ = recover() + }() + res, err := e.c.Resolver.FindFileByPath(descriptorProtoPath) + e.descriptorProtoIsCustom = err == nil && res.Desc != standardImports[descriptorProtoPath] + }) + return e.descriptorProtoIsCustom +} + +func (e *executor) doCompile(ctx context.Context, file string, r *result) { + t := task{e: e, h: e.h.SubHandler(), r: r} + if err := e.s.Acquire(ctx, 1); err != nil { + r.fail(err) + return + } + defer t.release() + + sr, err := e.c.Resolver.FindFileByPath(file) + if err != nil { + r.fail(errFailedToResolve{err: err, path: file}) + return + } + + defer func() { + // if results included a result, don't leave it open if it can be closed + if sr.Source == nil { + return + } + if c, ok := sr.Source.(io.Closer); ok { + _ = c.Close() + } + }() + + desc, err := t.asFile(ctx, file, sr) + if err != nil { + r.fail(err) + return + } + r.complete(desc) +} + +// A compilation task. The executor has a semaphore that limits the number +// of concurrent, running tasks. +type task struct { + e *executor + + // handler for this task + h *reporter.Handler + + // If true, this task needs to acquire a semaphore permit before running. + // If false, this task needs to release its semaphore permit on completion. 
+ released bool + + // the result that is populated by this task + r *result +} + +func (t *task) release() { + if !t.released { + t.e.s.Release(1) + t.released = true + } +} + +const descriptorProtoPath = "google/protobuf/descriptor.proto" + +func (t *task) asFile(ctx context.Context, name string, r SearchResult) (linker.File, error) { + if r.Desc != nil { + if r.Desc.Path() != name { + return nil, fmt.Errorf("search result for %q returned descriptor for %q", name, r.Desc.Path()) + } + return linker.NewFileRecursive(r.Desc) + } + + parseRes, err := t.asParseResult(name, r) + if err != nil { + return nil, err + } + if linkRes, ok := parseRes.(linker.Result); ok { + // if resolver returned a parse result that was actually a link result, + // use the link result directly (no other steps needed) + return linkRes, nil + } + + var deps []linker.File + fileDescriptorProto := parseRes.FileDescriptorProto() + var wantsDescriptorProto bool + imports := fileDescriptorProto.Dependency + + if t.e.hasOverrideDescriptorProto() { + // we only consider implicitly including descriptor.proto if it's overridden + if name != descriptorProtoPath { + var includesDescriptorProto bool + for _, dep := range fileDescriptorProto.Dependency { + if dep == descriptorProtoPath { + includesDescriptorProto = true + break + } + } + if !includesDescriptorProto { + wantsDescriptorProto = true + // make a defensive copy so we don't inadvertently mutate + // slice's backing array when adding this implicit dep + importsCopy := make([]string, len(imports)+1) + copy(importsCopy, imports) + importsCopy[len(imports)] = descriptorProtoPath + imports = importsCopy + } + } + } + + var overrideDescriptorProto linker.File + if len(imports) > 0 { + t.r.setBlockedOn(imports) + + results := make([]*result, len(fileDescriptorProto.Dependency)) + checked := map[string]struct{}{} + for i, dep := range fileDescriptorProto.Dependency { + span := findImportSpan(parseRes, dep) + if name == dep { + // doh! file imports itself + handleImportCycle(t.h, span, []string{name}, dep) + return nil, t.h.Error() + } + + res := t.e.compile(ctx, dep) + // check for dependency cycle to prevent deadlock + if err := t.e.checkForDependencyCycle(res, []string{name, dep}, span, checked); err != nil { + return nil, err + } + results[i] = res + } + deps = make([]linker.File, len(results)) + var descriptorProtoRes *result + if wantsDescriptorProto { + descriptorProtoRes = t.e.compile(ctx, descriptorProtoPath) + } + + // release our semaphore so dependencies can be processed w/out risk of deadlock + t.e.s.Release(1) + t.released = true + + // now we wait for them all to be computed + for i, res := range results { + select { + case <-res.ready: + if res.err != nil { + if rerr, ok := res.err.(errFailedToResolve); ok { + // We don't report errors to get file from resolver to handler since + // it's usually considered immediately fatal. However, if the reason + // we were resolving is due to an import, turn this into an error with + // source position that pinpoints the import statement and report it. 
+ return nil, reporter.Error(findImportSpan(parseRes, res.name), rerr) + } + return nil, res.err + } + deps[i] = res.res + case <-ctx.Done(): + return nil, ctx.Err() + } + } + if descriptorProtoRes != nil { + select { + case <-descriptorProtoRes.ready: + // descriptor.proto wasn't explicitly imported, so we can ignore a failure + if descriptorProtoRes.err == nil { + overrideDescriptorProto = descriptorProtoRes.res + } + case <-ctx.Done(): + return nil, ctx.Err() + } + } + // all deps resolved + t.r.setBlockedOn(nil) + // reacquire semaphore so we can proceed + if err := t.e.s.Acquire(ctx, 1); err != nil { + return nil, err + } + t.released = false + } + + return t.link(parseRes, deps, overrideDescriptorProto) +} + +func (e *executor) checkForDependencyCycle(res *result, sequence []string, span ast.SourceSpan, checked map[string]struct{}) error { + if _, ok := checked[res.name]; ok { + // already checked this one + return nil + } + checked[res.name] = struct{}{} + deps := res.getBlockedOn() + for _, dep := range deps { + // is this a cycle? + for _, file := range sequence { + if file == dep { + handleImportCycle(e.h, span, sequence, dep) + return e.h.Error() + } + } + + e.mu.Lock() + depRes := e.results[dep] + e.mu.Unlock() + if depRes == nil { + continue + } + if err := e.checkForDependencyCycle(depRes, append(sequence, dep), span, checked); err != nil { + return err + } + } + return nil +} + +func handleImportCycle(h *reporter.Handler, span ast.SourceSpan, importSequence []string, dep string) { + var buf bytes.Buffer + buf.WriteString("cycle found in imports: ") + for _, imp := range importSequence { + _, _ = fmt.Fprintf(&buf, "%q -> ", imp) + } + _, _ = fmt.Fprintf(&buf, "%q", dep) + // error is saved and returned in caller + _ = h.HandleErrorWithPos(span, errors.New(buf.String())) +} + +func findImportSpan(res parser.Result, dep string) ast.SourceSpan { + root := res.AST() + if root == nil { + return ast.UnknownSpan(res.FileNode().Name()) + } + for _, decl := range root.Decls { + if imp, ok := decl.(*ast.ImportNode); ok { + if imp.Name.AsString() == dep { + return root.NodeInfo(imp.Name) + } + } + } + // this should never happen... + return ast.UnknownSpan(res.FileNode().Name()) +} + +func (t *task) link(parseRes parser.Result, deps linker.Files, overrideDescriptorProtoRes linker.File) (linker.File, error) { + file, err := linker.Link(parseRes, deps, t.e.sym, t.h) + if err != nil { + return nil, err + } + + var interpretOpts []options.InterpreterOption + if overrideDescriptorProtoRes != nil { + interpretOpts = []options.InterpreterOption{options.WithOverrideDescriptorProto(overrideDescriptorProtoRes)} + } + + optsIndex, err := options.InterpretOptions(file, t.h, interpretOpts...) + if err != nil { + return nil, err + } + // now that options are interpreted, we can do some additional checks + if err := file.ValidateOptions(t.h, t.e.sym); err != nil { + return nil, err + } + if t.r.explicitFile { + file.CheckForUnusedImports(t.h) + } + if err := t.h.Error(); err != nil { + return nil, err + } + + if needsSourceInfo(parseRes, t.e.c.SourceInfoMode) { + var srcInfoOpts []sourceinfo.GenerateOption + if t.e.c.SourceInfoMode&SourceInfoExtraComments != 0 { + srcInfoOpts = append(srcInfoOpts, sourceinfo.WithExtraComments()) + } + if t.e.c.SourceInfoMode&SourceInfoExtraOptionLocations != 0 { + srcInfoOpts = append(srcInfoOpts, sourceinfo.WithExtraOptionLocations()) + } + parseRes.FileDescriptorProto().SourceCodeInfo = sourceinfo.GenerateSourceInfo(parseRes.AST(), optsIndex, srcInfoOpts...) 
+ } else if t.e.c.SourceInfoMode == SourceInfoNone { + // If results came from unlinked FileDescriptorProto, it could have + // source info that we should strip. + parseRes.FileDescriptorProto().SourceCodeInfo = nil + } + if len(parseRes.FileDescriptorProto().GetSourceCodeInfo().GetLocation()) > 0 { + // If we have source code info in the descriptor proto at this point, + // we have to build the index of locations. + file.PopulateSourceCodeInfo() + } + + if !t.e.c.RetainASTs { + file.RemoveAST() + } + return file, nil +} + +func needsSourceInfo(parseRes parser.Result, mode SourceInfoMode) bool { + return mode != SourceInfoNone && parseRes.AST() != nil && parseRes.FileDescriptorProto().SourceCodeInfo == nil +} + +func (t *task) asParseResult(name string, r SearchResult) (parser.Result, error) { + if r.ParseResult != nil { + if r.ParseResult.FileDescriptorProto().GetName() != name { + return nil, fmt.Errorf("search result for %q returned descriptor for %q", name, r.ParseResult.FileDescriptorProto().GetName()) + } + // If the file descriptor needs linking, it will be mutated during the + // next stage. So to make anu mutations thread-safe, we must make a + // defensive copy. + res := parser.Clone(r.ParseResult) + return res, nil + } + + if r.Proto != nil { + if r.Proto.GetName() != name { + return nil, fmt.Errorf("search result for %q returned descriptor for %q", name, r.Proto.GetName()) + } + // If the file descriptor needs linking, it will be mutated during the + // next stage. So to make any mutations thread-safe, we must make a + // defensive copy. + descProto := proto.Clone(r.Proto).(*descriptorpb.FileDescriptorProto) //nolint:errcheck + return parser.ResultWithoutAST(descProto), nil + } + + file, err := t.asAST(name, r) + if err != nil { + return nil, err + } + + return parser.ResultFromAST(file, true, t.h) +} + +func (t *task) asAST(name string, r SearchResult) (*ast.FileNode, error) { + if r.AST != nil { + if r.AST.Name() != name { + return nil, fmt.Errorf("search result for %q returned descriptor for %q", name, r.AST.Name()) + } + return r.AST, nil + } + + return parser.Parse(name, r.Source, t.h) +} diff --git a/vendor/github.com/bufbuild/protocompile/doc.go b/vendor/github.com/bufbuild/protocompile/doc.go new file mode 100644 index 00000000..40067b30 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/doc.go @@ -0,0 +1,82 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package protocompile provides the entry point for a high performance +// native Go protobuf compiler. "Compile" in this case just means parsing +// and validating source and generating fully-linked descriptors in the end. +// Unlike the protoc command-line tool, this package does not try to use the +// descriptors to perform code generation. +// +// The various sub-packages represent the various compile phases and contain +// models for the intermediate results. Those phases follow: +// 1. Parse into AST. +// Also see: parser.Parse +// 2. 
Convert AST to unlinked descriptor protos. +// Also see: parser.ResultFromAST +// 3. Link descriptor protos into "rich" descriptors. +// Also see: linker.Link +// 4. Interpret custom options. +// Also see: options.InterpretOptions +// 5. Generate source code info. +// Also see: sourceinfo.GenerateSourceInfo +// +// This package provides an easy-to-use interface that does all the relevant +// phases, based on the inputs given. If an input is provided as source, all +// phases apply. If an input is provided as a descriptor proto, only phases +// 3 to 5 apply. Nothing is necessary if provided a linked descriptor (which +// is usually only the case for select system dependencies). +// +// This package is also capable of taking advantage of multiple CPU cores, so +// a compilation involving thousands of files can be done very quickly by +// compiling things in parallel. +// +// # Resolvers +// +// A Resolver is how the compiler locates artifacts that are inputs to the +// compilation. For example, it can load protobuf source code that must be +// processed. A Resolver could also supply some already-compiled dependencies +// as fully-linked descriptors, alleviating the need to re-compile them. +// +// A Resolver can provide any of the following in response to a query for an +// input. +// - Source code: If a resolver answers a query with protobuf source, the +// compiler will parse and compile it. +// - AST: If a resolver answers a query with an AST, the parsing step can be +// skipped, and the rest of the compilation steps will be applied. +// - Descriptor proto: If a resolver answers a query with an unlinked proto, +// only the other compilation steps, including linking, need to be applied. +// - Descriptor: If a resolver answers a query with a fully-linked descriptor, +// nothing further needs to be done. The descriptor is used as-is. +// +// Compilation will use the Resolver to load the files that are to be compiled +// and also to load all dependencies (i.e. other files imported by those being +// compiled). +// +// # Compiler +// +// A Compiler accepts a list of file names and produces the list of descriptors. +// A Compiler has several fields that control how it works but only the Resolver +// field is required. A minimal Compiler, that resolves files by loading them +// from the file system based on the current working directory, can be had with +// the following simple snippet: +// +// compiler := protocompile.Compiler{ +// Resolver: &protocompile.SourceResolver{}, +// } +// +// This minimal Compiler will use default parallelism, equal to the number of +// CPU cores detected; it will not generate source code info in the resulting +// descriptors; and it will fail fast at the first sign of any error. All of +// these aspects can be customized by setting other fields. +package protocompile diff --git a/vendor/github.com/bufbuild/protocompile/go.work b/vendor/github.com/bufbuild/protocompile/go.work new file mode 100644 index 00000000..ba2d9c0d --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/go.work @@ -0,0 +1,6 @@ +go 1.21 + +use ( + . 
+ ./internal/benchmarks +) diff --git a/vendor/github.com/bufbuild/protocompile/go.work.sum b/vendor/github.com/bufbuild/protocompile/go.work.sum new file mode 100644 index 00000000..d977cf05 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/go.work.sum @@ -0,0 +1,235 @@ +cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= +cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= +cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= +cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= +cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= +cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/bufbuild/protocompile v0.2.1-0.20230123224550-da57cd758c2f/go.mod h1:tleDrpPTlLUVmgnEoN6qBliKWqJaZFJXqZdFjTd+ocU= +github.com/bufbuild/protocompile v0.13.0/go.mod h1:dr++fGGeMPWHv7jPeT06ZKukm45NJscd7rUxQVzEKRk= +github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403 h1:cqQfy1jclcSy/FwLjemeg3SR1yaINm74aQyupQ0Bl8M= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d 
h1:QyzYnTnPE15SQyUeqU6qLbWxMkwyAyu+vGksa0b7j00= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= +github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/mock v1.1.1 h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= +github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= +github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= +github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/jhump/gopoet v0.1.0 h1:gYjOPnzHd2nzB37xYQZxj4EIQNpBrBskRqQQ3q4ZgSg= +github.com/jhump/goprotoc v0.5.0 h1:Y1UgUX+txUznfqcGdDef8ZOVlyQvnV0pKWZH08RmZuo= +github.com/jhump/protoreflect v1.15.0 h1:U5T5/2LF0AZQFP9T4W5GfBjBaTruomrKobiR4E+oA/Q= +github.com/jhump/protoreflect v1.15.0/go.mod h1:qww51KYjD2hoCl/ohxw5cK2LSssFczrbO1t8Ld2TENs= +github.com/kr/pty v1.1.1 h1:VkoXIwSboBpnk99O/KFauAEILuNHv5DVFKZMBN/gUgw= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/stretchr/objx v0.4.0/go.mod 
h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= +golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 h1:XQyxROzUlZH+WIQwySDgnISgOivlhjIEwaQaJEJrrN0= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57 h1:LQmS1nU0twXLA96Kt7U9qtHJEbBk3z6Q0V4UXjZkpr4= +golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod 
h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/sync v0.0.0-20220513210516-0976fa681c29/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= +golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= +golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= +golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.13.0/go.mod 
h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023 h1:0c3L82FDQ5rt1bjTBlchS8t6RQ6299/+5bWMnRLh+uI= +golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= +google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= +google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= +google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= +google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= +google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= +google.golang.org/api v0.81.0/go.mod h1:FA6Mb/bZxj706H2j+j2d6mHEEaHBmbbWnkfvmorOCko= +google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod 
h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod 
h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.2-0.20220831092852-f930b1dc76e8/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc h1:/hemPrYIhOhy8zYrNj+069zDB68us2sMGsfkFJO0iZs= diff --git a/vendor/github.com/bufbuild/protocompile/internal/editions/editions.go b/vendor/github.com/bufbuild/protocompile/internal/editions/editions.go new file mode 100644 index 00000000..ee054fa7 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/internal/editions/editions.go @@ -0,0 +1,420 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package editions contains helpers related to resolving features for +// Protobuf editions. These are lower-level helpers. 
Higher-level helpers +// (which use this package under the hood) can be found in the exported +// protoutil package. +package editions + +import ( + "fmt" + "strings" + "sync" + + "google.golang.org/protobuf/encoding/prototext" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/types/descriptorpb" + "google.golang.org/protobuf/types/dynamicpb" +) + +const ( + // MinSupportedEdition is the earliest edition supported by this module. + // It should be 2023 (the first edition) for the indefinite future. + MinSupportedEdition = descriptorpb.Edition_EDITION_2023 + + // MaxSupportedEdition is the most recent edition supported by this module. + MaxSupportedEdition = descriptorpb.Edition_EDITION_2023 +) + +var ( + // SupportedEditions is the exhaustive set of editions that protocompile + // can support. We don't allow it to compile future/unknown editions, to + // make sure we don't generate incorrect descriptors, in the event that + // a future edition introduces a change or new feature that requires + // new logic in the compiler. + SupportedEditions = computeSupportedEditions(MinSupportedEdition, MaxSupportedEdition) + + // FeatureSetDescriptor is the message descriptor for the compiled-in + // version (in the descriptorpb package) of the google.protobuf.FeatureSet + // message type. + FeatureSetDescriptor = (*descriptorpb.FeatureSet)(nil).ProtoReflect().Descriptor() + // FeatureSetType is the message type for the compiled-in version (in + // the descriptorpb package) of google.protobuf.FeatureSet. + FeatureSetType = (*descriptorpb.FeatureSet)(nil).ProtoReflect().Type() + + editionDefaults map[descriptorpb.Edition]*descriptorpb.FeatureSet + editionDefaultsInit sync.Once +) + +// HasFeatures is implemented by all options messages and provides a +// nil-receiver-safe way of accessing the features explicitly configured +// in those options. +type HasFeatures interface { + GetFeatures() *descriptorpb.FeatureSet +} + +var _ HasFeatures = (*descriptorpb.FileOptions)(nil) +var _ HasFeatures = (*descriptorpb.MessageOptions)(nil) +var _ HasFeatures = (*descriptorpb.FieldOptions)(nil) +var _ HasFeatures = (*descriptorpb.OneofOptions)(nil) +var _ HasFeatures = (*descriptorpb.ExtensionRangeOptions)(nil) +var _ HasFeatures = (*descriptorpb.EnumOptions)(nil) +var _ HasFeatures = (*descriptorpb.EnumValueOptions)(nil) +var _ HasFeatures = (*descriptorpb.ServiceOptions)(nil) +var _ HasFeatures = (*descriptorpb.MethodOptions)(nil) + +// ResolveFeature resolves a feature for the given descriptor. This simple +// helper examines the given element and its ancestors, searching for an +// override. If there is no overridden value, it returns a zero value. +func ResolveFeature( + element protoreflect.Descriptor, + fields ...protoreflect.FieldDescriptor, +) (protoreflect.Value, error) { + for { + var features *descriptorpb.FeatureSet + if withFeatures, ok := element.Options().(HasFeatures); ok { + // It should not really be possible for 'ok' to ever be false... + features = withFeatures.GetFeatures() + } + + // TODO: adaptFeatureSet is only looking at the first field. But if we needed to + // support an extension field inside a custom feature, we'd really need + // to check all fields. That gets particularly complicated if the traversal + // path of fields includes list and map values. Luckily, features are not + // supposed to be repeated and not supposed to themselves have extensions. 
+ // So this should be fine, at least for now. + msgRef, err := adaptFeatureSet(features, fields[0]) + if err != nil { + return protoreflect.Value{}, err + } + // Navigate the fields to find the value + var val protoreflect.Value + for i, field := range fields { + if i > 0 { + msgRef = val.Message() + } + if !msgRef.Has(field) { + val = protoreflect.Value{} + break + } + val = msgRef.Get(field) + } + if val.IsValid() { + // All fields were set! + return val, nil + } + + parent := element.Parent() + if parent == nil { + // We've reached the end of the inheritance chain. + return protoreflect.Value{}, nil + } + element = parent + } +} + +// HasEdition should be implemented by values that implement +// [protoreflect.FileDescriptor], to provide access to the file's +// edition when its syntax is [protoreflect.Editions]. +type HasEdition interface { + // Edition returns the numeric value of a google.protobuf.Edition enum + // value that corresponds to the edition of this file. If the file does + // not use editions, it should return the enum value that corresponds + // to the syntax level, EDITION_PROTO2 or EDITION_PROTO3. + Edition() int32 +} + +// GetEdition returns the edition for a given element. It returns +// EDITION_PROTO2 or EDITION_PROTO3 if the element is in a file that +// uses proto2 or proto3 syntax, respectively. It returns EDITION_UNKNOWN +// if the syntax of the given element is not recognized or if the edition +// cannot be ascertained from the element's [protoreflect.FileDescriptor]. +func GetEdition(d protoreflect.Descriptor) descriptorpb.Edition { + switch d.ParentFile().Syntax() { + case protoreflect.Proto2: + return descriptorpb.Edition_EDITION_PROTO2 + case protoreflect.Proto3: + return descriptorpb.Edition_EDITION_PROTO3 + case protoreflect.Editions: + withEdition, ok := d.ParentFile().(HasEdition) + if !ok { + // The parent file should always be a *result, so we should + // never be able to actually get in here. If we somehow did + // have another implementation of protoreflect.FileDescriptor, + // it doesn't provide a way to get the edition, other than the + // potentially expensive step of generating a FileDescriptorProto + // and then querying for the edition from that. :/ + return descriptorpb.Edition_EDITION_UNKNOWN + } + return descriptorpb.Edition(withEdition.Edition()) + default: + return descriptorpb.Edition_EDITION_UNKNOWN + } +} + +// GetEditionDefaults returns the default feature values for the given edition. +// It returns nil if the given edition is not known. +// +// This only populates known features, those that are fields of [*descriptorpb.FeatureSet]. +// It does not populate any extension fields. +// +// The returned value must not be mutated as it references shared package state. +func GetEditionDefaults(edition descriptorpb.Edition) *descriptorpb.FeatureSet { + editionDefaultsInit.Do(func() { + editionDefaults = make(map[descriptorpb.Edition]*descriptorpb.FeatureSet, len(descriptorpb.Edition_name)) + // Compute default for all known editions in descriptorpb. + for editionInt := range descriptorpb.Edition_name { + edition := descriptorpb.Edition(editionInt) + defaults := &descriptorpb.FeatureSet{} + defaultsRef := defaults.ProtoReflect() + fields := defaultsRef.Descriptor().Fields() + // Note: we are not computing defaults for extensions. Those are not needed + // by anything in the compiler, so we can get away with just computing + // defaults for these static, non-extension fields. 
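	// Editorial note, not part of the upstream file: the loop below effectively
	// precomputes values such as field_presence = EXPLICIT for EDITION_2023,
	// which callers elsewhere in this module can then read back through the
	// generated getters, for example:
	//
	//	GetEditionDefaults(descriptorpb.Edition_EDITION_2023).GetFieldPresence()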
+ for i, length := 0, fields.Len(); i < length; i++ { + field := fields.Get(i) + val, err := GetFeatureDefault(edition, FeatureSetType, field) + if err != nil { + // should we fail somehow?? + continue + } + defaultsRef.Set(field, val) + } + editionDefaults[edition] = defaults + } + }) + return editionDefaults[edition] +} + +// GetFeatureDefault computes the default value for a feature. The given container +// is the message type that contains the field. This should usually be the descriptor +// for google.protobuf.FeatureSet, but can be a different message for computing the +// default value of custom features. +// +// Note that this always re-computes the default. For known fields of FeatureSet, +// it is more efficient to query from the statically computed default messages, +// like so: +// +// editions.GetEditionDefaults(edition).ProtoReflect().Get(feature) +func GetFeatureDefault(edition descriptorpb.Edition, container protoreflect.MessageType, feature protoreflect.FieldDescriptor) (protoreflect.Value, error) { + opts, ok := feature.Options().(*descriptorpb.FieldOptions) + if !ok { + // this is most likely impossible except for contrived use cases... + return protoreflect.Value{}, fmt.Errorf("options is %T instead of *descriptorpb.FieldOptions", feature.Options()) + } + maxEdition := descriptorpb.Edition(-1) + var maxVal string + for _, def := range opts.EditionDefaults { + if def.GetEdition() <= edition && def.GetEdition() > maxEdition { + maxEdition = def.GetEdition() + maxVal = def.GetValue() + } + } + if maxEdition == -1 { + // no matching default found + return protoreflect.Value{}, fmt.Errorf("no relevant default for edition %s", edition) + } + // We use a typed nil so that it won't fall back to the global registry. Features + // should not use extensions or google.protobuf.Any, so a nil *Types is fine. + unmarshaler := prototext.UnmarshalOptions{Resolver: (*protoregistry.Types)(nil)} + // The string value is in the text format: either a field value literal or a + // message literal. (Repeated and map features aren't supported, so there's no + // array or map literal syntax to worry about.) + if feature.Kind() == protoreflect.MessageKind || feature.Kind() == protoreflect.GroupKind { + fldVal := container.Zero().NewField(feature) + err := unmarshaler.Unmarshal([]byte(maxVal), fldVal.Message().Interface()) + if err != nil { + return protoreflect.Value{}, err + } + return fldVal, nil + } + // The value is the textformat for the field. But prototext doesn't provide a way + // to unmarshal a single field value. To work around, we unmarshal into an enclosing + // message, which means we must prefix the value with the field name. + if feature.IsExtension() { + maxVal = fmt.Sprintf("[%s]: %s", feature.FullName(), maxVal) + } else { + maxVal = fmt.Sprintf("%s: %s", feature.Name(), maxVal) + } + empty := container.New() + err := unmarshaler.Unmarshal([]byte(maxVal), empty.Interface()) + if err != nil { + return protoreflect.Value{}, err + } + return empty.Get(feature), nil +} + +func adaptFeatureSet(msg *descriptorpb.FeatureSet, field protoreflect.FieldDescriptor) (protoreflect.Message, error) { + msgRef := msg.ProtoReflect() + var actualField protoreflect.FieldDescriptor + switch { + case field.IsExtension(): + // Extensions can be used directly with the feature set, even if + // field.ContainingMessage() != FeatureSetDescriptor. But only if + // the value is either not a message or is a message with the + // right descriptor, i.e. val.Descriptor() == field.Message(). 
+ if actualField = actualDescriptor(msgRef, field); actualField == nil || actualField == field { + if msgRef.Has(field) || len(msgRef.GetUnknown()) == 0 { + return msgRef, nil + } + // The field is not present, but the message has unrecognized values. So + // let's try to parse the unrecognized bytes, just in case they contain + // this extension. + temp := &descriptorpb.FeatureSet{} + unmarshaler := proto.UnmarshalOptions{ + AllowPartial: true, + Resolver: resolverForExtension{field}, + } + if err := unmarshaler.Unmarshal(msgRef.GetUnknown(), temp); err != nil { + return nil, fmt.Errorf("failed to parse unrecognized fields of FeatureSet: %w", err) + } + return temp.ProtoReflect(), nil + } + case field.ContainingMessage() == FeatureSetDescriptor: + // Known field, not dynamically generated. Can directly use with the feature set. + return msgRef, nil + default: + actualField = FeatureSetDescriptor.Fields().ByNumber(field.Number()) + } + + // If we get here, we have a dynamic field descriptor or an extension + // descriptor whose message type does not match the descriptor of the + // stored value. We need to copy its value into a dynamic message, + // which requires marshalling/unmarshalling. + // We only need to copy over the unrecognized bytes (if any) + // and the same field (if present). + data := msgRef.GetUnknown() + if actualField != nil && msgRef.Has(actualField) { + subset := &descriptorpb.FeatureSet{} + subset.ProtoReflect().Set(actualField, msgRef.Get(actualField)) + var err error + data, err = proto.MarshalOptions{AllowPartial: true}.MarshalAppend(data, subset) + if err != nil { + return nil, fmt.Errorf("failed to marshal FeatureSet field %s to bytes: %w", field.Name(), err) + } + } + if len(data) == 0 { + // No relevant data to copy over, so we can just return + // a zero value message + return dynamicpb.NewMessageType(field.ContainingMessage()).Zero(), nil + } + + other := dynamicpb.NewMessage(field.ContainingMessage()) + // We don't need to use a resolver for this step because we know that + // field is not an extension. And features are not allowed to themselves + // have extensions. 
+ if err := (proto.UnmarshalOptions{AllowPartial: true}).Unmarshal(data, other); err != nil { + return nil, fmt.Errorf("failed to marshal FeatureSet field %s to bytes: %w", field.Name(), err) + } + return other, nil +} + +type resolverForExtension struct { + ext protoreflect.ExtensionDescriptor +} + +func (r resolverForExtension) FindMessageByName(_ protoreflect.FullName) (protoreflect.MessageType, error) { + return nil, protoregistry.NotFound +} + +func (r resolverForExtension) FindMessageByURL(_ string) (protoreflect.MessageType, error) { + return nil, protoregistry.NotFound +} + +func (r resolverForExtension) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { + if field == r.ext.FullName() { + return asExtensionType(r.ext), nil + } + return nil, protoregistry.NotFound +} + +func (r resolverForExtension) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { + if message == r.ext.ContainingMessage().FullName() && field == r.ext.Number() { + return asExtensionType(r.ext), nil + } + return nil, protoregistry.NotFound +} + +func asExtensionType(ext protoreflect.ExtensionDescriptor) protoreflect.ExtensionType { + if xtd, ok := ext.(protoreflect.ExtensionTypeDescriptor); ok { + return xtd.Type() + } + return dynamicpb.NewExtensionType(ext) +} + +func computeSupportedEditions(minEdition, maxEdition descriptorpb.Edition) map[string]descriptorpb.Edition { + supportedEditions := map[string]descriptorpb.Edition{} + for editionNum := range descriptorpb.Edition_name { + edition := descriptorpb.Edition(editionNum) + if edition >= minEdition && edition <= maxEdition { + name := strings.TrimPrefix(edition.String(), "EDITION_") + supportedEditions[name] = edition + } + } + return supportedEditions +} + +// actualDescriptor returns the actual field descriptor referenced by msg that +// corresponds to the given ext (i.e. same number). It returns nil if msg has +// no reference, if the actual descriptor is the same as ext, or if ext is +// otherwise safe to use as is. +func actualDescriptor(msg protoreflect.Message, ext protoreflect.ExtensionDescriptor) protoreflect.FieldDescriptor { + if !msg.Has(ext) || ext.Message() == nil { + // nothing to match; safe as is + return nil + } + val := msg.Get(ext) + switch { + case ext.IsMap(): // should not actually be possible + expectedDescriptor := ext.MapValue().Message() + if expectedDescriptor == nil { + return nil // nothing to match + } + // We know msg.Has(field) is true, from above, so there's at least one entry. + var matches bool + val.Map().Range(func(_ protoreflect.MapKey, val protoreflect.Value) bool { + matches = val.Message().Descriptor() == expectedDescriptor + return false + }) + if matches { + return nil + } + case ext.IsList(): + // We know msg.Has(field) is true, from above, so there's at least one entry. + if val.List().Get(0).Message().Descriptor() == ext.Message() { + return nil + } + case !ext.IsMap(): + if val.Message().Descriptor() == ext.Message() { + return nil + } + } + // The underlying message descriptors do not match. So we need to return + // the actual field descriptor. Sadly, protoreflect.Message provides no way + // to query the field descriptor in a message by number. For non-extensions, + // one can query the associated message descriptor. But for extensions, we + // have to do the slow thing, and range through all fields looking for it. 
+ var actualField protoreflect.FieldDescriptor + msg.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { + if fd.Number() == ext.Number() { + actualField = fd + return false + } + return true + }) + return actualField +} diff --git a/vendor/github.com/bufbuild/protocompile/internal/featuresext/cpp_features.protoset b/vendor/github.com/bufbuild/protocompile/internal/featuresext/cpp_features.protoset new file mode 100644 index 0000000000000000000000000000000000000000..106ad8e4ae5e5b446c5da3a137d886518b93c99a GIT binary patch literal 605 zcmZ8f!EVz)5bY+RPB#)*%Kjy#8n1u z*r1}LmqU2SlW0?uYo|=h8vW|>!9?q~Tdnxk;iAA*l3vAWp)AT6%W54kU@!W|f7n;X zu37Jzu^YB0dd^fB{Pk`a7mu=}uo(RFez2dczw!KyH~h{P7YMBzv~EzlfiqNYjirOU zgvtIsIAu(UJ&Q}_N z9vl^JN=eM}vLV|VTw5$I(Sk|nVhBmfnx5rYNE=3e+w7#7d`hi~j1R0cdAW)mSqB}; z;qiDF>ScH@wQ$~07Q{Gyc`u@Y*qtw;li9l_Bns@)$@Dz9rHr~1QN&y4S~i+;{@+AA z<4;>*y!d=JjX%!b&rZ*0{*LJL#=~dzX>uC9@jHU^Z56+Jc*MK(6~yzfU3|M_`XJ$U T!Eh-&2>e!#*P literal 0 HcmV?d00001 diff --git a/vendor/github.com/bufbuild/protocompile/internal/featuresext/featuresext.go b/vendor/github.com/bufbuild/protocompile/internal/featuresext/featuresext.go new file mode 100644 index 00000000..892524e6 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/internal/featuresext/featuresext.go @@ -0,0 +1,84 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package featuresext provides file descriptors for the +// "google/protobuf/cpp_features.proto" and "google/protobuf/java_features.proto" +// standard import files. Unlike the other standard/well-known +// imports, these files have no standard Go package in their +// runtime with generated code. So in order to make them available +// as "standard imports" to compiler users, we must embed these +// descriptors into a Go package. 
+package featuresext + +import ( + _ "embed" + "fmt" + "sync" + + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protodesc" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/types/descriptorpb" +) + +var ( + //go:embed cpp_features.protoset + cppFeatures []byte + + //go:embed java_features.protoset + javaFeatures []byte + + initOnce sync.Once + initCppFeatures protoreflect.FileDescriptor + initCppErr error + initJavaFeatures protoreflect.FileDescriptor + initJavaErr error +) + +func initDescriptors() { + initOnce.Do(func() { + initCppFeatures, initCppErr = buildDescriptor("google/protobuf/cpp_features.proto", cppFeatures) + initJavaFeatures, initJavaErr = buildDescriptor("google/protobuf/java_features.proto", javaFeatures) + }) +} + +func CppFeaturesDescriptor() (protoreflect.FileDescriptor, error) { + initDescriptors() + return initCppFeatures, initCppErr +} + +func JavaFeaturesDescriptor() (protoreflect.FileDescriptor, error) { + initDescriptors() + return initJavaFeatures, initJavaErr +} + +func buildDescriptor(name string, data []byte) (protoreflect.FileDescriptor, error) { + var files descriptorpb.FileDescriptorSet + err := proto.Unmarshal(data, &files) + if err != nil { + return nil, fmt.Errorf("failed to load descriptor for %q: %w", name, err) + } + if len(files.File) != 1 { + return nil, fmt.Errorf("failed to load descriptor for %q: expected embedded descriptor set to contain exactly one file but it instead has %d", name, len(files.File)) + } + if files.File[0].GetName() != name { + return nil, fmt.Errorf("failed to load descriptor for %q: embedded descriptor contains wrong file %q", name, files.File[0].GetName()) + } + descriptor, err := protodesc.NewFile(files.File[0], protoregistry.GlobalFiles) + if err != nil { + return nil, fmt.Errorf("failed to load descriptor for %q: %w", name, err) + } + return descriptor, nil +} diff --git a/vendor/github.com/bufbuild/protocompile/internal/featuresext/java_features.protoset b/vendor/github.com/bufbuild/protocompile/internal/featuresext/java_features.protoset new file mode 100644 index 0000000000000000000000000000000000000000..60de3eb758e1fbfef09eca85deb2a370af727d6a GIT binary patch literal 856 zcmbtTF>ljA6y~C|F4w9UGZ3`{Pi5&+7ezv<*b``K~1_^i8gr2YdY z=Cbw=fVwfU@&}k%5R`wx*-mK4%6RX-_kG`e?;ZWNP9MfX#2gPZDYWqO=K=wD< z-^n{dE=c`?kV|sgscV^I* z)aMz3xzI+r)Cw_5(h{tS;#47@p#sUwL7ECn3wV?u50yKxE4NR}r+183=VOiD$#rz( z%@|KYR;V^=CXMap12^;C6i@ER_hd>IY4meBWoW)q0S_)xd z${lPMc5j}&*QMM42I1_|`=ep^$Q!p#ddIC{@3ilY`p^5Pulx2bXOk|1v57G;vCv93kGqE_kZnehM%9?$5=iB-Ub>p{XX0h3{S7$LOd)xc;ULdBc1ns-aV|!VA Fqdy8TAUXg5 literal 0 HcmV?d00001 diff --git a/vendor/github.com/bufbuild/protocompile/internal/message_context.go b/vendor/github.com/bufbuild/protocompile/internal/message_context.go new file mode 100644 index 00000000..52acbdfd --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/internal/message_context.go @@ -0,0 +1,98 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "bytes" + "fmt" + + "google.golang.org/protobuf/types/descriptorpb" + + "github.com/bufbuild/protocompile/ast" +) + +// ParsedFile wraps an optional AST and required FileDescriptorProto. +// This is used so types like parser.Result can be passed to this internal package avoiding circular imports. +// Additionally, it makes it less likely that users might specify one or the other. +type ParsedFile interface { + // AST returns the parsed abstract syntax tree. This returns nil if the + // Result was created without an AST. + AST() *ast.FileNode + // FileDescriptorProto returns the file descriptor proto. + FileDescriptorProto() *descriptorpb.FileDescriptorProto +} + +// MessageContext provides information about the location in a descriptor +// hierarchy, for adding context to warnings and error messages. +type MessageContext struct { + // The relevant file + File ParsedFile + + // The type and fully-qualified name of the element within the file. + ElementType string + ElementName string + + // If the element being processed is an option (or *in* an option) + // on the named element above, this will be non-nil. + Option *descriptorpb.UninterpretedOption + // If the element being processed is inside a message literal in an + // option value, this will be non-empty and represent a traversal + // to the element in question. + OptAggPath string +} + +func (c *MessageContext) String() string { + var ctx bytes.Buffer + if c.ElementType != "file" { + _, _ = fmt.Fprintf(&ctx, "%s %s: ", c.ElementType, c.ElementName) + } + if c.Option != nil && c.Option.Name != nil { + ctx.WriteString("option ") + writeOptionName(&ctx, c.Option.Name) + if c.File.AST() == nil { + // if we have no source position info, try to provide as much context + // as possible (if nodes != nil, we don't need this because any errors + // will actually have file and line numbers) + if c.OptAggPath != "" { + _, _ = fmt.Fprintf(&ctx, " at %s", c.OptAggPath) + } + } + ctx.WriteString(": ") + } + return ctx.String() +} + +func writeOptionName(buf *bytes.Buffer, parts []*descriptorpb.UninterpretedOption_NamePart) { + first := true + for _, p := range parts { + if first { + first = false + } else { + buf.WriteByte('.') + } + nm := p.GetNamePart() + if nm[0] == '.' { + // skip leading dot + nm = nm[1:] + } + if p.GetIsExtension() { + buf.WriteByte('(') + buf.WriteString(nm) + buf.WriteByte(')') + } else { + buf.WriteString(nm) + } + } +} diff --git a/vendor/github.com/bufbuild/protocompile/internal/messageset/messageset.go b/vendor/github.com/bufbuild/protocompile/internal/messageset/messageset.go new file mode 100644 index 00000000..850a0c66 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/internal/messageset/messageset.go @@ -0,0 +1,62 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package messageset + +import ( + "math" + "sync" + + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protodesc" + "google.golang.org/protobuf/types/descriptorpb" +) + +var ( + messageSetSupport bool + messageSetSupportInit sync.Once +) + +// CanSupportMessageSets returns true if the protobuf-go runtime supports +// serializing messages with the message set wire format. +func CanSupportMessageSets() bool { + messageSetSupportInit.Do(func() { + // We check using the protodesc package, instead of just relying + // on protolegacy build tag, in case someone links in a fork of + // the protobuf-go runtime that supports legacy proto1 features + // or in case the protobuf-go runtime adds another mechanism to + // enable or disable it (such as environment variable). + _, err := protodesc.NewFile(&descriptorpb.FileDescriptorProto{ + Name: proto.String("test.proto"), + MessageType: []*descriptorpb.DescriptorProto{ + { + Name: proto.String("MessageSet"), + Options: &descriptorpb.MessageOptions{ + MessageSetWireFormat: proto.Bool(true), + }, + ExtensionRange: []*descriptorpb.DescriptorProto_ExtensionRange{ + { + Start: proto.Int32(1), + End: proto.Int32(math.MaxInt32), + }, + }, + }, + }, + }, nil) + // When message sets are not supported, the above returns an error: + // message "MessageSet" is a MessageSet, which is a legacy proto1 feature that is no longer supported + messageSetSupport = err == nil + }) + return messageSetSupport +} diff --git a/vendor/github.com/bufbuild/protocompile/internal/norace.go b/vendor/github.com/bufbuild/protocompile/internal/norace.go new file mode 100644 index 00000000..2acf4e46 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/internal/norace.go @@ -0,0 +1,19 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !race + +package internal + +const IsRace = false diff --git a/vendor/github.com/bufbuild/protocompile/internal/options.go b/vendor/github.com/bufbuild/protocompile/internal/options.go new file mode 100644 index 00000000..4eaa0f6a --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/internal/options.go @@ -0,0 +1,71 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package internal + +import ( + "google.golang.org/protobuf/types/descriptorpb" + + "github.com/bufbuild/protocompile/ast" +) + +type hasOptionNode interface { + OptionNode(part *descriptorpb.UninterpretedOption) ast.OptionDeclNode + FileNode() ast.FileDeclNode // needed in order to query for NodeInfo +} + +type errorHandler func(span ast.SourceSpan, format string, args ...interface{}) error + +func FindFirstOption(res hasOptionNode, handler errorHandler, scope string, opts []*descriptorpb.UninterpretedOption, name string) (int, error) { + return findOption(res, handler, scope, opts, name, false, true) +} + +func FindOption(res hasOptionNode, handler errorHandler, scope string, opts []*descriptorpb.UninterpretedOption, name string) (int, error) { + return findOption(res, handler, scope, opts, name, true, false) +} + +func findOption(res hasOptionNode, handler errorHandler, scope string, opts []*descriptorpb.UninterpretedOption, name string, exact, first bool) (int, error) { + found := -1 + for i, opt := range opts { + if exact && len(opt.Name) != 1 { + continue + } + if opt.Name[0].GetIsExtension() || opt.Name[0].GetNamePart() != name { + continue + } + if first { + return i, nil + } + if found >= 0 { + optNode := res.OptionNode(opt) + fn := res.FileNode() + node := optNode.GetName() + nodeInfo := fn.NodeInfo(node) + return -1, handler(nodeInfo, "%s: option %s cannot be defined more than once", scope, name) + } + found = i + } + return found, nil +} + +func RemoveOption(uo []*descriptorpb.UninterpretedOption, indexToRemove int) []*descriptorpb.UninterpretedOption { + switch { + case indexToRemove == 0: + return uo[1:] + case indexToRemove == len(uo)-1: + return uo[:len(uo)-1] + default: + return append(uo[:indexToRemove], uo[indexToRemove+1:]...) + } +} diff --git a/vendor/github.com/bufbuild/protocompile/internal/race.go b/vendor/github.com/bufbuild/protocompile/internal/race.go new file mode 100644 index 00000000..e70e414d --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/internal/race.go @@ -0,0 +1,19 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build race + +package internal + +const IsRace = true diff --git a/vendor/github.com/bufbuild/protocompile/internal/tags.go b/vendor/github.com/bufbuild/protocompile/internal/tags.go new file mode 100644 index 00000000..179728f0 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/internal/tags.go @@ -0,0 +1,336 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import "math" + +const ( + // MaxNormalTag is the maximum allowed tag number for a field in a normal message. + MaxNormalTag = 536870911 // 2^29 - 1 + + // MaxMessageSetTag is the maximum allowed tag number of a field in a message that + // uses the message set wire format. + MaxMessageSetTag = math.MaxInt32 - 1 + + // MaxTag is the maximum allowed tag number. (It is the same as MaxMessageSetTag + // since that is the absolute highest allowed.) + MaxTag = MaxMessageSetTag + + // SpecialReservedStart is the first tag in a range that is reserved and not + // allowed for use in message definitions. + SpecialReservedStart = 19000 + // SpecialReservedEnd is the last tag in a range that is reserved and not + // allowed for use in message definitions. + SpecialReservedEnd = 19999 + + // NB: It would be nice to use constants from generated code instead of + // hard-coding these here. But code-gen does not emit these as constants + // anywhere. The only places they appear in generated code are struct tags + // on fields of the generated descriptor protos. + + // FilePackageTag is the tag number of the package element in a file + // descriptor proto. + FilePackageTag = 2 + // FileDependencyTag is the tag number of the dependencies element in a + // file descriptor proto. + FileDependencyTag = 3 + // FileMessagesTag is the tag number of the messages element in a file + // descriptor proto. + FileMessagesTag = 4 + // FileEnumsTag is the tag number of the enums element in a file descriptor + // proto. + FileEnumsTag = 5 + // FileServicesTag is the tag number of the services element in a file + // descriptor proto. + FileServicesTag = 6 + // FileExtensionsTag is the tag number of the extensions element in a file + // descriptor proto. + FileExtensionsTag = 7 + // FileOptionsTag is the tag number of the options element in a file + // descriptor proto. + FileOptionsTag = 8 + // FileOptionsJavaStringCheckUTF8Tag is the tag number of the java_string_check_utf8 + // field in the FileOptions proto. + FileOptionsJavaStringCheckUTF8Tag = 27 + // FileOptionsFeaturesTag is the tag number of the features field in the + // FileOptions proto. + FileOptionsFeaturesTag = 50 + // FileSourceCodeInfoTag is the tag number of the source code info element + // in a file descriptor proto. + FileSourceCodeInfoTag = 9 + // FilePublicDependencyTag is the tag number of the public dependency element + // in a file descriptor proto. + FilePublicDependencyTag = 10 + // FileWeakDependencyTag is the tag number of the weak dependency element + // in a file descriptor proto. + FileWeakDependencyTag = 11 + // FileSyntaxTag is the tag number of the syntax element in a file + // descriptor proto. + FileSyntaxTag = 12 + // FileEditionTag is the tag number of the edition element in a file + // descriptor proto. + FileEditionTag = 14 + // MessageNameTag is the tag number of the name element in a message + // descriptor proto. + MessageNameTag = 1 + // MessageFieldsTag is the tag number of the fields element in a message + // descriptor proto. + MessageFieldsTag = 2 + // MessageNestedMessagesTag is the tag number of the nested messages + // element in a message descriptor proto. + MessageNestedMessagesTag = 3 + // MessageEnumsTag is the tag number of the enums element in a message + // descriptor proto. 
+ MessageEnumsTag = 4 + // MessageExtensionRangesTag is the tag number of the extension ranges + // element in a message descriptor proto. + MessageExtensionRangesTag = 5 + // MessageExtensionsTag is the tag number of the extensions element in a + // message descriptor proto. + MessageExtensionsTag = 6 + // MessageOptionsTag is the tag number of the options element in a message + // descriptor proto. + MessageOptionsTag = 7 + // MessageOptionsFeaturesTag is the tag number of the features field in the + // MessageOptions proto. + MessageOptionsFeaturesTag = 12 + // MessageOneofsTag is the tag number of the one-ofs element in a message + // descriptor proto. + MessageOneofsTag = 8 + // MessageReservedRangesTag is the tag number of the reserved ranges element + // in a message descriptor proto. + MessageReservedRangesTag = 9 + // MessageReservedNamesTag is the tag number of the reserved names element + // in a message descriptor proto. + MessageReservedNamesTag = 10 + // ExtensionRangeStartTag is the tag number of the start index in an + // extension range proto. + ExtensionRangeStartTag = 1 + // ExtensionRangeEndTag is the tag number of the end index in an + // extension range proto. + ExtensionRangeEndTag = 2 + // ExtensionRangeOptionsTag is the tag number of the options element in an + // extension range proto. + ExtensionRangeOptionsTag = 3 + // ExtensionRangeOptionsDeclarationTag is the tag number of the declaration + // field in the ExtensionRangeOptions proto. + ExtensionRangeOptionsDeclarationTag = 2 + // ExtensionRangeOptionsVerificationTag is the tag number of the verification + // field in the ExtensionRangeOptions proto. + ExtensionRangeOptionsVerificationTag = 3 + // ExtensionRangeOptionsDeclarationNumberTag is the tag number of the number + // field in the ExtensionRangeOptions.Declaration proto. + ExtensionRangeOptionsDeclarationNumberTag = 1 + // ExtensionRangeOptionsDeclarationFullNameTag is the tag number of the full_name + // field in the ExtensionRangeOptions.Declaration proto. + ExtensionRangeOptionsDeclarationFullNameTag = 2 + // ExtensionRangeOptionsDeclarationTypeTag is the tag number of the type + // field in the ExtensionRangeOptions.Declaration proto. + ExtensionRangeOptionsDeclarationTypeTag = 3 + // ExtensionRangeOptionsDeclarationReservedTag is the tag number of the reserved + // field in the ExtensionRangeOptions.Declaration proto. + ExtensionRangeOptionsDeclarationReservedTag = 5 + // ExtensionRangeOptionsDeclarationRepeatedTag is the tag number of the repeated + // field in the ExtensionRangeOptions.Declaration proto. + ExtensionRangeOptionsDeclarationRepeatedTag = 6 + // ExtensionRangeOptionsFeaturesTag is the tag number of the features field in the + // ExtensionRangeOptions proto. + ExtensionRangeOptionsFeaturesTag = 50 + // ReservedRangeStartTag is the tag number of the start index in a reserved + // range proto. This field number is the same for both "flavors" of reserved + // ranges: DescriptorProto.ReservedRange and EnumDescriptorProto.EnumReservedRange. + ReservedRangeStartTag = 1 + // ReservedRangeEndTag is the tag number of the end index in a reserved + // range proto. This field number is the same for both "flavors" of reserved + // ranges: DescriptorProto.ReservedRange and EnumDescriptorProto.EnumReservedRange. + ReservedRangeEndTag = 2 + // FieldNameTag is the tag number of the name element in a field descriptor + // proto. + FieldNameTag = 1 + // FieldExtendeeTag is the tag number of the extendee element in a field + // descriptor proto. 
+ FieldExtendeeTag = 2 + // FieldNumberTag is the tag number of the number element in a field + // descriptor proto. + FieldNumberTag = 3 + // FieldLabelTag is the tag number of the label element in a field + // descriptor proto. + FieldLabelTag = 4 + // FieldTypeTag is the tag number of the type element in a field descriptor + // proto. + FieldTypeTag = 5 + // FieldTypeNameTag is the tag number of the type name element in a field + // descriptor proto. + FieldTypeNameTag = 6 + // FieldDefaultTag is the tag number of the default value element in a + // field descriptor proto. + FieldDefaultTag = 7 + // FieldOptionsTag is the tag number of the options element in a field + // descriptor proto. + FieldOptionsTag = 8 + // FieldOptionsCTypeTag is the number of the ctype field in the + // FieldOptions proto. + FieldOptionsCTypeTag = 1 + // FieldOptionsPackedTag is the number of the packed field in the + // FieldOptions proto. + FieldOptionsPackedTag = 2 + // FieldOptionsLazyTag is the number of the lazy field in the + // FieldOptions proto. + FieldOptionsLazyTag = 5 + // FieldOptionsJSTypeTag is the number of the jstype field in the + // FieldOptions proto. + FieldOptionsJSTypeTag = 6 + // FieldOptionsUnverifiedLazyTag is the number of the unverified_lazy + // field in the FieldOptions proto. + FieldOptionsUnverifiedLazyTag = 15 + // FieldOptionsFeaturesTag is the tag number of the features field in the + // FieldOptions proto. + FieldOptionsFeaturesTag = 21 + // FieldOneofIndexTag is the tag number of the oneof index element in a + // field descriptor proto. + FieldOneofIndexTag = 9 + // FieldJSONNameTag is the tag number of the JSON name element in a field + // descriptor proto. + FieldJSONNameTag = 10 + // FieldProto3OptionalTag is the tag number of the proto3_optional element + // in a descriptor proto. + FieldProto3OptionalTag = 17 + // OneofNameTag is the tag number of the name element in a one-of + // descriptor proto. + OneofNameTag = 1 + // OneofOptionsTag is the tag number of the options element in a one-of + // descriptor proto. + OneofOptionsTag = 2 + // OneofOptionsFeaturesTag is the tag number of the features field in the + // OneofOptions proto. + OneofOptionsFeaturesTag = 1 + // EnumNameTag is the tag number of the name element in an enum descriptor + // proto. + EnumNameTag = 1 + // EnumValuesTag is the tag number of the values element in an enum + // descriptor proto. + EnumValuesTag = 2 + // EnumOptionsTag is the tag number of the options element in an enum + // descriptor proto. + EnumOptionsTag = 3 + // EnumOptionsFeaturesTag is the tag number of the features field in the + // EnumOptions proto. + EnumOptionsFeaturesTag = 7 + // EnumReservedRangesTag is the tag number of the reserved ranges element in + // an enum descriptor proto. + EnumReservedRangesTag = 4 + // EnumReservedNamesTag is the tag number of the reserved names element in + // an enum descriptor proto. + EnumReservedNamesTag = 5 + // EnumValNameTag is the tag number of the name element in an enum value + // descriptor proto. + EnumValNameTag = 1 + // EnumValNumberTag is the tag number of the number element in an enum + // value descriptor proto. + EnumValNumberTag = 2 + // EnumValOptionsTag is the tag number of the options element in an enum + // value descriptor proto. + EnumValOptionsTag = 3 + // EnumValOptionsFeaturesTag is the tag number of the features field in the + // EnumValueOptions proto. 
+ EnumValOptionsFeaturesTag = 2 + // ServiceNameTag is the tag number of the name element in a service + // descriptor proto. + ServiceNameTag = 1 + // ServiceMethodsTag is the tag number of the methods element in a service + // descriptor proto. + ServiceMethodsTag = 2 + // ServiceOptionsTag is the tag number of the options element in a service + // descriptor proto. + ServiceOptionsTag = 3 + // ServiceOptionsFeaturesTag is the tag number of the features field in the + // ServiceOptions proto. + ServiceOptionsFeaturesTag = 34 + // MethodNameTag is the tag number of the name element in a method + // descriptor proto. + MethodNameTag = 1 + // MethodInputTag is the tag number of the input type element in a method + // descriptor proto. + MethodInputTag = 2 + // MethodOutputTag is the tag number of the output type element in a method + // descriptor proto. + MethodOutputTag = 3 + // MethodOptionsTag is the tag number of the options element in a method + // descriptor proto. + MethodOptionsTag = 4 + // MethodOptionsFeaturesTag is the tag number of the features field in the + // MethodOptions proto. + MethodOptionsFeaturesTag = 35 + // MethodInputStreamTag is the tag number of the input stream flag in a + // method descriptor proto. + MethodInputStreamTag = 5 + // MethodOutputStreamTag is the tag number of the output stream flag in a + // method descriptor proto. + MethodOutputStreamTag = 6 + + // UninterpretedOptionsTag is the tag number of the uninterpreted options + // element. All *Options messages use the same tag for the field that stores + // uninterpreted options. + UninterpretedOptionsTag = 999 + + // UninterpretedNameTag is the tag number of the name element in an + // uninterpreted options proto. + UninterpretedNameTag = 2 + // UninterpretedIdentTag is the tag number of the identifier value in an + // uninterpreted options proto. + UninterpretedIdentTag = 3 + // UninterpretedPosIntTag is the tag number of the positive int value in an + // uninterpreted options proto. + UninterpretedPosIntTag = 4 + // UninterpretedNegIntTag is the tag number of the negative int value in an + // uninterpreted options proto. + UninterpretedNegIntTag = 5 + // UninterpretedDoubleTag is the tag number of the double value in an + // uninterpreted options proto. + UninterpretedDoubleTag = 6 + // UninterpretedStringTag is the tag number of the string value in an + // uninterpreted options proto. + UninterpretedStringTag = 7 + // UninterpretedAggregateTag is the tag number of the aggregate value in an + // uninterpreted options proto. + UninterpretedAggregateTag = 8 + // UninterpretedNameNameTag is the tag number of the name element in an + // uninterpreted option name proto. + UninterpretedNameNameTag = 1 + + // AnyTypeURLTag is the tag number of the type_url field of the Any proto. + AnyTypeURLTag = 1 + // AnyValueTag is the tag number of the value field of the Any proto. + AnyValueTag = 2 + + // FeatureSetFieldPresenceTag is the tag number of the field_presence field + // in the FeatureSet proto. + FeatureSetFieldPresenceTag = 1 + // FeatureSetEnumTypeTag is the tag number of the enum_type field in the + // FeatureSet proto. + FeatureSetEnumTypeTag = 2 + // FeatureSetRepeatedFieldEncodingTag is the tag number of the repeated_field_encoding + // field in the FeatureSet proto. + FeatureSetRepeatedFieldEncodingTag = 3 + // FeatureSetUTF8ValidationTag is the tag number of the utf8_validation field + // in the FeatureSet proto. 
+ FeatureSetUTF8ValidationTag = 4 + // FeatureSetMessageEncodingTag is the tag number of the message_encoding + // field in the FeatureSet proto. + FeatureSetMessageEncodingTag = 5 + // FeatureSetJSONFormatTag is the tag number of the json_format field in + // the FeatureSet proto. + FeatureSetJSONFormatTag = 6 +) diff --git a/vendor/github.com/bufbuild/protocompile/internal/types.go b/vendor/github.com/bufbuild/protocompile/internal/types.go new file mode 100644 index 00000000..04090a82 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/internal/types.go @@ -0,0 +1,35 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import "google.golang.org/protobuf/types/descriptorpb" + +var FieldTypes = map[string]descriptorpb.FieldDescriptorProto_Type{ + "double": descriptorpb.FieldDescriptorProto_TYPE_DOUBLE, + "float": descriptorpb.FieldDescriptorProto_TYPE_FLOAT, + "int32": descriptorpb.FieldDescriptorProto_TYPE_INT32, + "int64": descriptorpb.FieldDescriptorProto_TYPE_INT64, + "uint32": descriptorpb.FieldDescriptorProto_TYPE_UINT32, + "uint64": descriptorpb.FieldDescriptorProto_TYPE_UINT64, + "sint32": descriptorpb.FieldDescriptorProto_TYPE_SINT32, + "sint64": descriptorpb.FieldDescriptorProto_TYPE_SINT64, + "fixed32": descriptorpb.FieldDescriptorProto_TYPE_FIXED32, + "fixed64": descriptorpb.FieldDescriptorProto_TYPE_FIXED64, + "sfixed32": descriptorpb.FieldDescriptorProto_TYPE_SFIXED32, + "sfixed64": descriptorpb.FieldDescriptorProto_TYPE_SFIXED64, + "bool": descriptorpb.FieldDescriptorProto_TYPE_BOOL, + "string": descriptorpb.FieldDescriptorProto_TYPE_STRING, + "bytes": descriptorpb.FieldDescriptorProto_TYPE_BYTES, +} diff --git a/vendor/github.com/bufbuild/protocompile/internal/util.go b/vendor/github.com/bufbuild/protocompile/internal/util.go new file mode 100644 index 00000000..569cb3f1 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/internal/util.go @@ -0,0 +1,244 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "bytes" + "unicode" + "unicode/utf8" + + "google.golang.org/protobuf/reflect/protoreflect" +) + +// JSONName returns the default JSON name for a field with the given name. 
+// This mirrors the algorithm in protoc: +// +// https://github.com/protocolbuffers/protobuf/blob/v21.3/src/google/protobuf/descriptor.cc#L95 +func JSONName(name string) string { + var js []rune + nextUpper := false + for _, r := range name { + if r == '_' { + nextUpper = true + continue + } + if nextUpper { + nextUpper = false + js = append(js, unicode.ToUpper(r)) + } else { + js = append(js, r) + } + } + return string(js) +} + +// InitCap returns the given field name, but with the first letter capitalized. +func InitCap(name string) string { + r, sz := utf8.DecodeRuneInString(name) + return string(unicode.ToUpper(r)) + name[sz:] +} + +// CreatePrefixList returns a list of package prefixes to search when resolving +// a symbol name. If the given package is blank, it returns only the empty +// string. If the given package contains only one token, e.g. "foo", it returns +// that token and the empty string, e.g. ["foo", ""]. Otherwise, it returns +// successively shorter prefixes of the package and then the empty string. For +// example, for a package named "foo.bar.baz" it will return the following list: +// +// ["foo.bar.baz", "foo.bar", "foo", ""] +func CreatePrefixList(pkg string) []string { + if pkg == "" { + return []string{""} + } + + numDots := 0 + // one pass to pre-allocate the returned slice + for i := 0; i < len(pkg); i++ { + if pkg[i] == '.' { + numDots++ + } + } + if numDots == 0 { + return []string{pkg, ""} + } + + prefixes := make([]string, numDots+2) + // second pass to fill in returned slice + for i := 0; i < len(pkg); i++ { + if pkg[i] == '.' { + prefixes[numDots] = pkg[:i] + numDots-- + } + } + prefixes[0] = pkg + + return prefixes +} + +func WriteEscapedBytes(buf *bytes.Buffer, b []byte) { + // This uses the same algorithm as the protoc C++ code for escaping strings. + // The protoc C++ code in turn uses the abseil C++ library's CEscape function: + // https://github.com/abseil/abseil-cpp/blob/934f613818ffcb26c942dff4a80be9a4031c662c/absl/strings/escaping.cc#L406 + for _, c := range b { + switch c { + case '\n': + buf.WriteString("\\n") + case '\r': + buf.WriteString("\\r") + case '\t': + buf.WriteString("\\t") + case '"': + buf.WriteString("\\\"") + case '\'': + buf.WriteString("\\'") + case '\\': + buf.WriteString("\\\\") + default: + if c >= 0x20 && c < 0x7f { + // simple printable characters + buf.WriteByte(c) + } else { + // use octal escape for all other values + buf.WriteRune('\\') + buf.WriteByte('0' + ((c >> 6) & 0x7)) + buf.WriteByte('0' + ((c >> 3) & 0x7)) + buf.WriteByte('0' + (c & 0x7)) + } + } + } +} + +// IsZeroLocation returns true if the given loc is a zero value +// (which is returned from queries that have no result). +func IsZeroLocation(loc protoreflect.SourceLocation) bool { + return loc.Path == nil && + loc.StartLine == 0 && + loc.StartColumn == 0 && + loc.EndLine == 0 && + loc.EndColumn == 0 && + loc.LeadingDetachedComments == nil && + loc.LeadingComments == "" && + loc.TrailingComments == "" && + loc.Next == 0 +} + +// ComputePath computes the source location path for the given descriptor. +// The boolean value indicates whether the result is valid. If the path +// cannot be computed for d, the function returns nil, false. 
+func ComputePath(d protoreflect.Descriptor) (protoreflect.SourcePath, bool) { + _, ok := d.(protoreflect.FileDescriptor) + if ok { + return nil, true + } + var path protoreflect.SourcePath + for { + p := d.Parent() + switch d := d.(type) { + case protoreflect.FileDescriptor: + return reverse(path), true + case protoreflect.MessageDescriptor: + path = append(path, int32(d.Index())) + switch p.(type) { + case protoreflect.FileDescriptor: + path = append(path, FileMessagesTag) + case protoreflect.MessageDescriptor: + path = append(path, MessageNestedMessagesTag) + default: + return nil, false + } + case protoreflect.FieldDescriptor: + path = append(path, int32(d.Index())) + switch p.(type) { + case protoreflect.FileDescriptor: + if d.IsExtension() { + path = append(path, FileExtensionsTag) + } else { + return nil, false + } + case protoreflect.MessageDescriptor: + if d.IsExtension() { + path = append(path, MessageExtensionsTag) + } else { + path = append(path, MessageFieldsTag) + } + default: + return nil, false + } + case protoreflect.OneofDescriptor: + path = append(path, int32(d.Index())) + if _, ok := p.(protoreflect.MessageDescriptor); ok { + path = append(path, MessageOneofsTag) + } else { + return nil, false + } + case protoreflect.EnumDescriptor: + path = append(path, int32(d.Index())) + switch p.(type) { + case protoreflect.FileDescriptor: + path = append(path, FileEnumsTag) + case protoreflect.MessageDescriptor: + path = append(path, MessageEnumsTag) + default: + return nil, false + } + case protoreflect.EnumValueDescriptor: + path = append(path, int32(d.Index())) + if _, ok := p.(protoreflect.EnumDescriptor); ok { + path = append(path, EnumValuesTag) + } else { + return nil, false + } + case protoreflect.ServiceDescriptor: + path = append(path, int32(d.Index())) + if _, ok := p.(protoreflect.FileDescriptor); ok { + path = append(path, FileServicesTag) + } else { + return nil, false + } + case protoreflect.MethodDescriptor: + path = append(path, int32(d.Index())) + if _, ok := p.(protoreflect.ServiceDescriptor); ok { + path = append(path, ServiceMethodsTag) + } else { + return nil, false + } + } + d = p + } +} + +// CanPack returns true if a repeated field of the given kind +// can use packed encoding. +func CanPack(k protoreflect.Kind) bool { + switch k { + case protoreflect.MessageKind, protoreflect.GroupKind, protoreflect.StringKind, protoreflect.BytesKind: + return false + default: + return true + } +} + +func ClonePath(path protoreflect.SourcePath) protoreflect.SourcePath { + clone := make(protoreflect.SourcePath, len(path)) + copy(clone, path) + return clone +} + +func reverse(p protoreflect.SourcePath) protoreflect.SourcePath { + for i, j := 0, len(p)-1; i < j; i, j = i+1, j-1 { + p[i], p[j] = p[j], p[i] + } + return p +} diff --git a/vendor/github.com/bufbuild/protocompile/linker/descriptors.go b/vendor/github.com/bufbuild/protocompile/linker/descriptors.go new file mode 100644 index 00000000..cd43dcce --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/linker/descriptors.go @@ -0,0 +1,1884 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package linker + +import ( + "fmt" + "strconv" + "strings" + "unicode/utf8" + + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protodesc" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/types/descriptorpb" + "google.golang.org/protobuf/types/dynamicpb" + + "github.com/bufbuild/protocompile/ast" + "github.com/bufbuild/protocompile/internal" + "github.com/bufbuild/protocompile/internal/editions" + "github.com/bufbuild/protocompile/parser" + "github.com/bufbuild/protocompile/protoutil" +) + +var ( + // These "noOp*" values are all descriptors. The protoreflect.Descriptor + // interface and its sub-interfaces are all marked with an unexported + // method so that they cannot be implemented outside of the google.golang.org/protobuf + // module. So, to provide implementations from this package, we must embed + // them. If we simply left the embedded interface field nil, then if/when + // new methods are added to the interfaces, it could induce panics in this + // package or users of this module (since trying to invoke one of these new + // methods would end up trying to call a method on a nil interface value). + // + // So instead of leaving the embedded interface fields nil, we embed an actual + // value. While new methods are unlikely to return the correct value (since + // the calls will be delegated to these no-op instances), it is a less + // dangerous latent bug than inducing a nil-dereference panic. 
+ + noOpFile protoreflect.FileDescriptor + noOpMessage protoreflect.MessageDescriptor + noOpOneof protoreflect.OneofDescriptor + noOpField protoreflect.FieldDescriptor + noOpEnum protoreflect.EnumDescriptor + noOpEnumValue protoreflect.EnumValueDescriptor + noOpExtension protoreflect.ExtensionDescriptor + noOpService protoreflect.ServiceDescriptor + noOpMethod protoreflect.MethodDescriptor +) + +var ( + fieldPresenceField = editions.FeatureSetDescriptor.Fields().ByName("field_presence") + repeatedFieldEncodingField = editions.FeatureSetDescriptor.Fields().ByName("repeated_field_encoding") + messageEncodingField = editions.FeatureSetDescriptor.Fields().ByName("message_encoding") + enumTypeField = editions.FeatureSetDescriptor.Fields().ByName("enum_type") + jsonFormatField = editions.FeatureSetDescriptor.Fields().ByName("json_format") +) + +func init() { + noOpFile, _ = protodesc.NewFile( + &descriptorpb.FileDescriptorProto{ + Name: proto.String("no-op.proto"), + Syntax: proto.String("proto2"), + Dependency: []string{"google/protobuf/descriptor.proto"}, + MessageType: []*descriptorpb.DescriptorProto{ + { + Name: proto.String("NoOpMsg"), + Field: []*descriptorpb.FieldDescriptorProto{ + { + Name: proto.String("no_op"), + Type: descriptorpb.FieldDescriptorProto_TYPE_STRING.Enum(), + Label: descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL.Enum(), + Number: proto.Int32(1), + JsonName: proto.String("noOp"), + OneofIndex: proto.Int32(0), + }, + }, + OneofDecl: []*descriptorpb.OneofDescriptorProto{ + { + Name: proto.String("no_op_oneof"), + }, + }, + }, + }, + EnumType: []*descriptorpb.EnumDescriptorProto{ + { + Name: proto.String("NoOpEnum"), + Value: []*descriptorpb.EnumValueDescriptorProto{ + { + Name: proto.String("NO_OP"), + Number: proto.Int32(0), + }, + }, + }, + }, + Extension: []*descriptorpb.FieldDescriptorProto{ + { + Extendee: proto.String(".google.protobuf.FileOptions"), + Name: proto.String("no_op"), + Type: descriptorpb.FieldDescriptorProto_TYPE_STRING.Enum(), + Label: descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL.Enum(), + Number: proto.Int32(50000), + }, + }, + Service: []*descriptorpb.ServiceDescriptorProto{ + { + Name: proto.String("NoOpService"), + Method: []*descriptorpb.MethodDescriptorProto{ + { + Name: proto.String("NoOp"), + InputType: proto.String(".NoOpMsg"), + OutputType: proto.String(".NoOpMsg"), + }, + }, + }, + }, + }, + protoregistry.GlobalFiles, + ) + noOpMessage = noOpFile.Messages().Get(0) + noOpOneof = noOpMessage.Oneofs().Get(0) + noOpField = noOpMessage.Fields().Get(0) + noOpEnum = noOpFile.Enums().Get(0) + noOpEnumValue = noOpEnum.Values().Get(0) + noOpExtension = noOpFile.Extensions().Get(0) + noOpService = noOpFile.Services().Get(0) + noOpMethod = noOpService.Methods().Get(0) +} + +// This file contains implementations of protoreflect.Descriptor. Note that +// this is a hack since those interfaces have a "doNotImplement" tag +// interface therein. We do just enough to make dynamicpb happy; constructing +// a regular descriptor would fail because we haven't yet interpreted options +// at the point we need these, and some validations will fail if the options +// aren't present. + +type result struct { + protoreflect.FileDescriptor + parser.Result + prefix string + deps Files + + // A map of all descriptors keyed by their fully-qualified name (without + // any leading dot). + descriptors map[string]protoreflect.Descriptor + + // A set of imports that have been used in the course of linking and + // interpreting options. 
+ usedImports map[string]struct{} + + // A map of AST nodes that represent identifiers in ast.FieldReferenceNodes + // to their fully-qualified name. The identifiers are for field names in + // message literals (in option values) that are extension fields. These names + // are resolved during linking and stored here, to be used to interpret options. + optionQualifiedNames map[ast.IdentValueNode]string + + imports fileImports + messages msgDescriptors + enums enumDescriptors + extensions extDescriptors + services svcDescriptors + srcLocations srcLocs +} + +var _ protoreflect.FileDescriptor = (*result)(nil) +var _ Result = (*result)(nil) +var _ protoutil.DescriptorProtoWrapper = (*result)(nil) +var _ editions.HasEdition = (*result)(nil) + +func (r *result) RemoveAST() { + r.Result = parser.ResultWithoutAST(r.FileDescriptorProto()) + r.optionQualifiedNames = nil +} + +func (r *result) AsProto() proto.Message { + return r.FileDescriptorProto() +} + +func (r *result) ParentFile() protoreflect.FileDescriptor { + return r +} + +func (r *result) Parent() protoreflect.Descriptor { + return nil +} + +func (r *result) Index() int { + return 0 +} + +func (r *result) Syntax() protoreflect.Syntax { + switch r.FileDescriptorProto().GetSyntax() { + case "proto2", "": + return protoreflect.Proto2 + case "proto3": + return protoreflect.Proto3 + case "editions": + return protoreflect.Editions + default: + return 0 // ??? + } +} + +func (r *result) Edition() int32 { + switch r.Syntax() { + case protoreflect.Proto2: + return int32(descriptorpb.Edition_EDITION_PROTO2) + case protoreflect.Proto3: + return int32(descriptorpb.Edition_EDITION_PROTO3) + case protoreflect.Editions: + return int32(r.FileDescriptorProto().GetEdition()) + default: + return int32(descriptorpb.Edition_EDITION_UNKNOWN) // ??? 
+ } +} + +func (r *result) Name() protoreflect.Name { + return "" +} + +func (r *result) FullName() protoreflect.FullName { + return r.Package() +} + +func (r *result) IsPlaceholder() bool { + return false +} + +func (r *result) Options() protoreflect.ProtoMessage { + return r.FileDescriptorProto().Options +} + +func (r *result) Path() string { + return r.FileDescriptorProto().GetName() +} + +func (r *result) Package() protoreflect.FullName { + return protoreflect.FullName(r.FileDescriptorProto().GetPackage()) +} + +func (r *result) Imports() protoreflect.FileImports { + return &r.imports +} + +func (r *result) Enums() protoreflect.EnumDescriptors { + return &r.enums +} + +func (r *result) Messages() protoreflect.MessageDescriptors { + return &r.messages +} + +func (r *result) Extensions() protoreflect.ExtensionDescriptors { + return &r.extensions +} + +func (r *result) Services() protoreflect.ServiceDescriptors { + return &r.services +} + +func (r *result) PopulateSourceCodeInfo() { + srcLocProtos := asSourceLocations(r.FileDescriptorProto().GetSourceCodeInfo().GetLocation()) + srcLocIndex := computeSourceLocIndex(srcLocProtos) + r.srcLocations = srcLocs{file: r, locs: srcLocProtos, index: srcLocIndex} +} + +func (r *result) SourceLocations() protoreflect.SourceLocations { + return &r.srcLocations +} + +func computeSourceLocIndex(locs []protoreflect.SourceLocation) map[interface{}]int { + index := map[interface{}]int{} + for i, loc := range locs { + if loc.Next == 0 { + index[pathKey(loc.Path)] = i + } + } + return index +} + +func asSourceLocations(srcInfoProtos []*descriptorpb.SourceCodeInfo_Location) []protoreflect.SourceLocation { + locs := make([]protoreflect.SourceLocation, len(srcInfoProtos)) + prev := map[any]*protoreflect.SourceLocation{} + for i, loc := range srcInfoProtos { + var stLin, stCol, enLin, enCol int + if len(loc.Span) == 3 { + stLin, stCol, enCol = int(loc.Span[0]), int(loc.Span[1]), int(loc.Span[2]) + enLin = stLin + } else { + stLin, stCol, enLin, enCol = int(loc.Span[0]), int(loc.Span[1]), int(loc.Span[2]), int(loc.Span[3]) + } + locs[i] = protoreflect.SourceLocation{ + Path: loc.Path, + LeadingComments: loc.GetLeadingComments(), + LeadingDetachedComments: loc.GetLeadingDetachedComments(), + TrailingComments: loc.GetTrailingComments(), + StartLine: stLin, + StartColumn: stCol, + EndLine: enLin, + EndColumn: enCol, + } + str := pathKey(loc.Path) + pr := prev[str] + if pr != nil { + pr.Next = i + } + prev[str] = &locs[i] + } + return locs +} + +type fileImports struct { + protoreflect.FileImports + files []protoreflect.FileImport +} + +func (r *result) createImports() fileImports { + fd := r.FileDescriptorProto() + imps := make([]protoreflect.FileImport, len(fd.Dependency)) + for i, dep := range fd.Dependency { + desc := r.deps.FindFileByPath(dep) + imps[i] = protoreflect.FileImport{FileDescriptor: unwrap(desc)} + } + for _, publicIndex := range fd.PublicDependency { + imps[int(publicIndex)].IsPublic = true + } + for _, weakIndex := range fd.WeakDependency { + imps[int(weakIndex)].IsWeak = true + } + return fileImports{files: imps} +} + +func unwrap(descriptor protoreflect.FileDescriptor) protoreflect.FileDescriptor { + wrapped, ok := descriptor.(interface { + Unwrap() protoreflect.FileDescriptor + }) + if !ok { + return descriptor + } + unwrapped := wrapped.Unwrap() + if unwrapped == nil { + return descriptor // shouldn't ever happen + } + return unwrapped +} + +func (f *fileImports) Len() int { + return len(f.files) +} + +func (f *fileImports) Get(i int) 
protoreflect.FileImport { + return f.files[i] +} + +type srcLocs struct { + protoreflect.SourceLocations + file *result + locs []protoreflect.SourceLocation + index map[interface{}]int +} + +func (s *srcLocs) Len() int { + return len(s.locs) +} + +func (s *srcLocs) Get(i int) protoreflect.SourceLocation { + return s.locs[i] +} + +func (s *srcLocs) ByPath(p protoreflect.SourcePath) protoreflect.SourceLocation { + index, ok := s.index[pathKey(p)] + if !ok { + return protoreflect.SourceLocation{} + } + return s.locs[index] +} + +func (s *srcLocs) ByDescriptor(d protoreflect.Descriptor) protoreflect.SourceLocation { + if d.ParentFile() != s.file { + return protoreflect.SourceLocation{} + } + path, ok := internal.ComputePath(d) + if !ok { + return protoreflect.SourceLocation{} + } + return s.ByPath(path) +} + +type msgDescriptors struct { + protoreflect.MessageDescriptors + msgs []msgDescriptor +} + +func (r *result) createMessages(prefix string, parent protoreflect.Descriptor, msgProtos []*descriptorpb.DescriptorProto, pool *allocPool) msgDescriptors { + msgs := pool.getMessages(len(msgProtos)) + for i, msgProto := range msgProtos { + r.createMessageDescriptor(&msgs[i], msgProto, parent, i, prefix+msgProto.GetName(), pool) + } + return msgDescriptors{msgs: msgs} +} + +func (m *msgDescriptors) Len() int { + return len(m.msgs) +} + +func (m *msgDescriptors) Get(i int) protoreflect.MessageDescriptor { + return &m.msgs[i] +} + +func (m *msgDescriptors) ByName(s protoreflect.Name) protoreflect.MessageDescriptor { + for i := range m.msgs { + msg := &m.msgs[i] + if msg.Name() == s { + return msg + } + } + return nil +} + +type msgDescriptor struct { + protoreflect.MessageDescriptor + file *result + parent protoreflect.Descriptor + index int + proto *descriptorpb.DescriptorProto + fqn string + + fields fldDescriptors + oneofs oneofDescriptors + nestedMessages msgDescriptors + nestedEnums enumDescriptors + nestedExtensions extDescriptors + + extRanges fieldRanges + rsvdRanges fieldRanges + rsvdNames names +} + +var _ protoreflect.MessageDescriptor = (*msgDescriptor)(nil) +var _ protoutil.DescriptorProtoWrapper = (*msgDescriptor)(nil) + +func (r *result) createMessageDescriptor(ret *msgDescriptor, md *descriptorpb.DescriptorProto, parent protoreflect.Descriptor, index int, fqn string, pool *allocPool) { + r.descriptors[fqn] = ret + + ret.MessageDescriptor = noOpMessage + ret.file = r + ret.parent = parent + ret.index = index + ret.proto = md + ret.fqn = fqn + + prefix := fqn + "." 
+ // NB: We MUST create fields before oneofs so that we can populate the + // set of fields that belong to the oneof + ret.fields = r.createFields(prefix, ret, md.Field, pool) + ret.oneofs = r.createOneofs(prefix, ret, md.OneofDecl, pool) + ret.nestedMessages = r.createMessages(prefix, ret, md.NestedType, pool) + ret.nestedEnums = r.createEnums(prefix, ret, md.EnumType, pool) + ret.nestedExtensions = r.createExtensions(prefix, ret, md.Extension, pool) + ret.extRanges = createFieldRanges(md.ExtensionRange) + ret.rsvdRanges = createFieldRanges(md.ReservedRange) + ret.rsvdNames = names{s: md.ReservedName} +} + +func (m *msgDescriptor) MessageDescriptorProto() *descriptorpb.DescriptorProto { + return m.proto +} + +func (m *msgDescriptor) AsProto() proto.Message { + return m.proto +} + +func (m *msgDescriptor) ParentFile() protoreflect.FileDescriptor { + return m.file +} + +func (m *msgDescriptor) Parent() protoreflect.Descriptor { + return m.parent +} + +func (m *msgDescriptor) Index() int { + return m.index +} + +func (m *msgDescriptor) Syntax() protoreflect.Syntax { + return m.file.Syntax() +} + +func (m *msgDescriptor) Name() protoreflect.Name { + return protoreflect.Name(m.proto.GetName()) +} + +func (m *msgDescriptor) FullName() protoreflect.FullName { + return protoreflect.FullName(m.fqn) +} + +func (m *msgDescriptor) IsPlaceholder() bool { + return false +} + +func (m *msgDescriptor) Options() protoreflect.ProtoMessage { + return m.proto.Options +} + +func (m *msgDescriptor) IsMapEntry() bool { + return m.proto.Options.GetMapEntry() +} + +func (m *msgDescriptor) Fields() protoreflect.FieldDescriptors { + return &m.fields +} + +func (m *msgDescriptor) Oneofs() protoreflect.OneofDescriptors { + return &m.oneofs +} + +func (m *msgDescriptor) ReservedNames() protoreflect.Names { + return m.rsvdNames +} + +func (m *msgDescriptor) ReservedRanges() protoreflect.FieldRanges { + return m.rsvdRanges +} + +func (m *msgDescriptor) RequiredNumbers() protoreflect.FieldNumbers { + var indexes fieldNums + for _, fld := range m.proto.Field { + if fld.GetLabel() == descriptorpb.FieldDescriptorProto_LABEL_REQUIRED { + indexes.s = append(indexes.s, fld.GetNumber()) + } + } + return indexes +} + +func (m *msgDescriptor) ExtensionRanges() protoreflect.FieldRanges { + return m.extRanges +} + +func (m *msgDescriptor) ExtensionRangeOptions(i int) protoreflect.ProtoMessage { + return m.proto.ExtensionRange[i].Options +} + +func (m *msgDescriptor) Enums() protoreflect.EnumDescriptors { + return &m.nestedEnums +} + +func (m *msgDescriptor) Messages() protoreflect.MessageDescriptors { + return &m.nestedMessages +} + +func (m *msgDescriptor) Extensions() protoreflect.ExtensionDescriptors { + return &m.nestedExtensions +} + +type names struct { + protoreflect.Names + s []string +} + +func (n names) Len() int { + return len(n.s) +} + +func (n names) Get(i int) protoreflect.Name { + return protoreflect.Name(n.s[i]) +} + +func (n names) Has(s protoreflect.Name) bool { + for _, name := range n.s { + if name == string(s) { + return true + } + } + return false +} + +type fieldNums struct { + protoreflect.FieldNumbers + s []int32 +} + +func (n fieldNums) Len() int { + return len(n.s) +} + +func (n fieldNums) Get(i int) protoreflect.FieldNumber { + return protoreflect.FieldNumber(n.s[i]) +} + +func (n fieldNums) Has(s protoreflect.FieldNumber) bool { + for _, num := range n.s { + if num == int32(s) { + return true + } + } + return false +} + +type fieldRanges struct { + protoreflect.FieldRanges + ranges 
[][2]protoreflect.FieldNumber +} + +type fieldRange interface { + GetStart() int32 + GetEnd() int32 +} + +func createFieldRanges[T fieldRange](rangeProtos []T) fieldRanges { + ranges := make([][2]protoreflect.FieldNumber, len(rangeProtos)) + for i, r := range rangeProtos { + ranges[i] = [2]protoreflect.FieldNumber{ + protoreflect.FieldNumber(r.GetStart()), + protoreflect.FieldNumber(r.GetEnd()), + } + } + return fieldRanges{ranges: ranges} +} + +func (f fieldRanges) Len() int { + return len(f.ranges) +} + +func (f fieldRanges) Get(i int) [2]protoreflect.FieldNumber { + return f.ranges[i] +} + +func (f fieldRanges) Has(n protoreflect.FieldNumber) bool { + for _, r := range f.ranges { + if r[0] <= n && r[1] > n { + return true + } + } + return false +} + +type enumDescriptors struct { + protoreflect.EnumDescriptors + enums []enumDescriptor +} + +func (r *result) createEnums(prefix string, parent protoreflect.Descriptor, enumProtos []*descriptorpb.EnumDescriptorProto, pool *allocPool) enumDescriptors { + enums := pool.getEnums(len(enumProtos)) + for i, enumProto := range enumProtos { + r.createEnumDescriptor(&enums[i], enumProto, parent, i, prefix+enumProto.GetName(), pool) + } + return enumDescriptors{enums: enums} +} + +func (e *enumDescriptors) Len() int { + return len(e.enums) +} + +func (e *enumDescriptors) Get(i int) protoreflect.EnumDescriptor { + return &e.enums[i] +} + +func (e *enumDescriptors) ByName(s protoreflect.Name) protoreflect.EnumDescriptor { + for i := range e.enums { + enum := &e.enums[i] + if enum.Name() == s { + return enum + } + } + return nil +} + +type enumDescriptor struct { + protoreflect.EnumDescriptor + file *result + parent protoreflect.Descriptor + index int + proto *descriptorpb.EnumDescriptorProto + fqn string + + values enValDescriptors + + rsvdRanges enumRanges + rsvdNames names +} + +var _ protoreflect.EnumDescriptor = (*enumDescriptor)(nil) +var _ protoutil.DescriptorProtoWrapper = (*enumDescriptor)(nil) + +func (r *result) createEnumDescriptor(ret *enumDescriptor, ed *descriptorpb.EnumDescriptorProto, parent protoreflect.Descriptor, index int, fqn string, pool *allocPool) { + r.descriptors[fqn] = ret + + ret.EnumDescriptor = noOpEnum + ret.file = r + ret.parent = parent + ret.index = index + ret.proto = ed + ret.fqn = fqn + + // Unlike all other elements, the fully-qualified names of enum values + // are NOT scoped to their parent element (the enum), but rather to + // the enum's parent element. This follows C++ scoping rules for + // enum values. 
+ prefix := strings.TrimSuffix(fqn, ed.GetName()) + ret.values = r.createEnumValues(prefix, ret, ed.Value, pool) + ret.rsvdRanges = createEnumRanges(ed.ReservedRange) + ret.rsvdNames = names{s: ed.ReservedName} +} + +func (e *enumDescriptor) EnumDescriptorProto() *descriptorpb.EnumDescriptorProto { + return e.proto +} + +func (e *enumDescriptor) AsProto() proto.Message { + return e.proto +} + +func (e *enumDescriptor) ParentFile() protoreflect.FileDescriptor { + return e.file +} + +func (e *enumDescriptor) Parent() protoreflect.Descriptor { + return e.parent +} + +func (e *enumDescriptor) Index() int { + return e.index +} + +func (e *enumDescriptor) Syntax() protoreflect.Syntax { + return e.file.Syntax() +} + +func (e *enumDescriptor) Name() protoreflect.Name { + return protoreflect.Name(e.proto.GetName()) +} + +func (e *enumDescriptor) FullName() protoreflect.FullName { + return protoreflect.FullName(e.fqn) +} + +func (e *enumDescriptor) IsPlaceholder() bool { + return false +} + +func (e *enumDescriptor) Options() protoreflect.ProtoMessage { + return e.proto.Options +} + +func (e *enumDescriptor) Values() protoreflect.EnumValueDescriptors { + return &e.values +} + +func (e *enumDescriptor) ReservedNames() protoreflect.Names { + return e.rsvdNames +} + +func (e *enumDescriptor) ReservedRanges() protoreflect.EnumRanges { + return e.rsvdRanges +} + +func (e *enumDescriptor) IsClosed() bool { + enumType := resolveFeature(e, enumTypeField) + return descriptorpb.FeatureSet_EnumType(enumType.Enum()) == descriptorpb.FeatureSet_CLOSED +} + +type enumRanges struct { + protoreflect.EnumRanges + ranges [][2]protoreflect.EnumNumber +} + +func createEnumRanges(rangeProtos []*descriptorpb.EnumDescriptorProto_EnumReservedRange) enumRanges { + ranges := make([][2]protoreflect.EnumNumber, len(rangeProtos)) + for i, r := range rangeProtos { + ranges[i] = [2]protoreflect.EnumNumber{ + protoreflect.EnumNumber(r.GetStart()), + protoreflect.EnumNumber(r.GetEnd()), + } + } + return enumRanges{ranges: ranges} +} + +func (e enumRanges) Len() int { + return len(e.ranges) +} + +func (e enumRanges) Get(i int) [2]protoreflect.EnumNumber { + return e.ranges[i] +} + +func (e enumRanges) Has(n protoreflect.EnumNumber) bool { + for _, r := range e.ranges { + if r[0] <= n && r[1] >= n { + return true + } + } + return false +} + +type enValDescriptors struct { + protoreflect.EnumValueDescriptors + vals []enValDescriptor +} + +func (r *result) createEnumValues(prefix string, parent *enumDescriptor, enValProtos []*descriptorpb.EnumValueDescriptorProto, pool *allocPool) enValDescriptors { + vals := pool.getEnumValues(len(enValProtos)) + for i, enValProto := range enValProtos { + r.createEnumValueDescriptor(&vals[i], enValProto, parent, i, prefix+enValProto.GetName()) + } + return enValDescriptors{vals: vals} +} + +func (e *enValDescriptors) Len() int { + return len(e.vals) +} + +func (e *enValDescriptors) Get(i int) protoreflect.EnumValueDescriptor { + return &e.vals[i] +} + +func (e *enValDescriptors) ByName(s protoreflect.Name) protoreflect.EnumValueDescriptor { + for i := range e.vals { + val := &e.vals[i] + if val.Name() == s { + return val + } + } + return nil +} + +func (e *enValDescriptors) ByNumber(n protoreflect.EnumNumber) protoreflect.EnumValueDescriptor { + for i := range e.vals { + val := &e.vals[i] + if val.Number() == n { + return val + } + } + return nil +} + +type enValDescriptor struct { + protoreflect.EnumValueDescriptor + file *result + parent *enumDescriptor + index int + proto 
*descriptorpb.EnumValueDescriptorProto + fqn string +} + +var _ protoreflect.EnumValueDescriptor = (*enValDescriptor)(nil) +var _ protoutil.DescriptorProtoWrapper = (*enValDescriptor)(nil) + +func (r *result) createEnumValueDescriptor(ret *enValDescriptor, ed *descriptorpb.EnumValueDescriptorProto, parent *enumDescriptor, index int, fqn string) { + r.descriptors[fqn] = ret + ret.EnumValueDescriptor = noOpEnumValue + ret.file = r + ret.parent = parent + ret.index = index + ret.proto = ed + ret.fqn = fqn +} + +func (e *enValDescriptor) EnumValueDescriptorProto() *descriptorpb.EnumValueDescriptorProto { + return e.proto +} + +func (e *enValDescriptor) AsProto() proto.Message { + return e.proto +} + +func (e *enValDescriptor) ParentFile() protoreflect.FileDescriptor { + return e.file +} + +func (e *enValDescriptor) Parent() protoreflect.Descriptor { + return e.parent +} + +func (e *enValDescriptor) Index() int { + return e.index +} + +func (e *enValDescriptor) Syntax() protoreflect.Syntax { + return e.file.Syntax() +} + +func (e *enValDescriptor) Name() protoreflect.Name { + return protoreflect.Name(e.proto.GetName()) +} + +func (e *enValDescriptor) FullName() protoreflect.FullName { + return protoreflect.FullName(e.fqn) +} + +func (e *enValDescriptor) IsPlaceholder() bool { + return false +} + +func (e *enValDescriptor) Options() protoreflect.ProtoMessage { + return e.proto.Options +} + +func (e *enValDescriptor) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(e.proto.GetNumber()) +} + +type extDescriptors struct { + protoreflect.ExtensionDescriptors + exts []extTypeDescriptor +} + +func (r *result) createExtensions(prefix string, parent protoreflect.Descriptor, extProtos []*descriptorpb.FieldDescriptorProto, pool *allocPool) extDescriptors { + exts := pool.getExtensions(len(extProtos)) + for i, extProto := range extProtos { + r.createExtTypeDescriptor(&exts[i], extProto, parent, i, prefix+extProto.GetName()) + } + return extDescriptors{exts: exts} +} + +func (e *extDescriptors) Len() int { + return len(e.exts) +} + +func (e *extDescriptors) Get(i int) protoreflect.ExtensionDescriptor { + return &e.exts[i] +} + +func (e *extDescriptors) ByName(s protoreflect.Name) protoreflect.ExtensionDescriptor { + for i := range e.exts { + ext := &e.exts[i] + if ext.Name() == s { + return ext + } + } + return nil +} + +type extTypeDescriptor struct { + protoreflect.ExtensionTypeDescriptor + field fldDescriptor +} + +var _ protoutil.DescriptorProtoWrapper = &extTypeDescriptor{} + +func (r *result) createExtTypeDescriptor(ret *extTypeDescriptor, fd *descriptorpb.FieldDescriptorProto, parent protoreflect.Descriptor, index int, fqn string) { + r.descriptors[fqn] = ret + ret.field = fldDescriptor{FieldDescriptor: noOpExtension, file: r, parent: parent, index: index, proto: fd, fqn: fqn} + ret.ExtensionTypeDescriptor = dynamicpb.NewExtensionType(&ret.field).TypeDescriptor() +} + +func (e *extTypeDescriptor) FieldDescriptorProto() *descriptorpb.FieldDescriptorProto { + return e.field.proto +} + +func (e *extTypeDescriptor) AsProto() proto.Message { + return e.field.proto +} + +type fldDescriptors struct { + protoreflect.FieldDescriptors + // We use pointers here, instead of flattened slice, because oneofs + // also have fields, but need to point to values in the parent + // message's fields. 
Even though they are pointers, in the containing + // message, we always allocate a flattened slice and then point into + // that, so we're still doing fewer allocations (2 per set of fields + // instead of 1 per each field). + fields []*fldDescriptor +} + +func (r *result) createFields(prefix string, parent *msgDescriptor, fldProtos []*descriptorpb.FieldDescriptorProto, pool *allocPool) fldDescriptors { + fields := pool.getFields(len(fldProtos)) + fieldPtrs := make([]*fldDescriptor, len(fldProtos)) + for i, fldProto := range fldProtos { + r.createFieldDescriptor(&fields[i], fldProto, parent, i, prefix+fldProto.GetName()) + fieldPtrs[i] = &fields[i] + } + return fldDescriptors{fields: fieldPtrs} +} + +func (f *fldDescriptors) Len() int { + return len(f.fields) +} + +func (f *fldDescriptors) Get(i int) protoreflect.FieldDescriptor { + return f.fields[i] +} + +func (f *fldDescriptors) ByName(s protoreflect.Name) protoreflect.FieldDescriptor { + for _, fld := range f.fields { + if fld.Name() == s { + return fld + } + } + return nil +} + +func (f *fldDescriptors) ByJSONName(s string) protoreflect.FieldDescriptor { + for _, fld := range f.fields { + if fld.JSONName() == s { + return fld + } + } + return nil +} + +func (f *fldDescriptors) ByTextName(s string) protoreflect.FieldDescriptor { + fld := f.ByName(protoreflect.Name(s)) + if fld != nil { + return fld + } + // Groups use type name instead, so we fallback to slow search + for _, fld := range f.fields { + if fld.TextName() == s { + return fld + } + } + return nil +} + +func (f *fldDescriptors) ByNumber(n protoreflect.FieldNumber) protoreflect.FieldDescriptor { + for _, fld := range f.fields { + if fld.Number() == n { + return fld + } + } + return nil +} + +type fldDescriptor struct { + protoreflect.FieldDescriptor + file *result + parent protoreflect.Descriptor + index int + proto *descriptorpb.FieldDescriptorProto + fqn string + + msgType protoreflect.MessageDescriptor + extendee protoreflect.MessageDescriptor + enumType protoreflect.EnumDescriptor + oneof protoreflect.OneofDescriptor +} + +var _ protoreflect.FieldDescriptor = (*fldDescriptor)(nil) +var _ protoutil.DescriptorProtoWrapper = (*fldDescriptor)(nil) + +func (r *result) createFieldDescriptor(ret *fldDescriptor, fd *descriptorpb.FieldDescriptorProto, parent *msgDescriptor, index int, fqn string) { + r.descriptors[fqn] = ret + ret.FieldDescriptor = noOpField + ret.file = r + ret.parent = parent + ret.index = index + ret.proto = fd + ret.fqn = fqn +} + +func (f *fldDescriptor) FieldDescriptorProto() *descriptorpb.FieldDescriptorProto { + return f.proto +} + +func (f *fldDescriptor) AsProto() proto.Message { + return f.proto +} + +func (f *fldDescriptor) ParentFile() protoreflect.FileDescriptor { + return f.file +} + +func (f *fldDescriptor) Parent() protoreflect.Descriptor { + return f.parent +} + +func (f *fldDescriptor) Index() int { + return f.index +} + +func (f *fldDescriptor) Syntax() protoreflect.Syntax { + return f.file.Syntax() +} + +func (f *fldDescriptor) Name() protoreflect.Name { + return protoreflect.Name(f.proto.GetName()) +} + +func (f *fldDescriptor) FullName() protoreflect.FullName { + return protoreflect.FullName(f.fqn) +} + +func (f *fldDescriptor) IsPlaceholder() bool { + return false +} + +func (f *fldDescriptor) Options() protoreflect.ProtoMessage { + return f.proto.Options +} + +func (f *fldDescriptor) Number() protoreflect.FieldNumber { + return protoreflect.FieldNumber(f.proto.GetNumber()) +} + +func (f *fldDescriptor) Cardinality() 
protoreflect.Cardinality { + switch f.proto.GetLabel() { + case descriptorpb.FieldDescriptorProto_LABEL_REPEATED: + return protoreflect.Repeated + case descriptorpb.FieldDescriptorProto_LABEL_REQUIRED: + return protoreflect.Required + case descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL: + if f.Syntax() == protoreflect.Editions { + // Editions does not use label to indicate required. It instead + // uses a feature, and label is always optional. + fieldPresence := descriptorpb.FeatureSet_FieldPresence(resolveFeature(f, fieldPresenceField).Enum()) + if fieldPresence == descriptorpb.FeatureSet_LEGACY_REQUIRED { + return protoreflect.Required + } + } + return protoreflect.Optional + default: + return 0 + } +} + +func (f *fldDescriptor) Kind() protoreflect.Kind { + if f.proto.GetType() == descriptorpb.FieldDescriptorProto_TYPE_MESSAGE && f.Syntax() == protoreflect.Editions && + !f.IsMap() && !f.parentIsMap() { + // In editions, "group encoding" (aka "delimited encoding") is toggled + // via a feature. So we report group kind when that feature is enabled. + messageEncoding := resolveFeature(f, messageEncodingField) + if descriptorpb.FeatureSet_MessageEncoding(messageEncoding.Enum()) == descriptorpb.FeatureSet_DELIMITED { + return protoreflect.GroupKind + } + } + return protoreflect.Kind(f.proto.GetType()) +} + +func (f *fldDescriptor) HasJSONName() bool { + return f.proto.JsonName != nil +} + +func (f *fldDescriptor) JSONName() string { + if f.IsExtension() { + return f.TextName() + } + return f.proto.GetJsonName() +} + +func (f *fldDescriptor) TextName() string { + if f.IsExtension() { + return fmt.Sprintf("[%s]", f.FullName()) + } + if f.looksLikeGroup() { + // groups use the type name + return string(protoreflect.FullName(f.proto.GetTypeName()).Name()) + } + return string(f.Name()) +} + +func (f *fldDescriptor) looksLikeGroup() bool { + // It looks like a group if it uses group/delimited encoding (checked via f.Kind) + // and the message type is a sibling whose name is a mixed-case version of the field name. + return f.Kind() == protoreflect.GroupKind && + f.Message().FullName().Parent() == f.FullName().Parent() && + string(f.Name()) == strings.ToLower(string(f.Message().Name())) +} + +func (f *fldDescriptor) HasPresence() bool { + if f.proto.GetLabel() == descriptorpb.FieldDescriptorProto_LABEL_REPEATED { + return false + } + if f.IsExtension() || + f.Kind() == protoreflect.MessageKind || f.Kind() == protoreflect.GroupKind || + f.proto.OneofIndex != nil { + return true + } + fieldPresence := descriptorpb.FeatureSet_FieldPresence(resolveFeature(f, fieldPresenceField).Enum()) + return fieldPresence == descriptorpb.FeatureSet_EXPLICIT || fieldPresence == descriptorpb.FeatureSet_LEGACY_REQUIRED +} + +func (f *fldDescriptor) IsExtension() bool { + return f.proto.GetExtendee() != "" +} + +func (f *fldDescriptor) HasOptionalKeyword() bool { + if f.proto.GetLabel() != descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL { + return false + } + if f.proto.GetProto3Optional() { + // NB: This smells weird to return false here. If the proto3_optional field + // is set, it's because the keyword WAS present. However, the Go runtime + // returns false for this case, so we mirror that behavior. + return !f.IsExtension() + } + // If it's optional, but not a proto3 optional, then the keyword is only + // present for proto2 files, for fields that are not part of a oneof. 
+ return f.file.Syntax() == protoreflect.Proto2 && f.proto.OneofIndex == nil +} + +func (f *fldDescriptor) IsWeak() bool { + return f.proto.Options.GetWeak() +} + +func (f *fldDescriptor) IsPacked() bool { + if f.Cardinality() != protoreflect.Repeated || !internal.CanPack(f.Kind()) { + return false + } + opts := f.proto.GetOptions() + if opts != nil && opts.Packed != nil { + // packed option is set explicitly + return *opts.Packed + } + fieldEncoding := resolveFeature(f, repeatedFieldEncodingField) + return descriptorpb.FeatureSet_RepeatedFieldEncoding(fieldEncoding.Enum()) == descriptorpb.FeatureSet_PACKED +} + +func (f *fldDescriptor) IsList() bool { + if f.proto.GetLabel() != descriptorpb.FieldDescriptorProto_LABEL_REPEATED { + return false + } + return !f.isMapEntry() +} + +func (f *fldDescriptor) IsMap() bool { + if f.proto.GetLabel() != descriptorpb.FieldDescriptorProto_LABEL_REPEATED { + return false + } + if f.IsExtension() { + return false + } + return f.isMapEntry() +} + +func (f *fldDescriptor) isMapEntry() bool { + if f.proto.GetType() != descriptorpb.FieldDescriptorProto_TYPE_MESSAGE { + return false + } + return f.Message().IsMapEntry() +} + +func (f *fldDescriptor) parentIsMap() bool { + parent, ok := f.parent.(protoreflect.MessageDescriptor) + return ok && parent.IsMapEntry() +} + +func (f *fldDescriptor) MapKey() protoreflect.FieldDescriptor { + if !f.IsMap() { + return nil + } + return f.Message().Fields().ByNumber(1) +} + +func (f *fldDescriptor) MapValue() protoreflect.FieldDescriptor { + if !f.IsMap() { + return nil + } + return f.Message().Fields().ByNumber(2) +} + +func (f *fldDescriptor) HasDefault() bool { + return f.proto.DefaultValue != nil +} + +func (f *fldDescriptor) Default() protoreflect.Value { + // We only return a valid value for scalar fields + if f.proto.GetLabel() == descriptorpb.FieldDescriptorProto_LABEL_REPEATED || + f.Kind() == protoreflect.GroupKind || f.Kind() == protoreflect.MessageKind { + return protoreflect.Value{} + } + + if f.proto.DefaultValue != nil { + defVal := f.parseDefaultValue(f.proto.GetDefaultValue()) + if defVal.IsValid() { + return defVal + } + // if we cannot parse a valid value, fall back to zero value below + } + + // No custom default value, so return the zero value for the type + switch f.Kind() { + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + return protoreflect.ValueOfInt32(0) + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + return protoreflect.ValueOfInt64(0) + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + return protoreflect.ValueOfUint32(0) + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + return protoreflect.ValueOfUint64(0) + case protoreflect.FloatKind: + return protoreflect.ValueOfFloat32(0) + case protoreflect.DoubleKind: + return protoreflect.ValueOfFloat64(0) + case protoreflect.BoolKind: + return protoreflect.ValueOfBool(false) + case protoreflect.BytesKind: + return protoreflect.ValueOfBytes(nil) + case protoreflect.StringKind: + return protoreflect.ValueOfString("") + case protoreflect.EnumKind: + return protoreflect.ValueOfEnum(f.Enum().Values().Get(0).Number()) + case protoreflect.GroupKind, protoreflect.MessageKind: + return protoreflect.ValueOfMessage(dynamicpb.NewMessage(f.Message())) + default: + panic(fmt.Sprintf("unknown kind: %v", f.Kind())) + } +} + +func (f *fldDescriptor) parseDefaultValue(val string) protoreflect.Value { + switch f.Kind() { + case protoreflect.EnumKind: + vd := 
f.Enum().Values().ByName(protoreflect.Name(val)) + if vd != nil { + return protoreflect.ValueOfEnum(vd.Number()) + } + return protoreflect.Value{} + case protoreflect.BoolKind: + switch val { + case "true": + return protoreflect.ValueOfBool(true) + case "false": + return protoreflect.ValueOfBool(false) + default: + return protoreflect.Value{} + } + case protoreflect.BytesKind: + return protoreflect.ValueOfBytes([]byte(unescape(val))) + case protoreflect.StringKind: + return protoreflect.ValueOfString(val) + case protoreflect.FloatKind: + if f, err := strconv.ParseFloat(val, 32); err == nil { + return protoreflect.ValueOfFloat32(float32(f)) + } + return protoreflect.Value{} + case protoreflect.DoubleKind: + if f, err := strconv.ParseFloat(val, 64); err == nil { + return protoreflect.ValueOfFloat64(f) + } + return protoreflect.Value{} + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + if i, err := strconv.ParseInt(val, 10, 32); err == nil { + return protoreflect.ValueOfInt32(int32(i)) + } + return protoreflect.Value{} + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + if i, err := strconv.ParseUint(val, 10, 32); err == nil { + return protoreflect.ValueOfUint32(uint32(i)) + } + return protoreflect.Value{} + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + if i, err := strconv.ParseInt(val, 10, 64); err == nil { + return protoreflect.ValueOfInt64(i) + } + return protoreflect.Value{} + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + if i, err := strconv.ParseUint(val, 10, 64); err == nil { + return protoreflect.ValueOfUint64(i) + } + return protoreflect.Value{} + default: + return protoreflect.Value{} + } +} + +func unescape(s string) string { + // protoc encodes default values for 'bytes' fields using C escaping, + // so this function reverses that escaping + out := make([]byte, 0, len(s)) + var buf [4]byte + for len(s) > 0 { + if s[0] != '\\' || len(s) < 2 { + // not escape sequence, or too short to be well-formed escape + out = append(out, s[0]) + s = s[1:] + continue + } + nextIndex := 2 // by default, skip '\' + escaped character + switch s[1] { + case 'x', 'X': + n := matchPrefix(s[2:], 2, isHex) + if n == 0 { + // bad escape + out = append(out, s[:2]...) + } else { + c, err := strconv.ParseUint(s[2:2+n], 16, 8) + if err != nil { + // shouldn't really happen... + out = append(out, s[:2+n]...) + } else { + out = append(out, byte(c)) + } + nextIndex = 2 + n + } + case '0', '1', '2', '3', '4', '5', '6', '7': + n := 1 + matchPrefix(s[2:], 2, isOctal) + c, err := strconv.ParseUint(s[1:1+n], 8, 8) + if err != nil || c > 0xff { + out = append(out, s[:1+n]...) + } else { + out = append(out, byte(c)) + } + nextIndex = 1 + n + case 'u': + if len(s) < 6 { + // bad escape + out = append(out, s...) + nextIndex = len(s) + } else { + c, err := strconv.ParseUint(s[2:6], 16, 16) + if err != nil { + // bad escape + out = append(out, s[:6]...) + } else { + w := utf8.EncodeRune(buf[:], rune(c)) + out = append(out, buf[:w]...) + } + nextIndex = 6 + } + case 'U': + if len(s) < 10 { + // bad escape + out = append(out, s...) + nextIndex = len(s) + } else { + c, err := strconv.ParseUint(s[2:10], 16, 32) + if err != nil || c > 0x10ffff { + // bad escape + out = append(out, s[:10]...) + } else { + w := utf8.EncodeRune(buf[:], rune(c)) + out = append(out, buf[:w]...) 
+ } + nextIndex = 10 + } + case 'a': + out = append(out, '\a') + case 'b': + out = append(out, '\b') + case 'f': + out = append(out, '\f') + case 'n': + out = append(out, '\n') + case 'r': + out = append(out, '\r') + case 't': + out = append(out, '\t') + case 'v': + out = append(out, '\v') + case '\\', '\'', '"', '?': + out = append(out, s[1]) + default: + // invalid escape, just copy it as-is + out = append(out, s[:2]...) + } + s = s[nextIndex:] + } + return string(out) +} + +func isOctal(b byte) bool { return b >= '0' && b <= '7' } +func isHex(b byte) bool { + return (b >= '0' && b <= '9') || (b >= 'a' && b <= 'f') || (b >= 'A' && b <= 'F') +} +func matchPrefix(s string, limit int, fn func(byte) bool) int { + l := len(s) + if l > limit { + l = limit + } + i := 0 + for ; i < l; i++ { + if !fn(s[i]) { + return i + } + } + return i +} + +func (f *fldDescriptor) DefaultEnumValue() protoreflect.EnumValueDescriptor { + ed := f.Enum() + if ed == nil { + return nil + } + if f.proto.DefaultValue != nil { + if val := ed.Values().ByName(protoreflect.Name(f.proto.GetDefaultValue())); val != nil { + return val + } + } + // if no default specified in source, return nil + return nil +} + +func (f *fldDescriptor) ContainingOneof() protoreflect.OneofDescriptor { + return f.oneof +} + +func (f *fldDescriptor) ContainingMessage() protoreflect.MessageDescriptor { + if f.extendee != nil { + return f.extendee + } + return f.parent.(protoreflect.MessageDescriptor) +} + +func (f *fldDescriptor) Enum() protoreflect.EnumDescriptor { + return f.enumType +} + +func (f *fldDescriptor) Message() protoreflect.MessageDescriptor { + return f.msgType +} + +type oneofDescriptors struct { + protoreflect.OneofDescriptors + oneofs []oneofDescriptor +} + +func (r *result) createOneofs(prefix string, parent *msgDescriptor, ooProtos []*descriptorpb.OneofDescriptorProto, pool *allocPool) oneofDescriptors { + oos := pool.getOneofs(len(ooProtos)) + for i, fldProto := range ooProtos { + r.createOneofDescriptor(&oos[i], fldProto, parent, i, prefix+fldProto.GetName()) + } + return oneofDescriptors{oneofs: oos} +} + +func (o *oneofDescriptors) Len() int { + return len(o.oneofs) +} + +func (o *oneofDescriptors) Get(i int) protoreflect.OneofDescriptor { + return &o.oneofs[i] +} + +func (o *oneofDescriptors) ByName(s protoreflect.Name) protoreflect.OneofDescriptor { + for i := range o.oneofs { + oo := &o.oneofs[i] + if oo.Name() == s { + return oo + } + } + return nil +} + +type oneofDescriptor struct { + protoreflect.OneofDescriptor + file *result + parent *msgDescriptor + index int + proto *descriptorpb.OneofDescriptorProto + fqn string + + fields fldDescriptors +} + +var _ protoreflect.OneofDescriptor = (*oneofDescriptor)(nil) +var _ protoutil.DescriptorProtoWrapper = (*oneofDescriptor)(nil) + +func (r *result) createOneofDescriptor(ret *oneofDescriptor, ood *descriptorpb.OneofDescriptorProto, parent *msgDescriptor, index int, fqn string) { + r.descriptors[fqn] = ret + ret.OneofDescriptor = noOpOneof + ret.file = r + ret.parent = parent + ret.index = index + ret.proto = ood + ret.fqn = fqn + + var fields []*fldDescriptor + for _, fld := range parent.fields.fields { + if fld.proto.OneofIndex != nil && int(fld.proto.GetOneofIndex()) == index { + fields = append(fields, fld) + } + } + ret.fields = fldDescriptors{fields: fields} +} + +func (o *oneofDescriptor) OneofDescriptorProto() *descriptorpb.OneofDescriptorProto { + return o.proto +} + +func (o *oneofDescriptor) AsProto() proto.Message { + return o.proto +} + +func (o 
*oneofDescriptor) ParentFile() protoreflect.FileDescriptor { + return o.file +} + +func (o *oneofDescriptor) Parent() protoreflect.Descriptor { + return o.parent +} + +func (o *oneofDescriptor) Index() int { + return o.index +} + +func (o *oneofDescriptor) Syntax() protoreflect.Syntax { + return o.file.Syntax() +} + +func (o *oneofDescriptor) Name() protoreflect.Name { + return protoreflect.Name(o.proto.GetName()) +} + +func (o *oneofDescriptor) FullName() protoreflect.FullName { + return protoreflect.FullName(o.fqn) +} + +func (o *oneofDescriptor) IsPlaceholder() bool { + return false +} + +func (o *oneofDescriptor) Options() protoreflect.ProtoMessage { + return o.proto.Options +} + +func (o *oneofDescriptor) IsSynthetic() bool { + for _, fld := range o.parent.proto.GetField() { + if fld.OneofIndex != nil && int(fld.GetOneofIndex()) == o.index { + return fld.GetProto3Optional() + } + } + return false // NB: we should never get here +} + +func (o *oneofDescriptor) Fields() protoreflect.FieldDescriptors { + return &o.fields +} + +type svcDescriptors struct { + protoreflect.ServiceDescriptors + svcs []svcDescriptor +} + +func (r *result) createServices(prefix string, svcProtos []*descriptorpb.ServiceDescriptorProto, pool *allocPool) svcDescriptors { + svcs := pool.getServices(len(svcProtos)) + for i, svcProto := range svcProtos { + r.createServiceDescriptor(&svcs[i], svcProto, i, prefix+svcProto.GetName(), pool) + } + return svcDescriptors{svcs: svcs} +} + +func (s *svcDescriptors) Len() int { + return len(s.svcs) +} + +func (s *svcDescriptors) Get(i int) protoreflect.ServiceDescriptor { + return &s.svcs[i] +} + +func (s *svcDescriptors) ByName(n protoreflect.Name) protoreflect.ServiceDescriptor { + for i := range s.svcs { + svc := &s.svcs[i] + if svc.Name() == n { + return svc + } + } + return nil +} + +type svcDescriptor struct { + protoreflect.ServiceDescriptor + file *result + index int + proto *descriptorpb.ServiceDescriptorProto + fqn string + + methods mtdDescriptors +} + +var _ protoreflect.ServiceDescriptor = (*svcDescriptor)(nil) +var _ protoutil.DescriptorProtoWrapper = (*svcDescriptor)(nil) + +func (r *result) createServiceDescriptor(ret *svcDescriptor, sd *descriptorpb.ServiceDescriptorProto, index int, fqn string, pool *allocPool) { + r.descriptors[fqn] = ret + ret.ServiceDescriptor = noOpService + ret.file = r + ret.index = index + ret.proto = sd + ret.fqn = fqn + + prefix := fqn + "." 
+ ret.methods = r.createMethods(prefix, ret, sd.Method, pool) +} + +func (s *svcDescriptor) ServiceDescriptorProto() *descriptorpb.ServiceDescriptorProto { + return s.proto +} + +func (s *svcDescriptor) AsProto() proto.Message { + return s.proto +} + +func (s *svcDescriptor) ParentFile() protoreflect.FileDescriptor { + return s.file +} + +func (s *svcDescriptor) Parent() protoreflect.Descriptor { + return s.file +} + +func (s *svcDescriptor) Index() int { + return s.index +} + +func (s *svcDescriptor) Syntax() protoreflect.Syntax { + return s.file.Syntax() +} + +func (s *svcDescriptor) Name() protoreflect.Name { + return protoreflect.Name(s.proto.GetName()) +} + +func (s *svcDescriptor) FullName() protoreflect.FullName { + return protoreflect.FullName(s.fqn) +} + +func (s *svcDescriptor) IsPlaceholder() bool { + return false +} + +func (s *svcDescriptor) Options() protoreflect.ProtoMessage { + return s.proto.Options +} + +func (s *svcDescriptor) Methods() protoreflect.MethodDescriptors { + return &s.methods +} + +type mtdDescriptors struct { + protoreflect.MethodDescriptors + mtds []mtdDescriptor +} + +func (r *result) createMethods(prefix string, parent *svcDescriptor, mtdProtos []*descriptorpb.MethodDescriptorProto, pool *allocPool) mtdDescriptors { + mtds := pool.getMethods(len(mtdProtos)) + for i, mtdProto := range mtdProtos { + r.createMethodDescriptor(&mtds[i], mtdProto, parent, i, prefix+mtdProto.GetName()) + } + return mtdDescriptors{mtds: mtds} +} + +func (m *mtdDescriptors) Len() int { + return len(m.mtds) +} + +func (m *mtdDescriptors) Get(i int) protoreflect.MethodDescriptor { + return &m.mtds[i] +} + +func (m *mtdDescriptors) ByName(n protoreflect.Name) protoreflect.MethodDescriptor { + for i := range m.mtds { + mtd := &m.mtds[i] + if mtd.Name() == n { + return mtd + } + } + return nil +} + +type mtdDescriptor struct { + protoreflect.MethodDescriptor + file *result + parent *svcDescriptor + index int + proto *descriptorpb.MethodDescriptorProto + fqn string + + inputType, outputType protoreflect.MessageDescriptor +} + +var _ protoreflect.MethodDescriptor = (*mtdDescriptor)(nil) +var _ protoutil.DescriptorProtoWrapper = (*mtdDescriptor)(nil) + +func (r *result) createMethodDescriptor(ret *mtdDescriptor, mtd *descriptorpb.MethodDescriptorProto, parent *svcDescriptor, index int, fqn string) { + r.descriptors[fqn] = ret + ret.MethodDescriptor = noOpMethod + ret.file = r + ret.parent = parent + ret.index = index + ret.proto = mtd + ret.fqn = fqn +} + +func (m *mtdDescriptor) MethodDescriptorProto() *descriptorpb.MethodDescriptorProto { + return m.proto +} + +func (m *mtdDescriptor) AsProto() proto.Message { + return m.proto +} + +func (m *mtdDescriptor) ParentFile() protoreflect.FileDescriptor { + return m.file +} + +func (m *mtdDescriptor) Parent() protoreflect.Descriptor { + return m.parent +} + +func (m *mtdDescriptor) Index() int { + return m.index +} + +func (m *mtdDescriptor) Syntax() protoreflect.Syntax { + return m.file.Syntax() +} + +func (m *mtdDescriptor) Name() protoreflect.Name { + return protoreflect.Name(m.proto.GetName()) +} + +func (m *mtdDescriptor) FullName() protoreflect.FullName { + return protoreflect.FullName(m.fqn) +} + +func (m *mtdDescriptor) IsPlaceholder() bool { + return false +} + +func (m *mtdDescriptor) Options() protoreflect.ProtoMessage { + return m.proto.Options +} + +func (m *mtdDescriptor) Input() protoreflect.MessageDescriptor { + return m.inputType +} + +func (m *mtdDescriptor) Output() protoreflect.MessageDescriptor { + return m.outputType +} 
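// Illustrative sketch (not part of the vendored descriptors.go): the wrapper
// types above satisfy the standard protoreflect descriptor interfaces, so a
// linked file can be traversed with the ordinary protoreflect API. The
// service, method, and message names below are hypothetical, and fd is
// assumed to be a linked file (a linker.Result embeds protoreflect.FileDescriptor).
//
//	func describe(fd protoreflect.FileDescriptor) {
//		if svc := fd.Services().ByName("Greeter"); svc != nil {
//			if mtd := svc.Methods().ByName("SayHello"); mtd != nil {
//				fmt.Println(mtd.Input().FullName(), "->", mtd.Output().FullName())
//				fmt.Println("server streaming:", mtd.IsStreamingServer())
//			}
//		}
//		if msg := fd.Messages().ByName("HelloRequest"); msg != nil {
//			fld := msg.Fields().ByNumber(1)
//			fmt.Println(fld.Name(), fld.Kind(), fld.Cardinality())
//		}
//	}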
+ +func (m *mtdDescriptor) IsStreamingClient() bool { + return m.proto.GetClientStreaming() +} + +func (m *mtdDescriptor) IsStreamingServer() bool { + return m.proto.GetServerStreaming() +} + +func (r *result) FindImportByPath(path string) File { + return r.deps.FindFileByPath(path) +} + +func (r *result) FindExtensionByNumber(msg protoreflect.FullName, tag protoreflect.FieldNumber) protoreflect.ExtensionTypeDescriptor { + return findExtension(r, msg, tag) +} + +func (r *result) FindDescriptorByName(name protoreflect.FullName) protoreflect.Descriptor { + fqn := strings.TrimPrefix(string(name), ".") + return r.descriptors[fqn] +} + +func (r *result) hasSource() bool { + n := r.FileNode() + _, ok := n.(*ast.FileNode) + return ok +} + +// resolveFeature resolves a feature for the given descriptor. If the given element +// is in a proto2 or proto3 syntax file, this skips resolution and just returns the +// relevant default (since such files are not allowed to override features). +// +// If neither the given element nor any of its ancestors override the given feature, +// the relevant default is returned. +func resolveFeature(element protoreflect.Descriptor, feature protoreflect.FieldDescriptor) protoreflect.Value { + edition := editions.GetEdition(element) + if edition == descriptorpb.Edition_EDITION_PROTO2 || edition == descriptorpb.Edition_EDITION_PROTO3 { + // these syntax levels can't specify features, so we can short-circuit the search + // through the descriptor hierarchy for feature overrides + defaults := editions.GetEditionDefaults(edition) + return defaults.ProtoReflect().Get(feature) // returns default value if field is not present + } + val, err := editions.ResolveFeature(element, feature) + if err == nil && val.IsValid() { + return val + } + defaults := editions.GetEditionDefaults(edition) + return defaults.ProtoReflect().Get(feature) +} + +func isJSONCompliant(d protoreflect.Descriptor) bool { + jsonFormat := resolveFeature(d, jsonFormatField) + return descriptorpb.FeatureSet_JsonFormat(jsonFormat.Enum()) == descriptorpb.FeatureSet_ALLOW +} diff --git a/vendor/github.com/bufbuild/protocompile/linker/doc.go b/vendor/github.com/bufbuild/protocompile/linker/doc.go new file mode 100644 index 00000000..93e68d9c --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/linker/doc.go @@ -0,0 +1,48 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package linker contains logic and APIs related to linking a protobuf file. +// The process of linking involves resolving all symbol references to the +// referenced descriptor. The result of linking is a "rich" descriptor that +// is more useful than just a descriptor proto since the links allow easy +// traversal of a protobuf type schema and the relationships between elements. +// +// # Files +// +// This package uses an augmentation to protoreflect.FileDescriptor instances +// in the form of the File interface. 
There are also factory functions for +// promoting a FileDescriptor into a linker.File. This new interface provides +// additional methods for resolving symbols in the file. +// +// This interface is both the result of linking but also an input to the linking +// process, as all dependencies of a file to be linked must be provided in this +// form. The actual result of the Link function, a Result, is an even broader +// interface than File: The linker.Result interface provides even more functions, +// which are needed for subsequent compilation steps: interpreting options and +// generating source code info. +// +// # Symbols +// +// This package has a type named Symbols which represents a symbol table. This +// is usually an internal detail when linking, but callers can provide an +// instance so that symbols across multiple compile/link operations all have +// access to the same table. This allows for detection of cases where multiple +// files try to declare elements with conflicting fully-qualified names or +// declare extensions for a particular extendable message that have conflicting +// tag numbers. +// +// The calling code simply uses the same Symbols instance across all compile +// operations and if any files processed have such conflicts, they can be +// reported. +package linker diff --git a/vendor/github.com/bufbuild/protocompile/linker/files.go b/vendor/github.com/bufbuild/protocompile/linker/files.go new file mode 100644 index 00000000..51ce3a8b --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/linker/files.go @@ -0,0 +1,366 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package linker + +import ( + "fmt" + "strings" + + "google.golang.org/protobuf/reflect/protodesc" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/types/dynamicpb" + + "github.com/bufbuild/protocompile/walk" +) + +// File is like a super-powered protoreflect.FileDescriptor. It includes helpful +// methods for looking up elements in the descriptor and can be used to create a +// resolver for the entire transitive closure of the file's dependencies. (See +// ResolverFromFile.) +type File interface { + protoreflect.FileDescriptor + // FindDescriptorByName returns the given named element that is defined in + // this file. If no such element exists, nil is returned. + FindDescriptorByName(name protoreflect.FullName) protoreflect.Descriptor + // FindImportByPath returns the File corresponding to the given import path. + // If this file does not import the given path, nil is returned. + FindImportByPath(path string) File + // FindExtensionByNumber returns the extension descriptor for the given tag + // that extends the given message name. If no such extension is defined in this + // file, nil is returned. 
+ FindExtensionByNumber(message protoreflect.FullName, tag protoreflect.FieldNumber) protoreflect.ExtensionTypeDescriptor +} + +// NewFile converts a protoreflect.FileDescriptor to a File. The given deps must +// contain all dependencies/imports of f. Also see NewFileRecursive. +func NewFile(f protoreflect.FileDescriptor, deps Files) (File, error) { + if asFile, ok := f.(File); ok { + return asFile, nil + } + checkedDeps := make(Files, f.Imports().Len()) + for i := 0; i < f.Imports().Len(); i++ { + imprt := f.Imports().Get(i) + dep := deps.FindFileByPath(imprt.Path()) + if dep == nil { + return nil, fmt.Errorf("cannot create File for %q: missing dependency for %q", f.Path(), imprt.Path()) + } + checkedDeps[i] = dep + } + return newFile(f, checkedDeps) +} + +func newFile(f protoreflect.FileDescriptor, deps Files) (File, error) { + descs := map[protoreflect.FullName]protoreflect.Descriptor{} + err := walk.Descriptors(f, func(d protoreflect.Descriptor) error { + if _, ok := descs[d.FullName()]; ok { + return fmt.Errorf("file %q contains multiple elements with the name %s", f.Path(), d.FullName()) + } + descs[d.FullName()] = d + return nil + }) + if err != nil { + return nil, err + } + return &file{ + FileDescriptor: f, + descs: descs, + deps: deps, + }, nil +} + +// NewFileRecursive recursively converts a protoreflect.FileDescriptor to a File. +// If f has any dependencies/imports, they are converted, too, including any and +// all transitive dependencies. +// +// If f already implements File, it is returned unchanged. +func NewFileRecursive(f protoreflect.FileDescriptor) (File, error) { + if asFile, ok := f.(File); ok { + return asFile, nil + } + return newFileRecursive(f, map[protoreflect.FileDescriptor]File{}) +} + +func newFileRecursive(fd protoreflect.FileDescriptor, seen map[protoreflect.FileDescriptor]File) (File, error) { + if res, ok := seen[fd]; ok { + if res == nil { + return nil, fmt.Errorf("import cycle encountered: file %s transitively imports itself", fd.Path()) + } + return res, nil + } + + if f, ok := fd.(File); ok { + seen[fd] = f + return f, nil + } + + seen[fd] = nil + deps := make([]File, fd.Imports().Len()) + for i := 0; i < fd.Imports().Len(); i++ { + imprt := fd.Imports().Get(i) + dep, err := newFileRecursive(imprt, seen) + if err != nil { + return nil, err + } + deps[i] = dep + } + + f, err := newFile(fd, deps) + if err != nil { + return nil, err + } + seen[fd] = f + return f, nil +} + +type file struct { + protoreflect.FileDescriptor + descs map[protoreflect.FullName]protoreflect.Descriptor + deps Files +} + +var _ File = (*file)(nil) + +func (f *file) FindDescriptorByName(name protoreflect.FullName) protoreflect.Descriptor { + return f.descs[name] +} + +func (f *file) FindImportByPath(path string) File { + return f.deps.FindFileByPath(path) +} + +func (f *file) FindExtensionByNumber(msg protoreflect.FullName, tag protoreflect.FieldNumber) protoreflect.ExtensionTypeDescriptor { + return findExtension(f, msg, tag) +} + +func (f *file) Unwrap() protoreflect.FileDescriptor { + return f.FileDescriptor +} + +// Files represents a set of protobuf files. It is a slice of File values, but +// also provides a method for easily looking up files by path and name. +type Files []File + +// FindFileByPath finds a file in f that has the given path and name. If f +// contains no such file, nil is returned. 
+func (f Files) FindFileByPath(path string) File { + for _, file := range f { + if file.Path() == path { + return file + } + } + return nil +} + +// AsResolver returns a Resolver that uses f as the source of descriptors. If +// a given query cannot be answered with the files in f, the query will fail +// with a protoregistry.NotFound error. The implementation just delegates calls +// to each file until a result is found. +// +// Also see ResolverFromFile. +func (f Files) AsResolver() Resolver { + return filesResolver(f) +} + +// Resolver is an interface that can resolve various kinds of queries about +// descriptors. It satisfies the resolver interfaces defined in protodesc +// and protoregistry packages. +type Resolver interface { + protodesc.Resolver + protoregistry.MessageTypeResolver + protoregistry.ExtensionTypeResolver +} + +// ResolverFromFile returns a Resolver that can resolve any element that is +// visible to the given file. It will search the given file, its imports, and +// any transitive public imports. +// +// Note that this function does not compute any additional indexes for efficient +// search, so queries generally take linear time, O(n) where n is the number of +// files whose elements are visible to the given file. Queries for an extension +// by number have runtime complexity that is linear with the number of messages +// and extensions defined across those files. +func ResolverFromFile(f File) Resolver { + return fileResolver{f: f} +} + +type fileResolver struct { + f File +} + +func (r fileResolver) FindFileByPath(path string) (protoreflect.FileDescriptor, error) { + return resolveInFile(r.f, false, nil, func(f File) (protoreflect.FileDescriptor, error) { + if f.Path() == path { + return f, nil + } + return nil, protoregistry.NotFound + }) +} + +func (r fileResolver) FindDescriptorByName(name protoreflect.FullName) (protoreflect.Descriptor, error) { + return resolveInFile(r.f, false, nil, func(f File) (protoreflect.Descriptor, error) { + if d := f.FindDescriptorByName(name); d != nil { + return d, nil + } + return nil, protoregistry.NotFound + }) +} + +func (r fileResolver) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) { + return resolveInFile(r.f, false, nil, func(f File) (protoreflect.MessageType, error) { + d := f.FindDescriptorByName(message) + if d != nil { + md, ok := d.(protoreflect.MessageDescriptor) + if !ok { + return nil, fmt.Errorf("%q is %s, not a message", message, descriptorTypeWithArticle(d)) + } + return dynamicpb.NewMessageType(md), nil + } + return nil, protoregistry.NotFound + }) +} + +func (r fileResolver) FindMessageByURL(url string) (protoreflect.MessageType, error) { + fullName := messageNameFromURL(url) + return r.FindMessageByName(protoreflect.FullName(fullName)) +} + +func messageNameFromURL(url string) string { + lastSlash := strings.LastIndexByte(url, '/') + return url[lastSlash+1:] +} + +func (r fileResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { + return resolveInFile(r.f, false, nil, func(f File) (protoreflect.ExtensionType, error) { + d := f.FindDescriptorByName(field) + if d != nil { + fld, ok := d.(protoreflect.FieldDescriptor) + if !ok || !fld.IsExtension() { + return nil, fmt.Errorf("%q is %s, not an extension", field, descriptorTypeWithArticle(d)) + } + if extd, ok := fld.(protoreflect.ExtensionTypeDescriptor); ok { + return extd.Type(), nil + } + return dynamicpb.NewExtensionType(fld), nil + } + return nil, protoregistry.NotFound + }) +} + 
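// Illustrative sketch (not part of the vendored files.go): ResolverFromFile and
// Files.AsResolver, documented above, satisfy the protoregistry-style resolver
// interfaces, so they can back dynamic message construction or protojson
// decoding. Here f is assumed to be a linker.File, jsonData an assumed []byte,
// and "foo.v1.Greeting" a hypothetical message name; protojson is
// google.golang.org/protobuf/encoding/protojson.
//
//	res := linker.ResolverFromFile(f)
//	mt, err := res.FindMessageByName("foo.v1.Greeting")
//	if err != nil {
//		return err // message not visible from f
//	}
//	msg := mt.New().Interface() // dynamicpb message backed by the linked descriptor
//	if err := (protojson.UnmarshalOptions{Resolver: res}).Unmarshal(jsonData, msg); err != nil {
//		return err
//	}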
+func (r fileResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { + return resolveInFile(r.f, false, nil, func(f File) (protoreflect.ExtensionType, error) { + ext := findExtension(f, message, field) + if ext != nil { + return ext.Type(), nil + } + return nil, protoregistry.NotFound + }) +} + +type filesResolver []File + +func (r filesResolver) FindFileByPath(path string) (protoreflect.FileDescriptor, error) { + for _, f := range r { + if f.Path() == path { + return f, nil + } + } + return nil, protoregistry.NotFound +} + +func (r filesResolver) FindDescriptorByName(name protoreflect.FullName) (protoreflect.Descriptor, error) { + for _, f := range r { + result := f.FindDescriptorByName(name) + if result != nil { + return result, nil + } + } + return nil, protoregistry.NotFound +} + +func (r filesResolver) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) { + for _, f := range r { + d := f.FindDescriptorByName(message) + if d != nil { + if md, ok := d.(protoreflect.MessageDescriptor); ok { + return dynamicpb.NewMessageType(md), nil + } + return nil, protoregistry.NotFound + } + } + return nil, protoregistry.NotFound +} + +func (r filesResolver) FindMessageByURL(url string) (protoreflect.MessageType, error) { + name := messageNameFromURL(url) + return r.FindMessageByName(protoreflect.FullName(name)) +} + +func (r filesResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { + for _, f := range r { + d := f.FindDescriptorByName(field) + if d != nil { + if extd, ok := d.(protoreflect.ExtensionTypeDescriptor); ok { + return extd.Type(), nil + } + if fld, ok := d.(protoreflect.FieldDescriptor); ok && fld.IsExtension() { + return dynamicpb.NewExtensionType(fld), nil + } + return nil, protoregistry.NotFound + } + } + return nil, protoregistry.NotFound +} + +func (r filesResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { + for _, f := range r { + ext := findExtension(f, message, field) + if ext != nil { + return ext.Type(), nil + } + } + return nil, protoregistry.NotFound +} + +type hasExtensionsAndMessages interface { + Messages() protoreflect.MessageDescriptors + Extensions() protoreflect.ExtensionDescriptors +} + +func findExtension(d hasExtensionsAndMessages, message protoreflect.FullName, field protoreflect.FieldNumber) protoreflect.ExtensionTypeDescriptor { + for i := 0; i < d.Extensions().Len(); i++ { + if extType := isExtensionMatch(d.Extensions().Get(i), message, field); extType != nil { + return extType + } + } + + for i := 0; i < d.Messages().Len(); i++ { + if extType := findExtension(d.Messages().Get(i), message, field); extType != nil { + return extType + } + } + + return nil // could not be found +} + +func isExtensionMatch(ext protoreflect.ExtensionDescriptor, message protoreflect.FullName, field protoreflect.FieldNumber) protoreflect.ExtensionTypeDescriptor { + if ext.Number() != field || ext.ContainingMessage().FullName() != message { + return nil + } + if extType, ok := ext.(protoreflect.ExtensionTypeDescriptor); ok { + return extType + } + return dynamicpb.NewExtensionType(ext).TypeDescriptor() +} diff --git a/vendor/github.com/bufbuild/protocompile/linker/linker.go b/vendor/github.com/bufbuild/protocompile/linker/linker.go new file mode 100644 index 00000000..6d878838 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/linker/linker.go @@ 
-0,0 +1,153 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package linker + +import ( + "fmt" + + "google.golang.org/protobuf/reflect/protoreflect" + + "github.com/bufbuild/protocompile/ast" + "github.com/bufbuild/protocompile/parser" + "github.com/bufbuild/protocompile/reporter" +) + +// Link handles linking a parsed descriptor proto into a fully-linked descriptor. +// If the given parser.Result has imports, they must all be present in the given +// dependencies. +// +// The symbols value is optional and may be nil. If it is not nil, it must be the +// same instance used to create and link all of the given result's dependencies +// (or otherwise already have all dependencies imported). Otherwise, linking may +// fail with spurious errors resolving symbols. +// +// The handler value is used to report any link errors. If any such errors are +// reported, this function returns a non-nil error. The Result value returned +// also implements protoreflect.FileDescriptor. +// +// Note that linking does NOT interpret options. So options messages in the +// returned value have all values stored in UninterpretedOptions fields. +func Link(parsed parser.Result, dependencies Files, symbols *Symbols, handler *reporter.Handler) (Result, error) { + if symbols == nil { + symbols = &Symbols{} + } + prefix := parsed.FileDescriptorProto().GetPackage() + if prefix != "" { + prefix += "." + } + + for _, imp := range parsed.FileDescriptorProto().Dependency { + dep := dependencies.FindFileByPath(imp) + if dep == nil { + return nil, fmt.Errorf("dependencies is missing import %q", imp) + } + if err := symbols.Import(dep, handler); err != nil { + return nil, err + } + } + + r := &result{ + FileDescriptor: noOpFile, + Result: parsed, + deps: dependencies, + descriptors: map[string]protoreflect.Descriptor{}, + usedImports: map[string]struct{}{}, + prefix: prefix, + optionQualifiedNames: map[ast.IdentValueNode]string{}, + } + // First, we create the hierarchy of descendant descriptors. + r.createDescendants() + + // Then we can put all symbols into a single pool, which lets us ensure there + // are no duplicate symbols and will also let us resolve and revise all type + // references in next step. + if err := symbols.importResult(r, handler); err != nil { + return nil, err + } + + // After we've populated the pool, we can now try to resolve all type + // references. All references must be checked for correct type, any fields + // with enum types must be corrected (since we parse them as if they are + // message references since we don't actually know message or enum until + // link time), and references will be re-written to be fully-qualified + // references (e.g. start with a dot "."). + if err := r.resolveReferences(handler, symbols); err != nil { + return nil, err + } + + return r, handler.Error() +} + +// Result is the result of linking. 
This is a protoreflect.FileDescriptor, but +// with some additional methods for exposing additional information, such as the +// for accessing the input AST or file descriptor. +// +// It also provides Resolve* methods, for looking up enums, messages, and +// extensions that are available to the protobuf source file this result +// represents. An element is "available" if it meets any of the following +// criteria: +// 1. The element is defined in this file itself. +// 2. The element is defined in a file that is directly imported by this file. +// 3. The element is "available" to a file that is directly imported by this +// file as a public import. +// +// Other elements, even if in the transitive closure of this file, are not +// available and thus won't be returned by these methods. +type Result interface { + File + parser.Result + + // ResolveMessageLiteralExtensionName returns the fully qualified name for + // an identifier for extension field names in message literals. + ResolveMessageLiteralExtensionName(ast.IdentValueNode) string + // ValidateOptions runs some validation checks on the descriptor that can only + // be done after options are interpreted. Any errors or warnings encountered + // will be reported via the given handler. If any error is reported, this + // function returns a non-nil error. + ValidateOptions(handler *reporter.Handler, symbols *Symbols) error + // CheckForUnusedImports is used to report warnings for unused imports. This + // should be called after options have been interpreted. Otherwise, the logic + // could incorrectly report imports as unused if the only symbol used were a + // custom option. + CheckForUnusedImports(handler *reporter.Handler) + // PopulateSourceCodeInfo is used to populate source code info for the file + // descriptor. This step requires that the underlying descriptor proto have + // its `source_code_info` field populated. This is typically a post-process + // step separate from linking, because computing source code info requires + // interpreting options (which is done after linking). + PopulateSourceCodeInfo() + + // RemoveAST drops the AST information from this result. + RemoveAST() +} + +// ErrorUnusedImport may be passed to a warning reporter when an unused +// import is detected. The error the reporter receives will be wrapped +// with source position that indicates the file and line where the import +// statement appeared. +type ErrorUnusedImport interface { + error + UnusedImport() string +} + +type errUnusedImport string + +func (e errUnusedImport) Error() string { + return fmt.Sprintf("import %q not used", string(e)) +} + +func (e errUnusedImport) UnusedImport() string { + return string(e) +} diff --git a/vendor/github.com/bufbuild/protocompile/linker/pathkey_no_unsafe.go b/vendor/github.com/bufbuild/protocompile/linker/pathkey_no_unsafe.go new file mode 100644 index 00000000..e00debc6 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/linker/pathkey_no_unsafe.go @@ -0,0 +1,35 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build appengine || gopherjs || purego +// +build appengine gopherjs purego + +// NB: other environments where unsafe is inappropriate should use "purego" build tag +// https://github.com/golang/go/issues/23172 + +package linker + +import ( + "reflect" + + "google.golang.org/protobuf/reflect/protoreflect" +) + +func pathKey(p protoreflect.SourcePath) interface{} { + rv := reflect.ValueOf(p) + arrayType := reflect.ArrayOf(rv.Len(), rv.Type().Elem()) + array := reflect.New(arrayType).Elem() + reflect.Copy(array, rv) + return array.Interface() +} diff --git a/vendor/github.com/bufbuild/protocompile/linker/pathkey_unsafe.go b/vendor/github.com/bufbuild/protocompile/linker/pathkey_unsafe.go new file mode 100644 index 00000000..aa33e74f --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/linker/pathkey_unsafe.go @@ -0,0 +1,40 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !appengine && !gopherjs && !purego +// +build !appengine,!gopherjs,!purego + +// NB: other environments where unsafe is inappropriate should use "purego" build tag +// https://github.com/golang/go/issues/23172 + +package linker + +import ( + "reflect" + "unsafe" + + "google.golang.org/protobuf/reflect/protoreflect" +) + +var pathElementType = reflect.TypeOf(protoreflect.SourcePath{}).Elem() + +func pathKey(p protoreflect.SourcePath) interface{} { + if p == nil { + // Reflection code below doesn't work with nil slices + return [0]int32{} + } + hdr := (*reflect.SliceHeader)(unsafe.Pointer(reflect.ValueOf(&p).Pointer())) + array := reflect.NewAt(reflect.ArrayOf(hdr.Len, pathElementType), unsafe.Pointer(hdr.Data)) + return array.Elem().Interface() +} diff --git a/vendor/github.com/bufbuild/protocompile/linker/pool.go b/vendor/github.com/bufbuild/protocompile/linker/pool.go new file mode 100644 index 00000000..3609edcb --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/linker/pool.go @@ -0,0 +1,131 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package linker + +import "google.golang.org/protobuf/types/descriptorpb" + +// allocPool helps allocate descriptor instances. Instead of allocating +// them one at a time, we allocate a pool -- a large, flat slice to hold +// all descriptors of a particular kind for a file. 
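The pooling idea in miniature (an illustrative sketch with invented names, not part of the vendored file): count up front, allocate one flat backing slice per kind, then hand out contiguous sub-slices by re-slicing, avoiding a separate allocation per descriptor.

package main

import "fmt"

// slab is a toy version of the allocation pool: one flat backing slice,
// carved into smaller slices on demand.
type slab[T any] struct{ buf []T }

func newSlab[T any](total int) *slab[T] {
	return &slab[T]{buf: make([]T, total)}
}

// take hands out the next count elements of the backing slice.
func (s *slab[T]) take(count int) []T {
	out := s.buf[:count]
	s.buf = s.buf[count:]
	return out
}

type toyMessage struct{ name string }

func main() {
	pool := newSlab[toyMessage](3) // e.g. three messages counted in the file
	topLevel := pool.take(2)       // two top-level messages...
	nested := pool.take(1)         // ...and one nested message
	topLevel[0].name, topLevel[1].name, nested[0].name = "A", "B", "A.C"
	fmt.Println(len(topLevel), len(nested), cap(pool.buf)) // 2 1 0
}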
We then use capacity +// in the pool when we need space for individual descriptors. +type allocPool struct { + numMessages int + numFields int + numOneofs int + numEnums int + numEnumValues int + numExtensions int + numServices int + numMethods int + + messages []msgDescriptor + fields []fldDescriptor + oneofs []oneofDescriptor + enums []enumDescriptor + enumVals []enValDescriptor + extensions []extTypeDescriptor + services []svcDescriptor + methods []mtdDescriptor +} + +func newAllocPool(file *descriptorpb.FileDescriptorProto) *allocPool { + var pool allocPool + pool.countElements(file) + pool.messages = make([]msgDescriptor, pool.numMessages) + pool.fields = make([]fldDescriptor, pool.numFields) + pool.oneofs = make([]oneofDescriptor, pool.numOneofs) + pool.enums = make([]enumDescriptor, pool.numEnums) + pool.enumVals = make([]enValDescriptor, pool.numEnumValues) + pool.extensions = make([]extTypeDescriptor, pool.numExtensions) + pool.services = make([]svcDescriptor, pool.numServices) + pool.methods = make([]mtdDescriptor, pool.numMethods) + return &pool +} + +func (p *allocPool) getMessages(count int) []msgDescriptor { + allocated := p.messages[:count] + p.messages = p.messages[count:] + return allocated +} + +func (p *allocPool) getFields(count int) []fldDescriptor { + allocated := p.fields[:count] + p.fields = p.fields[count:] + return allocated +} + +func (p *allocPool) getOneofs(count int) []oneofDescriptor { + allocated := p.oneofs[:count] + p.oneofs = p.oneofs[count:] + return allocated +} + +func (p *allocPool) getEnums(count int) []enumDescriptor { + allocated := p.enums[:count] + p.enums = p.enums[count:] + return allocated +} + +func (p *allocPool) getEnumValues(count int) []enValDescriptor { + allocated := p.enumVals[:count] + p.enumVals = p.enumVals[count:] + return allocated +} + +func (p *allocPool) getExtensions(count int) []extTypeDescriptor { + allocated := p.extensions[:count] + p.extensions = p.extensions[count:] + return allocated +} + +func (p *allocPool) getServices(count int) []svcDescriptor { + allocated := p.services[:count] + p.services = p.services[count:] + return allocated +} + +func (p *allocPool) getMethods(count int) []mtdDescriptor { + allocated := p.methods[:count] + p.methods = p.methods[count:] + return allocated +} + +func (p *allocPool) countElements(file *descriptorpb.FileDescriptorProto) { + p.countElementsInMessages(file.MessageType) + p.countElementsInEnums(file.EnumType) + p.numExtensions += len(file.Extension) + p.numServices += len(file.Service) + for _, svc := range file.Service { + p.numMethods += len(svc.Method) + } +} + +func (p *allocPool) countElementsInMessages(msgs []*descriptorpb.DescriptorProto) { + p.numMessages += len(msgs) + for _, msg := range msgs { + p.numFields += len(msg.Field) + p.numOneofs += len(msg.OneofDecl) + p.countElementsInMessages(msg.NestedType) + p.countElementsInEnums(msg.EnumType) + p.numExtensions += len(msg.Extension) + } +} + +func (p *allocPool) countElementsInEnums(enums []*descriptorpb.EnumDescriptorProto) { + p.numEnums += len(enums) + for _, enum := range enums { + p.numEnumValues += len(enum.Value) + } +} diff --git a/vendor/github.com/bufbuild/protocompile/linker/resolve.go b/vendor/github.com/bufbuild/protocompile/linker/resolve.go new file mode 100644 index 00000000..cf30148c --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/linker/resolve.go @@ -0,0 +1,835 @@ +// Copyright 2020-2024 Buf Technologies, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package linker + +import ( + "errors" + "fmt" + "strings" + + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/types/descriptorpb" + + "github.com/bufbuild/protocompile/ast" + "github.com/bufbuild/protocompile/internal" + "github.com/bufbuild/protocompile/reporter" + "github.com/bufbuild/protocompile/walk" +) + +func (r *result) ResolveMessageLiteralExtensionName(node ast.IdentValueNode) string { + return r.optionQualifiedNames[node] +} + +func (r *result) resolveElement(name protoreflect.FullName, checkedCache []string) protoreflect.Descriptor { + if len(name) > 0 && name[0] == '.' { + name = name[1:] + } + res, _ := resolveInFile(r, false, checkedCache[:0], func(f File) (protoreflect.Descriptor, error) { + d := resolveElementInFile(name, f) + if d != nil { + return d, nil + } + return nil, protoregistry.NotFound + }) + return res +} + +func resolveInFile[T any](f File, publicImportsOnly bool, checked []string, fn func(File) (T, error)) (T, error) { + var zero T + path := f.Path() + for _, str := range checked { + if str == path { + // already checked + return zero, protoregistry.NotFound + } + } + checked = append(checked, path) + + res, err := fn(f) + if err == nil { + // found it + return res, nil + } + if !errors.Is(err, protoregistry.NotFound) { + return zero, err + } + + imports := f.Imports() + for i, l := 0, imports.Len(); i < l; i++ { + imp := imports.Get(i) + if publicImportsOnly && !imp.IsPublic { + continue + } + res, err := resolveInFile(f.FindImportByPath(imp.Path()), true, checked, fn) + if errors.Is(err, protoregistry.NotFound) { + continue + } + if err != nil { + return zero, err + } + if !imp.IsPublic { + if r, ok := f.(*result); ok { + r.markUsed(imp.Path()) + } + } + return res, nil + } + return zero, err +} + +func (r *result) markUsed(importPath string) { + r.usedImports[importPath] = struct{}{} +} + +func (r *result) CheckForUnusedImports(handler *reporter.Handler) { + fd := r.FileDescriptorProto() + file, _ := r.FileNode().(*ast.FileNode) + for i, dep := range fd.Dependency { + if _, ok := r.usedImports[dep]; !ok { + isPublic := false + // it's fine if it's a public import + for _, j := range fd.PublicDependency { + if i == int(j) { + isPublic = true + break + } + } + if isPublic { + continue + } + span := ast.UnknownSpan(fd.GetName()) + if file != nil { + for _, decl := range file.Decls { + imp, ok := decl.(*ast.ImportNode) + if ok && imp.Name.AsString() == dep { + span = file.NodeInfo(imp) + } + } + } + handler.HandleWarningWithPos(span, errUnusedImport(dep)) + } + } +} + +func descriptorTypeWithArticle(d protoreflect.Descriptor) string { + switch d := d.(type) { + case protoreflect.MessageDescriptor: + return "a message" + case protoreflect.FieldDescriptor: + if d.IsExtension() { + return "an extension" + } + return "a field" + case protoreflect.OneofDescriptor: + return "a oneof" + 
case protoreflect.EnumDescriptor: + return "an enum" + case protoreflect.EnumValueDescriptor: + return "an enum value" + case protoreflect.ServiceDescriptor: + return "a service" + case protoreflect.MethodDescriptor: + return "a method" + case protoreflect.FileDescriptor: + return "a file" + default: + // shouldn't be possible + return fmt.Sprintf("a %T", d) + } +} + +func (r *result) createDescendants() { + fd := r.FileDescriptorProto() + pool := newAllocPool(fd) + prefix := "" + if fd.GetPackage() != "" { + prefix = fd.GetPackage() + "." + } + r.imports = r.createImports() + r.messages = r.createMessages(prefix, r, fd.MessageType, pool) + r.enums = r.createEnums(prefix, r, fd.EnumType, pool) + r.extensions = r.createExtensions(prefix, r, fd.Extension, pool) + r.services = r.createServices(prefix, fd.Service, pool) +} + +func (r *result) resolveReferences(handler *reporter.Handler, s *Symbols) error { + fd := r.FileDescriptorProto() + checkedCache := make([]string, 0, 16) + scopes := []scope{fileScope(r, checkedCache)} + if fd.Options != nil { + if err := r.resolveOptions(handler, "file", protoreflect.FullName(fd.GetName()), fd.Options.UninterpretedOption, scopes, checkedCache); err != nil { + return err + } + } + + // This is to de-dupe extendee-releated error messages when the same + // extendee is referenced from multiple extension field definitions. + // We leave it nil if there's no AST. + var extendeeNodes map[ast.Node]struct{} + + return walk.DescriptorsEnterAndExit(r, + func(d protoreflect.Descriptor) error { + fqn := d.FullName() + switch d := d.(type) { + case *msgDescriptor: + // Strangely, when protoc resolves extension names, it uses the *enclosing* scope + // instead of the message's scope. So if the message contains an extension named "i", + // an option cannot refer to it as simply "i" but must qualify it (at a minimum "Msg.i"). + // So we don't add this messages scope to our scopes slice until *after* we do options. 
+ if d.proto.Options != nil { + if err := r.resolveOptions(handler, "message", fqn, d.proto.Options.UninterpretedOption, scopes, checkedCache); err != nil { + return err + } + } + scopes = append(scopes, messageScope(r, fqn)) // push new scope on entry + // walk only visits descriptors, so we need to loop over extension ranges ourselves + for _, er := range d.proto.ExtensionRange { + if er.Options != nil { + erName := protoreflect.FullName(fmt.Sprintf("%s:%d-%d", fqn, er.GetStart(), er.GetEnd()-1)) + if err := r.resolveOptions(handler, "extension range", erName, er.Options.UninterpretedOption, scopes, checkedCache); err != nil { + return err + } + } + } + case *extTypeDescriptor: + if d.field.proto.Options != nil { + if err := r.resolveOptions(handler, "extension", fqn, d.field.proto.Options.UninterpretedOption, scopes, checkedCache); err != nil { + return err + } + } + if extendeeNodes == nil && r.AST() != nil { + extendeeNodes = map[ast.Node]struct{}{} + } + if err := resolveFieldTypes(&d.field, handler, extendeeNodes, s, scopes, checkedCache); err != nil { + return err + } + if r.Syntax() == protoreflect.Proto3 && !allowedProto3Extendee(d.field.proto.GetExtendee()) { + file := r.FileNode() + node := r.FieldNode(d.field.proto).FieldExtendee() + if err := handler.HandleErrorf(file.NodeInfo(node), "extend blocks in proto3 can only be used to define custom options"); err != nil { + return err + } + } + case *fldDescriptor: + if d.proto.Options != nil { + if err := r.resolveOptions(handler, "field", fqn, d.proto.Options.UninterpretedOption, scopes, checkedCache); err != nil { + return err + } + } + if err := resolveFieldTypes(d, handler, nil, s, scopes, checkedCache); err != nil { + return err + } + case *oneofDescriptor: + if d.proto.Options != nil { + if err := r.resolveOptions(handler, "oneof", fqn, d.proto.Options.UninterpretedOption, scopes, checkedCache); err != nil { + return err + } + } + case *enumDescriptor: + if d.proto.Options != nil { + if err := r.resolveOptions(handler, "enum", fqn, d.proto.Options.UninterpretedOption, scopes, checkedCache); err != nil { + return err + } + } + case *enValDescriptor: + if d.proto.Options != nil { + if err := r.resolveOptions(handler, "enum value", fqn, d.proto.Options.UninterpretedOption, scopes, checkedCache); err != nil { + return err + } + } + case *svcDescriptor: + if d.proto.Options != nil { + if err := r.resolveOptions(handler, "service", fqn, d.proto.Options.UninterpretedOption, scopes, checkedCache); err != nil { + return err + } + } + // not a message, but same scoping rules for nested elements as if it were + scopes = append(scopes, messageScope(r, fqn)) // push new scope on entry + case *mtdDescriptor: + if d.proto.Options != nil { + if err := r.resolveOptions(handler, "method", fqn, d.proto.Options.UninterpretedOption, scopes, checkedCache); err != nil { + return err + } + } + if err := resolveMethodTypes(d, handler, scopes, checkedCache); err != nil { + return err + } + } + return nil + }, + func(d protoreflect.Descriptor) error { + switch d.(type) { + case protoreflect.MessageDescriptor, protoreflect.ServiceDescriptor: + // pop message scope on exit + scopes = scopes[:len(scopes)-1] + } + return nil + }) +} + +var allowedProto3Extendees = map[string]struct{}{ + ".google.protobuf.FileOptions": {}, + ".google.protobuf.MessageOptions": {}, + ".google.protobuf.FieldOptions": {}, + ".google.protobuf.OneofOptions": {}, + ".google.protobuf.ExtensionRangeOptions": {}, + ".google.protobuf.EnumOptions": {}, + 
".google.protobuf.EnumValueOptions": {}, + ".google.protobuf.ServiceOptions": {}, + ".google.protobuf.MethodOptions": {}, +} + +func allowedProto3Extendee(n string) bool { + if n == "" { + // not an extension, allowed + return true + } + _, ok := allowedProto3Extendees[n] + return ok +} + +func resolveFieldTypes(f *fldDescriptor, handler *reporter.Handler, extendees map[ast.Node]struct{}, s *Symbols, scopes []scope, checkedCache []string) error { + r := f.file + fld := f.proto + file := r.FileNode() + node := r.FieldNode(fld) + kind := "field" + if fld.GetExtendee() != "" { + kind = "extension" + var alreadyReported bool + if extendees != nil { + _, alreadyReported = extendees[node.FieldExtendee()] + if !alreadyReported { + extendees[node.FieldExtendee()] = struct{}{} + } + } + dsc := r.resolve(fld.GetExtendee(), false, scopes, checkedCache) + if dsc == nil { + if alreadyReported { + return nil + } + var extendeePrefix string + if extendees == nil { + extendeePrefix = kind + " " + f.fqn + ": " + } + return handler.HandleErrorf(file.NodeInfo(node.FieldExtendee()), "%sunknown extendee type %s", extendeePrefix, fld.GetExtendee()) + } + if isSentinelDescriptor(dsc) { + if alreadyReported { + return nil + } + var extendeePrefix string + if extendees == nil { + extendeePrefix = kind + " " + f.fqn + ": " + } + return handler.HandleErrorf(file.NodeInfo(node.FieldExtendee()), "%sunknown extendee type %s; resolved to %s which is not defined; consider using a leading dot", extendeePrefix, fld.GetExtendee(), dsc.FullName()) + } + extd, ok := dsc.(protoreflect.MessageDescriptor) + if !ok { + if alreadyReported { + return nil + } + var extendeePrefix string + if extendees == nil { + extendeePrefix = kind + " " + f.fqn + ": " + } + return handler.HandleErrorf(file.NodeInfo(node.FieldExtendee()), "%sextendee is invalid: %s is %s, not a message", extendeePrefix, dsc.FullName(), descriptorTypeWithArticle(dsc)) + } + + f.extendee = extd + extendeeName := "." 
+ string(dsc.FullName()) + if fld.GetExtendee() != extendeeName { + fld.Extendee = proto.String(extendeeName) + } + // make sure the tag number is in range + found := false + tag := protoreflect.FieldNumber(fld.GetNumber()) + for i := 0; i < extd.ExtensionRanges().Len(); i++ { + rng := extd.ExtensionRanges().Get(i) + if tag >= rng[0] && tag < rng[1] { + found = true + break + } + } + if !found { + if err := handler.HandleErrorf(file.NodeInfo(node.FieldTag()), "%s %s: tag %d is not in valid range for extended type %s", kind, f.fqn, tag, dsc.FullName()); err != nil { + return err + } + } else { + // make sure tag is not a duplicate + if err := s.AddExtension(packageFor(dsc), dsc.FullName(), tag, file.NodeInfo(node.FieldTag()), handler); err != nil { + return err + } + } + } else if f.proto.OneofIndex != nil { + parent := f.parent.(protoreflect.MessageDescriptor) //nolint:errcheck + index := int(f.proto.GetOneofIndex()) + f.oneof = parent.Oneofs().Get(index) + } + + if fld.GetTypeName() == "" { + // scalar type; no further resolution required + return nil + } + + dsc := r.resolve(fld.GetTypeName(), true, scopes, checkedCache) + if dsc == nil { + return handler.HandleErrorf(file.NodeInfo(node.FieldType()), "%s %s: unknown type %s", kind, f.fqn, fld.GetTypeName()) + } + if isSentinelDescriptor(dsc) { + return handler.HandleErrorf(file.NodeInfo(node.FieldType()), "%s %s: unknown type %s; resolved to %s which is not defined; consider using a leading dot", kind, f.fqn, fld.GetTypeName(), dsc.FullName()) + } + switch dsc := dsc.(type) { + case protoreflect.MessageDescriptor: + if dsc.IsMapEntry() { + isValid := false + switch node.(type) { + case *ast.MapFieldNode: + // We have an AST for this file and can see this field is from a map declaration + isValid = true + case *ast.NoSourceNode: + // We don't have an AST for the file (it came from a provided descriptor). So we + // need to validate that it's not an illegal reference. To be valid, the field + // must be repeated and the entry type must be nested in the same enclosing + // message as the field. + isValid = isValidMap(f, dsc) + if isValid && f.index > 0 { + // also make sure there are no earlier fields that are valid for this map entry + flds := f.Parent().(protoreflect.MessageDescriptor).Fields() + for i := 0; i < f.index; i++ { + if isValidMap(flds.Get(i), dsc) { + isValid = false + break + } + } + } + } + if !isValid { + return handler.HandleErrorf(file.NodeInfo(node.FieldType()), "%s %s: %s is a synthetic map entry and may not be referenced explicitly", kind, f.fqn, dsc.FullName()) + } + } + typeName := "." + string(dsc.FullName()) + if fld.GetTypeName() != typeName { + fld.TypeName = proto.String(typeName) + } + if fld.Type == nil { + // if type was tentatively unset, we now know it's actually a message + fld.Type = descriptorpb.FieldDescriptorProto_TYPE_MESSAGE.Enum() + } else if fld.GetType() != descriptorpb.FieldDescriptorProto_TYPE_MESSAGE && fld.GetType() != descriptorpb.FieldDescriptorProto_TYPE_GROUP { + return handler.HandleErrorf(file.NodeInfo(node.FieldType()), "%s %s: descriptor proto indicates type %v but should be %v", kind, f.fqn, fld.GetType(), descriptorpb.FieldDescriptorProto_TYPE_MESSAGE) + } + f.msgType = dsc + case protoreflect.EnumDescriptor: + typeName := "." 
+ string(dsc.FullName()) + if fld.GetTypeName() != typeName { + fld.TypeName = proto.String(typeName) + } + if fld.Type == nil { + // the type was tentatively unset, but now we know it's actually an enum + fld.Type = descriptorpb.FieldDescriptorProto_TYPE_ENUM.Enum() + } else if fld.GetType() != descriptorpb.FieldDescriptorProto_TYPE_ENUM { + return handler.HandleErrorf(file.NodeInfo(node.FieldType()), "%s %s: descriptor proto indicates type %v but should be %v", kind, f.fqn, fld.GetType(), descriptorpb.FieldDescriptorProto_TYPE_ENUM) + } + f.enumType = dsc + default: + return handler.HandleErrorf(file.NodeInfo(node.FieldType()), "%s %s: invalid type: %s is %s, not a message or enum", kind, f.fqn, dsc.FullName(), descriptorTypeWithArticle(dsc)) + } + return nil +} + +func packageFor(dsc protoreflect.Descriptor) protoreflect.FullName { + if dsc.ParentFile() != nil { + return dsc.ParentFile().Package() + } + // Can't access package? Make a best effort guess. + return dsc.FullName().Parent() +} + +func isValidMap(mapField protoreflect.FieldDescriptor, mapEntry protoreflect.MessageDescriptor) bool { + return !mapField.IsExtension() && + mapEntry.Parent() == mapField.ContainingMessage() && + mapField.Cardinality() == protoreflect.Repeated && + string(mapEntry.Name()) == internal.InitCap(internal.JSONName(string(mapField.Name())))+"Entry" +} + +func resolveMethodTypes(m *mtdDescriptor, handler *reporter.Handler, scopes []scope, checkedCache []string) error { + scope := "method " + m.fqn + r := m.file + mtd := m.proto + file := r.FileNode() + node := r.MethodNode(mtd) + dsc := r.resolve(mtd.GetInputType(), false, scopes, checkedCache) + if dsc == nil { + if err := handler.HandleErrorf(file.NodeInfo(node.GetInputType()), "%s: unknown request type %s", scope, mtd.GetInputType()); err != nil { + return err + } + } else if isSentinelDescriptor(dsc) { + if err := handler.HandleErrorf(file.NodeInfo(node.GetInputType()), "%s: unknown request type %s; resolved to %s which is not defined; consider using a leading dot", scope, mtd.GetInputType(), dsc.FullName()); err != nil { + return err + } + } else if msg, ok := dsc.(protoreflect.MessageDescriptor); !ok { + if err := handler.HandleErrorf(file.NodeInfo(node.GetInputType()), "%s: invalid request type: %s is %s, not a message", scope, dsc.FullName(), descriptorTypeWithArticle(dsc)); err != nil { + return err + } + } else { + typeName := "." + string(dsc.FullName()) + if mtd.GetInputType() != typeName { + mtd.InputType = proto.String(typeName) + } + m.inputType = msg + } + + // TODO: make input and output type resolution more DRY + dsc = r.resolve(mtd.GetOutputType(), false, scopes, checkedCache) + if dsc == nil { + if err := handler.HandleErrorf(file.NodeInfo(node.GetOutputType()), "%s: unknown response type %s", scope, mtd.GetOutputType()); err != nil { + return err + } + } else if isSentinelDescriptor(dsc) { + if err := handler.HandleErrorf(file.NodeInfo(node.GetOutputType()), "%s: unknown response type %s; resolved to %s which is not defined; consider using a leading dot", scope, mtd.GetOutputType(), dsc.FullName()); err != nil { + return err + } + } else if msg, ok := dsc.(protoreflect.MessageDescriptor); !ok { + if err := handler.HandleErrorf(file.NodeInfo(node.GetOutputType()), "%s: invalid response type: %s is %s, not a message", scope, dsc.FullName(), descriptorTypeWithArticle(dsc)); err != nil { + return err + } + } else { + typeName := "." 
+ string(dsc.FullName()) + if mtd.GetOutputType() != typeName { + mtd.OutputType = proto.String(typeName) + } + m.outputType = msg + } + + return nil +} + +func (r *result) resolveOptions(handler *reporter.Handler, elemType string, elemName protoreflect.FullName, opts []*descriptorpb.UninterpretedOption, scopes []scope, checkedCache []string) error { + mc := &internal.MessageContext{ + File: r, + ElementName: string(elemName), + ElementType: elemType, + } + file := r.FileNode() +opts: + for _, opt := range opts { + // resolve any extension names found in option names + for _, nm := range opt.Name { + if nm.GetIsExtension() { + node := r.OptionNamePartNode(nm) + fqn, err := r.resolveExtensionName(nm.GetNamePart(), scopes, checkedCache) + if err != nil { + if err := handler.HandleErrorf(file.NodeInfo(node), "%v%v", mc, err); err != nil { + return err + } + continue opts + } + nm.NamePart = proto.String(fqn) + } + } + // also resolve any extension names found inside message literals in option values + mc.Option = opt + optVal := r.OptionNode(opt).GetValue() + if err := r.resolveOptionValue(handler, mc, optVal, scopes, checkedCache); err != nil { + return err + } + mc.Option = nil + } + return nil +} + +func (r *result) resolveOptionValue(handler *reporter.Handler, mc *internal.MessageContext, val ast.ValueNode, scopes []scope, checkedCache []string) error { + optVal := val.Value() + switch optVal := optVal.(type) { + case []ast.ValueNode: + origPath := mc.OptAggPath + defer func() { + mc.OptAggPath = origPath + }() + for i, v := range optVal { + mc.OptAggPath = fmt.Sprintf("%s[%d]", origPath, i) + if err := r.resolveOptionValue(handler, mc, v, scopes, checkedCache); err != nil { + return err + } + } + case []*ast.MessageFieldNode: + origPath := mc.OptAggPath + defer func() { + mc.OptAggPath = origPath + }() + for _, fld := range optVal { + // check for extension name + if fld.Name.IsExtension() { + // Confusingly, an extension reference inside a message literal cannot refer to + // elements in the same enclosing message without a qualifier. Basically, we + // treat this as if there were no message scopes, so only the package name is + // used for resolving relative references. (Inconsistent protoc behavior, but + // likely due to how it re-uses C++ text format implementation, and normal text + // format doesn't expect that kind of relative reference.) + scopes := scopes[:1] // first scope is file, the rest are enclosing messages + fqn, err := r.resolveExtensionName(string(fld.Name.Name.AsIdentifier()), scopes, checkedCache) + if err != nil { + if err := handler.HandleErrorf(r.FileNode().NodeInfo(fld.Name.Name), "%v%v", mc, err); err != nil { + return err + } + } else { + r.optionQualifiedNames[fld.Name.Name] = fqn + } + } + + // recurse into value + mc.OptAggPath = origPath + if origPath != "" { + mc.OptAggPath += "." 
+ } + if fld.Name.IsExtension() { + mc.OptAggPath = fmt.Sprintf("%s[%s]", mc.OptAggPath, string(fld.Name.Name.AsIdentifier())) + } else { + mc.OptAggPath = fmt.Sprintf("%s%s", mc.OptAggPath, string(fld.Name.Name.AsIdentifier())) + } + + if err := r.resolveOptionValue(handler, mc, fld.Val, scopes, checkedCache); err != nil { + return err + } + } + } + return nil +} + +func (r *result) resolveExtensionName(name string, scopes []scope, checkedCache []string) (string, error) { + dsc := r.resolve(name, false, scopes, checkedCache) + if dsc == nil { + return "", fmt.Errorf("unknown extension %s", name) + } + if isSentinelDescriptor(dsc) { + return "", fmt.Errorf("unknown extension %s; resolved to %s which is not defined; consider using a leading dot", name, dsc.FullName()) + } + if ext, ok := dsc.(protoreflect.FieldDescriptor); !ok { + return "", fmt.Errorf("invalid extension: %s is %s, not an extension", name, descriptorTypeWithArticle(dsc)) + } else if !ext.IsExtension() { + return "", fmt.Errorf("invalid extension: %s is a field but not an extension", name) + } + return string("." + dsc.FullName()), nil +} + +func (r *result) resolve(name string, onlyTypes bool, scopes []scope, checkedCache []string) protoreflect.Descriptor { + if strings.HasPrefix(name, ".") { + // already fully-qualified + return r.resolveElement(protoreflect.FullName(name[1:]), checkedCache) + } + // unqualified, so we look in the enclosing (last) scope first and move + // towards outermost (first) scope, trying to resolve the symbol + pos := strings.IndexByte(name, '.') + firstName := name + if pos > 0 { + firstName = name[:pos] + } + var bestGuess protoreflect.Descriptor + for i := len(scopes) - 1; i >= 0; i-- { + d := scopes[i](firstName, name) + if d != nil { + // In `protoc`, it will skip a match of the wrong type and move on + // to the next scope, but only if the reference is unqualified. So + // we mirror that behavior here. When we skip and move on, we go + // ahead and save the match of the wrong type so we can at least use + // it to construct a better error in the event that we don't find + // any match of the right type. + if !onlyTypes || isType(d) || firstName != name { + return d + } + if bestGuess == nil { + bestGuess = d + } + } + } + // we return best guess, even though it was not an allowed kind of + // descriptor, so caller can print a better error message (e.g. + // indicating that the name was found but that it's the wrong type) + return bestGuess +} + +func isType(d protoreflect.Descriptor) bool { + switch d.(type) { + case protoreflect.MessageDescriptor, protoreflect.EnumDescriptor: + return true + } + return false +} + +// scope represents a lexical scope in a proto file in which messages and enums +// can be declared. +type scope func(firstName, fullName string) protoreflect.Descriptor + +func fileScope(r *result, checkedCache []string) scope { + // we search symbols in this file, but also symbols in other files that have + // the same package as this file or a "parent" package (in protobuf, + // packages are a hierarchy like C++ namespaces) + prefixes := internal.CreatePrefixList(r.FileDescriptorProto().GetPackage()) + querySymbol := func(n string) protoreflect.Descriptor { + return r.resolveElement(protoreflect.FullName(n), checkedCache) + } + return func(firstName, fullName string) protoreflect.Descriptor { + for _, prefix := range prefixes { + var n1, n string + if prefix == "" { + // exhausted all prefixes, so it must be in this one + n1, n = fullName, fullName + } else { + n = prefix + "." 
+ fullName + n1 = prefix + "." + firstName + } + d := resolveElementRelative(n1, n, querySymbol) + if d != nil { + return d + } + } + return nil + } +} + +func messageScope(r *result, messageName protoreflect.FullName) scope { + querySymbol := func(n string) protoreflect.Descriptor { + return resolveElementInFile(protoreflect.FullName(n), r) + } + return func(firstName, fullName string) protoreflect.Descriptor { + n1 := string(messageName) + "." + firstName + n := string(messageName) + "." + fullName + return resolveElementRelative(n1, n, querySymbol) + } +} + +func resolveElementRelative(firstName, fullName string, query func(name string) protoreflect.Descriptor) protoreflect.Descriptor { + d := query(firstName) + if d == nil { + return nil + } + if firstName == fullName { + return d + } + if !isAggregateDescriptor(d) { + // can't possibly find the rest of full name if + // the first name indicated a leaf descriptor + return nil + } + d = query(fullName) + if d == nil { + return newSentinelDescriptor(fullName) + } + return d +} + +func resolveElementInFile(name protoreflect.FullName, f File) protoreflect.Descriptor { + d := f.FindDescriptorByName(name) + if d != nil { + return d + } + + if matchesPkgNamespace(name, f.Package()) { + // this sentinel means the name is a valid namespace but + // does not refer to a descriptor + return newSentinelDescriptor(string(name)) + } + return nil +} + +func matchesPkgNamespace(fqn, pkg protoreflect.FullName) bool { + if pkg == "" { + return false + } + if fqn == pkg { + return true + } + if len(pkg) > len(fqn) && strings.HasPrefix(string(pkg), string(fqn)) { + // if char after fqn is a dot, then fqn is a namespace + if pkg[len(fqn)] == '.' { + return true + } + } + return false +} + +func isAggregateDescriptor(d protoreflect.Descriptor) bool { + if isSentinelDescriptor(d) { + // this indicates the name matched a package, not a + // descriptor, but a package is an aggregate, so + // we return true + return true + } + switch d.(type) { + case protoreflect.MessageDescriptor, protoreflect.EnumDescriptor, protoreflect.ServiceDescriptor: + return true + default: + return false + } +} + +func isSentinelDescriptor(d protoreflect.Descriptor) bool { + _, ok := d.(*sentinelDescriptor) + return ok +} + +func newSentinelDescriptor(name string) protoreflect.Descriptor { + return &sentinelDescriptor{name: name} +} + +// sentinelDescriptor is a placeholder descriptor. It is used instead of nil to +// distinguish between two situations: +// 1. The given name could not be found. +// 2. The given name *cannot* be a valid result so stop searching. +// +// In these cases, attempts to resolve an element name will return nil for the +// first case and will return a sentinelDescriptor in the second. The sentinel +// contains the fully-qualified name which caused the search to stop (which may +// be a prefix of the actual name being resolved). 
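A toy illustration of the sentinel idea described above (invented names, not part of the vendored file): the lookup distinguishes "nothing matched here, keep searching" (nil) from "the name matched a namespace but no element exists, stop searching" (a sentinel value).

package main

import "fmt"

type symbol struct{ name string }

// sentinel stands in for "the name is a valid namespace but not an element".
var sentinel = &symbol{}

func lookup(elements map[string]*symbol, namespaces map[string]bool, name string) *symbol {
	if s, ok := elements[name]; ok {
		return s // found a real element
	}
	if namespaces[name] {
		return sentinel // valid prefix, but not an element: stop searching
	}
	return nil // not found here: caller keeps searching other scopes/imports
}

func main() {
	elements := map[string]*symbol{"foo.bar.Baz": {name: "foo.bar.Baz"}}
	namespaces := map[string]bool{"foo": true, "foo.bar": true}
	fmt.Println(lookup(elements, namespaces, "foo.bar.Baz").name)    // foo.bar.Baz
	fmt.Println(lookup(elements, namespaces, "foo.bar") == sentinel) // true
	fmt.Println(lookup(elements, namespaces, "other") == nil)        // true
}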
+type sentinelDescriptor struct { + protoreflect.Descriptor + name string +} + +func (p *sentinelDescriptor) ParentFile() protoreflect.FileDescriptor { + return nil +} + +func (p *sentinelDescriptor) Parent() protoreflect.Descriptor { + return nil +} + +func (p *sentinelDescriptor) Index() int { + return 0 +} + +func (p *sentinelDescriptor) Syntax() protoreflect.Syntax { + return 0 +} + +func (p *sentinelDescriptor) Name() protoreflect.Name { + return protoreflect.Name(p.name) +} + +func (p *sentinelDescriptor) FullName() protoreflect.FullName { + return protoreflect.FullName(p.name) +} + +func (p *sentinelDescriptor) IsPlaceholder() bool { + return false +} + +func (p *sentinelDescriptor) Options() protoreflect.ProtoMessage { + return nil +} + +var _ protoreflect.Descriptor = (*sentinelDescriptor)(nil) diff --git a/vendor/github.com/bufbuild/protocompile/linker/symbols.go b/vendor/github.com/bufbuild/protocompile/linker/symbols.go new file mode 100644 index 00000000..c8db762b --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/linker/symbols.go @@ -0,0 +1,635 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package linker + +import ( + "strings" + "sync" + + "google.golang.org/protobuf/reflect/protoreflect" + + "github.com/bufbuild/protocompile/ast" + "github.com/bufbuild/protocompile/internal" + "github.com/bufbuild/protocompile/protoutil" + "github.com/bufbuild/protocompile/reporter" + "github.com/bufbuild/protocompile/walk" +) + +const unknownFilePath = "" + +// Symbols is a symbol table that maps names for all program elements to their +// location in source. It also tracks extension tag numbers. This can be used +// to enforce uniqueness for symbol names and tag numbers across many files and +// many link operations. +// +// This type is thread-safe. +type Symbols struct { + pkgTrie packageSymbols + + // We don't know the packages for these symbols, so we can't + // keep them in the pkgTrie. In vast majority of cases, this + // will always be empty/unused. When used, it ensures that + // multiple extension declarations don't refer to the same + // extension. + extDeclsMu sync.Mutex + extDecls map[protoreflect.FullName]extDecl +} + +type packageSymbols struct { + mu sync.RWMutex + children map[protoreflect.FullName]*packageSymbols + files map[protoreflect.FileDescriptor]struct{} + symbols map[protoreflect.FullName]symbolEntry + exts map[extNumber]ast.SourceSpan +} + +type extNumber struct { + extendee protoreflect.FullName + tag protoreflect.FieldNumber +} + +type symbolEntry struct { + span ast.SourceSpan + isEnumValue bool + isPackage bool +} + +type extDecl struct { + span ast.SourceSpan + extendee protoreflect.FullName + tag protoreflect.FieldNumber +} + +// Import populates the symbol table with all symbols/elements and extension +// tags present in the given file descriptor. If s is nil or if fd has already +// been imported into s, this returns immediately without doing anything. 
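A minimal usage sketch for Import (not part of the vendored file; assumes reporter.NewHandler and the generated descriptorpb.File_google_protobuf_descriptor_proto symbol behave as in current releases): seeding a Symbols table with an already-built descriptor so that later link operations see its symbols and extension tags.

package main

import (
	"fmt"

	"github.com/bufbuild/protocompile/linker"
	"github.com/bufbuild/protocompile/reporter"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	var syms linker.Symbols             // zero value is ready to use
	handler := reporter.NewHandler(nil) // nil: default reporter, fails on first error
	fd := descriptorpb.File_google_protobuf_descriptor_proto

	// Import registers every symbol and extension tag from descriptor.proto
	// (and, recursively, from its imports) into the table.
	if err := syms.Import(fd, handler); err != nil {
		panic(err)
	}
	fmt.Println(syms.Lookup("google.protobuf.FileDescriptorProto") != nil) // true
}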
If any +// collisions in symbol names or extension tags are identified, an error will be +// returned and the symbol table will not be updated. +func (s *Symbols) Import(fd protoreflect.FileDescriptor, handler *reporter.Handler) error { + if s == nil { + return nil + } + + if f, ok := fd.(protoreflect.FileImport); ok { + // unwrap any import instance + fd = f.FileDescriptor + } + if f, ok := fd.(*file); ok { + // unwrap any file instance + fd = f.FileDescriptor + } + + var pkgSpan ast.SourceSpan + if res, ok := fd.(*result); ok { + pkgSpan = packageNameSpan(res) + } else { + pkgSpan = sourceSpanForPackage(fd) + } + pkg, err := s.importPackages(pkgSpan, fd.Package(), handler) + if err != nil || pkg == nil { + return err + } + + pkg.mu.RLock() + _, alreadyImported := pkg.files[fd] + pkg.mu.RUnlock() + + if alreadyImported { + return nil + } + + for i := 0; i < fd.Imports().Len(); i++ { + if err := s.Import(fd.Imports().Get(i).FileDescriptor, handler); err != nil { + return err + } + } + + if res, ok := fd.(*result); ok && res.hasSource() { + return s.importResultWithExtensions(pkg, res, handler) + } + + return s.importFileWithExtensions(pkg, fd, handler) +} + +func (s *Symbols) importFileWithExtensions(pkg *packageSymbols, fd protoreflect.FileDescriptor, handler *reporter.Handler) error { + imported, err := pkg.importFile(fd, handler) + if err != nil { + return err + } + if !imported { + // nothing else to do + return nil + } + + return walk.Descriptors(fd, func(d protoreflect.Descriptor) error { + fld, ok := d.(protoreflect.FieldDescriptor) + if !ok || !fld.IsExtension() { + return nil + } + span := sourceSpanForNumber(fld) + extendee := fld.ContainingMessage() + return s.AddExtension(packageFor(extendee), extendee.FullName(), fld.Number(), span, handler) + }) +} + +func (s *packageSymbols) importFile(fd protoreflect.FileDescriptor, handler *reporter.Handler) (bool, error) { + s.mu.Lock() + defer s.mu.Unlock() + + if _, ok := s.files[fd]; ok { + // have to double-check if it's already imported, in case + // it was added after above read-locked check + return false, nil + } + + // first pass: check for conflicts + if err := s.checkFileLocked(fd, handler); err != nil { + return false, err + } + if err := handler.Error(); err != nil { + return false, err + } + + // second pass: commit all symbols + s.commitFileLocked(fd) + + return true, nil +} + +func (s *Symbols) importPackages(pkgSpan ast.SourceSpan, pkg protoreflect.FullName, handler *reporter.Handler) (*packageSymbols, error) { + if pkg == "" { + return &s.pkgTrie, nil + } + + cur := &s.pkgTrie + enumerator := nameEnumerator{name: pkg} + for { + p, ok := enumerator.next() + if !ok { + return cur, nil + } + var err error + cur, err = cur.importPackage(pkgSpan, p, handler) + if err != nil { + return nil, err + } + if cur == nil { + return nil, nil + } + } +} + +func (s *packageSymbols) importPackage(pkgSpan ast.SourceSpan, pkg protoreflect.FullName, handler *reporter.Handler) (*packageSymbols, error) { + s.mu.RLock() + existing, ok := s.symbols[pkg] + var child *packageSymbols + if ok && existing.isPackage { + child = s.children[pkg] + } + s.mu.RUnlock() + + if ok && existing.isPackage { + // package already exists + return child, nil + } else if ok { + return nil, reportSymbolCollision(pkgSpan, pkg, false, existing, handler) + } + + s.mu.Lock() + defer s.mu.Unlock() + // have to double-check in case it was added while upgrading to write lock + existing, ok = s.symbols[pkg] + if ok && existing.isPackage { + // package already exists + 
return s.children[pkg], nil + } else if ok { + return nil, reportSymbolCollision(pkgSpan, pkg, false, existing, handler) + } + if s.symbols == nil { + s.symbols = map[protoreflect.FullName]symbolEntry{} + } + s.symbols[pkg] = symbolEntry{span: pkgSpan, isPackage: true} + child = &packageSymbols{} + if s.children == nil { + s.children = map[protoreflect.FullName]*packageSymbols{} + } + s.children[pkg] = child + return child, nil +} + +func (s *Symbols) getPackage(pkg protoreflect.FullName, exact bool) *packageSymbols { + if pkg == "" { + return &s.pkgTrie + } + cur := &s.pkgTrie + enumerator := nameEnumerator{name: pkg} + for { + p, ok := enumerator.next() + if !ok { + return cur + } + cur.mu.RLock() + next := cur.children[p] + cur.mu.RUnlock() + + if next == nil { + if exact { + return nil + } + return cur + } + cur = next + } +} + +func reportSymbolCollision(span ast.SourceSpan, fqn protoreflect.FullName, additionIsEnumVal bool, existing symbolEntry, handler *reporter.Handler) error { + // because of weird scoping for enum values, provide more context in error message + // if this conflict is with an enum value + var isPkg, suffix string + if additionIsEnumVal || existing.isEnumValue { + suffix = "; protobuf uses C++ scoping rules for enum values, so they exist in the scope enclosing the enum" + } + if existing.isPackage { + isPkg = " as a package" + } + orig := existing.span + conflict := span + if posLess(conflict.Start(), orig.Start()) { + orig, conflict = conflict, orig + } + return handler.HandleErrorf(conflict, "symbol %q already defined%s at %v%s", fqn, isPkg, orig.Start(), suffix) +} + +func posLess(a, b ast.SourcePos) bool { + if a.Filename == b.Filename { + if a.Line == b.Line { + return a.Col < b.Col + } + return a.Line < b.Line + } + return false +} + +func (s *packageSymbols) checkFileLocked(f protoreflect.FileDescriptor, handler *reporter.Handler) error { + return walk.Descriptors(f, func(d protoreflect.Descriptor) error { + span := sourceSpanFor(d) + if existing, ok := s.symbols[d.FullName()]; ok { + _, isEnumVal := d.(protoreflect.EnumValueDescriptor) + if err := reportSymbolCollision(span, d.FullName(), isEnumVal, existing, handler); err != nil { + return err + } + } + return nil + }) +} + +func sourceSpanForPackage(fd protoreflect.FileDescriptor) ast.SourceSpan { + loc := fd.SourceLocations().ByPath([]int32{internal.FilePackageTag}) + if internal.IsZeroLocation(loc) { + return ast.UnknownSpan(fd.Path()) + } + return ast.NewSourceSpan( + ast.SourcePos{ + Filename: fd.Path(), + Line: loc.StartLine, + Col: loc.StartColumn, + }, + ast.SourcePos{ + Filename: fd.Path(), + Line: loc.EndLine, + Col: loc.EndColumn, + }, + ) +} + +func sourceSpanFor(d protoreflect.Descriptor) ast.SourceSpan { + file := d.ParentFile() + if file == nil { + return ast.UnknownSpan(unknownFilePath) + } + if result, ok := file.(*result); ok { + return nameSpan(result.FileNode(), result.Node(protoutil.ProtoFromDescriptor(d))) + } + path, ok := internal.ComputePath(d) + if !ok { + return ast.UnknownSpan(file.Path()) + } + namePath := path + switch d.(type) { + case protoreflect.FieldDescriptor: + namePath = append(namePath, internal.FieldNameTag) + case protoreflect.MessageDescriptor: + namePath = append(namePath, internal.MessageNameTag) + case protoreflect.OneofDescriptor: + namePath = append(namePath, internal.OneofNameTag) + case protoreflect.EnumDescriptor: + namePath = append(namePath, internal.EnumNameTag) + case protoreflect.EnumValueDescriptor: + namePath = append(namePath, 
internal.EnumValNameTag) + case protoreflect.ServiceDescriptor: + namePath = append(namePath, internal.ServiceNameTag) + case protoreflect.MethodDescriptor: + namePath = append(namePath, internal.MethodNameTag) + default: + // NB: shouldn't really happen, but just in case fall back to path to + // descriptor, sans name field + } + loc := file.SourceLocations().ByPath(namePath) + if internal.IsZeroLocation(loc) { + loc = file.SourceLocations().ByPath(path) + if internal.IsZeroLocation(loc) { + return ast.UnknownSpan(file.Path()) + } + } + + return ast.NewSourceSpan( + ast.SourcePos{ + Filename: file.Path(), + Line: loc.StartLine, + Col: loc.StartColumn, + }, + ast.SourcePos{ + Filename: file.Path(), + Line: loc.EndLine, + Col: loc.EndColumn, + }, + ) +} + +func sourceSpanForNumber(fd protoreflect.FieldDescriptor) ast.SourceSpan { + file := fd.ParentFile() + if file == nil { + return ast.UnknownSpan(unknownFilePath) + } + path, ok := internal.ComputePath(fd) + if !ok { + return ast.UnknownSpan(file.Path()) + } + numberPath := path + numberPath = append(numberPath, internal.FieldNumberTag) + loc := file.SourceLocations().ByPath(numberPath) + if internal.IsZeroLocation(loc) { + loc = file.SourceLocations().ByPath(path) + if internal.IsZeroLocation(loc) { + return ast.UnknownSpan(file.Path()) + } + } + return ast.NewSourceSpan( + ast.SourcePos{ + Filename: file.Path(), + Line: loc.StartLine, + Col: loc.StartColumn, + }, + ast.SourcePos{ + Filename: file.Path(), + Line: loc.EndLine, + Col: loc.EndColumn, + }, + ) +} + +func (s *packageSymbols) commitFileLocked(f protoreflect.FileDescriptor) { + if s.symbols == nil { + s.symbols = map[protoreflect.FullName]symbolEntry{} + } + if s.exts == nil { + s.exts = map[extNumber]ast.SourceSpan{} + } + _ = walk.Descriptors(f, func(d protoreflect.Descriptor) error { + span := sourceSpanFor(d) + name := d.FullName() + _, isEnumValue := d.(protoreflect.EnumValueDescriptor) + s.symbols[name] = symbolEntry{span: span, isEnumValue: isEnumValue} + return nil + }) + + if s.files == nil { + s.files = map[protoreflect.FileDescriptor]struct{}{} + } + s.files[f] = struct{}{} +} + +func (s *Symbols) importResultWithExtensions(pkg *packageSymbols, r *result, handler *reporter.Handler) error { + imported, err := pkg.importResult(r, handler) + if err != nil { + return err + } + if !imported { + // nothing else to do + return nil + } + + return walk.Descriptors(r, func(d protoreflect.Descriptor) error { + fd, ok := d.(*extTypeDescriptor) + if !ok { + return nil + } + file := r.FileNode() + node := r.FieldNode(fd.FieldDescriptorProto()) + info := file.NodeInfo(node.FieldTag()) + extendee := fd.ContainingMessage() + return s.AddExtension(packageFor(extendee), extendee.FullName(), fd.Number(), info, handler) + }) +} + +func (s *Symbols) importResult(r *result, handler *reporter.Handler) error { + pkg, err := s.importPackages(packageNameSpan(r), r.Package(), handler) + if err != nil || pkg == nil { + return err + } + _, err = pkg.importResult(r, handler) + return err +} + +func (s *packageSymbols) importResult(r *result, handler *reporter.Handler) (bool, error) { + s.mu.Lock() + defer s.mu.Unlock() + + if _, ok := s.files[r]; ok { + // already imported + return false, nil + } + + // first pass: check for conflicts + if err := s.checkResultLocked(r, handler); err != nil { + return false, err + } + if err := handler.Error(); err != nil { + return false, err + } + + // second pass: commit all symbols + s.commitFileLocked(r) + + return true, nil +} + +func (s *packageSymbols) 
checkResultLocked(r *result, handler *reporter.Handler) error { + resultSyms := map[protoreflect.FullName]symbolEntry{} + return walk.Descriptors(r, func(d protoreflect.Descriptor) error { + _, isEnumVal := d.(protoreflect.EnumValueDescriptor) + file := r.FileNode() + name := d.FullName() + node := r.Node(protoutil.ProtoFromDescriptor(d)) + span := nameSpan(file, node) + // check symbols already in this symbol table + if existing, ok := s.symbols[name]; ok { + if err := reportSymbolCollision(span, name, isEnumVal, existing, handler); err != nil { + return err + } + } + + // also check symbols from this result (that are not yet in symbol table) + if existing, ok := resultSyms[name]; ok { + if err := reportSymbolCollision(span, name, isEnumVal, existing, handler); err != nil { + return err + } + } + resultSyms[name] = symbolEntry{ + span: span, + isEnumValue: isEnumVal, + } + + return nil + }) +} + +func packageNameSpan(r *result) ast.SourceSpan { + if node, ok := r.FileNode().(*ast.FileNode); ok { + for _, decl := range node.Decls { + if pkgNode, ok := decl.(*ast.PackageNode); ok { + return r.FileNode().NodeInfo(pkgNode.Name) + } + } + } + return ast.UnknownSpan(r.Path()) +} + +func nameSpan(file ast.FileDeclNode, n ast.Node) ast.SourceSpan { + // TODO: maybe ast package needs a NamedNode interface to simplify this? + switch n := n.(type) { + case ast.FieldDeclNode: + return file.NodeInfo(n.FieldName()) + case ast.MessageDeclNode: + return file.NodeInfo(n.MessageName()) + case ast.OneofDeclNode: + return file.NodeInfo(n.OneofName()) + case ast.EnumValueDeclNode: + return file.NodeInfo(n.GetName()) + case *ast.EnumNode: + return file.NodeInfo(n.Name) + case *ast.ServiceNode: + return file.NodeInfo(n.Name) + case ast.RPCDeclNode: + return file.NodeInfo(n.GetName()) + default: + return file.NodeInfo(n) + } +} + +// AddExtension records the given extension, which is used to ensure that no two files +// attempt to extend the same message using the same tag. The given pkg should be the +// package that defines extendee. +func (s *Symbols) AddExtension(pkg, extendee protoreflect.FullName, tag protoreflect.FieldNumber, span ast.SourceSpan, handler *reporter.Handler) error { + if pkg != "" { + if !strings.HasPrefix(string(extendee), string(pkg)+".") { + return handler.HandleErrorf(span, "could not register extension: extendee %q does not match package %q", extendee, pkg) + } + } + pkgSyms := s.getPackage(pkg, true) + if pkgSyms == nil { + // should never happen + return handler.HandleErrorf(span, "could not register extension: missing package symbols for %q", pkg) + } + return pkgSyms.addExtension(extendee, tag, span, handler) +} + +func (s *packageSymbols) addExtension(extendee protoreflect.FullName, tag protoreflect.FieldNumber, span ast.SourceSpan, handler *reporter.Handler) error { + s.mu.Lock() + defer s.mu.Unlock() + + extNum := extNumber{extendee: extendee, tag: tag} + if existing, ok := s.exts[extNum]; ok { + return handler.HandleErrorf(span, "extension with tag %d for message %s already defined at %v", tag, extendee, existing.Start()) + } + + if s.exts == nil { + s.exts = map[extNumber]ast.SourceSpan{} + } + s.exts[extNum] = span + return nil +} + +// AddExtensionDeclaration records the given extension declaration, which is used to +// ensure that no two declarations refer to the same extension. 
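The duplicate-tag check in miniature (an illustrative sketch with invented names, not part of the vendored file): a composite map key of (extendee, tag), like the extNumber key above, is enough to detect two declarations claiming the same extension number of the same message.

package main

import "fmt"

// extKey mirrors the (extendee, tag) pair used to detect two files
// extending the same message with the same field number.
type extKey struct {
	extendee string
	tag      int32
}

func main() {
	seen := map[extKey]string{} // value: where the tag was first declared
	declare := func(extendee string, tag int32, at string) error {
		k := extKey{extendee, tag}
		if prev, ok := seen[k]; ok {
			return fmt.Errorf("extension with tag %d for message %s already defined at %s", tag, extendee, prev)
		}
		seen[k] = at
		return nil
	}
	fmt.Println(declare("demo.Options", 1001, "a.proto:10:5")) // <nil>
	fmt.Println(declare("demo.Options", 1001, "b.proto:7:5"))  // collision error
}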
+func (s *Symbols) AddExtensionDeclaration(extension, extendee protoreflect.FullName, tag protoreflect.FieldNumber, span ast.SourceSpan, handler *reporter.Handler) error { + s.extDeclsMu.Lock() + defer s.extDeclsMu.Unlock() + existing, ok := s.extDecls[extension] + if ok { + if existing.extendee == extendee && existing.tag == tag { + // This is a declaration that has already been added. Ignore. + return nil + } + return handler.HandleErrorf(span, "extension %s already declared as extending %s with tag %d at %v", extension, existing.extendee, existing.tag, existing.span.Start()) + } + if s.extDecls == nil { + s.extDecls = map[protoreflect.FullName]extDecl{} + } + s.extDecls[extension] = extDecl{ + span: span, + extendee: extendee, + tag: tag, + } + return nil +} + +// Lookup finds the registered location of the given name. If the given name has +// not been seen/registered, nil is returned. +func (s *Symbols) Lookup(name protoreflect.FullName) ast.SourceSpan { + // note: getPackage never returns nil when exact=false + pkgSyms := s.getPackage(name, false) + if entry, ok := pkgSyms.symbols[name]; ok { + return entry.span + } + return nil +} + +// LookupExtension finds the registered location of the given extension. If the given +// extension has not been seen/registered, nil is returned. +func (s *Symbols) LookupExtension(messageName protoreflect.FullName, extensionNumber protoreflect.FieldNumber) ast.SourceSpan { + // note: getPackage never returns nil when exact=false + pkgSyms := s.getPackage(messageName, false) + return pkgSyms.exts[extNumber{messageName, extensionNumber}] +} + +type nameEnumerator struct { + name protoreflect.FullName + start int +} + +func (e *nameEnumerator) next() (protoreflect.FullName, bool) { + if e.start < 0 { + return "", false + } + pos := strings.IndexByte(string(e.name[e.start:]), '.') + if pos == -1 { + e.start = -1 + return e.name, true + } + pos += e.start + e.start = pos + 1 + return e.name[:pos], true +} diff --git a/vendor/github.com/bufbuild/protocompile/linker/validate.go b/vendor/github.com/bufbuild/protocompile/linker/validate.go new file mode 100644 index 00000000..6633a9f3 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/linker/validate.go @@ -0,0 +1,1153 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package linker + +import ( + "fmt" + "math" + "strings" + "unicode" + "unicode/utf8" + + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/types/descriptorpb" + + "github.com/bufbuild/protocompile/ast" + "github.com/bufbuild/protocompile/internal" + "github.com/bufbuild/protocompile/protoutil" + "github.com/bufbuild/protocompile/reporter" + "github.com/bufbuild/protocompile/walk" +) + +// ValidateOptions runs some validation checks on the result that can only +// be done after options are interpreted. 
+func (r *result) ValidateOptions(handler *reporter.Handler, symbols *Symbols) error { + if err := r.validateFile(handler); err != nil { + return err + } + return walk.Descriptors(r, func(d protoreflect.Descriptor) error { + switch d := d.(type) { + case protoreflect.FieldDescriptor: + if err := r.validateField(d, handler); err != nil { + return err + } + case protoreflect.MessageDescriptor: + if symbols == nil { + symbols = &Symbols{} + } + if err := r.validateMessage(d, handler, symbols); err != nil { + return err + } + case protoreflect.EnumDescriptor: + if err := r.validateEnum(d, handler); err != nil { + return err + } + } + return nil + }) +} + +func (r *result) validateFile(handler *reporter.Handler) error { + opts := r.FileDescriptorProto().GetOptions() + if opts.GetOptimizeFor() != descriptorpb.FileOptions_LITE_RUNTIME { + // Non-lite files may not import lite files. + imports := r.Imports() + for i, length := 0, imports.Len(); i < length; i++ { + dep := imports.Get(i) + depOpts, ok := dep.Options().(*descriptorpb.FileOptions) + if !ok { + continue // what else to do? + } + if depOpts.GetOptimizeFor() == descriptorpb.FileOptions_LITE_RUNTIME { + err := handler.HandleErrorf(r.getImportLocation(dep.Path()), "a file that does not use optimize_for=LITE_RUNTIME may not import file %q that does", dep.Path()) + if err != nil { + return err + } + } + } + } + if isEditions(r) { + // Validate features + if opts.GetFeatures().GetFieldPresence() == descriptorpb.FeatureSet_LEGACY_REQUIRED { + span := r.findOptionSpan(r, internal.FileOptionsFeaturesTag, internal.FeatureSetFieldPresenceTag) + err := handler.HandleErrorf(span, "LEGACY_REQUIRED field presence cannot be set as the default for a file") + if err != nil { + return err + } + } + if opts != nil && opts.JavaStringCheckUtf8 != nil { + span := r.findOptionSpan(r, internal.FileOptionsJavaStringCheckUTF8Tag) + err := handler.HandleErrorf(span, `file option java_string_check_utf8 is not allowed with editions; import "google/protobuf/java_features.proto" and use (pb.java).utf8_validation instead`) + if err != nil { + return err + } + } + } + return nil +} + +func (r *result) validateField(fld protoreflect.FieldDescriptor, handler *reporter.Handler) error { + if xtd, ok := fld.(protoreflect.ExtensionTypeDescriptor); ok { + fld = xtd.Descriptor() + } + fd, ok := fld.(*fldDescriptor) + if !ok { + // should not be possible + return fmt.Errorf("field descriptor is wrong type: expecting %T, got %T", (*fldDescriptor)(nil), fld) + } + + if err := r.validatePacked(fd, handler); err != nil { + return err + } + if fd.Kind() == protoreflect.EnumKind { + requiresOpen := !fd.IsList() && !fd.HasPresence() + if requiresOpen && fd.Enum().IsClosed() { + // Fields in a proto3 message cannot refer to proto2 enums. + // In editions, this translates to implicit presence fields + // not being able to refer to closed enums. + // TODO: This really should be based solely on whether the enum's first + // value is zero, NOT based on if it's open vs closed. 
+ // https://github.com/protocolbuffers/protobuf/issues/16249 + file := r.FileNode() + info := file.NodeInfo(r.FieldNode(fd.proto).FieldType()) + if err := handler.HandleErrorf(info, "cannot use closed enum %s in a field with implicit presence", fd.Enum().FullName()); err != nil { + return err + } + } + } + if fd.HasDefault() && !fd.HasPresence() { + span := r.findScalarOptionSpan(r.FieldNode(fd.proto), "default") + err := handler.HandleErrorf(span, "default value is not allowed on fields with implicit presence") + if err != nil { + return err + } + } + if fd.proto.Options != nil && fd.proto.Options.Ctype != nil { + if descriptorpb.Edition(r.Edition()) >= descriptorpb.Edition_EDITION_2024 { + // We don't support edition 2024 yet, but we went ahead and mimic'ed this check + // from protoc, which currently has experimental support for 2024. + span := r.findOptionSpan(fd, internal.FieldOptionsCTypeTag) + if err := handler.HandleErrorf(span, "ctype option cannot be used as of edition 2024; use features.string_type instead"); err != nil { + return err + } + } else if descriptorpb.Edition(r.Edition()) == descriptorpb.Edition_EDITION_2023 { + if fld.Kind() != protoreflect.StringKind && fld.Kind() != protoreflect.BytesKind { + span := r.findOptionSpan(fd, internal.FieldOptionsCTypeTag) + if err := handler.HandleErrorf(span, "ctype option can only be used on string and bytes fields"); err != nil { + return err + } + } + if fd.proto.Options.GetCtype() == descriptorpb.FieldOptions_CORD && fd.IsExtension() { + span := r.findOptionSpan(fd, internal.FieldOptionsCTypeTag) + if err := handler.HandleErrorf(span, "ctype option cannot be CORD for extension fields"); err != nil { + return err + } + } + } + } + if (fd.proto.Options.GetLazy() || fd.proto.Options.GetUnverifiedLazy()) && fd.Kind() != protoreflect.MessageKind { + var span ast.SourceSpan + var optionName string + if fd.proto.Options.GetLazy() { + span = r.findOptionSpan(fd, internal.FieldOptionsLazyTag) + optionName = "lazy" + } else { + span = r.findOptionSpan(fd, internal.FieldOptionsUnverifiedLazyTag) + optionName = "unverified_lazy" + } + var suffix string + if fd.Kind() == protoreflect.GroupKind { + if isEditions(r) { + suffix = " that use length-prefixed encoding" + } else { + suffix = ", not groups" + } + } + if err := handler.HandleErrorf(span, "%s option can only be used with message fields%s", optionName, suffix); err != nil { + return err + } + } + if fd.proto.Options.GetJstype() != descriptorpb.FieldOptions_JS_NORMAL { + switch fd.Kind() { + case protoreflect.Int64Kind, protoreflect.Uint64Kind, protoreflect.Sint64Kind, + protoreflect.Fixed64Kind, protoreflect.Sfixed64Kind: + // allowed only for 64-bit integer types + default: + span := r.findOptionSpan(fd, internal.FieldOptionsJSTypeTag) + err := handler.HandleErrorf(span, "only 64-bit integer fields (int64, uint64, sint64, fixed64, and sfixed64) can specify a jstype other than JS_NORMAL") + if err != nil { + return err + } + } + } + if isEditions(r) { + if err := r.validateFieldFeatures(fd, handler); err != nil { + return err + } + } + + if fld.IsExtension() { + // More checks if this is an extension field. + if err := r.validateExtension(fd, handler); err != nil { + return err + } + } + + return nil +} + +func (r *result) validateExtension(fd *fldDescriptor, handler *reporter.Handler) error { + // NB: It's a little gross that we don't enforce these in validateBasic(). + // But it requires linking to resolve the extendee, so we can interrogate + // its descriptor. 
+ msg := fd.ContainingMessage() + if msg.Options().(*descriptorpb.MessageOptions).GetMessageSetWireFormat() { + // Message set wire format requires that all extensions be messages + // themselves (no scalar extensions) + if fd.Kind() != protoreflect.MessageKind { + file := r.FileNode() + info := file.NodeInfo(r.FieldNode(fd.proto).FieldType()) + err := handler.HandleErrorf(info, "messages with message-set wire format cannot contain scalar extensions, only messages") + if err != nil { + return err + } + } + if fd.Cardinality() == protoreflect.Repeated { + file := r.FileNode() + info := file.NodeInfo(r.FieldNode(fd.proto).FieldLabel()) + err := handler.HandleErrorf(info, "messages with message-set wire format cannot contain repeated extensions, only optional") + if err != nil { + return err + } + } + } else if fd.Number() > internal.MaxNormalTag { + // In validateBasic() we just made sure these were within bounds for any message. But + // now that things are linked, we can check if the extendee is messageset wire format + // and, if not, enforce tighter limit. + file := r.FileNode() + info := file.NodeInfo(r.FieldNode(fd.proto).FieldTag()) + err := handler.HandleErrorf(info, "tag number %d is higher than max allowed tag number (%d)", fd.Number(), internal.MaxNormalTag) + if err != nil { + return err + } + } + + fileOpts := r.FileDescriptorProto().GetOptions() + if fileOpts.GetOptimizeFor() == descriptorpb.FileOptions_LITE_RUNTIME { + extendeeFileOpts, _ := msg.ParentFile().Options().(*descriptorpb.FileOptions) + if extendeeFileOpts.GetOptimizeFor() != descriptorpb.FileOptions_LITE_RUNTIME { + file := r.FileNode() + info := file.NodeInfo(r.FieldNode(fd.proto)) + err := handler.HandleErrorf(info, "extensions in a file that uses optimize_for=LITE_RUNTIME may not extend messages in file %q which does not", msg.ParentFile().Path()) + if err != nil { + return err + } + } + } + + // If the extendee uses extension declarations, make sure this extension matches. 
+ md := protoutil.ProtoFromMessageDescriptor(msg) + for i, extRange := range md.ExtensionRange { + if int32(fd.Number()) < extRange.GetStart() || int32(fd.Number()) >= extRange.GetEnd() { + continue + } + extRangeOpts := extRange.GetOptions() + if extRangeOpts == nil { + break + } + if len(extRangeOpts.Declaration) == 0 && extRangeOpts.GetVerification() != descriptorpb.ExtensionRangeOptions_DECLARATION { + break + } + var found bool + for j, extDecl := range extRangeOpts.Declaration { + if extDecl.GetNumber() != int32(fd.Number()) { + continue + } + found = true + if extDecl.GetReserved() { + file := r.FileNode() + info := file.NodeInfo(r.FieldNode(fd.proto).FieldTag()) + span, _ := findExtensionRangeOptionSpan(msg.ParentFile(), msg, i, extRange, + internal.ExtensionRangeOptionsDeclarationTag, int32(j), internal.ExtensionRangeOptionsDeclarationReservedTag) + err := handler.HandleErrorf(info, "cannot use field number %d for an extension because it is reserved in declaration at %v", + fd.Number(), span.Start()) + if err != nil { + return err + } + break + } + if extDecl.GetFullName() != "."+string(fd.FullName()) { + file := r.FileNode() + info := file.NodeInfo(r.FieldNode(fd.proto).FieldName()) + span, _ := findExtensionRangeOptionSpan(msg.ParentFile(), msg, i, extRange, + internal.ExtensionRangeOptionsDeclarationTag, int32(j), internal.ExtensionRangeOptionsDeclarationFullNameTag) + err := handler.HandleErrorf(info, "expected extension with number %d to be named %s, not %s, per declaration at %v", + fd.Number(), strings.TrimPrefix(extDecl.GetFullName(), "."), fd.FullName(), span.Start()) + if err != nil { + return err + } + } + if extDecl.GetType() != getTypeName(fd) { + file := r.FileNode() + info := file.NodeInfo(r.FieldNode(fd.proto).FieldType()) + span, _ := findExtensionRangeOptionSpan(msg.ParentFile(), msg, i, extRange, + internal.ExtensionRangeOptionsDeclarationTag, int32(j), internal.ExtensionRangeOptionsDeclarationTypeTag) + err := handler.HandleErrorf(info, "expected extension with number %d to have type %s, not %s, per declaration at %v", + fd.Number(), strings.TrimPrefix(extDecl.GetType(), "."), getTypeName(fd), span.Start()) + if err != nil { + return err + } + } + if extDecl.GetRepeated() != (fd.Cardinality() == protoreflect.Repeated) { + expected, actual := "repeated", "optional" + if !extDecl.GetRepeated() { + expected, actual = actual, expected + } + file := r.FileNode() + info := file.NodeInfo(r.FieldNode(fd.proto).FieldLabel()) + span, _ := findExtensionRangeOptionSpan(msg.ParentFile(), msg, i, extRange, + internal.ExtensionRangeOptionsDeclarationTag, int32(j), internal.ExtensionRangeOptionsDeclarationRepeatedTag) + err := handler.HandleErrorf(info, "expected extension with number %d to be %s, not %s, per declaration at %v", + fd.Number(), expected, actual, span.Start()) + if err != nil { + return err + } + } + break + } + if !found { + file := r.FileNode() + info := file.NodeInfo(r.FieldNode(fd.proto).FieldTag()) + span, _ := findExtensionRangeOptionSpan(fd.ParentFile(), msg, i, extRange, + internal.ExtensionRangeOptionsVerificationTag) + err := handler.HandleErrorf(info, "expected extension with number %d to be declared in type %s, but no declaration found at %v", + fd.Number(), fd.ContainingMessage().FullName(), span.Start()) + if err != nil { + return err + } + } + } + + return nil +} + +func (r *result) validatePacked(fd *fldDescriptor, handler *reporter.Handler) error { + if fd.proto.Options != nil && fd.proto.Options.Packed != nil && isEditions(r) { + span := 
r.findOptionSpan(fd, internal.FieldOptionsPackedTag) + err := handler.HandleErrorf(span, "packed option cannot be used with editions; use features.repeated_field_encoding=PACKED instead") + if err != nil { + return err + } + } + if !fd.proto.GetOptions().GetPacked() { + // if packed isn't true, nothing to validate + return nil + } + if fd.proto.GetLabel() != descriptorpb.FieldDescriptorProto_LABEL_REPEATED { + file := r.FileNode() + info := file.NodeInfo(r.FieldNode(fd.proto).FieldLabel()) + err := handler.HandleErrorf(info, "packed option is only allowed on repeated fields") + if err != nil { + return err + } + } + switch fd.proto.GetType() { + case descriptorpb.FieldDescriptorProto_TYPE_STRING, descriptorpb.FieldDescriptorProto_TYPE_BYTES, + descriptorpb.FieldDescriptorProto_TYPE_MESSAGE, descriptorpb.FieldDescriptorProto_TYPE_GROUP: + file := r.FileNode() + info := file.NodeInfo(r.FieldNode(fd.proto).FieldType()) + err := handler.HandleErrorf(info, "packed option is only allowed on numeric, boolean, and enum fields") + if err != nil { + return err + } + } + return nil +} + +func (r *result) validateFieldFeatures(fld *fldDescriptor, handler *reporter.Handler) error { + if msg, ok := fld.Parent().(*msgDescriptor); ok && msg.proto.GetOptions().GetMapEntry() { + // Skip validating features on fields of synthetic map entry messages. + // We blindly propagate them from the map field's features, but some may + // really only apply to the map field and not to a key or value entry field. + return nil + } + features := fld.proto.GetOptions().GetFeatures() + if features == nil { + // No features to validate. + return nil + } + if features.FieldPresence != nil { + switch { + case fld.proto.OneofIndex != nil: + span := r.findOptionSpan(fld, internal.FieldOptionsFeaturesTag, internal.FeatureSetFieldPresenceTag) + if err := handler.HandleErrorf(span, "oneof fields may not specify field presence"); err != nil { + return err + } + case fld.Cardinality() == protoreflect.Repeated: + span := r.findOptionSpan(fld, internal.FieldOptionsFeaturesTag, internal.FeatureSetFieldPresenceTag) + if err := handler.HandleErrorf(span, "repeated fields may not specify field presence"); err != nil { + return err + } + case fld.IsExtension(): + span := r.findOptionSpan(fld, internal.FieldOptionsFeaturesTag, internal.FeatureSetFieldPresenceTag) + if err := handler.HandleErrorf(span, "extension fields may not specify field presence"); err != nil { + return err + } + case fld.Message() != nil && features.GetFieldPresence() == descriptorpb.FeatureSet_IMPLICIT: + span := r.findOptionSpan(fld, internal.FieldOptionsFeaturesTag, internal.FeatureSetFieldPresenceTag) + if err := handler.HandleErrorf(span, "message fields may not specify implicit presence"); err != nil { + return err + } + } + } + if features.RepeatedFieldEncoding != nil { + if fld.Cardinality() != protoreflect.Repeated { + span := r.findOptionSpan(fld, internal.FieldOptionsFeaturesTag, internal.FeatureSetRepeatedFieldEncodingTag) + if err := handler.HandleErrorf(span, "only repeated fields may specify repeated field encoding"); err != nil { + return err + } + } else if !internal.CanPack(fld.Kind()) && features.GetRepeatedFieldEncoding() == descriptorpb.FeatureSet_PACKED { + span := r.findOptionSpan(fld, internal.FieldOptionsFeaturesTag, internal.FeatureSetRepeatedFieldEncodingTag) + if err := handler.HandleErrorf(span, "only repeated primitive fields may specify packed encoding"); err != nil { + return err + } + } + } + if features.Utf8Validation != nil { + isMap := 
fld.IsMap() + if (!isMap && fld.Kind() != protoreflect.StringKind) || + (isMap && + fld.MapKey().Kind() != protoreflect.StringKind && + fld.MapValue().Kind() != protoreflect.StringKind) { + span := r.findOptionSpan(fld, internal.FieldOptionsFeaturesTag, internal.FeatureSetUTF8ValidationTag) + if err := handler.HandleErrorf(span, "only string fields may specify UTF8 validation"); err != nil { + return err + } + } + } + if features.MessageEncoding != nil { + if fld.Message() == nil || fld.IsMap() { + span := r.findOptionSpan(fld, internal.FieldOptionsFeaturesTag, internal.FeatureSetMessageEncodingTag) + if err := handler.HandleErrorf(span, "only message fields may specify message encoding"); err != nil { + return err + } + } + } + return nil +} + +func (r *result) validateMessage(d protoreflect.MessageDescriptor, handler *reporter.Handler, symbols *Symbols) error { + md, ok := d.(*msgDescriptor) + if !ok { + // should not be possible + return fmt.Errorf("message descriptor is wrong type: expecting %T, got %T", (*msgDescriptor)(nil), d) + } + + if err := r.validateJSONNamesInMessage(md, handler); err != nil { + return err + } + + return r.validateExtensionDeclarations(md, handler, symbols) +} + +func (r *result) validateJSONNamesInMessage(md *msgDescriptor, handler *reporter.Handler) error { + if err := r.validateFieldJSONNames(md, false, handler); err != nil { + return err + } + if err := r.validateFieldJSONNames(md, true, handler); err != nil { + return err + } + return nil +} + +func (r *result) validateEnum(d protoreflect.EnumDescriptor, handler *reporter.Handler) error { + ed, ok := d.(*enumDescriptor) + if !ok { + // should not be possible + return fmt.Errorf("enum descriptor is wrong type: expecting %T, got %T", (*enumDescriptor)(nil), d) + } + + firstValue := ed.Values().Get(0) + if !ed.IsClosed() && firstValue.Number() != 0 { + // TODO: This check doesn't really belong here. Whether the + // first value is zero s/b orthogonal to whether the + // allowed values are open or closed. + // https://github.com/protocolbuffers/protobuf/issues/16249 + file := r.FileNode() + evd, ok := firstValue.(*enValDescriptor) + if !ok { + // should not be possible + return fmt.Errorf("enum value descriptor is wrong type: expecting %T, got %T", (*enValDescriptor)(nil), firstValue) + } + info := file.NodeInfo(r.EnumValueNode(evd.proto).GetNumber()) + if err := handler.HandleErrorf(info, "first value of open enum %s must have numeric value zero", ed.FullName()); err != nil { + return err + } + } + + if err := r.validateJSONNamesInEnum(ed, handler); err != nil { + return err + } + + return nil +} + +func (r *result) validateJSONNamesInEnum(ed *enumDescriptor, handler *reporter.Handler) error { + seen := map[string]*descriptorpb.EnumValueDescriptorProto{} + for _, evd := range ed.proto.GetValue() { + scope := "enum value " + ed.proto.GetName() + "." + evd.GetName() + + name := canonicalEnumValueName(evd.GetName(), ed.proto.GetName()) + if existing, ok := seen[name]; ok && evd.GetNumber() != existing.GetNumber() { + fldNode := r.EnumValueNode(evd) + existingNode := r.EnumValueNode(existing) + conflictErr := fmt.Errorf("%s: camel-case name (with optional enum name prefix removed) %q conflicts with camel-case name of enum value %s, defined at %v", + scope, name, existing.GetName(), r.FileNode().NodeInfo(existingNode).Start()) + + // Since proto2 did not originally have a JSON format, we report conflicts as just warnings. 
+ // With editions, not fully supporting JSON is allowed via feature: json_format == BEST_EFFORT + if !isJSONCompliant(ed) { + handler.HandleWarningWithPos(r.FileNode().NodeInfo(fldNode), conflictErr) + } else if err := handler.HandleErrorWithPos(r.FileNode().NodeInfo(fldNode), conflictErr); err != nil { + return err + } + } else { + seen[name] = evd + } + } + return nil +} + +func (r *result) validateFieldJSONNames(md *msgDescriptor, useCustom bool, handler *reporter.Handler) error { + type jsonName struct { + source *descriptorpb.FieldDescriptorProto + // true if orig is a custom JSON name (vs. the field's default JSON name) + custom bool + } + seen := map[string]jsonName{} + + for _, fd := range md.proto.GetField() { + scope := "field " + md.proto.GetName() + "." + fd.GetName() + defaultName := internal.JSONName(fd.GetName()) + name := defaultName + custom := false + if useCustom { + n := fd.GetJsonName() + if n != defaultName || r.hasCustomJSONName(fd) { + name = n + custom = true + } + } + if existing, ok := seen[name]; ok { + // When useCustom is true, we'll only report an issue when a conflict is + // due to a custom name. That way, we don't double report conflicts on + // non-custom names. + if !useCustom || custom || existing.custom { + fldNode := r.FieldNode(fd) + customStr, srcCustomStr := "custom", "custom" + if !custom { + customStr = "default" + } + if !existing.custom { + srcCustomStr = "default" + } + info := r.FileNode().NodeInfo(fldNode) + conflictErr := reporter.Errorf(info, "%s: %s JSON name %q conflicts with %s JSON name of field %s, defined at %v", + scope, customStr, name, srcCustomStr, existing.source.GetName(), r.FileNode().NodeInfo(r.FieldNode(existing.source)).Start()) + + // Since proto2 did not originally have default JSON names, we report conflicts + // between default names (neither is a custom name) as just warnings. + // With editions, not fully supporting JSON is allowed via feature: json_format == BEST_EFFORT + if !isJSONCompliant(md) && !custom && !existing.custom { + handler.HandleWarning(conflictErr) + } else if err := handler.HandleError(conflictErr); err != nil { + return err + } + } + } else { + seen[name] = jsonName{source: fd, custom: custom} + } + } + return nil +} + +func (r *result) validateExtensionDeclarations(md *msgDescriptor, handler *reporter.Handler, symbols *Symbols) error { + for i, extRange := range md.proto.ExtensionRange { + opts := extRange.GetOptions() + if len(opts.GetDeclaration()) == 0 { + // nothing to check + continue + } + // If any declarations are present, verification is assumed to be + // DECLARATION. It's an error for declarations to be present but the + // verification field explicitly set to something other than that. 
+ if opts.Verification != nil && opts.GetVerification() != descriptorpb.ExtensionRangeOptions_DECLARATION { + span, ok := findExtensionRangeOptionSpan(r, md, i, extRange, internal.ExtensionRangeOptionsVerificationTag) + if !ok { + span, _ = findExtensionRangeOptionSpan(r, md, i, extRange, internal.ExtensionRangeOptionsDeclarationTag, 0) + } + if err := handler.HandleErrorf(span, "extension range cannot have declarations and have verification of %s", opts.GetVerification()); err != nil { + return err + } + } + declsByTag := map[int32]ast.SourcePos{} + for i, extDecl := range extRange.GetOptions().GetDeclaration() { + if extDecl.Number == nil { + span, _ := findExtensionRangeOptionSpan(r, md, i, extRange, internal.ExtensionRangeOptionsDeclarationTag, int32(i)) + if err := handler.HandleErrorf(span, "extension declaration is missing required field number"); err != nil { + return err + } + } else { + extensionNumberSpan, _ := findExtensionRangeOptionSpan(r, md, i, extRange, + internal.ExtensionRangeOptionsDeclarationTag, int32(i), internal.ExtensionRangeOptionsDeclarationNumberTag) + if extDecl.GetNumber() < extRange.GetStart() || extDecl.GetNumber() >= extRange.GetEnd() { + // Number is out of range. + // See if one of the other ranges on the same extends statement includes the number, + // so we can provide a helpful message. + var suffix string + if extRange, ok := r.ExtensionsNode(extRange).(*ast.ExtensionRangeNode); ok { + for _, rng := range extRange.Ranges { + start, _ := rng.StartVal.AsInt64() + var end int64 + switch { + case rng.Max != nil: + end = math.MaxInt64 + case rng.EndVal != nil: + end, _ = rng.EndVal.AsInt64() + default: + end = start + } + if int64(extDecl.GetNumber()) >= start && int64(extDecl.GetNumber()) <= end { + // Found another range that matches + suffix = "; when using declarations, extends statements should indicate only a single span of field numbers" + break + } + } + } + err := handler.HandleErrorf(extensionNumberSpan, "extension declaration has number outside the range: %d not in [%d,%d]%s", + extDecl.GetNumber(), extRange.GetStart(), extRange.GetEnd()-1, suffix) + if err != nil { + return err + } + } else { + // Valid number; make sure it's not a duplicate + if existing, ok := declsByTag[extDecl.GetNumber()]; ok { + err := handler.HandleErrorf(extensionNumberSpan, "extension for tag number %d already declared at %v", + extDecl.GetNumber(), existing) + if err != nil { + return err + } + } else { + declsByTag[extDecl.GetNumber()] = extensionNumberSpan.Start() + } + } + } + + if extDecl.FullName == nil && !extDecl.GetReserved() { + span, _ := findExtensionRangeOptionSpan(r, md, i, extRange, internal.ExtensionRangeOptionsDeclarationTag, int32(i)) + if err := handler.HandleErrorf(span, "extension declaration that is not marked reserved must have a full_name"); err != nil { + return err + } + } else if extDecl.FullName != nil { + var extensionFullName protoreflect.FullName + extensionNameSpan, _ := findExtensionRangeOptionSpan(r, md, i, extRange, + internal.ExtensionRangeOptionsDeclarationTag, int32(i), internal.ExtensionRangeOptionsDeclarationFullNameTag) + if !strings.HasPrefix(extDecl.GetFullName(), ".") { + if err := handler.HandleErrorf(extensionNameSpan, "extension declaration full name %q should start with a leading dot (.)", extDecl.GetFullName()); err != nil { + return err + } + extensionFullName = protoreflect.FullName(extDecl.GetFullName()) + } else { + extensionFullName = protoreflect.FullName(extDecl.GetFullName()[1:]) + } + if 
!extensionFullName.IsValid() { + if err := handler.HandleErrorf(extensionNameSpan, "extension declaration full name %q is not a valid qualified name", extDecl.GetFullName()); err != nil { + return err + } + } + if err := symbols.AddExtensionDeclaration(extensionFullName, md.FullName(), protoreflect.FieldNumber(extDecl.GetNumber()), extensionNameSpan, handler); err != nil { + return err + } + } + + if extDecl.Type == nil && !extDecl.GetReserved() { + span, _ := findExtensionRangeOptionSpan(r, md, i, extRange, internal.ExtensionRangeOptionsDeclarationTag, int32(i)) + if err := handler.HandleErrorf(span, "extension declaration that is not marked reserved must have a type"); err != nil { + return err + } + } else if extDecl.Type != nil { + if strings.HasPrefix(extDecl.GetType(), ".") { + if !protoreflect.FullName(extDecl.GetType()[1:]).IsValid() { + span, _ := findExtensionRangeOptionSpan(r, md, i, extRange, + internal.ExtensionRangeOptionsDeclarationTag, int32(i), internal.ExtensionRangeOptionsDeclarationTypeTag) + if err := handler.HandleErrorf(span, "extension declaration type %q is not a valid qualified name", extDecl.GetType()); err != nil { + return err + } + } + } else if !isBuiltinTypeName(extDecl.GetType()) { + span, _ := findExtensionRangeOptionSpan(r, md, i, extRange, + internal.ExtensionRangeOptionsDeclarationTag, int32(i), internal.ExtensionRangeOptionsDeclarationTypeTag) + if err := handler.HandleErrorf(span, "extension declaration type %q must be a builtin type or start with a leading dot (.)", extDecl.GetType()); err != nil { + return err + } + } + } + + if extDecl.GetReserved() && (extDecl.FullName == nil) != (extDecl.Type == nil) { + var fieldTag int32 + if extDecl.FullName != nil { + fieldTag = internal.ExtensionRangeOptionsDeclarationFullNameTag + } else { + fieldTag = internal.ExtensionRangeOptionsDeclarationTypeTag + } + span, _ := findExtensionRangeOptionSpan(r, md, i, extRange, + internal.ExtensionRangeOptionsDeclarationTag, int32(i), fieldTag) + if err := handler.HandleErrorf(span, "extension declarations that are reserved should specify both full_name and type or neither"); err != nil { + return err + } + } + } + } + return nil +} + +func (r *result) hasCustomJSONName(fdProto *descriptorpb.FieldDescriptorProto) bool { + // if we have the AST, we can more precisely determine if there was a custom + // JSON named defined, even if it is explicitly configured to tbe the same + // as the default JSON name for the field. + opts := r.FieldNode(fdProto).GetOptions() + if opts == nil { + return false + } + for _, opt := range opts.Options { + if len(opt.Name.Parts) == 1 && + opt.Name.Parts[0].Name.AsIdentifier() == "json_name" && + !opt.Name.Parts[0].IsExtension() { + return true + } + } + return false +} + +func canonicalEnumValueName(enumValueName, enumName string) string { + return enumValCamelCase(removePrefix(enumValueName, enumName)) +} + +// removePrefix is used to remove the given prefix from the given str. It does not require +// an exact match and ignores case and underscores. If the all non-underscore characters +// would be removed from str, str is returned unchanged. If str does not have the given +// prefix (even with the very lenient matching, in regard to case and underscores), then +// str is returned unchanged. 
+// +// The algorithm is adapted from the protoc source: +// +// https://github.com/protocolbuffers/protobuf/blob/v21.3/src/google/protobuf/descriptor.cc#L922 +func removePrefix(str, prefix string) string { + j := 0 + for i, r := range str { + if r == '_' { + // skip underscores in the input + continue + } + + p, sz := utf8.DecodeRuneInString(prefix[j:]) + for p == '_' { + j += sz // consume/skip underscore + p, sz = utf8.DecodeRuneInString(prefix[j:]) + } + + if j == len(prefix) { + // matched entire prefix; return rest of str + // but skipping any leading underscores + result := strings.TrimLeft(str[i:], "_") + if len(result) == 0 { + // result can't be empty string + return str + } + return result + } + if unicode.ToLower(r) != unicode.ToLower(p) { + // does not match prefix + return str + } + j += sz // consume matched rune of prefix + } + return str +} + +// enumValCamelCase converts the given string to upper-camel-case. +// +// The algorithm is adapted from the protoc source: +// +// https://github.com/protocolbuffers/protobuf/blob/v21.3/src/google/protobuf/descriptor.cc#L887 +func enumValCamelCase(name string) string { + var js []rune + nextUpper := true + for _, r := range name { + if r == '_' { + nextUpper = true + continue + } + if nextUpper { + nextUpper = false + js = append(js, unicode.ToUpper(r)) + } else { + js = append(js, unicode.ToLower(r)) + } + } + return string(js) +} + +func isBuiltinTypeName(typeName string) bool { + switch typeName { + case "int32", "int64", "uint32", "uint64", "sint32", "sint64", + "fixed32", "fixed64", "sfixed32", "sfixed64", + "bool", "double", "float", "string", "bytes": + return true + default: + return false + } +} + +func getTypeName(fd protoreflect.FieldDescriptor) string { + switch fd.Kind() { + case protoreflect.MessageKind, protoreflect.GroupKind: + return "." + string(fd.Message().FullName()) + case protoreflect.EnumKind: + return "." + string(fd.Enum().FullName()) + default: + return fd.Kind().String() + } +} + +func findExtensionRangeOptionSpan( + file protoreflect.FileDescriptor, + extended protoreflect.MessageDescriptor, + extRangeIndex int, + extRange *descriptorpb.DescriptorProto_ExtensionRange, + path ...int32, +) (ast.SourceSpan, bool) { + // NB: Typically, we have an AST for a file and NOT source code info, because the + // compiler validates options before computing source code info. However, we might + // be validating an extension (whose source/AST we have), but whose extendee (and + // thus extension range options for declarations) could be in some other file, which + // could be provided to the compiler as an already-compiled descriptor. So this + // function can fallback to using source code info if an AST is not available. + + if r, ok := file.(Result); ok && r.AST() != nil { + // Find the location using the AST, which will generally be higher fidelity + // than what we might find in a file descriptor's source code info. + exts := r.ExtensionsNode(extRange) + return findOptionSpan(r.FileNode(), exts, extRange.Options.ProtoReflect().Descriptor(), path...) + } + + srcLocs := file.SourceLocations() + if srcLocs.Len() == 0 { + // no source code info, can't do any better than the filename. We + // return true as the boolean so the caller doesn't try again with + // an alternate path, since we won't be able to do any better. + return ast.UnknownSpan(file.Path()), true + } + msgPath, ok := internal.ComputePath(extended) + if !ok { + // Same as above: return true since no subsequent query can do better. 
+ return ast.UnknownSpan(file.Path()), true + } + + //nolint:gocritic // intentionally assigning to different slice variables + extRangePath := append(msgPath, internal.MessageExtensionRangesTag, int32(extRangeIndex)) + optsPath := append(extRangePath, internal.ExtensionRangeOptionsTag) //nolint:gocritic + fullPath := append(optsPath, path...) //nolint:gocritic + srcLoc := srcLocs.ByPath(fullPath) + if srcLoc.Path != nil { + // found it + return asSpan(file.Path(), srcLoc), true + } + + // Slow path to find closest match :/ + // We look for longest matching path that is at least len(extRangePath) + // long. If we find a path that is longer (meaning a path that points INSIDE + // the request element), accept the first such location. + var bestMatch protoreflect.SourceLocation + var bestMatchPathLen int + for i, length := 0, srcLocs.Len(); i < length; i++ { + srcLoc := srcLocs.Get(i) + if len(srcLoc.Path) >= len(extRangePath) && + isDescendantPath(fullPath, srcLoc.Path) && + len(srcLoc.Path) > bestMatchPathLen { + bestMatch = srcLoc + bestMatchPathLen = len(srcLoc.Path) + } else if isDescendantPath(srcLoc.Path, path) { + return asSpan(file.Path(), srcLoc), false + } + } + if bestMatchPathLen > 0 { + return asSpan(file.Path(), bestMatch), false + } + return ast.UnknownSpan(file.Path()), false +} + +func (r *result) findScalarOptionSpan( + root ast.NodeWithOptions, + name string, +) ast.SourceSpan { + match := ast.Node(root) + root.RangeOptions(func(n *ast.OptionNode) bool { + if len(n.Name.Parts) == 1 && !n.Name.Parts[0].IsExtension() && + string(n.Name.Parts[0].Name.AsIdentifier()) == name { + match = n + return false + } + return true + }) + return r.FileNode().NodeInfo(match) +} + +func (r *result) findOptionSpan( + d protoutil.DescriptorProtoWrapper, + path ...int32, +) ast.SourceSpan { + node := r.Node(d.AsProto()) + nodeWithOpts, ok := node.(ast.NodeWithOptions) + if !ok { + return r.FileNode().NodeInfo(node) + } + span, _ := findOptionSpan(r.FileNode(), nodeWithOpts, d.Options().ProtoReflect().Descriptor(), path...) + return span +} + +func findOptionSpan( + file ast.FileDeclNode, + root ast.NodeWithOptions, + md protoreflect.MessageDescriptor, + path ...int32, +) (ast.SourceSpan, bool) { + bestMatch := ast.Node(root) + var bestMatchLen int + var repeatedIndices []int + root.RangeOptions(func(n *ast.OptionNode) bool { + desc := md + limit := len(n.Name.Parts) + if limit > len(path) { + limit = len(path) + } + var nextIsIndex bool + for i := 0; i < limit; i++ { + if desc == nil || nextIsIndex { + // Can't match anymore. Try next option. + return true + } + wantField := desc.Fields().ByNumber(protoreflect.FieldNumber(path[i])) + if wantField == nil { + // Should not be possible... next option won't fare any better since + // it's a disagreement between given path and given descriptor so bail. + return false + } + if n.Name.Parts[i].Open != nil || + string(n.Name.Parts[i].Name.AsIdentifier()) != string(wantField.Name()) { + // This is an extension/custom option or indicates the wrong name. + // Try the next one. + return true + } + desc = wantField.Message() + nextIsIndex = wantField.Cardinality() == protoreflect.Repeated + } + // If we made it this far, we've matched everything so far. + if len(n.Name.Parts) >= len(path) { + // Either an exact match (if equal) or this option points *inside* the + // item we care about (if greater). Either way, the first such result + // is a keeper. 
+ bestMatch = n.Name.Parts[len(path)-1] + bestMatchLen = len(n.Name.Parts) + return false + } + // We've got more path elements to try to match with the value. + match, matchLen := findMatchingValueNode( + desc, + path[len(n.Name.Parts):], + nextIsIndex, + 0, + &repeatedIndices, + n, + n.Val) + if match != nil { + totalMatchLen := matchLen + len(n.Name.Parts) + if totalMatchLen > bestMatchLen { + bestMatch, bestMatchLen = match, totalMatchLen + } + } + return bestMatchLen != len(path) // no exact match, so keep looking + }) + return file.NodeInfo(bestMatch), bestMatchLen == len(path) +} + +func findMatchingValueNode( + md protoreflect.MessageDescriptor, + path protoreflect.SourcePath, + currIsRepeated bool, + repeatedCount int, + repeatedIndices *[]int, + node ast.Node, + val ast.ValueNode, +) (ast.Node, int) { + var matchLen int + var index int + if currIsRepeated { + // Compute the index of the current value (or, if an array literal, the + // index of the first value in the array). + if len(*repeatedIndices) > repeatedCount { + (*repeatedIndices)[repeatedCount]++ + index = (*repeatedIndices)[repeatedCount] + } else { + *repeatedIndices = append(*repeatedIndices, 0) + index = 0 + } + repeatedCount++ + } + + if arrayVal, ok := val.(*ast.ArrayLiteralNode); ok { + if !currIsRepeated { + // This should not happen. + return nil, 0 + } + offset := int(path[0]) - index + if offset >= len(arrayVal.Elements) { + // The index we are looking for is not in this array. + return nil, 0 + } + elem := arrayVal.Elements[offset] + // We've matched the index! + matchLen++ + path = path[1:] + // Recurse into array element. + nextMatch, nextMatchLen := findMatchingValueNode( + md, + path, + false, + repeatedCount, + repeatedIndices, + elem, + elem, + ) + return nextMatch, nextMatchLen + matchLen + } + + if currIsRepeated { + if index != int(path[0]) { + // Not a match! + return nil, 0 + } + // We've matched the index! + matchLen++ + path = path[1:] + if len(path) == 0 { + // We're done matching! + return node, matchLen + } + } + + msgValue, ok := val.(*ast.MessageLiteralNode) + if !ok { + // We can't go any further + return node, matchLen + } + + var wantField protoreflect.FieldDescriptor + if md != nil { + wantField = md.Fields().ByNumber(protoreflect.FieldNumber(path[0])) + } + if wantField == nil { + // Should not be possible... next option won't fare any better since + // it's a disagreement between given path and given descriptor so bail. + return nil, 0 + } + for _, field := range msgValue.Elements { + if field.Name.Open != nil || + string(field.Name.Name.AsIdentifier()) != string(wantField.Name()) { + // This is an extension/custom option or indicates the wrong name. + // Try the next one. + continue + } + // We've matched this field. + matchLen++ + path = path[1:] + if len(path) == 0 { + // Perfect match! + return field, matchLen + } + nextMatch, nextMatchLen := findMatchingValueNode( + wantField.Message(), + path, + wantField.Cardinality() == protoreflect.Repeated, + repeatedCount, + repeatedIndices, + field, + field.Val, + ) + return nextMatch, nextMatchLen + matchLen + } + + // If we didn't find the right field, just return what we have so far. 
+ return node, matchLen +} + +func isDescendantPath(descendant, ancestor protoreflect.SourcePath) bool { + if len(descendant) < len(ancestor) { + return false + } + for i := range ancestor { + if descendant[i] != ancestor[i] { + return false + } + } + return true +} + +func asSpan(file string, srcLoc protoreflect.SourceLocation) ast.SourceSpan { + return ast.NewSourceSpan( + ast.SourcePos{ + Filename: file, + Line: srcLoc.StartLine + 1, + Col: srcLoc.StartColumn + 1, + }, + ast.SourcePos{ + Filename: file, + Line: srcLoc.EndLine + 1, + Col: srcLoc.EndColumn + 1, + }, + ) +} + +func (r *result) getImportLocation(path string) ast.SourceSpan { + node, ok := r.FileNode().(*ast.FileNode) + if !ok { + return ast.UnknownSpan(path) + } + for _, decl := range node.Decls { + imp, ok := decl.(*ast.ImportNode) + if !ok { + continue + } + if imp.Name.AsString() == path { + return node.NodeInfo(imp.Name) + } + } + // Couldn't find it? Should never happen... + return ast.UnknownSpan(path) +} + +func isEditions(r *result) bool { + return descriptorpb.Edition(r.Edition()) >= descriptorpb.Edition_EDITION_2023 +} diff --git a/vendor/github.com/bufbuild/protocompile/options/options.go b/vendor/github.com/bufbuild/protocompile/options/options.go new file mode 100644 index 00000000..e22bb2af --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/options/options.go @@ -0,0 +1,2267 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package options contains the logic for interpreting options. The parse step +// of compilation stores the options in uninterpreted form, which contains raw +// identifiers and literal values. +// +// The process of interpreting an option is to resolve identifiers, by examining +// descriptors for the google.protobuf.*Options types and their available +// extensions (custom options). As field names are resolved, the values can be +// type-checked against the types indicated in field descriptors. +// +// On success, the various fields and extensions of the options message are +// populated and the field holding the uninterpreted form is cleared. 
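// A minimal sketch (not part of the vendored file) of how this options package
// is meant to be driven, using only the exported functions declared below. The
// helper name interpretFor is assumed for illustration, and linked/handler are
// assumed to come from an earlier parse-and-link step.
package example // assumed package name

import (
	"github.com/bufbuild/protocompile/linker"
	"github.com/bufbuild/protocompile/options"
	"github.com/bufbuild/protocompile/reporter"
	"github.com/bufbuild/protocompile/sourceinfo"
)

func interpretFor(linked linker.Result, handler *reporter.Handler) (sourceinfo.OptionIndex, error) {
	// Strict interpretation: unresolved or badly typed options are reported
	// through handler and surface as an error here.
	index, err := options.InterpretOptions(linked, handler)
	if err != nil {
		return nil, err
	}
	// Lenient interpretation is also available; anything uninterpretable is
	// left in the uninterpreted_option fields instead of failing:
	//   index, err = options.InterpretOptionsLenient(linked)
	return index, nil
}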
+package options + +import ( + "bytes" + "errors" + "fmt" + "math" + "strings" + + "google.golang.org/protobuf/encoding/prototext" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/types/descriptorpb" + "google.golang.org/protobuf/types/dynamicpb" + + "github.com/bufbuild/protocompile/ast" + "github.com/bufbuild/protocompile/internal" + "github.com/bufbuild/protocompile/internal/messageset" + "github.com/bufbuild/protocompile/linker" + "github.com/bufbuild/protocompile/parser" + "github.com/bufbuild/protocompile/reporter" + "github.com/bufbuild/protocompile/sourceinfo" +) + +type interpreter struct { + file file + resolver linker.Resolver + overrideDescriptorProto linker.File + + index sourceinfo.OptionIndex + pathBuffer []int32 + + reporter *reporter.Handler + lenient bool + + // lenienceEnabled is set to true when errors reported to reporter + // should be lenient + lenienceEnabled bool + lenientErrReported bool +} + +type file interface { + parser.Result + ResolveMessageLiteralExtensionName(ast.IdentValueNode) string +} + +type noResolveFile struct { + parser.Result +} + +func (n noResolveFile) ResolveMessageLiteralExtensionName(ast.IdentValueNode) string { + return "" +} + +// InterpreterOption is an option that can be passed to InterpretOptions and +// its variants. +type InterpreterOption func(*interpreter) + +// WithOverrideDescriptorProto returns an option that indicates that the given file +// should be consulted when looking up a definition for an option type. The given +// file should usually have the path "google/protobuf/descriptor.proto". The given +// file will only be consulted if the option type is otherwise not visible to the +// file whose options are being interpreted. +func WithOverrideDescriptorProto(f linker.File) InterpreterOption { + return func(interp *interpreter) { + interp.overrideDescriptorProto = f + } +} + +// InterpretOptions interprets options in the given linked result, returning +// an index that can be used to generate source code info. This step mutates +// the linked result's underlying proto to move option elements out of the +// "uninterpreted_option" fields and into proper option fields and extensions. +// +// The given handler is used to report errors and warnings. If any errors are +// reported, this function returns a non-nil error. +func InterpretOptions(linked linker.Result, handler *reporter.Handler, opts ...InterpreterOption) (sourceinfo.OptionIndex, error) { + return interpretOptions(false, linked, linker.ResolverFromFile(linked), handler, opts) +} + +// InterpretOptionsLenient interprets options in a lenient/best-effort way in +// the given linked result, returning an index that can be used to generate +// source code info. This step mutates the linked result's underlying proto to +// move option elements out of the "uninterpreted_option" fields and into proper +// option fields and extensions. +// +// In lenient more, errors resolving option names and type errors are ignored. +// Any options that are uninterpretable (due to such errors) will remain in the +// "uninterpreted_option" fields. 
+func InterpretOptionsLenient(linked linker.Result, opts ...InterpreterOption) (sourceinfo.OptionIndex, error) { + return interpretOptions(true, linked, linker.ResolverFromFile(linked), reporter.NewHandler(nil), opts) +} + +// InterpretUnlinkedOptions does a best-effort attempt to interpret options in +// the given parsed result, returning an index that can be used to generate +// source code info. This step mutates the parsed result's underlying proto to +// move option elements out of the "uninterpreted_option" fields and into proper +// option fields and extensions. +// +// This is the same as InterpretOptionsLenient except that it accepts an +// unlinked result. Because the file is unlinked, custom options cannot be +// interpreted. Other errors resolving option names or type errors will be +// effectively ignored. Any options that are uninterpretable (due to such +// errors) will remain in the "uninterpreted_option" fields. +func InterpretUnlinkedOptions(parsed parser.Result, opts ...InterpreterOption) (sourceinfo.OptionIndex, error) { + return interpretOptions(true, noResolveFile{parsed}, nil, reporter.NewHandler(nil), opts) +} + +func interpretOptions(lenient bool, file file, res linker.Resolver, handler *reporter.Handler, interpOpts []InterpreterOption) (sourceinfo.OptionIndex, error) { + interp := &interpreter{ + file: file, + resolver: res, + lenient: lenient, + reporter: handler, + index: sourceinfo.OptionIndex{}, + pathBuffer: make([]int32, 0, 16), + } + for _, opt := range interpOpts { + opt(interp) + } + // We have to do this in two phases. First we interpret non-custom options. + // This allows us to handle standard options and features that may needed to + // correctly reference the custom options in the second phase. + if err := interp.interpretFileOptions(file, false); err != nil { + return nil, err + } + // Now we can do custom options. + if err := interp.interpretFileOptions(file, true); err != nil { + return nil, err + } + return interp.index, nil +} + +func (interp *interpreter) handleErrorf(span ast.SourceSpan, msg string, args ...interface{}) error { + if interp.lenienceEnabled { + interp.lenientErrReported = true + return nil + } + return interp.reporter.HandleErrorf(span, msg, args...) +} + +func (interp *interpreter) handleErrorWithPos(span ast.SourceSpan, err error) error { + if interp.lenienceEnabled { + interp.lenientErrReported = true + return nil + } + return interp.reporter.HandleErrorWithPos(span, err) +} + +func (interp *interpreter) handleError(err error) error { + if interp.lenienceEnabled { + interp.lenientErrReported = true + return nil + } + return interp.reporter.HandleError(err) +} + +func (interp *interpreter) interpretFileOptions(file file, customOpts bool) error { + fd := file.FileDescriptorProto() + prefix := fd.GetPackage() + if prefix != "" { + prefix += "." 
+ } + err := interpretElementOptions(interp, fd.GetName(), targetTypeFile, fd, customOpts) + if err != nil { + return err + } + for _, md := range fd.GetMessageType() { + fqn := prefix + md.GetName() + if err := interp.interpretMessageOptions(fqn, md, customOpts); err != nil { + return err + } + } + for _, fld := range fd.GetExtension() { + fqn := prefix + fld.GetName() + if err := interp.interpretFieldOptions(fqn, fld, customOpts); err != nil { + return err + } + } + for _, ed := range fd.GetEnumType() { + fqn := prefix + ed.GetName() + if err := interp.interpretEnumOptions(fqn, ed, customOpts); err != nil { + return err + } + } + for _, sd := range fd.GetService() { + fqn := prefix + sd.GetName() + err := interpretElementOptions(interp, fqn, targetTypeService, sd, customOpts) + if err != nil { + return err + } + for _, mtd := range sd.GetMethod() { + mtdFqn := fqn + "." + mtd.GetName() + err := interpretElementOptions(interp, mtdFqn, targetTypeMethod, mtd, customOpts) + if err != nil { + return err + } + } + } + return nil +} + +func resolveDescriptor[T protoreflect.Descriptor](res linker.Resolver, name string) T { + var zero T + if res == nil { + return zero + } + if len(name) > 0 && name[0] == '.' { + name = name[1:] + } + desc, _ := res.FindDescriptorByName(protoreflect.FullName(name)) + typedDesc, ok := desc.(T) + if ok { + return typedDesc + } + return zero +} + +func (interp *interpreter) resolveExtensionType(name string) (protoreflect.ExtensionTypeDescriptor, error) { + if interp.resolver == nil { + return nil, protoregistry.NotFound + } + if len(name) > 0 && name[0] == '.' { + name = name[1:] + } + ext, err := interp.resolver.FindExtensionByName(protoreflect.FullName(name)) + if err != nil { + return nil, err + } + return ext.TypeDescriptor(), nil +} + +func (interp *interpreter) resolveOptionsType(name string) protoreflect.MessageDescriptor { + md := resolveDescriptor[protoreflect.MessageDescriptor](interp.resolver, name) + if md != nil { + return md + } + if interp.overrideDescriptorProto == nil { + return nil + } + if len(name) > 0 && name[0] == '.' { + name = name[1:] + } + desc := interp.overrideDescriptorProto.FindDescriptorByName(protoreflect.FullName(name)) + if md, ok := desc.(protoreflect.MessageDescriptor); ok { + return md + } + return nil +} + +func (interp *interpreter) nodeInfo(n ast.Node) ast.NodeInfo { + return interp.file.FileNode().NodeInfo(n) +} + +func (interp *interpreter) interpretMessageOptions(fqn string, md *descriptorpb.DescriptorProto, customOpts bool) error { + err := interpretElementOptions(interp, fqn, targetTypeMessage, md, customOpts) + if err != nil { + return err + } + for _, fld := range md.GetField() { + fldFqn := fqn + "." + fld.GetName() + if err := interp.interpretFieldOptions(fldFqn, fld, customOpts); err != nil { + return err + } + } + for _, ood := range md.GetOneofDecl() { + oodFqn := fqn + "." + ood.GetName() + err := interpretElementOptions(interp, oodFqn, targetTypeOneof, ood, customOpts) + if err != nil { + return err + } + } + for _, fld := range md.GetExtension() { + fldFqn := fqn + "." + fld.GetName() + if err := interp.interpretFieldOptions(fldFqn, fld, customOpts); err != nil { + return err + } + } + for _, er := range md.GetExtensionRange() { + erFqn := fmt.Sprintf("%s.%d-%d", fqn, er.GetStart(), er.GetEnd()) + err := interpretElementOptions(interp, erFqn, targetTypeExtensionRange, er, customOpts) + if err != nil { + return err + } + } + for _, nmd := range md.GetNestedType() { + nmdFqn := fqn + "." 
+ nmd.GetName() + if err := interp.interpretMessageOptions(nmdFqn, nmd, customOpts); err != nil { + return err + } + } + for _, ed := range md.GetEnumType() { + edFqn := fqn + "." + ed.GetName() + if err := interp.interpretEnumOptions(edFqn, ed, customOpts); err != nil { + return err + } + } + + // We also copy features for map fields down to their synthesized key and value fields. + for _, fld := range md.GetField() { + entryName := internal.InitCap(internal.JSONName(fld.GetName())) + "Entry" + if fld.GetLabel() != descriptorpb.FieldDescriptorProto_LABEL_REPEATED || + fld.GetType() != descriptorpb.FieldDescriptorProto_TYPE_MESSAGE && + fld.GetTypeName() != "."+fqn+"."+entryName { + // can't be a map field + continue + } + if fld.Options == nil || fld.Options.Features == nil { + // no features to propagate + continue + } + for _, nmd := range md.GetNestedType() { + if nmd.GetName() == entryName { + // found the entry message + if !nmd.GetOptions().GetMapEntry() { + break // not a map + } + for _, mapField := range nmd.Field { + if mapField.Options == nil { + mapField.Options = &descriptorpb.FieldOptions{} + } + features := proto.Clone(fld.Options.Features).(*descriptorpb.FeatureSet) //nolint:errcheck + if mapField.Options.Features != nil { + proto.Merge(features, mapField.Options.Features) + } + mapField.Options.Features = features + } + break + } + } + } + + return nil +} + +var emptyFieldOptions = &descriptorpb.FieldOptions{} + +func (interp *interpreter) interpretFieldOptions(fqn string, fld *descriptorpb.FieldDescriptorProto, customOpts bool) error { + opts := fld.GetOptions() + emptyOptionsAlreadyPresent := opts != nil && len(opts.GetUninterpretedOption()) == 0 + + // For non-custom phase, first process pseudo-options + if len(opts.GetUninterpretedOption()) > 0 && !customOpts { + interp.enableLenience(true) + err := interp.interpretFieldPseudoOptions(fqn, fld, opts) + interp.enableLenience(false) + if err != nil { + return err + } + } + + // Must re-check length of uninterpreted options since above step could remove some. + if len(opts.GetUninterpretedOption()) == 0 { + // If the message has no other interpreted options, we clear it out. But don't + // do that if the descriptor came in with empty options or if it already has + // interpreted option fields. + if opts != nil && !emptyOptionsAlreadyPresent && proto.Equal(fld.Options, emptyFieldOptions) { + fld.Options = nil + } + return nil + } + + // Then process actual options. + return interpretElementOptions(interp, fqn, targetTypeField, fld, customOpts) +} + +func (interp *interpreter) interpretFieldPseudoOptions(fqn string, fld *descriptorpb.FieldDescriptorProto, opts *descriptorpb.FieldOptions) error { + scope := "field " + fqn + uo := opts.UninterpretedOption + + // process json_name pseudo-option + if index, err := internal.FindOption(interp.file, interp.handleErrorf, scope, uo, "json_name"); err != nil { + return err + } else if index >= 0 { + opt := uo[index] + optNode := interp.file.OptionNode(opt) + if opt.StringValue == nil { + return interp.handleErrorf(interp.nodeInfo(optNode.GetValue()), "%s: expecting string value for json_name option", scope) + } + jsonName := string(opt.StringValue) + // Extensions don't support custom json_name values. + // If the value is already set (via the descriptor) and doesn't match the default value, return an error. 
+ if fld.GetExtendee() != "" && jsonName != "" && jsonName != internal.JSONName(fld.GetName()) { + return interp.handleErrorf(interp.nodeInfo(optNode.GetName()), "%s: option json_name is not allowed on extensions", scope) + } + // attribute source code info + if on, ok := optNode.(*ast.OptionNode); ok { + interp.index[on] = &sourceinfo.OptionSourceInfo{Path: []int32{-1, internal.FieldJSONNameTag}} + } + uo = internal.RemoveOption(uo, index) + if strings.HasPrefix(jsonName, "[") && strings.HasSuffix(jsonName, "]") { + return interp.handleErrorf(interp.nodeInfo(optNode.GetValue()), "%s: option json_name value cannot start with '[' and end with ']'; that is reserved for representing extensions", scope) + } + fld.JsonName = proto.String(jsonName) + } + + // and process default pseudo-option + if index, err := interp.processDefaultOption(scope, fqn, fld, uo); err != nil { + return err + } else if index >= 0 { + // attribute source code info + optNode := interp.file.OptionNode(uo[index]) + if on, ok := optNode.(*ast.OptionNode); ok { + interp.index[on] = &sourceinfo.OptionSourceInfo{Path: []int32{-1, internal.FieldDefaultTag}} + } + uo = internal.RemoveOption(uo, index) + } + + opts.UninterpretedOption = uo + return nil +} + +func (interp *interpreter) processDefaultOption(scope string, fqn string, fld *descriptorpb.FieldDescriptorProto, uos []*descriptorpb.UninterpretedOption) (defaultIndex int, err error) { + found, err := internal.FindOption(interp.file, interp.handleErrorf, scope, uos, "default") + if err != nil || found == -1 { + return -1, err + } + opt := uos[found] + optNode := interp.file.OptionNode(opt) + if fld.GetLabel() == descriptorpb.FieldDescriptorProto_LABEL_REPEATED { + return -1, interp.handleErrorf(interp.nodeInfo(optNode.GetName()), "%s: default value cannot be set because field is repeated", scope) + } + if fld.GetType() == descriptorpb.FieldDescriptorProto_TYPE_GROUP || fld.GetType() == descriptorpb.FieldDescriptorProto_TYPE_MESSAGE { + return -1, interp.handleErrorf(interp.nodeInfo(optNode.GetName()), "%s: default value cannot be set because field is a message", scope) + } + mc := &internal.MessageContext{ + File: interp.file, + ElementName: fqn, + ElementType: descriptorType(fld), + Option: opt, + } + + val := optNode.GetValue() + var v interface{} + if val.Value() == nil { + // no value in the AST, so we dig the value out of the uninterpreted option proto + v, err = interp.defaultValueFromProto(mc, fld, opt, val) + } else { + // compute value from AST + v, err = interp.defaultValue(mc, fld, val) + } + if err != nil { + return -1, interp.handleError(err) + } + + if str, ok := v.(string); ok { + fld.DefaultValue = proto.String(str) + } else if b, ok := v.([]byte); ok { + fld.DefaultValue = proto.String(encodeDefaultBytes(b)) + } else { + var flt float64 + var ok bool + if flt, ok = v.(float64); !ok { + var flt32 float32 + if flt32, ok = v.(float32); ok { + flt = float64(flt32) + } + } + if ok { + switch { + case math.IsInf(flt, 1): + fld.DefaultValue = proto.String("inf") + case math.IsInf(flt, -1): + fld.DefaultValue = proto.String("-inf") + case math.IsNaN(flt): + fld.DefaultValue = proto.String("nan") + default: + fld.DefaultValue = proto.String(fmt.Sprintf("%v", v)) + } + } else { + fld.DefaultValue = proto.String(fmt.Sprintf("%v", v)) + } + } + return found, nil +} + +func (interp *interpreter) defaultValue(mc *internal.MessageContext, fld *descriptorpb.FieldDescriptorProto, val ast.ValueNode) (interface{}, error) { + if _, ok := val.(*ast.MessageLiteralNode); ok { + 
return -1, reporter.Errorf(interp.nodeInfo(val), "%vdefault value cannot be a message", mc) + } + if fld.GetType() == descriptorpb.FieldDescriptorProto_TYPE_ENUM { + ed := resolveDescriptor[protoreflect.EnumDescriptor](interp.resolver, fld.GetTypeName()) + if ed == nil { + return -1, reporter.Errorf(interp.nodeInfo(val), "%vunable to resolve enum type %q for field %q", mc, fld.GetTypeName(), fld.GetName()) + } + _, name, err := interp.enumFieldValue(mc, ed, val, false) + if err != nil { + return -1, err + } + return string(name), nil + } + return interp.scalarFieldValue(mc, fld.GetType(), val, false) +} + +func (interp *interpreter) defaultValueFromProto(mc *internal.MessageContext, fld *descriptorpb.FieldDescriptorProto, opt *descriptorpb.UninterpretedOption, node ast.Node) (interface{}, error) { + if opt.AggregateValue != nil { + return -1, reporter.Errorf(interp.nodeInfo(node), "%vdefault value cannot be a message", mc) + } + if fld.GetType() == descriptorpb.FieldDescriptorProto_TYPE_ENUM { + ed := resolveDescriptor[protoreflect.EnumDescriptor](interp.resolver, fld.GetTypeName()) + if ed == nil { + return -1, reporter.Errorf(interp.nodeInfo(node), "%vunable to resolve enum type %q for field %q", mc, fld.GetTypeName(), fld.GetName()) + } + _, name, err := interp.enumFieldValueFromProto(mc, ed, opt, node) + if err != nil { + return nil, err + } + return string(name), nil + } + return interp.scalarFieldValueFromProto(mc, fld.GetType(), opt, node) +} + +func encodeDefaultBytes(b []byte) string { + var buf bytes.Buffer + internal.WriteEscapedBytes(&buf, b) + return buf.String() +} + +func (interp *interpreter) interpretEnumOptions(fqn string, ed *descriptorpb.EnumDescriptorProto, customOpts bool) error { + err := interpretElementOptions(interp, fqn, targetTypeEnum, ed, customOpts) + if err != nil { + return err + } + for _, evd := range ed.GetValue() { + evdFqn := fqn + "." + evd.GetName() + err := interpretElementOptions(interp, evdFqn, targetTypeEnumValue, evd, customOpts) + if err != nil { + return err + } + } + return nil +} + +func interpretElementOptions[Elem elementType[OptsStruct, Opts], OptsStruct any, Opts optionsType[OptsStruct]]( + interp *interpreter, + fqn string, + target *targetType[Elem, OptsStruct, Opts], + elem Elem, + customOpts bool, +) error { + opts := elem.GetOptions() + uninterpreted := opts.GetUninterpretedOption() + if len(uninterpreted) > 0 { + remain, err := interp.interpretOptions(fqn, target.t, elem, opts, uninterpreted, customOpts) + if err != nil { + return err + } + target.setUninterpretedOptions(opts, remain) + } else if customOpts { + // If customOpts is true, we are in second pass of interpreting. + // For second pass, even if there are no options to interpret, we still + // need to verify feature usage. + features := opts.GetFeatures() + var msg protoreflect.Message + if len(features.ProtoReflect().GetUnknown()) > 0 { + // We need to first convert to a message that uses the sources' definition + // of FeatureSet. 
+ optsDesc := opts.ProtoReflect().Descriptor() + optsFqn := string(optsDesc.FullName()) + if md := interp.resolveOptionsType(optsFqn); md != nil { + dm := dynamicpb.NewMessage(md) + if err := cloneInto(dm, opts, interp.resolver); err != nil { + node := interp.file.Node(elem) + return interp.handleError(reporter.Error(interp.nodeInfo(node), err)) + } + msg = dm + } + } + if msg == nil { + msg = opts.ProtoReflect() + } + err := interp.validateRecursive(false, msg, "", elem, nil, false, false, false) + if err != nil { + return err + } + } + return nil +} + +// interpretOptions processes the options in uninterpreted, which are interpreted as fields +// of the given opts message. The first return value is the features to use for child elements. +// On success, the latter two return values will usually be nil, nil. But if the current +// operation is lenient, it may return a non-nil slice of uninterpreted options on success. +// In such a case, the returned slice contains the options which could not be interpreted. +func (interp *interpreter) interpretOptions( + fqn string, + targetType descriptorpb.FieldOptions_OptionTargetType, + element, opts proto.Message, + uninterpreted []*descriptorpb.UninterpretedOption, + customOpts bool, +) ([]*descriptorpb.UninterpretedOption, error) { + optsDesc := opts.ProtoReflect().Descriptor() + optsFqn := string(optsDesc.FullName()) + var msg protoreflect.Message + // see if the parse included an override copy for these options + if md := interp.resolveOptionsType(optsFqn); md != nil { + dm := dynamicpb.NewMessage(md) + if err := cloneInto(dm, opts, interp.resolver); err != nil { + node := interp.file.Node(element) + return nil, interp.handleError(reporter.Error(interp.nodeInfo(node), err)) + } + msg = dm + } else { + msg = proto.Clone(opts).ProtoReflect() + } + + mc := &internal.MessageContext{ + File: interp.file, + ElementName: fqn, + ElementType: descriptorType(element), + } + var remain []*descriptorpb.UninterpretedOption + for _, uo := range uninterpreted { + isCustom := uo.Name[0].GetIsExtension() + if isCustom != customOpts { + // We're not looking at these this phase. + remain = append(remain, uo) + continue + } + firstName := uo.Name[0].GetNamePart() + if targetType == descriptorpb.FieldOptions_TARGET_TYPE_FIELD && + !isCustom && (firstName == "default" || firstName == "json_name") { + // Field pseudo-option that we can skip and is handled elsewhere. + remain = append(remain, uo) + continue + } + node := interp.file.OptionNode(uo) + if !isCustom && firstName == "uninterpreted_option" { + if interp.lenient { + remain = append(remain, uo) + continue + } + // uninterpreted_option might be found reflectively, but is not actually valid for use + if err := interp.handleErrorf(interp.nodeInfo(node.GetName()), "%vinvalid option 'uninterpreted_option'", mc); err != nil { + return nil, err + } + } + mc.Option = uo + interp.enableLenience(true) + srcInfo, err := interp.interpretField(targetType, mc, msg, uo, 0, interp.pathBuffer) + interp.enableLenience(false) + if err != nil { + return nil, err + } + if interp.lenientErrReported { + remain = append(remain, uo) + continue + } + + if srcInfo != nil { + if optn, ok := node.(*ast.OptionNode); ok { + interp.index[optn] = srcInfo + } + } + } + + // customOpts is true for the second pass, which is also when we want to validate feature usage. 
+ doValidation := customOpts + if doValidation { + validateRequiredFields := !interp.lenient + err := interp.validateRecursive(validateRequiredFields, msg, "", element, nil, false, false, false) + if err != nil { + return nil, err + } + } + + if interp.lenient { + // If we're lenient, then we don't want to clobber the passed in message + // and leave it partially populated. So we convert into a copy first + optsClone := opts.ProtoReflect().New().Interface() + if err := cloneInto(optsClone, msg.Interface(), interp.resolver); err != nil { + // TODO: do this in a more granular way, so we can convert individual + // fields and leave bad ones uninterpreted instead of skipping all of + // the work we've done so far. + return uninterpreted, nil + } + if doValidation { + if err := proto.CheckInitialized(optsClone); err != nil { + // Conversion from dynamic message failed to set some required fields. + // TODO above applies here as well... + return uninterpreted, nil + } + } + // conversion from dynamic message above worked, so now + // it is safe to overwrite the passed in message + proto.Reset(opts) + proto.Merge(opts, optsClone) + + return remain, nil + } + + // now try to convert into the passed in message and fail if not successful + if err := cloneInto(opts, msg.Interface(), interp.resolver); err != nil { + node := interp.file.Node(element) + return nil, interp.handleError(reporter.Error(interp.nodeInfo(node), err)) + } + + return remain, nil +} + +// checkFieldUsage verifies that the given option field can be used +// for the given target type. It reports an error if not and returns +// a non-nil error if the handler returned a non-nil error. +func (interp *interpreter) checkFieldUsage( + targetType descriptorpb.FieldOptions_OptionTargetType, + fld protoreflect.FieldDescriptor, + node ast.Node, +) error { + msgOpts, _ := fld.ContainingMessage().Options().(*descriptorpb.MessageOptions) + if msgOpts.GetMessageSetWireFormat() && !messageset.CanSupportMessageSets() { + err := interp.handleErrorf(interp.nodeInfo(node), "field %q may not be used in an option: it uses 'message set wire format' legacy proto1 feature which is not supported", fld.FullName()) + if err != nil { + return err + } + } + + opts, ok := fld.Options().(*descriptorpb.FieldOptions) + if !ok { + return nil + } + targetTypes := opts.GetTargets() + if len(targetTypes) == 0 { + return nil + } + for _, allowedType := range targetTypes { + if allowedType == targetType { + return nil + } + } + allowedTypes := make([]string, len(targetTypes)) + for i, t := range targetTypes { + allowedTypes[i] = targetTypeString(t) + } + if len(targetTypes) == 1 && targetTypes[0] == descriptorpb.FieldOptions_TARGET_TYPE_UNKNOWN { + return interp.handleErrorf(interp.nodeInfo(node), "field %q may not be used in an option (it declares no allowed target types)", fld.FullName()) + } + return interp.handleErrorf(interp.nodeInfo(node), "field %q is allowed on [%s], not on %s", fld.FullName(), strings.Join(allowedTypes, ","), targetTypeString(targetType)) +} + +func targetTypeString(t descriptorpb.FieldOptions_OptionTargetType) string { + return strings.ToLower(strings.ReplaceAll(strings.TrimPrefix(t.String(), "TARGET_TYPE_"), "_", " ")) +} + +func editionString(t descriptorpb.Edition) string { + return strings.ToLower(strings.ReplaceAll(strings.TrimPrefix(t.String(), "EDITION_"), "_", "-")) +} + +func cloneInto(dest proto.Message, src proto.Message, res linker.Resolver) error { + if dest.ProtoReflect().Descriptor() == src.ProtoReflect().Descriptor() { + 
proto.Reset(dest) + proto.Merge(dest, src) + return nil + } + + // If descriptors are not the same, we could have field descriptors in src that + // don't match the ones in dest. There's no easy/sane way to handle that. So we + // just marshal to bytes and back to do this + marshaler := proto.MarshalOptions{ + // We've already validated required fields before this point, + // so we can allow partial here. + AllowPartial: true, + } + data, err := marshaler.Marshal(src) + if err != nil { + return err + } + unmarshaler := proto.UnmarshalOptions{AllowPartial: true} + if res != nil { + unmarshaler.Resolver = res + } else { + // Use a typed nil, which returns "not found" to all queries + // and prevents fallback to protoregistry.GlobalTypes. + unmarshaler.Resolver = (*protoregistry.Types)(nil) + } + return unmarshaler.Unmarshal(data, dest) +} + +func (interp *interpreter) validateRecursive( + validateRequiredFields bool, + msg protoreflect.Message, + prefix string, + element proto.Message, + path []int32, + isFeatures bool, + inFeatures bool, + inMap bool, +) error { + if validateRequiredFields { + flds := msg.Descriptor().Fields() + var missingFields []string + for i := 0; i < flds.Len(); i++ { + fld := flds.Get(i) + if fld.Cardinality() == protoreflect.Required && !msg.Has(fld) { + missingFields = append(missingFields, fmt.Sprintf("%s%s", prefix, fld.Name())) + } + } + if len(missingFields) > 0 { + node := interp.findOptionNode(path, element) + err := interp.handleErrorf(interp.nodeInfo(node), "error in %s options: some required fields missing: %v", descriptorType(element), strings.Join(missingFields, ", ")) + if err != nil { + return err + } + } + } + + var err error + msg.Range(func(fld protoreflect.FieldDescriptor, val protoreflect.Value) bool { + chpath := path + if !inMap { + chpath = append(chpath, int32(fld.Number())) + } + chInFeatures := isFeatures || inFeatures + chIsFeatures := !chInFeatures && len(path) == 0 && fld.Name() == "features" + + if (isFeatures || (inFeatures && fld.IsExtension())) && + interp.file.FileNode().Name() == fld.ParentFile().Path() { + var what, name string + if fld.IsExtension() { + what = "custom feature" + name = "(" + string(fld.FullName()) + ")" + } else { + what = "feature" + name = string(fld.Name()) + } + node := interp.findOptionNode(path, element) + err = interp.handleErrorf(interp.nodeInfo(node), "%s %s cannot be used from the same file in which it is defined", what, name) + if err != nil { + return false + } + } + + if chInFeatures { + // Validate feature usage against feature settings. + + // First, check the feature support settings of the field. + opts, _ := fld.Options().(*descriptorpb.FieldOptions) + edition := interp.file.FileDescriptorProto().GetEdition() + if opts != nil && opts.FeatureSupport != nil { + err = interp.validateFeatureSupport(edition, opts.FeatureSupport, "field", string(fld.FullName()), chpath, element) + if err != nil { + return false + } + } + // Then, if it's an enum or has an enum, check the feature support settings of the enum values. + var enum protoreflect.EnumDescriptor + if fld.Enum() != nil { + enum = fld.Enum() + } else if fld.IsMap() && fld.MapValue().Enum() != nil { + enum = fld.MapValue().Enum() + } + if enum != nil { + switch { + case fld.IsMap(): + val.Map().Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool { + // Can't construct path to particular map entry since we don't this entry's index. 
+ // So we leave chpath alone, and it will have to point to the whole map value (or + // the first entry if the map is de-structured across multiple option statements). + err = interp.validateEnumValueFeatureSupport(edition, enum, v.Enum(), chpath, element) + return err == nil + }) + if err != nil { + return false + } + case fld.IsList(): + sl := val.List() + for i := 0; i < sl.Len(); i++ { + v := sl.Get(i) + err = interp.validateEnumValueFeatureSupport(edition, enum, v.Enum(), append(chpath, int32(i)), element) + if err != nil { + return false + } + } + default: + err = interp.validateEnumValueFeatureSupport(edition, enum, val.Enum(), chpath, element) + if err != nil { + return false + } + } + } + } + + // If it's a message or contains a message, recursively validate fields in those messages. + switch { + case fld.IsMap() && fld.MapValue().Message() != nil: + val.Map().Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { + chprefix := fmt.Sprintf("%s%s[%v].", prefix, fieldName(fld), k) + err = interp.validateRecursive(validateRequiredFields, v.Message(), chprefix, element, chpath, chIsFeatures, chInFeatures, true) + return err == nil + }) + if err != nil { + return false + } + case fld.IsList() && fld.Message() != nil: + sl := val.List() + for i := 0; i < sl.Len(); i++ { + v := sl.Get(i) + chprefix := fmt.Sprintf("%s%s[%d].", prefix, fieldName(fld), i) + if !inMap { + chpath = append(chpath, int32(i)) + } + err = interp.validateRecursive(validateRequiredFields, v.Message(), chprefix, element, chpath, chIsFeatures, chInFeatures, inMap) + if err != nil { + return false + } + } + case !fld.IsMap() && fld.Message() != nil: + chprefix := fmt.Sprintf("%s%s.", prefix, fieldName(fld)) + err = interp.validateRecursive(validateRequiredFields, val.Message(), chprefix, element, chpath, chIsFeatures, chInFeatures, inMap) + if err != nil { + return false + } + } + return true + }) + return err +} + +func (interp *interpreter) validateEnumValueFeatureSupport( + edition descriptorpb.Edition, + enum protoreflect.EnumDescriptor, + number protoreflect.EnumNumber, + path []int32, + element proto.Message, +) error { + enumVal := enum.Values().ByNumber(number) + if enumVal == nil { + return nil + } + enumValOpts, _ := enumVal.Options().(*descriptorpb.EnumValueOptions) + if enumValOpts == nil || enumValOpts.FeatureSupport == nil { + return nil + } + return interp.validateFeatureSupport(edition, enumValOpts.FeatureSupport, "enum value", string(enumVal.Name()), path, element) +} + +func (interp *interpreter) validateFeatureSupport( + edition descriptorpb.Edition, + featureSupport *descriptorpb.FieldOptions_FeatureSupport, + what string, + name string, + path []int32, + element proto.Message, +) error { + if featureSupport.EditionIntroduced != nil && edition < featureSupport.GetEditionIntroduced() { + node := interp.findOptionNode(path, element) + err := interp.handleErrorf(interp.nodeInfo(node), "%s %q was not introduced until edition %s", what, name, editionString(featureSupport.GetEditionIntroduced())) + if err != nil { + return err + } + } + if featureSupport.EditionRemoved != nil && edition >= featureSupport.GetEditionRemoved() { + node := interp.findOptionNode(path, element) + err := interp.handleErrorf(interp.nodeInfo(node), "%s %q was removed in edition %s", what, name, editionString(featureSupport.GetEditionRemoved())) + if err != nil { + return err + } + } + if featureSupport.EditionDeprecated != nil && edition >= featureSupport.GetEditionDeprecated() { + node := interp.findOptionNode(path, 
element) + var suffix string + if featureSupport.GetDeprecationWarning() != "" { + suffix = ": " + featureSupport.GetDeprecationWarning() + } + interp.reporter.HandleWarningf(interp.nodeInfo(node), "%s %q is deprecated as of edition %s%s", what, name, editionString(featureSupport.GetEditionDeprecated()), suffix) + } + return nil +} + +func (interp *interpreter) findOptionNode( + path []int32, + element proto.Message, +) ast.Node { + elementNode := interp.file.Node(element) + nodeWithOpts, _ := elementNode.(ast.NodeWithOptions) + if nodeWithOpts == nil { + return elementNode + } + node, _ := findOptionNode[*ast.OptionNode]( + path, + optionsRanger{nodeWithOpts}, + func(n *ast.OptionNode) *sourceinfo.OptionSourceInfo { + return interp.index[n] + }, + ) + if node != nil { + return node + } + return elementNode +} + +func findOptionNode[N ast.Node]( + path []int32, + nodes interface { + Range(func(N, ast.ValueNode) bool) + }, + srcInfoAccessor func(N) *sourceinfo.OptionSourceInfo, +) (ast.Node, int) { + var bestMatch ast.Node + var bestMatchLen int + nodes.Range(func(node N, val ast.ValueNode) bool { + srcInfo := srcInfoAccessor(node) + if srcInfo == nil { + // can happen if we are lenient when interpreting -- this node + // could not be interpreted and thus has no source info; skip + return true + } + if srcInfo.Path[0] < 0 { + // negative first value means it's a field pseudo-option; skip + return true + } + match, matchLen := findOptionValueNode(path, node, val, srcInfo) + if matchLen > bestMatchLen { + bestMatch = match + bestMatchLen = matchLen + if matchLen >= len(path) { + // not going to find a better one + return false + } + } + return true + }) + return bestMatch, bestMatchLen +} + +type optionsRanger struct { + node ast.NodeWithOptions +} + +func (r optionsRanger) Range(f func(*ast.OptionNode, ast.ValueNode) bool) { + r.node.RangeOptions(func(optNode *ast.OptionNode) bool { + return f(optNode, optNode.Val) + }) +} + +type valueRanger []ast.ValueNode + +func (r valueRanger) Range(f func(ast.ValueNode, ast.ValueNode) bool) { + for _, elem := range r { + if !f(elem, elem) { + return + } + } +} + +type fieldRanger map[*ast.MessageFieldNode]*sourceinfo.OptionSourceInfo + +func (r fieldRanger) Range(f func(*ast.MessageFieldNode, ast.ValueNode) bool) { + for elem := range r { + if !f(elem, elem.Val) { + return + } + } +} + +func isPathMatch(a, b []int32) bool { + length := len(a) + if len(b) < length { + length = len(b) + } + for i := 0; i < length; i++ { + if a[i] != b[i] { + return false + } + } + return true +} + +func findOptionValueNode( + path []int32, + node ast.Node, + value ast.ValueNode, + srcInfo *sourceinfo.OptionSourceInfo, +) (ast.Node, int) { + srcInfoPath := srcInfo.Path + if _, ok := srcInfo.Children.(*sourceinfo.ArrayLiteralSourceInfo); ok { + // Last path element for array source info is the index of the + // first element. So exclude in the comparison, since path could + // indicate a later index, which is present in the array. 
+ srcInfoPath = srcInfo.Path[:len(srcInfo.Path)-1] + } + + if !isPathMatch(path, srcInfoPath) { + return nil, 0 + } + if len(srcInfoPath) >= len(path) { + return node, len(path) + } + + switch children := srcInfo.Children.(type) { + case *sourceinfo.ArrayLiteralSourceInfo: + array, ok := value.(*ast.ArrayLiteralNode) + if !ok { + break // should never happen + } + var i int + match, matchLen := findOptionNode[ast.ValueNode]( + path, + valueRanger(array.Elements), + func(_ ast.ValueNode) *sourceinfo.OptionSourceInfo { + val := &children.Elements[i] + i++ + return val + }, + ) + if match != nil { + return match, matchLen + } + + case *sourceinfo.MessageLiteralSourceInfo: + match, matchLen := findOptionNode[*ast.MessageFieldNode]( + path, + fieldRanger(children.Fields), + func(n *ast.MessageFieldNode) *sourceinfo.OptionSourceInfo { + return children.Fields[n] + }, + ) + if match != nil { + return match, matchLen + } + } + + return node, len(srcInfoPath) +} + +// interpretField interprets the option described by opt, as a field inside the given msg. This +// interprets components of the option name starting at nameIndex. When nameIndex == 0, then +// msg must be an options message. For nameIndex > 0, msg is a nested message inside of the +// options message. The given pathPrefix is the path (sequence of field numbers and indices +// with a FileDescriptorProto as the start) up to but not including the given nameIndex. +// +// Any errors encountered will be handled, so the returned error will only be non-nil if +// the handler returned non-nil. Callers must check that the source info is non-nil before +// using it since it can be nil (in the event of a problem) even if the error is nil. +func (interp *interpreter) interpretField( + targetType descriptorpb.FieldOptions_OptionTargetType, + mc *internal.MessageContext, + msg protoreflect.Message, + opt *descriptorpb.UninterpretedOption, + nameIndex int, + pathPrefix []int32, +) (*sourceinfo.OptionSourceInfo, error) { + var fld protoreflect.FieldDescriptor + nm := opt.GetName()[nameIndex] + node := interp.file.OptionNamePartNode(nm) + if nm.GetIsExtension() { + extName := nm.GetNamePart() + if extName[0] == '.' 
{ + extName = extName[1:] /* skip leading dot */ + } + var err error + fld, err = interp.resolveExtensionType(extName) + if errors.Is(err, protoregistry.NotFound) { + return nil, interp.handleErrorf(interp.nodeInfo(node), + "%vunrecognized extension %s of %s", + mc, extName, msg.Descriptor().FullName()) + } else if err != nil { + return nil, interp.handleErrorWithPos(interp.nodeInfo(node), err) + } + if fld.ContainingMessage().FullName() != msg.Descriptor().FullName() { + return nil, interp.handleErrorf(interp.nodeInfo(node), + "%vextension %s should extend %s but instead extends %s", + mc, extName, msg.Descriptor().FullName(), fld.ContainingMessage().FullName()) + } + } else { + fld = msg.Descriptor().Fields().ByName(protoreflect.Name(nm.GetNamePart())) + if fld == nil { + return nil, interp.handleErrorf(interp.nodeInfo(node), + "%vfield %s of %s does not exist", + mc, nm.GetNamePart(), msg.Descriptor().FullName()) + } + } + pathPrefix = append(pathPrefix, int32(fld.Number())) + + if err := interp.checkFieldUsage(targetType, fld, node); err != nil { + return nil, err + } + + if len(opt.GetName()) > nameIndex+1 { + nextnm := opt.GetName()[nameIndex+1] + nextnode := interp.file.OptionNamePartNode(nextnm) + k := fld.Kind() + if k != protoreflect.MessageKind && k != protoreflect.GroupKind { + return nil, interp.handleErrorf(interp.nodeInfo(nextnode), + "%vcannot set field %s because %s is not a message", + mc, nextnm.GetNamePart(), nm.GetNamePart()) + } + if fld.Cardinality() == protoreflect.Repeated { + return nil, interp.handleErrorf(interp.nodeInfo(nextnode), + "%vcannot set field %s because %s is repeated (must use an aggregate)", + mc, nextnm.GetNamePart(), nm.GetNamePart()) + } + var fdm protoreflect.Message + if msg.Has(fld) { + v := msg.Mutable(fld) + fdm = v.Message() + } else { + if ood := fld.ContainingOneof(); ood != nil { + existingFld := msg.WhichOneof(ood) + if existingFld != nil && existingFld.Number() != fld.Number() { + return nil, interp.handleErrorf(interp.nodeInfo(node), + "%voneof %q already has field %q set", + mc, ood.Name(), fieldName(existingFld)) + } + } + fldVal := msg.NewField(fld) + fdm = fldVal.Message() + msg.Set(fld, fldVal) + } + // recurse to set next part of name + return interp.interpretField(targetType, mc, fdm, opt, nameIndex+1, pathPrefix) + } + + optNode := interp.file.OptionNode(opt) + optValNode := optNode.GetValue() + var srcInfo *sourceinfo.OptionSourceInfo + var err error + if optValNode.Value() == nil { + err = interp.setOptionFieldFromProto(targetType, mc, msg, fld, node, opt, optValNode) + srcInfoVal := newSrcInfo(pathPrefix, nil) + srcInfo = &srcInfoVal + } else { + srcInfo, err = interp.setOptionField(targetType, mc, msg, fld, node, optValNode, false, pathPrefix) + } + if err != nil { + return nil, err + } + + return srcInfo, nil +} + +// setOptionField sets the value for field fld in the given message msg to the value represented +// by AST node val. The given name is the AST node that corresponds to the name of fld. On success, +// it returns additional metadata about the field that was set. 
+func (interp *interpreter) setOptionField( + targetType descriptorpb.FieldOptions_OptionTargetType, + mc *internal.MessageContext, + msg protoreflect.Message, + fld protoreflect.FieldDescriptor, + name ast.Node, + val ast.ValueNode, + insideMsgLiteral bool, + pathPrefix []int32, +) (*sourceinfo.OptionSourceInfo, error) { + v := val.Value() + if sl, ok := v.([]ast.ValueNode); ok { + // handle slices a little differently than the others + if fld.Cardinality() != protoreflect.Repeated { + return nil, interp.handleErrorf(interp.nodeInfo(val), "%vvalue is an array but field is not repeated", mc) + } + origPath := mc.OptAggPath + defer func() { + mc.OptAggPath = origPath + }() + childVals := make([]sourceinfo.OptionSourceInfo, len(sl)) + var firstIndex int + if fld.IsMap() { + firstIndex = msg.Get(fld).Map().Len() + } else { + firstIndex = msg.Get(fld).List().Len() + } + for index, item := range sl { + mc.OptAggPath = fmt.Sprintf("%s[%d]", origPath, index) + value, srcInfo, err := interp.fieldValue(targetType, mc, msg, fld, item, insideMsgLiteral, append(pathPrefix, int32(firstIndex+index))) + if err != nil || !value.IsValid() { + return nil, err + } + if fld.IsMap() { + mv := msg.Mutable(fld).Map() + setMapEntry(fld, msg, mv, value.Message()) + } else { + lv := msg.Mutable(fld).List() + lv.Append(value) + } + childVals[index] = srcInfo + } + srcInfo := newSrcInfo(append(pathPrefix, int32(firstIndex)), &sourceinfo.ArrayLiteralSourceInfo{Elements: childVals}) + return &srcInfo, nil + } + + if fld.IsMap() { + pathPrefix = append(pathPrefix, int32(msg.Get(fld).Map().Len())) + } else if fld.IsList() { + pathPrefix = append(pathPrefix, int32(msg.Get(fld).List().Len())) + } + + value, srcInfo, err := interp.fieldValue(targetType, mc, msg, fld, val, insideMsgLiteral, pathPrefix) + if err != nil || !value.IsValid() { + return nil, err + } + + if ood := fld.ContainingOneof(); ood != nil { + existingFld := msg.WhichOneof(ood) + if existingFld != nil && existingFld.Number() != fld.Number() { + return nil, interp.handleErrorf(interp.nodeInfo(name), "%voneof %q already has field %q set", mc, ood.Name(), fieldName(existingFld)) + } + } + + switch { + case fld.IsMap(): + mv := msg.Mutable(fld).Map() + setMapEntry(fld, msg, mv, value.Message()) + case fld.IsList(): + lv := msg.Mutable(fld).List() + lv.Append(value) + default: + if msg.Has(fld) { + return nil, interp.handleErrorf(interp.nodeInfo(name), "%vnon-repeated option field %s already set", mc, fieldName(fld)) + } + msg.Set(fld, value) + } + return &srcInfo, nil +} + +// setOptionFieldFromProto sets the value for field fld in the given message msg to the value +// represented by the given uninterpreted option. The given ast.Node, if non-nil, will be used +// to report source positions in error messages. On success, it returns additional metadata +// about the field that was set. 
+func (interp *interpreter) setOptionFieldFromProto( + targetType descriptorpb.FieldOptions_OptionTargetType, + mc *internal.MessageContext, + msg protoreflect.Message, + fld protoreflect.FieldDescriptor, + name ast.Node, + opt *descriptorpb.UninterpretedOption, + node ast.Node, +) error { + k := fld.Kind() + var value protoreflect.Value + switch k { + case protoreflect.EnumKind: + num, _, err := interp.enumFieldValueFromProto(mc, fld.Enum(), opt, node) + if err != nil { + return interp.handleError(err) + } + value = protoreflect.ValueOfEnum(num) + + case protoreflect.MessageKind, protoreflect.GroupKind: + if opt.AggregateValue == nil { + return interp.handleErrorf(interp.nodeInfo(node), "%vexpecting message, got %s", mc, optionValueKind(opt)) + } + // We must parse the text format from the aggregate value string + var elem protoreflect.Message + switch { + case fld.IsMap(): + elem = dynamicpb.NewMessage(fld.Message()) + case fld.IsList(): + elem = msg.Get(fld).List().NewElement().Message() + default: + elem = msg.NewField(fld).Message() + } + err := prototext.UnmarshalOptions{ + Resolver: &msgLiteralResolver{interp: interp, pkg: fld.ParentFile().Package()}, + AllowPartial: true, + }.Unmarshal([]byte(opt.GetAggregateValue()), elem.Interface()) + if err != nil { + return interp.handleErrorf(interp.nodeInfo(node), "%vfailed to parse message literal %w", mc, err) + } + if err := interp.checkFieldUsagesInMessage(targetType, elem, node); err != nil { + return err + } + value = protoreflect.ValueOfMessage(elem) + + default: + v, err := interp.scalarFieldValueFromProto(mc, descriptorpb.FieldDescriptorProto_Type(k), opt, node) + if err != nil { + return interp.handleError(err) + } + value = protoreflect.ValueOf(v) + } + + if ood := fld.ContainingOneof(); ood != nil { + existingFld := msg.WhichOneof(ood) + if existingFld != nil && existingFld.Number() != fld.Number() { + return interp.handleErrorf(interp.nodeInfo(name), "%voneof %q already has field %q set", mc, ood.Name(), fieldName(existingFld)) + } + } + + switch { + case fld.IsMap(): + mv := msg.Mutable(fld).Map() + setMapEntry(fld, msg, mv, value.Message()) + case fld.IsList(): + msg.Mutable(fld).List().Append(value) + default: + if msg.Has(fld) { + return interp.handleErrorf(interp.nodeInfo(name), "%vnon-repeated option field %s already set", mc, fieldName(fld)) + } + msg.Set(fld, value) + } + return nil +} + +// checkFieldUsagesInMessage verifies that all fields present in the given +// message can be used for the given target type. When an AST is +// present, we validate each field as it is processed. But without +// an AST, we unmarshal a message from an uninterpreted option's +// aggregate value string, and then must make sure that all fields +// set in that message are valid. This reports an error for each +// invalid field it encounters and returns a non-nil error if/when +// the handler returns a non-nil error. 
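For readers unfamiliar with the aggregate-value path described above, here is a minimal, self-contained sketch of the same text-format parsing step in isolation, using the stock descriptorpb.MessageOptions type instead of the interpreter's dynamic messages; the option text and names below are illustrative only and are not part of the patch.

package main

import (
	"fmt"
	"log"

	"google.golang.org/protobuf/encoding/prototext"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	// An uninterpreted option's aggregate value is protobuf text format;
	// unmarshaling it populates the target options message, analogous to the
	// prototext.UnmarshalOptions call in setOptionFieldFromProto above.
	var opts descriptorpb.MessageOptions
	if err := prototext.Unmarshal([]byte(`deprecated: true`), &opts); err != nil {
		log.Fatal(err)
	}
	fmt.Println(opts.GetDeprecated()) // prints: true
}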
+func (interp *interpreter) checkFieldUsagesInMessage( + targetType descriptorpb.FieldOptions_OptionTargetType, + msg protoreflect.Message, + node ast.Node, +) error { + var err error + msg.Range(func(fld protoreflect.FieldDescriptor, val protoreflect.Value) bool { + err = interp.checkFieldUsage(targetType, fld, node) + if err != nil { + return false + } + switch { + case fld.IsList() && fld.Message() != nil: + listVal := val.List() + for i, length := 0, listVal.Len(); i < length; i++ { + err = interp.checkFieldUsagesInMessage(targetType, listVal.Get(i).Message(), node) + if err != nil { + return false + } + } + case fld.IsMap() && fld.MapValue().Message() != nil: + mapVal := val.Map() + mapVal.Range(func(_ protoreflect.MapKey, val protoreflect.Value) bool { + err = interp.checkFieldUsagesInMessage(targetType, val.Message(), node) + return err == nil + }) + case !fld.IsMap() && fld.Message() != nil: + err = interp.checkFieldUsagesInMessage(targetType, val.Message(), node) + } + return err == nil + }) + return err +} + +func (interp *interpreter) enableLenience(enable bool) { + if !interp.lenient { + return // nothing to do + } + if enable { + // reset the flag that tracks if an error has been reported + interp.lenientErrReported = false + } + interp.lenienceEnabled = enable +} + +func setMapEntry( + fld protoreflect.FieldDescriptor, + msg protoreflect.Message, + mapVal protoreflect.Map, + entry protoreflect.Message, +) { + keyFld, valFld := fld.MapKey(), fld.MapValue() + key := entry.Get(keyFld) + val := entry.Get(valFld) + if fld.MapValue().Kind() == protoreflect.MessageKind { + // Replace any nil/invalid values with an empty message + dm, valIsDynamic := val.Interface().(*dynamicpb.Message) + if (valIsDynamic && dm == nil) || !val.Message().IsValid() { + val = protoreflect.ValueOfMessage(dynamicpb.NewMessage(valFld.Message())) + } + _, containerIsDynamic := msg.Interface().(*dynamicpb.Message) + if valIsDynamic && !containerIsDynamic { + // This happens because we create dynamic messages to represent map entries, + // but the container of the map may expect a non-dynamic, generated type. + dest := mapVal.NewValue() + _, destIsDynamic := dest.Message().Interface().(*dynamicpb.Message) + if !destIsDynamic { + // reflection Set methods do not support cases where destination is + // generated but source is dynamic (or vice versa). But proto.Merge + // *DOES* support that, as long as dest and source use the same + // descriptor. 
+ proto.Merge(dest.Message().Interface(), val.Message().Interface()) + val = dest + } + } + } + // TODO: error if key is already present + mapVal.Set(key.MapKey(), val) +} + +type msgLiteralResolver struct { + interp *interpreter + pkg protoreflect.FullName +} + +func (r *msgLiteralResolver) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) { + if r.interp.resolver == nil { + return nil, protoregistry.NotFound + } + return r.interp.resolver.FindMessageByName(message) +} + +func (r *msgLiteralResolver) FindMessageByURL(url string) (protoreflect.MessageType, error) { + // In a message literal, we don't allow arbitrary URL prefixes + pos := strings.LastIndexByte(url, '/') + var urlPrefix string + if pos > 0 { + urlPrefix = url[:pos] + } + if urlPrefix != "type.googleapis.com" && urlPrefix != "type.googleprod.com" { + return nil, fmt.Errorf("could not resolve type reference %s", url) + } + return r.FindMessageByName(protoreflect.FullName(url[pos+1:])) +} + +func (r *msgLiteralResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { + if r.interp.resolver == nil { + return nil, protoregistry.NotFound + } + // In a message literal, extension name may be partially qualified, relative to package. + // So we have to search through package scopes. + pkg := r.pkg + for { + // TODO: This does not *fully* implement the insane logic of protoc with regards + // to resolving relative references. + // https://protobuf.com/docs/language-spec#reference-resolution + name := pkg.Append(protoreflect.Name(field)) + ext, err := r.interp.resolver.FindExtensionByName(name) + if err == nil { + return ext, nil + } + if pkg == "" { + // no more namespaces to check + return nil, err + } + pkg = pkg.Parent() + } +} + +func (r *msgLiteralResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { + if r.interp.resolver == nil { + return nil, protoregistry.NotFound + } + return r.interp.resolver.FindExtensionByNumber(message, field) +} + +func fieldName(fld protoreflect.FieldDescriptor) string { + if fld.IsExtension() { + return fmt.Sprintf("(%s)", fld.FullName()) + } + return string(fld.Name()) +} + +func valueKind(val interface{}) string { + switch val := val.(type) { + case ast.Identifier: + return "identifier" + case bool: + return "bool" + case int64: + if val < 0 { + return "negative integer" + } + return "integer" + case uint64: + return "integer" + case float64: + return "double" + case string, []byte: + return "string" + case []*ast.MessageFieldNode: + return "message" + case []ast.ValueNode: + return "array" + default: + return fmt.Sprintf("%T", val) + } +} + +func optionValueKind(opt *descriptorpb.UninterpretedOption) string { + switch { + case opt.IdentifierValue != nil: + return "identifier" + case opt.PositiveIntValue != nil: + return "integer" + case opt.NegativeIntValue != nil: + return "negative integer" + case opt.DoubleValue != nil: + return "double" + case opt.StringValue != nil: + return "string" + case opt.AggregateValue != nil: + return "message" + default: + // should not be possible + return "" + } +} + +// fieldValue computes a compile-time value (constant or list or message literal) for the given +// AST node val. The value in val must be assignable to the field fld. +// +// If the returned value is not valid, then an error occurred during processing. 
+// The returned err may be nil, however, as any errors will already have been +// handled (so the resulting error could be nil if the handler returned nil). +func (interp *interpreter) fieldValue( + targetType descriptorpb.FieldOptions_OptionTargetType, + mc *internal.MessageContext, + msg protoreflect.Message, + fld protoreflect.FieldDescriptor, + val ast.ValueNode, + insideMsgLiteral bool, + pathPrefix []int32, +) (protoreflect.Value, sourceinfo.OptionSourceInfo, error) { + k := fld.Kind() + switch k { + case protoreflect.EnumKind: + num, _, err := interp.enumFieldValue(mc, fld.Enum(), val, insideMsgLiteral) + if err != nil { + return protoreflect.Value{}, sourceinfo.OptionSourceInfo{}, interp.handleError(err) + } + return protoreflect.ValueOfEnum(num), newSrcInfo(pathPrefix, nil), nil + + case protoreflect.MessageKind, protoreflect.GroupKind: + v := val.Value() + if aggs, ok := v.([]*ast.MessageFieldNode); ok { + var childMsg protoreflect.Message + switch { + case fld.IsList(): + // List of messages + val := msg.NewField(fld) + childMsg = val.List().NewElement().Message() + case fld.IsMap(): + // No generated type for map entries, so we use a dynamic type + childMsg = dynamicpb.NewMessage(fld.Message()) + default: + // Normal message field + childMsg = msg.NewField(fld).Message() + } + return interp.messageLiteralValue(targetType, mc, aggs, childMsg, pathPrefix) + } + return protoreflect.Value{}, sourceinfo.OptionSourceInfo{}, + interp.handleErrorf(interp.nodeInfo(val), "%vexpecting message, got %s", mc, valueKind(v)) + + default: + v, err := interp.scalarFieldValue(mc, descriptorpb.FieldDescriptorProto_Type(k), val, insideMsgLiteral) + if err != nil { + return protoreflect.Value{}, sourceinfo.OptionSourceInfo{}, interp.handleError(err) + } + return protoreflect.ValueOf(v), newSrcInfo(pathPrefix, nil), nil + } +} + +// enumFieldValue resolves the given AST node val as an enum value descriptor. If the given +// value is not a valid identifier (or number if allowed), an error is returned instead. 
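The name and number lookups that enumFieldValue performs are plain protoreflect calls; as a rough sketch of that pattern against a well-known generated enum (descriptorpb.FieldDescriptorProto_Type, chosen here purely for illustration):

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	ed := descriptorpb.FieldDescriptorProto_TYPE_BOOL.Descriptor()

	// Lookup by name, as done for identifier values.
	if ev := ed.Values().ByName(protoreflect.Name("TYPE_STRING")); ev != nil {
		fmt.Println("TYPE_STRING =", ev.Number()) // 9
	}

	// Lookup by number, as done when numeric values are allowed.
	if ev := ed.Values().ByNumber(protoreflect.EnumNumber(12)); ev != nil {
		fmt.Println("12 =", ev.Name()) // TYPE_BYTES
	}
}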
+func (interp *interpreter) enumFieldValue( + mc *internal.MessageContext, + ed protoreflect.EnumDescriptor, + val ast.ValueNode, + allowNumber bool, +) (protoreflect.EnumNumber, protoreflect.Name, error) { + v := val.Value() + var num protoreflect.EnumNumber + switch v := v.(type) { + case ast.Identifier: + name := protoreflect.Name(v) + ev := ed.Values().ByName(name) + if ev == nil { + return 0, "", reporter.Errorf(interp.nodeInfo(val), "%venum %s has no value named %s", mc, ed.FullName(), v) + } + return ev.Number(), name, nil + case int64: + if !allowNumber { + return 0, "", reporter.Errorf(interp.nodeInfo(val), "%vexpecting enum name, got %s", mc, valueKind(v)) + } + if v > math.MaxInt32 || v < math.MinInt32 { + return 0, "", reporter.Errorf(interp.nodeInfo(val), "%vvalue %d is out of range for an enum", mc, v) + } + num = protoreflect.EnumNumber(v) + case uint64: + if !allowNumber { + return 0, "", reporter.Errorf(interp.nodeInfo(val), "%vexpecting enum name, got %s", mc, valueKind(v)) + } + if v > math.MaxInt32 { + return 0, "", reporter.Errorf(interp.nodeInfo(val), "%vvalue %d is out of range for an enum", mc, v) + } + num = protoreflect.EnumNumber(v) + default: + return 0, "", reporter.Errorf(interp.nodeInfo(val), "%vexpecting enum, got %s", mc, valueKind(v)) + } + ev := ed.Values().ByNumber(num) + if ev != nil { + return num, ev.Name(), nil + } + if ed.IsClosed() { + return num, "", reporter.Errorf(interp.nodeInfo(val), "%vclosed enum %s has no value with number %d", mc, ed.FullName(), num) + } + // unknown value, but enum is open, so we allow it and return blank name + return num, "", nil +} + +// enumFieldValueFromProto resolves the given uninterpreted option value as an enum value descriptor. +// If the given value is not a valid identifier, an error is returned instead. +func (interp *interpreter) enumFieldValueFromProto( + mc *internal.MessageContext, + ed protoreflect.EnumDescriptor, + opt *descriptorpb.UninterpretedOption, + node ast.Node, +) (protoreflect.EnumNumber, protoreflect.Name, error) { + // We don't have to worry about allowing numbers because numbers are never allowed + // in uninterpreted values; they are only allowed inside aggregate values (i.e. + // message literals). + switch { + case opt.IdentifierValue != nil: + name := protoreflect.Name(opt.GetIdentifierValue()) + ev := ed.Values().ByName(name) + if ev == nil { + return 0, "", reporter.Errorf(interp.nodeInfo(node), "%venum %s has no value named %s", mc, ed.FullName(), name) + } + return ev.Number(), name, nil + default: + return 0, "", reporter.Errorf(interp.nodeInfo(node), "%vexpecting enum, got %s", mc, optionValueKind(opt)) + } +} + +// scalarFieldValue resolves the given AST node val as a value whose type is assignable to a +// field with the given fldType. 
+func (interp *interpreter) scalarFieldValue( + mc *internal.MessageContext, + fldType descriptorpb.FieldDescriptorProto_Type, + val ast.ValueNode, + insideMsgLiteral bool, +) (interface{}, error) { + v := val.Value() + switch fldType { + case descriptorpb.FieldDescriptorProto_TYPE_BOOL: + if b, ok := v.(bool); ok { + return b, nil + } + if id, ok := v.(ast.Identifier); ok { + if insideMsgLiteral { + // inside a message literal, values use the protobuf text format, + // which is lenient in that it accepts "t" and "f" or "True" and "False" + switch id { + case "t", "true", "True": + return true, nil + case "f", "false", "False": + return false, nil + } + } else { + // options with simple scalar values (no message literal) are stricter + switch id { + case "true": + return true, nil + case "false": + return false, nil + } + } + } + return nil, reporter.Errorf(interp.nodeInfo(val), "%vexpecting bool, got %s", mc, valueKind(v)) + case descriptorpb.FieldDescriptorProto_TYPE_BYTES: + if str, ok := v.(string); ok { + return []byte(str), nil + } + return nil, reporter.Errorf(interp.nodeInfo(val), "%vexpecting bytes, got %s", mc, valueKind(v)) + case descriptorpb.FieldDescriptorProto_TYPE_STRING: + if str, ok := v.(string); ok { + return str, nil + } + return nil, reporter.Errorf(interp.nodeInfo(val), "%vexpecting string, got %s", mc, valueKind(v)) + case descriptorpb.FieldDescriptorProto_TYPE_INT32, descriptorpb.FieldDescriptorProto_TYPE_SINT32, descriptorpb.FieldDescriptorProto_TYPE_SFIXED32: + if i, ok := v.(int64); ok { + if i > math.MaxInt32 || i < math.MinInt32 { + return nil, reporter.Errorf(interp.nodeInfo(val), "%vvalue %d is out of range for int32", mc, i) + } + return int32(i), nil + } + if ui, ok := v.(uint64); ok { + if ui > math.MaxInt32 { + return nil, reporter.Errorf(interp.nodeInfo(val), "%vvalue %d is out of range for int32", mc, ui) + } + return int32(ui), nil + } + return nil, reporter.Errorf(interp.nodeInfo(val), "%vexpecting int32, got %s", mc, valueKind(v)) + case descriptorpb.FieldDescriptorProto_TYPE_UINT32, descriptorpb.FieldDescriptorProto_TYPE_FIXED32: + if i, ok := v.(int64); ok { + if i > math.MaxUint32 || i < 0 { + return nil, reporter.Errorf(interp.nodeInfo(val), "%vvalue %d is out of range for uint32", mc, i) + } + return uint32(i), nil + } + if ui, ok := v.(uint64); ok { + if ui > math.MaxUint32 { + return nil, reporter.Errorf(interp.nodeInfo(val), "%vvalue %d is out of range for uint32", mc, ui) + } + return uint32(ui), nil + } + return nil, reporter.Errorf(interp.nodeInfo(val), "%vexpecting uint32, got %s", mc, valueKind(v)) + case descriptorpb.FieldDescriptorProto_TYPE_INT64, descriptorpb.FieldDescriptorProto_TYPE_SINT64, descriptorpb.FieldDescriptorProto_TYPE_SFIXED64: + if i, ok := v.(int64); ok { + return i, nil + } + if ui, ok := v.(uint64); ok { + if ui > math.MaxInt64 { + return nil, reporter.Errorf(interp.nodeInfo(val), "%vvalue %d is out of range for int64", mc, ui) + } + return int64(ui), nil + } + return nil, reporter.Errorf(interp.nodeInfo(val), "%vexpecting int64, got %s", mc, valueKind(v)) + case descriptorpb.FieldDescriptorProto_TYPE_UINT64, descriptorpb.FieldDescriptorProto_TYPE_FIXED64: + if i, ok := v.(int64); ok { + if i < 0 { + return nil, reporter.Errorf(interp.nodeInfo(val), "%vvalue %d is out of range for uint64", mc, i) + } + return uint64(i), nil + } + if ui, ok := v.(uint64); ok { + return ui, nil + } + return nil, reporter.Errorf(interp.nodeInfo(val), "%vexpecting uint64, got %s", mc, valueKind(v)) + case 
descriptorpb.FieldDescriptorProto_TYPE_DOUBLE: + if id, ok := v.(ast.Identifier); ok { + switch id { + case "inf": + return math.Inf(1), nil + case "nan": + return math.NaN(), nil + } + } + if d, ok := v.(float64); ok { + return d, nil + } + if i, ok := v.(int64); ok { + return float64(i), nil + } + if u, ok := v.(uint64); ok { + return float64(u), nil + } + return nil, reporter.Errorf(interp.nodeInfo(val), "%vexpecting double, got %s", mc, valueKind(v)) + case descriptorpb.FieldDescriptorProto_TYPE_FLOAT: + if id, ok := v.(ast.Identifier); ok { + switch id { + case "inf": + return float32(math.Inf(1)), nil + case "nan": + return float32(math.NaN()), nil + } + } + if d, ok := v.(float64); ok { + return float32(d), nil + } + if i, ok := v.(int64); ok { + return float32(i), nil + } + if u, ok := v.(uint64); ok { + return float32(u), nil + } + return nil, reporter.Errorf(interp.nodeInfo(val), "%vexpecting float, got %s", mc, valueKind(v)) + default: + return nil, reporter.Errorf(interp.nodeInfo(val), "%vunrecognized field type: %s", mc, fldType) + } +} + +// scalarFieldValue resolves the given uninterpreted option value as a value whose type is +// assignable to a field with the given fldType. +func (interp *interpreter) scalarFieldValueFromProto( + mc *internal.MessageContext, + fldType descriptorpb.FieldDescriptorProto_Type, + opt *descriptorpb.UninterpretedOption, + node ast.Node, +) (interface{}, error) { + switch fldType { + case descriptorpb.FieldDescriptorProto_TYPE_BOOL: + if opt.IdentifierValue != nil { + switch opt.GetIdentifierValue() { + case "true": + return true, nil + case "false": + return false, nil + } + } + return nil, reporter.Errorf(interp.nodeInfo(node), "%vexpecting bool, got %s", mc, optionValueKind(opt)) + case descriptorpb.FieldDescriptorProto_TYPE_BYTES: + if opt.StringValue != nil { + return opt.GetStringValue(), nil + } + return nil, reporter.Errorf(interp.nodeInfo(node), "%vexpecting bytes, got %s", mc, optionValueKind(opt)) + case descriptorpb.FieldDescriptorProto_TYPE_STRING: + if opt.StringValue != nil { + return string(opt.GetStringValue()), nil + } + return nil, reporter.Errorf(interp.nodeInfo(node), "%vexpecting string, got %s", mc, optionValueKind(opt)) + case descriptorpb.FieldDescriptorProto_TYPE_INT32, descriptorpb.FieldDescriptorProto_TYPE_SINT32, descriptorpb.FieldDescriptorProto_TYPE_SFIXED32: + if opt.NegativeIntValue != nil { + i := opt.GetNegativeIntValue() + if i > math.MaxInt32 || i < math.MinInt32 { + return nil, reporter.Errorf(interp.nodeInfo(node), "%vvalue %d is out of range for int32", mc, i) + } + return int32(i), nil + } + if opt.PositiveIntValue != nil { + ui := opt.GetPositiveIntValue() + if ui > math.MaxInt32 { + return nil, reporter.Errorf(interp.nodeInfo(node), "%vvalue %d is out of range for int32", mc, ui) + } + return int32(ui), nil + } + return nil, reporter.Errorf(interp.nodeInfo(node), "%vexpecting int32, got %s", mc, optionValueKind(opt)) + case descriptorpb.FieldDescriptorProto_TYPE_UINT32, descriptorpb.FieldDescriptorProto_TYPE_FIXED32: + if opt.NegativeIntValue != nil { + i := opt.GetNegativeIntValue() + if i > math.MaxUint32 || i < 0 { + return nil, reporter.Errorf(interp.nodeInfo(node), "%vvalue %d is out of range for uint32", mc, i) + } + return uint32(i), nil + } + if opt.PositiveIntValue != nil { + ui := opt.GetPositiveIntValue() + if ui > math.MaxUint32 { + return nil, reporter.Errorf(interp.nodeInfo(node), "%vvalue %d is out of range for uint32", mc, ui) + } + return uint32(ui), nil + } + return nil, 
reporter.Errorf(interp.nodeInfo(node), "%vexpecting uint32, got %s", mc, optionValueKind(opt)) + case descriptorpb.FieldDescriptorProto_TYPE_INT64, descriptorpb.FieldDescriptorProto_TYPE_SINT64, descriptorpb.FieldDescriptorProto_TYPE_SFIXED64: + if opt.NegativeIntValue != nil { + return opt.GetNegativeIntValue(), nil + } + if opt.PositiveIntValue != nil { + ui := opt.GetPositiveIntValue() + if ui > math.MaxInt64 { + return nil, reporter.Errorf(interp.nodeInfo(node), "%vvalue %d is out of range for int64", mc, ui) + } + return int64(ui), nil + } + return nil, reporter.Errorf(interp.nodeInfo(node), "%vexpecting int64, got %s", mc, optionValueKind(opt)) + case descriptorpb.FieldDescriptorProto_TYPE_UINT64, descriptorpb.FieldDescriptorProto_TYPE_FIXED64: + if opt.NegativeIntValue != nil { + i := opt.GetNegativeIntValue() + if i < 0 { + return nil, reporter.Errorf(interp.nodeInfo(node), "%vvalue %d is out of range for uint64", mc, i) + } + // should not be possible since i should always be negative... + return uint64(i), nil + } + if opt.PositiveIntValue != nil { + return opt.GetPositiveIntValue(), nil + } + return nil, reporter.Errorf(interp.nodeInfo(node), "%vexpecting uint64, got %s", mc, optionValueKind(opt)) + case descriptorpb.FieldDescriptorProto_TYPE_DOUBLE: + if opt.IdentifierValue != nil { + switch opt.GetIdentifierValue() { + case "inf": + return math.Inf(1), nil + case "nan": + return math.NaN(), nil + } + } + if opt.DoubleValue != nil { + return opt.GetDoubleValue(), nil + } + if opt.NegativeIntValue != nil { + return float64(opt.GetNegativeIntValue()), nil + } + if opt.PositiveIntValue != nil { + return float64(opt.GetPositiveIntValue()), nil + } + return nil, reporter.Errorf(interp.nodeInfo(node), "%vexpecting double, got %s", mc, optionValueKind(opt)) + case descriptorpb.FieldDescriptorProto_TYPE_FLOAT: + if opt.IdentifierValue != nil { + switch opt.GetIdentifierValue() { + case "inf": + return float32(math.Inf(1)), nil + case "nan": + return float32(math.NaN()), nil + } + } + if opt.DoubleValue != nil { + return float32(opt.GetDoubleValue()), nil + } + if opt.NegativeIntValue != nil { + return float32(opt.GetNegativeIntValue()), nil + } + if opt.PositiveIntValue != nil { + return float32(opt.GetPositiveIntValue()), nil + } + return nil, reporter.Errorf(interp.nodeInfo(node), "%vexpecting float, got %s", mc, optionValueKind(opt)) + default: + return nil, reporter.Errorf(interp.nodeInfo(node), "%vunrecognized field type: %s", mc, fldType) + } +} + +func descriptorType(m proto.Message) string { + switch m := m.(type) { + case *descriptorpb.DescriptorProto: + return "message" + case *descriptorpb.DescriptorProto_ExtensionRange: + return "extension range" + case *descriptorpb.FieldDescriptorProto: + if m.GetExtendee() == "" { + return "field" + } + return "extension" + case *descriptorpb.EnumDescriptorProto: + return "enum" + case *descriptorpb.EnumValueDescriptorProto: + return "enum value" + case *descriptorpb.ServiceDescriptorProto: + return "service" + case *descriptorpb.MethodDescriptorProto: + return "method" + case *descriptorpb.FileDescriptorProto: + return "file" + default: + // shouldn't be possible + return fmt.Sprintf("%T", m) + } +} + +// messageLiteralValue processes a message literal value. +// +// If the returned value is not valid, then an error occurred during processing. +// The returned err may be nil, however, as any errors will already have been +// handled (so the resulting error could be nil if the handler returned nil). 
+func (interp *interpreter) messageLiteralValue( + targetType descriptorpb.FieldOptions_OptionTargetType, + mc *internal.MessageContext, + fieldNodes []*ast.MessageFieldNode, + msg protoreflect.Message, + pathPrefix []int32, +) (protoreflect.Value, sourceinfo.OptionSourceInfo, error) { + fmd := msg.Descriptor() + origPath := mc.OptAggPath + defer func() { + mc.OptAggPath = origPath + }() + flds := make(map[*ast.MessageFieldNode]*sourceinfo.OptionSourceInfo, len(fieldNodes)) + var hadError bool + for _, fieldNode := range fieldNodes { + if origPath == "" { + mc.OptAggPath = fieldNode.Name.Value() + } else { + mc.OptAggPath = origPath + "." + fieldNode.Name.Value() + } + if fieldNode.Name.IsAnyTypeReference() { + if len(fieldNodes) > 1 { + err := interp.handleErrorf(interp.nodeInfo(fieldNode.Name.URLPrefix), "%vany type references cannot be repeated or mixed with other fields", mc) + if err != nil { + return protoreflect.Value{}, sourceinfo.OptionSourceInfo{}, err + } + hadError = true + } + + if fmd.FullName() != "google.protobuf.Any" { + err := interp.handleErrorf(interp.nodeInfo(fieldNode.Name.URLPrefix), "%vtype references are only allowed for google.protobuf.Any, but this type is %s", mc, fmd.FullName()) + if err != nil { + return protoreflect.Value{}, sourceinfo.OptionSourceInfo{}, err + } + hadError = true + continue + } + typeURLDescriptor := fmd.Fields().ByNumber(internal.AnyTypeURLTag) + var err error + switch { + case typeURLDescriptor == nil: + err = fmt.Errorf("message schema is missing type_url field (number %d)", internal.AnyTypeURLTag) + case typeURLDescriptor.IsList(): + err = fmt.Errorf("message schema has type_url field (number %d) that is a list but should be singular", internal.AnyTypeURLTag) + case typeURLDescriptor.Kind() != protoreflect.StringKind: + err = fmt.Errorf("message schema has type_url field (number %d) that is %s but should be string", internal.AnyTypeURLTag, typeURLDescriptor.Kind()) + } + if err != nil { + err := interp.handleErrorf(interp.nodeInfo(fieldNode.Name), "%v%w", mc, err) + if err != nil { + return protoreflect.Value{}, sourceinfo.OptionSourceInfo{}, err + } + hadError = true + continue + } + valueDescriptor := fmd.Fields().ByNumber(internal.AnyValueTag) + switch { + case valueDescriptor == nil: + err = fmt.Errorf("message schema is missing value field (number %d)", internal.AnyValueTag) + case valueDescriptor.IsList(): + err = fmt.Errorf("message schema has value field (number %d) that is a list but should be singular", internal.AnyValueTag) + case valueDescriptor.Kind() != protoreflect.BytesKind: + err = fmt.Errorf("message schema has value field (number %d) that is %s but should be bytes", internal.AnyValueTag, valueDescriptor.Kind()) + } + if err != nil { + err := interp.handleErrorf(interp.nodeInfo(fieldNode.Name), "%v%w", mc, err) + if err != nil { + return protoreflect.Value{}, sourceinfo.OptionSourceInfo{}, err + } + hadError = true + continue + } + + urlPrefix := fieldNode.Name.URLPrefix.AsIdentifier() + msgName := fieldNode.Name.Name.AsIdentifier() + fullURL := fmt.Sprintf("%s/%s", urlPrefix, msgName) + // TODO: Support other URLs dynamically -- the caller of protocompile + // should be able to provide a custom resolver that can resolve type + // URLs into message descriptors. The default resolver would be + // implemented as below, only accepting "type.googleapis.com" and + // "type.googleprod.com" as hosts/prefixes and using the compiled + // file's transitive closure to find the named message, since that + // is what protoc does. 
+ if urlPrefix != "type.googleapis.com" && urlPrefix != "type.googleprod.com" { + err := interp.handleErrorf(interp.nodeInfo(fieldNode.Name.URLPrefix), "%vcould not resolve type reference %s", mc, fullURL) + if err != nil { + return protoreflect.Value{}, sourceinfo.OptionSourceInfo{}, err + } + hadError = true + continue + } + anyFields, ok := fieldNode.Val.Value().([]*ast.MessageFieldNode) + if !ok { + err := interp.handleErrorf(interp.nodeInfo(fieldNode.Val), "%vtype references for google.protobuf.Any must have message literal value", mc) + if err != nil { + return protoreflect.Value{}, sourceinfo.OptionSourceInfo{}, err + } + hadError = true + continue + } + anyMd := resolveDescriptor[protoreflect.MessageDescriptor](interp.resolver, string(msgName)) + if anyMd == nil { + err := interp.handleErrorf(interp.nodeInfo(fieldNode.Name.URLPrefix), "%vcould not resolve type reference %s", mc, fullURL) + if err != nil { + return protoreflect.Value{}, sourceinfo.OptionSourceInfo{}, err + } + hadError = true + continue + } + // parse the message value + msgVal, valueSrcInfo, err := interp.messageLiteralValue(targetType, mc, anyFields, dynamicpb.NewMessage(anyMd), append(pathPrefix, internal.AnyValueTag)) + if err != nil { + return protoreflect.Value{}, sourceinfo.OptionSourceInfo{}, err + } else if !msgVal.IsValid() { + hadError = true + continue + } + + b, err := (proto.MarshalOptions{Deterministic: true}).Marshal(msgVal.Message().Interface()) + if err != nil { + err := interp.handleErrorf(interp.nodeInfo(fieldNode.Val), "%vfailed to serialize message value: %w", mc, err) + if err != nil { + return protoreflect.Value{}, sourceinfo.OptionSourceInfo{}, err + } + hadError = true + continue + } + + // Success! + if !hadError { + msg.Set(typeURLDescriptor, protoreflect.ValueOfString(fullURL)) + msg.Set(valueDescriptor, protoreflect.ValueOfBytes(b)) + flds[fieldNode] = &valueSrcInfo + } + continue + } + + // Not expanded Any syntax; handle normal field. + var ffld protoreflect.FieldDescriptor + var err error + if fieldNode.Name.IsExtension() { + n := interp.file.ResolveMessageLiteralExtensionName(fieldNode.Name.Name) + if n == "" { + // this should not be possible! + n = string(fieldNode.Name.Name.AsIdentifier()) + } + ffld, err = interp.resolveExtensionType(n) + if errors.Is(err, protoregistry.NotFound) { + // may need to qualify with package name + // (this should not be necessary!) + pkg := mc.File.FileDescriptorProto().GetPackage() + if pkg != "" { + ffld, err = interp.resolveExtensionType(pkg + "." + n) + } + } + } else { + ffld = fmd.Fields().ByName(protoreflect.Name(fieldNode.Name.Value())) + if ffld == nil { + err = protoregistry.NotFound + // It could be a proto2 group, where the text format refers to the group type + // name, and the field name is the lower-cased form of that. + ffld = fmd.Fields().ByName(protoreflect.Name(strings.ToLower(fieldNode.Name.Value()))) + if ffld != nil { + // In editions, we support using the group type name only for fields that + // "look like" proto2 groups. + if protoreflect.Name(fieldNode.Name.Value()) == ffld.Message().Name() && // text format uses type name + ffld.Message().FullName().Parent() == ffld.FullName().Parent() && // message and field declared in same scope + ffld.Kind() == protoreflect.GroupKind /* uses delimited encoding */ { + // This one looks like a proto2 group, so it's a keeper. + err = nil + } else { + // It doesn't look like a proto2 group, so this is not a match. 
+ ffld = nil + } + } + } + } + if errors.Is(err, protoregistry.NotFound) { + err := interp.handleErrorf(interp.nodeInfo(fieldNode.Name), "%vfield %s not found", mc, string(fieldNode.Name.Name.AsIdentifier())) + if err != nil { + return protoreflect.Value{}, sourceinfo.OptionSourceInfo{}, err + } + hadError = true + continue + } else if err != nil { + err := interp.handleErrorWithPos(interp.nodeInfo(fieldNode.Name), err) + if err != nil { + return protoreflect.Value{}, sourceinfo.OptionSourceInfo{}, err + } + hadError = true + continue + } + if err := interp.checkFieldUsage(targetType, ffld, fieldNode.Name); err != nil { + return protoreflect.Value{}, sourceinfo.OptionSourceInfo{}, err + } + if fieldNode.Sep == nil && ffld.Message() == nil { + // If there is no separator, the field type should be a message. + // Otherwise, it is an error in the text format. + err := interp.handleErrorf(interp.nodeInfo(fieldNode.Val), "syntax error: unexpected value, expecting ':'") + if err != nil { + return protoreflect.Value{}, sourceinfo.OptionSourceInfo{}, err + } + hadError = true + continue + } + srcInfo, err := interp.setOptionField(targetType, mc, msg, ffld, fieldNode.Name, fieldNode.Val, true, append(pathPrefix, int32(ffld.Number()))) + if err != nil { + return protoreflect.Value{}, sourceinfo.OptionSourceInfo{}, err + } + if srcInfo != nil { + flds[fieldNode] = srcInfo + } + } + if hadError { + return protoreflect.Value{}, sourceinfo.OptionSourceInfo{}, nil + } + return protoreflect.ValueOfMessage(msg), + newSrcInfo(pathPrefix, &sourceinfo.MessageLiteralSourceInfo{Fields: flds}), + nil +} + +func newSrcInfo(path []int32, children sourceinfo.OptionChildrenSourceInfo) sourceinfo.OptionSourceInfo { + return sourceinfo.OptionSourceInfo{ + Path: internal.ClonePath(path), + Children: children, + } +} diff --git a/vendor/github.com/bufbuild/protocompile/options/source_retention_options.go b/vendor/github.com/bufbuild/protocompile/options/source_retention_options.go new file mode 100644 index 00000000..05c3e292 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/options/source_retention_options.go @@ -0,0 +1,539 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package options + +import ( + "fmt" + + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/types/descriptorpb" + + "github.com/bufbuild/protocompile/internal" +) + +// StripSourceRetentionOptionsFromFile returns a file descriptor proto that omits any +// options in file that are defined to be retained only in source. If file has no +// such options, then it is returned as is. If it does have such options, a copy is +// made; the given file will not be mutated. +// +// Even when a copy is returned, it is not a deep copy: it may share data with the +// original file. So callers should not mutate the returned file unless mutating the +// input file is also safe. 
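+//
+// Illustrative usage (a sketch; fd is assumed to be a
+// *descriptorpb.FileDescriptorProto produced by an earlier compile or parse step):
+//
+//	stripped, err := options.StripSourceRetentionOptionsFromFile(fd)
+//	if err != nil {
+//		return err
+//	}
+//	if stripped == fd {
+//		// fd contained no source-retention options; no copy was made
+//	}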
+func StripSourceRetentionOptionsFromFile(file *descriptorpb.FileDescriptorProto) (*descriptorpb.FileDescriptorProto, error) { + var path sourcePath + var removedPaths *sourcePathTrie + if file.SourceCodeInfo != nil && len(file.SourceCodeInfo.Location) > 0 { + path = make(sourcePath, 0, 16) + removedPaths = &sourcePathTrie{} + } + var dirty bool + optionsPath := path.push(internal.FileOptionsTag) + newOpts, err := stripSourceRetentionOptions(file.GetOptions(), optionsPath, removedPaths) + if err != nil { + return nil, err + } + if newOpts != file.GetOptions() { + dirty = true + } + msgsPath := path.push(internal.FileMessagesTag) + newMsgs, changed, err := stripOptionsFromAll(file.GetMessageType(), stripSourceRetentionOptionsFromMessage, msgsPath, removedPaths) + if err != nil { + return nil, err + } + if changed { + dirty = true + } + enumsPath := path.push(internal.FileEnumsTag) + newEnums, changed, err := stripOptionsFromAll(file.GetEnumType(), stripSourceRetentionOptionsFromEnum, enumsPath, removedPaths) + if err != nil { + return nil, err + } + if changed { + dirty = true + } + extsPath := path.push(internal.FileExtensionsTag) + newExts, changed, err := stripOptionsFromAll(file.GetExtension(), stripSourceRetentionOptionsFromField, extsPath, removedPaths) + if err != nil { + return nil, err + } + if changed { + dirty = true + } + svcsPath := path.push(internal.FileServicesTag) + newSvcs, changed, err := stripOptionsFromAll(file.GetService(), stripSourceRetentionOptionsFromService, svcsPath, removedPaths) + if err != nil { + return nil, err + } + if changed { + dirty = true + } + + if !dirty { + return file, nil + } + + newFile, err := shallowCopy(file) + if err != nil { + return nil, err + } + newFile.Options = newOpts + newFile.MessageType = newMsgs + newFile.EnumType = newEnums + newFile.Extension = newExts + newFile.Service = newSvcs + newFile.SourceCodeInfo = stripSourcePathsForSourceRetentionOptions(newFile.SourceCodeInfo, removedPaths) + return newFile, nil +} + +type sourcePath protoreflect.SourcePath + +func (p sourcePath) push(element int32) sourcePath { + if p == nil { + return nil + } + return append(p, element) +} + +type sourcePathTrie struct { + removed bool + children map[int32]*sourcePathTrie +} + +func (t *sourcePathTrie) addPath(p sourcePath) { + if t == nil { + return + } + if len(p) == 0 { + t.removed = true + return + } + child := t.children[p[0]] + if child == nil { + if t.children == nil { + t.children = map[int32]*sourcePathTrie{} + } + child = &sourcePathTrie{} + t.children[p[0]] = child + } + child.addPath(p[1:]) +} + +func (t *sourcePathTrie) isRemoved(p []int32) bool { + if t == nil { + return false + } + if t.removed { + return true + } + if len(p) == 0 { + return false + } + child := t.children[p[0]] + if child == nil { + return false + } + return child.isRemoved(p[1:]) +} + +func stripSourceRetentionOptions[M proto.Message]( + options M, + path sourcePath, + removedPaths *sourcePathTrie, +) (M, error) { + optionsRef := options.ProtoReflect() + // See if there are any options to strip. 
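+	// For context, an option field is subject to stripping when its extension
+	// declaration carries source retention, e.g. (illustrative; the extension
+	// name and number are hypothetical):
+	//
+	//	extend google.protobuf.FileOptions {
+	//		optional string build_note = 50000 [retention = RETENTION_SOURCE];
+	//	}
+	//
+	// Such fields are detected via FieldOptions.retention below and removed so
+	// their values do not reach generated descriptors.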
+ var hasFieldToStrip bool + var numFieldsToKeep int + var err error + optionsRef.Range(func(field protoreflect.FieldDescriptor, _ protoreflect.Value) bool { + fieldOpts, ok := field.Options().(*descriptorpb.FieldOptions) + if !ok { + err = fmt.Errorf("field options is unexpected type: got %T, want %T", field.Options(), fieldOpts) + return false + } + if fieldOpts.GetRetention() == descriptorpb.FieldOptions_RETENTION_SOURCE { + hasFieldToStrip = true + } else { + numFieldsToKeep++ + } + return true + }) + var zero M + if err != nil { + return zero, err + } + if !hasFieldToStrip { + return options, nil + } + + if numFieldsToKeep == 0 { + // Stripping the message would remove *all* options. In that case, + // we'll clear out the options by returning the zero value (i.e. nil). + removedPaths.addPath(path) // clear out all source locations, too + return zero, nil + } + + // There is at least one option to remove. So we need to make a copy that does not have those options. + newOptions := optionsRef.New() + ret, ok := newOptions.Interface().(M) + if !ok { + return zero, fmt.Errorf("creating new message of same type resulted in unexpected type; got %T, want %T", newOptions.Interface(), zero) + } + optionsRef.Range(func(field protoreflect.FieldDescriptor, val protoreflect.Value) bool { + fieldOpts, ok := field.Options().(*descriptorpb.FieldOptions) + if !ok { + err = fmt.Errorf("field options is unexpected type: got %T, want %T", field.Options(), fieldOpts) + return false + } + if fieldOpts.GetRetention() != descriptorpb.FieldOptions_RETENTION_SOURCE { + newOptions.Set(field, val) + } else { + removedPaths.addPath(path.push(int32(field.Number()))) + } + return true + }) + if err != nil { + return zero, err + } + return ret, nil +} + +func stripSourceRetentionOptionsFromMessage( + msg *descriptorpb.DescriptorProto, + path sourcePath, + removedPaths *sourcePathTrie, +) (*descriptorpb.DescriptorProto, error) { + var dirty bool + optionsPath := path.push(internal.MessageOptionsTag) + newOpts, err := stripSourceRetentionOptions(msg.Options, optionsPath, removedPaths) + if err != nil { + return nil, err + } + if newOpts != msg.Options { + dirty = true + } + fieldsPath := path.push(internal.MessageFieldsTag) + newFields, changed, err := stripOptionsFromAll(msg.Field, stripSourceRetentionOptionsFromField, fieldsPath, removedPaths) + if err != nil { + return nil, err + } + if changed { + dirty = true + } + oneofsPath := path.push(internal.MessageOneofsTag) + newOneofs, changed, err := stripOptionsFromAll(msg.OneofDecl, stripSourceRetentionOptionsFromOneof, oneofsPath, removedPaths) + if err != nil { + return nil, err + } + if changed { + dirty = true + } + extRangesPath := path.push(internal.MessageExtensionRangesTag) + newExtRanges, changed, err := stripOptionsFromAll(msg.ExtensionRange, stripSourceRetentionOptionsFromExtensionRange, extRangesPath, removedPaths) + if err != nil { + return nil, err + } + if changed { + dirty = true + } + msgsPath := path.push(internal.MessageNestedMessagesTag) + newMsgs, changed, err := stripOptionsFromAll(msg.NestedType, stripSourceRetentionOptionsFromMessage, msgsPath, removedPaths) + if err != nil { + return nil, err + } + if changed { + dirty = true + } + enumsPath := path.push(internal.MessageEnumsTag) + newEnums, changed, err := stripOptionsFromAll(msg.EnumType, stripSourceRetentionOptionsFromEnum, enumsPath, removedPaths) + if err != nil { + return nil, err + } + if changed { + dirty = true + } + extsPath := path.push(internal.MessageExtensionsTag) + newExts, 
changed, err := stripOptionsFromAll(msg.Extension, stripSourceRetentionOptionsFromField, extsPath, removedPaths) + if err != nil { + return nil, err + } + if changed { + dirty = true + } + + if !dirty { + return msg, nil + } + + newMsg, err := shallowCopy(msg) + if err != nil { + return nil, err + } + newMsg.Options = newOpts + newMsg.Field = newFields + newMsg.OneofDecl = newOneofs + newMsg.ExtensionRange = newExtRanges + newMsg.NestedType = newMsgs + newMsg.EnumType = newEnums + newMsg.Extension = newExts + return newMsg, nil +} + +func stripSourceRetentionOptionsFromField( + field *descriptorpb.FieldDescriptorProto, + path sourcePath, + removedPaths *sourcePathTrie, +) (*descriptorpb.FieldDescriptorProto, error) { + optionsPath := path.push(internal.FieldOptionsTag) + newOpts, err := stripSourceRetentionOptions(field.Options, optionsPath, removedPaths) + if err != nil { + return nil, err + } + if newOpts == field.Options { + return field, nil + } + newField, err := shallowCopy(field) + if err != nil { + return nil, err + } + newField.Options = newOpts + return newField, nil +} + +func stripSourceRetentionOptionsFromOneof( + oneof *descriptorpb.OneofDescriptorProto, + path sourcePath, + removedPaths *sourcePathTrie, +) (*descriptorpb.OneofDescriptorProto, error) { + optionsPath := path.push(internal.OneofOptionsTag) + newOpts, err := stripSourceRetentionOptions(oneof.Options, optionsPath, removedPaths) + if err != nil { + return nil, err + } + if newOpts == oneof.Options { + return oneof, nil + } + newOneof, err := shallowCopy(oneof) + if err != nil { + return nil, err + } + newOneof.Options = newOpts + return newOneof, nil +} + +func stripSourceRetentionOptionsFromExtensionRange( + extRange *descriptorpb.DescriptorProto_ExtensionRange, + path sourcePath, + removedPaths *sourcePathTrie, +) (*descriptorpb.DescriptorProto_ExtensionRange, error) { + optionsPath := path.push(internal.ExtensionRangeOptionsTag) + newOpts, err := stripSourceRetentionOptions(extRange.Options, optionsPath, removedPaths) + if err != nil { + return nil, err + } + if newOpts == extRange.Options { + return extRange, nil + } + newExtRange, err := shallowCopy(extRange) + if err != nil { + return nil, err + } + newExtRange.Options = newOpts + return newExtRange, nil +} + +func stripSourceRetentionOptionsFromEnum( + enum *descriptorpb.EnumDescriptorProto, + path sourcePath, + removedPaths *sourcePathTrie, +) (*descriptorpb.EnumDescriptorProto, error) { + var dirty bool + optionsPath := path.push(internal.EnumOptionsTag) + newOpts, err := stripSourceRetentionOptions(enum.Options, optionsPath, removedPaths) + if err != nil { + return nil, err + } + if newOpts != enum.Options { + dirty = true + } + valsPath := path.push(internal.EnumValuesTag) + newVals, changed, err := stripOptionsFromAll(enum.Value, stripSourceRetentionOptionsFromEnumValue, valsPath, removedPaths) + if err != nil { + return nil, err + } + if changed { + dirty = true + } + + if !dirty { + return enum, nil + } + + newEnum, err := shallowCopy(enum) + if err != nil { + return nil, err + } + newEnum.Options = newOpts + newEnum.Value = newVals + return newEnum, nil +} + +func stripSourceRetentionOptionsFromEnumValue( + enumVal *descriptorpb.EnumValueDescriptorProto, + path sourcePath, + removedPaths *sourcePathTrie, +) (*descriptorpb.EnumValueDescriptorProto, error) { + optionsPath := path.push(internal.EnumValOptionsTag) + newOpts, err := stripSourceRetentionOptions(enumVal.Options, optionsPath, removedPaths) + if err != nil { + return nil, err + } + if 
newOpts == enumVal.Options { + return enumVal, nil + } + newEnumVal, err := shallowCopy(enumVal) + if err != nil { + return nil, err + } + newEnumVal.Options = newOpts + return newEnumVal, nil +} + +func stripSourceRetentionOptionsFromService( + svc *descriptorpb.ServiceDescriptorProto, + path sourcePath, + removedPaths *sourcePathTrie, +) (*descriptorpb.ServiceDescriptorProto, error) { + var dirty bool + optionsPath := path.push(internal.ServiceOptionsTag) + newOpts, err := stripSourceRetentionOptions(svc.Options, optionsPath, removedPaths) + if err != nil { + return nil, err + } + if newOpts != svc.Options { + dirty = true + } + methodsPath := path.push(internal.ServiceMethodsTag) + newMethods, changed, err := stripOptionsFromAll(svc.Method, stripSourceRetentionOptionsFromMethod, methodsPath, removedPaths) + if err != nil { + return nil, err + } + if changed { + dirty = true + } + + if !dirty { + return svc, nil + } + + newSvc, err := shallowCopy(svc) + if err != nil { + return nil, err + } + newSvc.Options = newOpts + newSvc.Method = newMethods + return newSvc, nil +} + +func stripSourceRetentionOptionsFromMethod( + method *descriptorpb.MethodDescriptorProto, + path sourcePath, + removedPaths *sourcePathTrie, +) (*descriptorpb.MethodDescriptorProto, error) { + optionsPath := path.push(internal.MethodOptionsTag) + newOpts, err := stripSourceRetentionOptions(method.Options, optionsPath, removedPaths) + if err != nil { + return nil, err + } + if newOpts == method.Options { + return method, nil + } + newMethod, err := shallowCopy(method) + if err != nil { + return nil, err + } + newMethod.Options = newOpts + return newMethod, nil +} + +func stripSourcePathsForSourceRetentionOptions( + sourceInfo *descriptorpb.SourceCodeInfo, + removedPaths *sourcePathTrie, +) *descriptorpb.SourceCodeInfo { + if sourceInfo == nil || len(sourceInfo.Location) == 0 || removedPaths == nil { + // nothing to do + return sourceInfo + } + newLocations := make([]*descriptorpb.SourceCodeInfo_Location, len(sourceInfo.Location)) + var i int + for _, loc := range sourceInfo.Location { + if removedPaths.isRemoved(loc.Path) { + continue + } + newLocations[i] = loc + i++ + } + newLocations = newLocations[:i] + return &descriptorpb.SourceCodeInfo{Location: newLocations} +} + +func shallowCopy[M proto.Message](msg M) (M, error) { + msgRef := msg.ProtoReflect() + other := msgRef.New() + ret, ok := other.Interface().(M) + if !ok { + return ret, fmt.Errorf("creating new message of same type resulted in unexpected type; got %T, want %T", other.Interface(), ret) + } + msgRef.Range(func(field protoreflect.FieldDescriptor, val protoreflect.Value) bool { + other.Set(field, val) + return true + }) + return ret, nil +} + +// stripOptionsFromAll applies the given function to each element in the given +// slice in order to remove source-retention options from it. It returns the new +// slice and a bool indicating whether anything was actually changed. If the +// second value is false, then the returned slice is the same slice as the input +// slice. Usually, T is a pointer type, in which case the given updateFunc should +// NOT mutate the input value. Instead, it should return the input value if only +// if there is no update needed. If a mutation is needed, it should return a new +// value. 
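+//
+// Illustrative call (a sketch mirroring the call sites above; msgs is assumed
+// to be a []*descriptorpb.DescriptorProto):
+//
+//	newMsgs, changed, err := stripOptionsFromAll(msgs, stripSourceRetentionOptionsFromMessage, path, removedPaths)
+//	if err != nil {
+//		return nil, err
+//	}
+//	if !changed {
+//		// newMsgs is the same slice as msgs; nothing was copied
+//	}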
+func stripOptionsFromAll[T comparable]( + slice []T, + updateFunc func(T, sourcePath, *sourcePathTrie) (T, error), + path sourcePath, + removedPaths *sourcePathTrie, +) ([]T, bool, error) { + var updated []T // initialized lazily, only when/if a copy is needed + for i, item := range slice { + newItem, err := updateFunc(item, path.push(int32(i)), removedPaths) + if err != nil { + return nil, false, err + } + if updated != nil { + updated[i] = newItem + } else if newItem != item { + updated = make([]T, len(slice)) + copy(updated[:i], slice) + updated[i] = newItem + } + } + if updated != nil { + return updated, true, nil + } + return slice, false, nil +} diff --git a/vendor/github.com/bufbuild/protocompile/options/target_types.go b/vendor/github.com/bufbuild/protocompile/options/target_types.go new file mode 100644 index 00000000..0d780754 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/options/target_types.go @@ -0,0 +1,152 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package options + +import ( + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/descriptorpb" +) + +type optionsType[T any] interface { + *T + proto.Message + GetFeatures() *descriptorpb.FeatureSet + GetUninterpretedOption() []*descriptorpb.UninterpretedOption +} + +type elementType[OptsStruct any, Opts optionsType[OptsStruct]] interface { + proto.Message + GetOptions() Opts +} + +type targetType[Elem elementType[OptsStruct, Opts], OptsStruct any, Opts optionsType[OptsStruct]] struct { + t descriptorpb.FieldOptions_OptionTargetType + setUninterpretedOptions func(opts Opts, uninterpreted []*descriptorpb.UninterpretedOption) + setOptions func(elem Elem, opts Opts) +} + +var ( + targetTypeFile = newTargetType[*descriptorpb.FileDescriptorProto]( + descriptorpb.FieldOptions_TARGET_TYPE_FILE, setUninterpretedFileOptions, setFileOptions, + ) + targetTypeMessage = newTargetType[*descriptorpb.DescriptorProto]( + descriptorpb.FieldOptions_TARGET_TYPE_MESSAGE, setUninterpretedMessageOptions, setMessageOptions, + ) + targetTypeField = newTargetType[*descriptorpb.FieldDescriptorProto]( + descriptorpb.FieldOptions_TARGET_TYPE_FIELD, setUninterpretedFieldOptions, setFieldOptions, + ) + targetTypeOneof = newTargetType[*descriptorpb.OneofDescriptorProto]( + descriptorpb.FieldOptions_TARGET_TYPE_ONEOF, setUninterpretedOneofOptions, setOneofOptions, + ) + targetTypeExtensionRange = newTargetType[*descriptorpb.DescriptorProto_ExtensionRange]( + descriptorpb.FieldOptions_TARGET_TYPE_EXTENSION_RANGE, setUninterpretedExtensionRangeOptions, setExtensionRangeOptions, + ) + targetTypeEnum = newTargetType[*descriptorpb.EnumDescriptorProto]( + descriptorpb.FieldOptions_TARGET_TYPE_ENUM, setUninterpretedEnumOptions, setEnumOptions, + ) + targetTypeEnumValue = newTargetType[*descriptorpb.EnumValueDescriptorProto]( + descriptorpb.FieldOptions_TARGET_TYPE_ENUM_ENTRY, setUninterpretedEnumValueOptions, setEnumValueOptions, + ) + targetTypeService = 
newTargetType[*descriptorpb.ServiceDescriptorProto]( + descriptorpb.FieldOptions_TARGET_TYPE_SERVICE, setUninterpretedServiceOptions, setServiceOptions, + ) + targetTypeMethod = newTargetType[*descriptorpb.MethodDescriptorProto]( + descriptorpb.FieldOptions_TARGET_TYPE_METHOD, setUninterpretedMethodOptions, setMethodOptions, + ) +) + +func newTargetType[Elem elementType[OptsStruct, Opts], OptsStruct any, Opts optionsType[OptsStruct]]( + t descriptorpb.FieldOptions_OptionTargetType, + setUninterpretedOptions func(opts Opts, uninterpreted []*descriptorpb.UninterpretedOption), + setOptions func(elem Elem, opts Opts), +) *targetType[Elem, OptsStruct, Opts] { + return &targetType[Elem, OptsStruct, Opts]{ + t: t, + setUninterpretedOptions: setUninterpretedOptions, + setOptions: setOptions, + } +} + +func setUninterpretedFileOptions(opts *descriptorpb.FileOptions, uninterpreted []*descriptorpb.UninterpretedOption) { + opts.UninterpretedOption = uninterpreted +} + +func setUninterpretedMessageOptions(opts *descriptorpb.MessageOptions, uninterpreted []*descriptorpb.UninterpretedOption) { + opts.UninterpretedOption = uninterpreted +} + +func setUninterpretedFieldOptions(opts *descriptorpb.FieldOptions, uninterpreted []*descriptorpb.UninterpretedOption) { + opts.UninterpretedOption = uninterpreted +} + +func setUninterpretedOneofOptions(opts *descriptorpb.OneofOptions, uninterpreted []*descriptorpb.UninterpretedOption) { + opts.UninterpretedOption = uninterpreted +} + +func setUninterpretedExtensionRangeOptions(opts *descriptorpb.ExtensionRangeOptions, uninterpreted []*descriptorpb.UninterpretedOption) { + opts.UninterpretedOption = uninterpreted +} + +func setUninterpretedEnumOptions(opts *descriptorpb.EnumOptions, uninterpreted []*descriptorpb.UninterpretedOption) { + opts.UninterpretedOption = uninterpreted +} + +func setUninterpretedEnumValueOptions(opts *descriptorpb.EnumValueOptions, uninterpreted []*descriptorpb.UninterpretedOption) { + opts.UninterpretedOption = uninterpreted +} + +func setUninterpretedServiceOptions(opts *descriptorpb.ServiceOptions, uninterpreted []*descriptorpb.UninterpretedOption) { + opts.UninterpretedOption = uninterpreted +} + +func setUninterpretedMethodOptions(opts *descriptorpb.MethodOptions, uninterpreted []*descriptorpb.UninterpretedOption) { + opts.UninterpretedOption = uninterpreted +} + +func setFileOptions(desc *descriptorpb.FileDescriptorProto, opts *descriptorpb.FileOptions) { + desc.Options = opts +} + +func setMessageOptions(desc *descriptorpb.DescriptorProto, opts *descriptorpb.MessageOptions) { + desc.Options = opts +} + +func setFieldOptions(desc *descriptorpb.FieldDescriptorProto, opts *descriptorpb.FieldOptions) { + desc.Options = opts +} + +func setOneofOptions(desc *descriptorpb.OneofDescriptorProto, opts *descriptorpb.OneofOptions) { + desc.Options = opts +} + +func setExtensionRangeOptions(desc *descriptorpb.DescriptorProto_ExtensionRange, opts *descriptorpb.ExtensionRangeOptions) { + desc.Options = opts +} + +func setEnumOptions(desc *descriptorpb.EnumDescriptorProto, opts *descriptorpb.EnumOptions) { + desc.Options = opts +} + +func setEnumValueOptions(desc *descriptorpb.EnumValueDescriptorProto, opts *descriptorpb.EnumValueOptions) { + desc.Options = opts +} + +func setServiceOptions(desc *descriptorpb.ServiceDescriptorProto, opts *descriptorpb.ServiceOptions) { + desc.Options = opts +} + +func setMethodOptions(desc *descriptorpb.MethodDescriptorProto, opts *descriptorpb.MethodOptions) { + desc.Options = opts +} diff --git 
a/vendor/github.com/bufbuild/protocompile/parser/.gitignore b/vendor/github.com/bufbuild/protocompile/parser/.gitignore new file mode 100644 index 00000000..26520536 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/parser/.gitignore @@ -0,0 +1 @@ +y.output diff --git a/vendor/github.com/bufbuild/protocompile/parser/ast.go b/vendor/github.com/bufbuild/protocompile/parser/ast.go new file mode 100644 index 00000000..f58f7ae4 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/parser/ast.go @@ -0,0 +1,144 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package parser + +import ( + "github.com/bufbuild/protocompile/ast" +) + +// the types below are accumulator types, just used in intermediate productions +// to accumulate slices that will get stored in AST nodes + +type compactOptionSlices struct { + options []*ast.OptionNode + commas []*ast.RuneNode +} + +func toStringValueNode(strs []*ast.StringLiteralNode) ast.StringValueNode { + if len(strs) == 1 { + return strs[0] + } + return ast.NewCompoundLiteralStringNode(strs...) +} + +type nameSlices struct { + // only names or idents will be set, never both + names []ast.StringValueNode + idents []*ast.IdentNode + commas []*ast.RuneNode +} + +type rangeSlices struct { + ranges []*ast.RangeNode + commas []*ast.RuneNode +} + +type valueSlices struct { + vals []ast.ValueNode + commas []*ast.RuneNode +} + +type fieldRefSlices struct { + refs []*ast.FieldReferenceNode + dots []*ast.RuneNode +} + +type identSlices struct { + idents []*ast.IdentNode + dots []*ast.RuneNode +} + +func (s *identSlices) toIdentValueNode(leadingDot *ast.RuneNode) ast.IdentValueNode { + if len(s.idents) == 1 && leadingDot == nil { + // single simple name + return s.idents[0] + } + return ast.NewCompoundIdentNode(leadingDot, s.idents, s.dots) +} + +type messageFieldList struct { + field *ast.MessageFieldNode + delimiter *ast.RuneNode + next *messageFieldList +} + +func (list *messageFieldList) toNodes() ([]*ast.MessageFieldNode, []*ast.RuneNode) { + if list == nil { + return nil, nil + } + l := 0 + for cur := list; cur != nil; cur = cur.next { + l++ + } + fields := make([]*ast.MessageFieldNode, l) + delimiters := make([]*ast.RuneNode, l) + for cur, i := list, 0; cur != nil; cur, i = cur.next, i+1 { + fields[i] = cur.field + if cur.delimiter != nil { + delimiters[i] = cur.delimiter + } + } + return fields, delimiters +} + +func prependRunes[T ast.Node](convert func(*ast.RuneNode) T, runes []*ast.RuneNode, elements []T) []T { + elems := make([]T, 0, len(runes)+len(elements)) + for _, rune := range runes { + elems = append(elems, convert(rune)) + } + elems = append(elems, elements...) 
+ return elems +} + +func toServiceElement(semi *ast.RuneNode) ast.ServiceElement { + return ast.NewEmptyDeclNode(semi) +} + +func toMethodElement(semi *ast.RuneNode) ast.RPCElement { + return ast.NewEmptyDeclNode(semi) +} + +func toFileElement(semi *ast.RuneNode) ast.FileElement { + return ast.NewEmptyDeclNode(semi) +} + +func toEnumElement(semi *ast.RuneNode) ast.EnumElement { + return ast.NewEmptyDeclNode(semi) +} + +func toMessageElement(semi *ast.RuneNode) ast.MessageElement { + return ast.NewEmptyDeclNode(semi) +} + +type nodeWithRunes[T ast.Node] struct { + Node T + Runes []*ast.RuneNode +} + +func newNodeWithRunes[T ast.Node](node T, trailingRunes ...*ast.RuneNode) nodeWithRunes[T] { + return nodeWithRunes[T]{ + Node: node, + Runes: trailingRunes, + } +} + +func toElements[T ast.Node](convert func(*ast.RuneNode) T, node T, runes []*ast.RuneNode) []T { + elements := make([]T, 1+len(runes)) + elements[0] = node + for i, rune := range runes { + elements[i+1] = convert(rune) + } + return elements +} diff --git a/vendor/github.com/bufbuild/protocompile/parser/clone.go b/vendor/github.com/bufbuild/protocompile/parser/clone.go new file mode 100644 index 00000000..04322486 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/parser/clone.go @@ -0,0 +1,183 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package parser + +import ( + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/descriptorpb" + + "github.com/bufbuild/protocompile/ast" + "github.com/bufbuild/protocompile/reporter" +) + +// Clone returns a copy of the given result. Since descriptor protos may be +// mutated during linking, this can return a defensive copy so that mutations +// don't impact concurrent operations in an unsafe way. This is called if the +// parse result could be re-used across concurrent operations and has unresolved +// references and options which will require mutation by the linker. +// +// If the given value has a method with the following signature, it will be +// called to perform the operation: +// +// Clone() Result +// +// If the given value does not provide a Clone method and is not the implementation +// provided by this package, it is possible for an error to occur in creating the +// copy, which may result in a panic. This can happen if the AST of the given result +// is not actually valid and a file descriptor proto cannot be successfully derived +// from it. +func Clone(r Result) Result { + if cl, ok := r.(interface{ Clone() Result }); ok { + return cl.Clone() + } + if res, ok := r.(*result); ok { + newProto := proto.Clone(res.proto).(*descriptorpb.FileDescriptorProto) //nolint:errcheck + newNodes := make(map[proto.Message]ast.Node, len(res.nodes)) + newResult := &result{ + file: res.file, + proto: newProto, + nodes: newNodes, + } + recreateNodeIndexForFile(res, newResult, res.proto, newProto) + return newResult + } + + // Can't do the deep-copy we know how to do. So we have to take a + // different tactic. 
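+	// In other words: a Result implementation other than *result either supplies
+	// its own Clone method (handled above) or is re-created below, from just the
+	// descriptor proto when it has no AST, or from its AST otherwise.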
+ if r.AST() == nil { + // no AST? all we have to do is copy the proto + fileProto := proto.Clone(r.FileDescriptorProto()).(*descriptorpb.FileDescriptorProto) //nolint:errcheck + return ResultWithoutAST(fileProto) + } + // Otherwise, we have an AST, but no way to clone the result's + // internals. So just re-create them from scratch. + res, err := ResultFromAST(r.AST(), false, reporter.NewHandler(nil)) + if err != nil { + panic(err) + } + return res +} + +func recreateNodeIndexForFile(orig, clone *result, origProto, cloneProto *descriptorpb.FileDescriptorProto) { + updateNodeIndexWithOptions[*descriptorpb.FileOptions](orig, clone, origProto, cloneProto) + for i, origMd := range origProto.MessageType { + cloneMd := cloneProto.MessageType[i] + recreateNodeIndexForMessage(orig, clone, origMd, cloneMd) + } + for i, origEd := range origProto.EnumType { + cloneEd := cloneProto.EnumType[i] + recreateNodeIndexForEnum(orig, clone, origEd, cloneEd) + } + for i, origExtd := range origProto.Extension { + cloneExtd := cloneProto.Extension[i] + updateNodeIndexWithOptions[*descriptorpb.FieldOptions](orig, clone, origExtd, cloneExtd) + } + for i, origSd := range origProto.Service { + cloneSd := cloneProto.Service[i] + updateNodeIndexWithOptions[*descriptorpb.ServiceOptions](orig, clone, origSd, cloneSd) + for j, origMtd := range origSd.Method { + cloneMtd := cloneSd.Method[j] + updateNodeIndexWithOptions[*descriptorpb.MethodOptions](orig, clone, origMtd, cloneMtd) + } + } +} + +func recreateNodeIndexForMessage(orig, clone *result, origProto, cloneProto *descriptorpb.DescriptorProto) { + updateNodeIndexWithOptions[*descriptorpb.MessageOptions](orig, clone, origProto, cloneProto) + for i, origFld := range origProto.Field { + cloneFld := cloneProto.Field[i] + updateNodeIndexWithOptions[*descriptorpb.FieldOptions](orig, clone, origFld, cloneFld) + } + for i, origOod := range origProto.OneofDecl { + cloneOod := cloneProto.OneofDecl[i] + updateNodeIndexWithOptions[*descriptorpb.OneofOptions](orig, clone, origOod, cloneOod) + } + for i, origExtr := range origProto.ExtensionRange { + cloneExtr := cloneProto.ExtensionRange[i] + updateNodeIndex(orig, clone, asExtsNode(origExtr), asExtsNode(cloneExtr)) + updateNodeIndexWithOptions[*descriptorpb.ExtensionRangeOptions](orig, clone, origExtr, cloneExtr) + } + for i, origRr := range origProto.ReservedRange { + cloneRr := cloneProto.ReservedRange[i] + updateNodeIndex(orig, clone, origRr, cloneRr) + } + for i, origNmd := range origProto.NestedType { + cloneNmd := cloneProto.NestedType[i] + recreateNodeIndexForMessage(orig, clone, origNmd, cloneNmd) + } + for i, origEd := range origProto.EnumType { + cloneEd := cloneProto.EnumType[i] + recreateNodeIndexForEnum(orig, clone, origEd, cloneEd) + } + for i, origExtd := range origProto.Extension { + cloneExtd := cloneProto.Extension[i] + updateNodeIndexWithOptions[*descriptorpb.FieldOptions](orig, clone, origExtd, cloneExtd) + } +} + +func recreateNodeIndexForEnum(orig, clone *result, origProto, cloneProto *descriptorpb.EnumDescriptorProto) { + updateNodeIndexWithOptions[*descriptorpb.EnumOptions](orig, clone, origProto, cloneProto) + for i, origEvd := range origProto.Value { + cloneEvd := cloneProto.Value[i] + updateNodeIndexWithOptions[*descriptorpb.EnumValueOptions](orig, clone, origEvd, cloneEvd) + } + for i, origRr := range origProto.ReservedRange { + cloneRr := cloneProto.ReservedRange[i] + updateNodeIndex(orig, clone, origRr, cloneRr) + } +} + +func recreateNodeIndexForOptions(orig, clone *result, origProtos, cloneProtos 
[]*descriptorpb.UninterpretedOption) { + for i, origOpt := range origProtos { + cloneOpt := cloneProtos[i] + updateNodeIndex(orig, clone, origOpt, cloneOpt) + for j, origName := range origOpt.Name { + cloneName := cloneOpt.Name[j] + updateNodeIndex(orig, clone, origName, cloneName) + } + } +} + +func updateNodeIndex[M proto.Message](orig, clone *result, origProto, cloneProto M) { + node := orig.nodes[origProto] + if node != nil { + clone.nodes[cloneProto] = node + } +} + +type pointerMessage[T any] interface { + *T + proto.Message +} + +type options[T any] interface { + // need this type instead of just proto.Message so we can check for nil pointer + pointerMessage[T] + GetUninterpretedOption() []*descriptorpb.UninterpretedOption +} + +type withOptions[O options[T], T any] interface { + proto.Message + GetOptions() O +} + +func updateNodeIndexWithOptions[O options[T], M withOptions[O, T], T any](orig, clone *result, origProto, cloneProto M) { + updateNodeIndex(orig, clone, origProto, cloneProto) + origOpts := origProto.GetOptions() + cloneOpts := cloneProto.GetOptions() + if origOpts != nil { + recreateNodeIndexForOptions(orig, clone, origOpts.GetUninterpretedOption(), cloneOpts.GetUninterpretedOption()) + } +} diff --git a/vendor/github.com/bufbuild/protocompile/parser/doc.go b/vendor/github.com/bufbuild/protocompile/parser/doc.go new file mode 100644 index 00000000..40555543 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/parser/doc.go @@ -0,0 +1,25 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package parser contains the logic for parsing protobuf source code into an +// AST (abstract syntax tree) and also for converting an AST into a descriptor +// proto. +// +// A FileDescriptorProto is very similar to an AST, but the AST this package +// uses is more useful because it contains more information about the source +// code, including details about whitespace and comments, that cannot be +// represented by a descriptor proto. This makes it ideal for things like +// code formatters, which may want to preserve things like whitespace and +// comment format. +package parser diff --git a/vendor/github.com/bufbuild/protocompile/parser/errors.go b/vendor/github.com/bufbuild/protocompile/parser/errors.go new file mode 100644 index 00000000..e78bddad --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/parser/errors.go @@ -0,0 +1,22 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package parser + +import "errors" + +// ErrNoSyntax is a sentinel error that may be passed to a warning reporter. +// The error the reporter receives will be wrapped with source position that +// indicates the file that had no syntax statement. +var ErrNoSyntax = errors.New("no syntax specified; defaulting to proto2 syntax") diff --git a/vendor/github.com/bufbuild/protocompile/parser/lexer.go b/vendor/github.com/bufbuild/protocompile/parser/lexer.go new file mode 100644 index 00000000..71cbc7ac --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/parser/lexer.go @@ -0,0 +1,771 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package parser + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "math" + "strconv" + "strings" + "unicode/utf8" + + "github.com/bufbuild/protocompile/ast" + "github.com/bufbuild/protocompile/reporter" +) + +type runeReader struct { + data []byte + pos int + err error + mark int + // Enable this check to make input required to be valid UTF-8. + // For now, since protoc allows invalid UTF-8, default to false. + utf8Strict bool +} + +func (rr *runeReader) readRune() (r rune, size int, err error) { + if rr.err != nil { + return 0, 0, rr.err + } + if rr.pos == len(rr.data) { + rr.err = io.EOF + return 0, 0, rr.err + } + r, sz := utf8.DecodeRune(rr.data[rr.pos:]) + if rr.utf8Strict && r == utf8.RuneError { + rr.err = fmt.Errorf("invalid UTF8 at offset %d: %x", rr.pos, rr.data[rr.pos]) + return 0, 0, rr.err + } + rr.pos += sz + return r, sz, nil +} + +func (rr *runeReader) offset() int { + return rr.pos +} + +func (rr *runeReader) unreadRune(sz int) { + newPos := rr.pos - sz + if newPos < rr.mark { + panic("unread past mark") + } + rr.pos = newPos +} + +func (rr *runeReader) setMark() { + rr.mark = rr.pos +} + +func (rr *runeReader) getMark() string { + return string(rr.data[rr.mark:rr.pos]) +} + +type protoLex struct { + input *runeReader + info *ast.FileInfo + handler *reporter.Handler + res *ast.FileNode + + prevSym ast.TerminalNode + prevOffset int + eof ast.Token + + comments []ast.Token +} + +var utf8Bom = []byte{0xEF, 0xBB, 0xBF} + +func newLexer(in io.Reader, filename string, handler *reporter.Handler) (*protoLex, error) { + br := bufio.NewReader(in) + + // if file has UTF8 byte order marker preface, consume it + marker, err := br.Peek(3) + if err == nil && bytes.Equal(marker, utf8Bom) { + _, _ = br.Discard(3) + } + + contents, err := io.ReadAll(br) + if err != nil { + return nil, err + } + return &protoLex{ + input: &runeReader{data: contents}, + info: ast.NewFileInfo(filename, contents), + handler: handler, + }, nil +} + +var keywords = map[string]int{ + "syntax": _SYNTAX, + "edition": _EDITION, + "import": _IMPORT, + "weak": _WEAK, + "public": _PUBLIC, + "package": _PACKAGE, + "option": _OPTION, + "true": _TRUE, + "false": _FALSE, + "inf": _INF, + "nan": _NAN, + "repeated": 
_REPEATED, + "optional": _OPTIONAL, + "required": _REQUIRED, + "double": _DOUBLE, + "float": _FLOAT, + "int32": _INT32, + "int64": _INT64, + "uint32": _UINT32, + "uint64": _UINT64, + "sint32": _SINT32, + "sint64": _SINT64, + "fixed32": _FIXED32, + "fixed64": _FIXED64, + "sfixed32": _SFIXED32, + "sfixed64": _SFIXED64, + "bool": _BOOL, + "string": _STRING, + "bytes": _BYTES, + "group": _GROUP, + "oneof": _ONEOF, + "map": _MAP, + "extensions": _EXTENSIONS, + "to": _TO, + "max": _MAX, + "reserved": _RESERVED, + "enum": _ENUM, + "message": _MESSAGE, + "extend": _EXTEND, + "service": _SERVICE, + "rpc": _RPC, + "stream": _STREAM, + "returns": _RETURNS, +} + +func (l *protoLex) maybeNewLine(r rune) { + if r == '\n' { + l.info.AddLine(l.input.offset()) + } +} + +func (l *protoLex) prev() ast.SourcePos { + return l.info.SourcePos(l.prevOffset) +} + +func (l *protoLex) Lex(lval *protoSymType) int { + if l.handler.ReporterError() != nil { + // if error reporter already returned non-nil error, + // we can skip the rest of the input + return 0 + } + + l.comments = nil + + for { + l.input.setMark() + + l.prevOffset = l.input.offset() + c, _, err := l.input.readRune() + if err == io.EOF { + // we're not actually returning a rune, but this will associate + // accumulated comments as a trailing comment on last symbol + // (if appropriate) + l.setRune(lval, 0) + l.eof = lval.b.Token() + return 0 + } + if err != nil { + l.setError(lval, err) + return _ERROR + } + + if strings.ContainsRune("\n\r\t\f\v ", c) { + // skip whitespace + l.maybeNewLine(c) + continue + } + + if c == '.' { + // decimal literals could start with a dot + cn, szn, err := l.input.readRune() + if err != nil { + l.setRune(lval, c) + return int(c) + } + if cn >= '0' && cn <= '9' { + l.readNumber() + token := l.input.getMark() + f, err := parseFloat(token) + if err != nil { + l.setError(lval, numError(err, "float", token)) + return _ERROR + } + l.setFloat(lval, f) + return _FLOAT_LIT + } + l.input.unreadRune(szn) + l.setRune(lval, c) + return int(c) + } + + if c == '_' || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') { + // identifier + l.readIdentifier() + str := l.input.getMark() + if t, ok := keywords[str]; ok { + l.setIdent(lval, str) + return t + } + l.setIdent(lval, str) + return _NAME + } + + if c >= '0' && c <= '9' { + // integer or float literal + l.readNumber() + token := l.input.getMark() + if strings.HasPrefix(token, "0x") || strings.HasPrefix(token, "0X") { + // hexadecimal + ui, err := strconv.ParseUint(token[2:], 16, 64) + if err != nil { + l.setError(lval, numError(err, "hexadecimal integer", token[2:])) + return _ERROR + } + l.setInt(lval, ui) + return _INT_LIT + } + if strings.ContainsAny(token, ".eE") { + // floating point! + f, err := parseFloat(token) + if err != nil { + l.setError(lval, numError(err, "float", token)) + return _ERROR + } + l.setFloat(lval, f) + return _FLOAT_LIT + } + // integer! 
(decimal or octal) + base := 10 + if token[0] == '0' { + base = 8 + } + ui, err := strconv.ParseUint(token, base, 64) + if err != nil { + kind := "integer" + if base == 8 { + kind = "octal integer" + } else if numErr, ok := err.(*strconv.NumError); ok && numErr.Err == strconv.ErrRange { + // if it's too big to be an int, parse it as a float + var f float64 + kind = "float" + f, err = parseFloat(token) + if err == nil { + l.setFloat(lval, f) + return _FLOAT_LIT + } + } + l.setError(lval, numError(err, kind, token)) + return _ERROR + } + l.setInt(lval, ui) + return _INT_LIT + } + + if c == '\'' || c == '"' { + // string literal + str, err := l.readStringLiteral(c) + if err != nil { + l.setError(lval, err) + return _ERROR + } + l.setString(lval, str) + return _STRING_LIT + } + + if c == '/' { + // comment + cn, szn, err := l.input.readRune() + if err != nil { + l.setRune(lval, '/') + return int(c) + } + if cn == '/' { + if hasErr := l.skipToEndOfLineComment(lval); hasErr { + return _ERROR + } + l.comments = append(l.comments, l.newToken()) + continue + } + if cn == '*' { + ok, hasErr := l.skipToEndOfBlockComment(lval) + if hasErr { + return _ERROR + } + if !ok { + l.setError(lval, errors.New("block comment never terminates, unexpected EOF")) + return _ERROR + } + l.comments = append(l.comments, l.newToken()) + continue + } + l.input.unreadRune(szn) + } + + if c < 32 || c == 127 { + l.setError(lval, errors.New("invalid control character")) + return _ERROR + } + if !strings.ContainsRune(";,.:=-+(){}[]<>/", c) { + l.setError(lval, errors.New("invalid character")) + return _ERROR + } + l.setRune(lval, c) + return int(c) + } +} + +func parseFloat(token string) (float64, error) { + // strconv.ParseFloat allows _ to separate digits, but protobuf does not + if strings.ContainsRune(token, '_') { + return 0, &strconv.NumError{ + Func: "parseFloat", + Num: token, + Err: strconv.ErrSyntax, + } + } + f, err := strconv.ParseFloat(token, 64) + if err == nil { + return f, nil + } + if numErr, ok := err.(*strconv.NumError); ok && numErr.Err == strconv.ErrRange && math.IsInf(f, 1) { + // protoc doesn't complain about float overflow and instead just uses "infinity" + // so we mirror that behavior by just returning infinity and ignoring the error + return f, nil + } + return f, err +} + +func (l *protoLex) newToken() ast.Token { + offset := l.input.mark + length := l.input.pos - l.input.mark + return l.info.AddToken(offset, length) +} + +func (l *protoLex) setPrevAndAddComments(n ast.TerminalNode) { + comments := l.comments + l.comments = nil + var prevTrailingComments []ast.Token + if l.prevSym != nil && len(comments) > 0 { + prevEnd := l.info.NodeInfo(l.prevSym).End().Line + info := l.info.NodeInfo(n) + nStart := info.Start().Line + if nStart == prevEnd { + if rn, ok := n.(*ast.RuneNode); ok && rn.Rune == 0 { + // if current token is EOF, pretend its on separate line + // so that the logic below can attribute a final trailing + // comment to the previous token + nStart++ + } + } + c := comments[0] + commentInfo := l.info.TokenInfo(c) + commentStart := commentInfo.Start().Line + if nStart > prevEnd && commentStart == prevEnd { + // Comment starts right after the previous token. If it's a + // line comment, we record that as a trailing comment. + // + // But if it's a block comment, it is only a trailing comment + // if there are multiple comments or if the block comment ends + // on a line before n. 
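+			// Illustrative example (hypothetical source): given
+			//
+			//	optional int32 foo = 1; // note about foo
+			//	optional int32 bar = 2;
+			//
+			// the line comment begins on the same line where the previous
+			// token (the ';' ending foo's declaration) ends, so it is donated
+			// to that token as a trailing comment instead of becoming a
+			// leading comment on bar.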
+ canDonate := strings.HasPrefix(commentInfo.RawText(), "//") || + len(comments) > 1 || commentInfo.End().Line < nStart + + if canDonate { + prevTrailingComments = comments[:1] + comments = comments[1:] + } + } + } + + // now we can associate comments + for _, c := range prevTrailingComments { + l.info.AddComment(c, l.prevSym.Token()) + } + for _, c := range comments { + l.info.AddComment(c, n.Token()) + } + + l.prevSym = n +} + +func (l *protoLex) setString(lval *protoSymType, val string) { + lval.s = ast.NewStringLiteralNode(val, l.newToken()) + l.setPrevAndAddComments(lval.s) +} + +func (l *protoLex) setIdent(lval *protoSymType, val string) { + lval.id = ast.NewIdentNode(val, l.newToken()) + l.setPrevAndAddComments(lval.id) +} + +func (l *protoLex) setInt(lval *protoSymType, val uint64) { + lval.i = ast.NewUintLiteralNode(val, l.newToken()) + l.setPrevAndAddComments(lval.i) +} + +func (l *protoLex) setFloat(lval *protoSymType, val float64) { + lval.f = ast.NewFloatLiteralNode(val, l.newToken()) + l.setPrevAndAddComments(lval.f) +} + +func (l *protoLex) setRune(lval *protoSymType, val rune) { + lval.b = ast.NewRuneNode(val, l.newToken()) + l.setPrevAndAddComments(lval.b) +} + +func (l *protoLex) setError(lval *protoSymType, err error) { + lval.err, _ = l.addSourceError(err) +} + +func (l *protoLex) readNumber() { + allowExpSign := false + for { + c, sz, err := l.input.readRune() + if err != nil { + break + } + if (c == '-' || c == '+') && !allowExpSign { + l.input.unreadRune(sz) + break + } + allowExpSign = false + if c != '.' && c != '_' && (c < '0' || c > '9') && + (c < 'a' || c > 'z') && (c < 'A' || c > 'Z') && + c != '-' && c != '+' { + // no more chars in the number token + l.input.unreadRune(sz) + break + } + if c == 'e' || c == 'E' { + // scientific notation char can be followed by + // an exponent sign + allowExpSign = true + } + } +} + +func numError(err error, kind, s string) error { + ne, ok := err.(*strconv.NumError) + if !ok { + return err + } + if ne.Err == strconv.ErrRange { + return fmt.Errorf("value out of range for %s: %s", kind, s) + } + // syntax error + return fmt.Errorf("invalid syntax in %s value: %s", kind, s) +} + +func (l *protoLex) readIdentifier() { + for { + c, sz, err := l.input.readRune() + if err != nil { + break + } + if c != '_' && (c < 'a' || c > 'z') && (c < 'A' || c > 'Z') && (c < '0' || c > '9') { + l.input.unreadRune(sz) + break + } + } +} + +func (l *protoLex) readStringLiteral(quote rune) (string, error) { + var buf bytes.Buffer + var escapeError reporter.ErrorWithPos + var noMoreErrors bool + reportErr := func(msg, badEscape string) { + if noMoreErrors { + return + } + if escapeError != nil { + // report previous one + _, ok := l.addSourceError(escapeError) + if !ok { + noMoreErrors = true + } + } + var err error + if strings.HasSuffix(msg, "%s") { + err = fmt.Errorf(msg, badEscape) + } else { + err = errors.New(msg) + } + // we've now consumed the bad escape and lexer position is after it, so we need + // to back up to the beginning of the escape to report the correct position + escapeError = l.errWithCurrentPos(err, -len(badEscape)) + } + for { + c, _, err := l.input.readRune() + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return "", err + } + if c == '\n' { + return "", errors.New("encountered end-of-line before end of string literal") + } + if c == quote { + break + } + if c == 0 { + reportErr("null character ('\\0') not allowed in string literal", string(rune(0))) + continue + } + if c == '\\' { + // escape 
sequence + c, _, err = l.input.readRune() + if err != nil { + return "", err + } + switch { + case c == 'x' || c == 'X': + // hex escape + c1, sz1, err := l.input.readRune() + if err != nil { + return "", err + } + if c1 == quote || c1 == '\\' { + l.input.unreadRune(sz1) + reportErr("invalid hex escape: %s", "\\"+string(c)) + continue + } + c2, sz2, err := l.input.readRune() + if err != nil { + return "", err + } + var hex string + if (c2 < '0' || c2 > '9') && (c2 < 'a' || c2 > 'f') && (c2 < 'A' || c2 > 'F') { + l.input.unreadRune(sz2) + hex = string(c1) + } else { + hex = string([]rune{c1, c2}) + } + i, err := strconv.ParseInt(hex, 16, 32) + if err != nil { + reportErr("invalid hex escape: %s", "\\"+string(c)+hex) + continue + } + buf.WriteByte(byte(i)) + case c >= '0' && c <= '7': + // octal escape + c2, sz2, err := l.input.readRune() + if err != nil { + return "", err + } + var octal string + if c2 < '0' || c2 > '7' { + l.input.unreadRune(sz2) + octal = string(c) + } else { + c3, sz3, err := l.input.readRune() + if err != nil { + return "", err + } + if c3 < '0' || c3 > '7' { + l.input.unreadRune(sz3) + octal = string([]rune{c, c2}) + } else { + octal = string([]rune{c, c2, c3}) + } + } + i, err := strconv.ParseInt(octal, 8, 32) + if err != nil { + reportErr("invalid octal escape: %s", "\\"+octal) + continue + } + if i > 0xff { + reportErr("octal escape is out range, must be between 0 and 377: %s", "\\"+octal) + continue + } + buf.WriteByte(byte(i)) + case c == 'u': + // short unicode escape + u := make([]rune, 4) + for i := range u { + c2, sz2, err := l.input.readRune() + if err != nil { + return "", err + } + if c2 == quote || c2 == '\\' { + l.input.unreadRune(sz2) + u = u[:i] + break + } + u[i] = c2 + } + codepointStr := string(u) + if len(u) < 4 { + reportErr("invalid unicode escape: %s", "\\u"+codepointStr) + continue + } + i, err := strconv.ParseInt(codepointStr, 16, 32) + if err != nil { + reportErr("invalid unicode escape: %s", "\\u"+codepointStr) + continue + } + buf.WriteRune(rune(i)) + case c == 'U': + // long unicode escape + u := make([]rune, 8) + for i := range u { + c2, sz2, err := l.input.readRune() + if err != nil { + return "", err + } + if c2 == quote || c2 == '\\' { + l.input.unreadRune(sz2) + u = u[:i] + break + } + u[i] = c2 + } + codepointStr := string(u) + if len(u) < 8 { + reportErr("invalid unicode escape: %s", "\\U"+codepointStr) + continue + } + i, err := strconv.ParseInt(string(u), 16, 32) + if err != nil { + reportErr("invalid unicode escape: %s", "\\U"+codepointStr) + continue + } + if i > 0x10ffff || i < 0 { + reportErr("unicode escape is out of range, must be between 0 and 0x10ffff: %s", "\\U"+codepointStr) + continue + } + buf.WriteRune(rune(i)) + case c == 'a': + buf.WriteByte('\a') + case c == 'b': + buf.WriteByte('\b') + case c == 'f': + buf.WriteByte('\f') + case c == 'n': + buf.WriteByte('\n') + case c == 'r': + buf.WriteByte('\r') + case c == 't': + buf.WriteByte('\t') + case c == 'v': + buf.WriteByte('\v') + case c == '\\': + buf.WriteByte('\\') + case c == '\'': + buf.WriteByte('\'') + case c == '"': + buf.WriteByte('"') + case c == '?': + buf.WriteByte('?') + default: + reportErr("invalid escape sequence: %s", "\\"+string(c)) + continue + } + } else { + buf.WriteRune(c) + } + } + if escapeError != nil { + return "", escapeError + } + return buf.String(), nil +} + +func (l *protoLex) skipToEndOfLineComment(lval *protoSymType) (hasErr bool) { + for { + c, sz, err := l.input.readRune() + if err != nil { + // eof + return false + } + switch c { + 
case '\n': + // don't include newline in the comment + l.input.unreadRune(sz) + return false + case 0: + l.setError(lval, errors.New("invalid control character")) + return true + } + } +} + +func (l *protoLex) skipToEndOfBlockComment(lval *protoSymType) (ok, hasErr bool) { + for { + c, _, err := l.input.readRune() + if err != nil { + return false, false + } + if c == 0 { + l.setError(lval, errors.New("invalid control character")) + return false, true + } + l.maybeNewLine(c) + if c == '*' { + c, sz, err := l.input.readRune() + if err != nil { + return false, false + } + if c == '/' { + return true, false + } + l.input.unreadRune(sz) + } + } +} + +func (l *protoLex) addSourceError(err error) (reporter.ErrorWithPos, bool) { + ewp, ok := err.(reporter.ErrorWithPos) + if !ok { + // TODO: Store the previous span instead of just the position. + ewp = reporter.Error(ast.NewSourceSpan(l.prev(), l.prev()), err) + } + handlerErr := l.handler.HandleError(ewp) + return ewp, handlerErr == nil +} + +func (l *protoLex) Error(s string) { + _, _ = l.addSourceError(errors.New(s)) +} + +// TODO: Accept both a start and end offset, and use that to create a span. +func (l *protoLex) errWithCurrentPos(err error, offset int) reporter.ErrorWithPos { + if ewp, ok := err.(reporter.ErrorWithPos); ok { + return ewp + } + pos := l.info.SourcePos(l.input.offset() + offset) + return reporter.Error(ast.NewSourceSpan(pos, pos), err) +} + +func (l *protoLex) requireSemicolon(semicolons []*ast.RuneNode) (*ast.RuneNode, []*ast.RuneNode) { + if len(semicolons) == 0 { + l.Error("syntax error: expecting ';'") + return nil, nil + } + return semicolons[0], semicolons[1:] +} diff --git a/vendor/github.com/bufbuild/protocompile/parser/parser.go b/vendor/github.com/bufbuild/protocompile/parser/parser.go new file mode 100644 index 00000000..21314d5a --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/parser/parser.go @@ -0,0 +1,201 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package parser + +import ( + "fmt" + "io" + + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/descriptorpb" + + "github.com/bufbuild/protocompile/ast" + "github.com/bufbuild/protocompile/reporter" +) + +// The path ../.tmp/bin/goyacc is built when using `make generate` from repo root. 
+//go:generate ../.tmp/bin/goyacc -o proto.y.go -l -p proto proto.y + +func init() { + protoErrorVerbose = true + + // fix up the generated "token name" array so that error messages are nicer + setTokenName(_STRING_LIT, "string literal") + setTokenName(_INT_LIT, "int literal") + setTokenName(_FLOAT_LIT, "float literal") + setTokenName(_NAME, "identifier") + setTokenName(_ERROR, "error") + // for keywords, just show the keyword itself wrapped in quotes + for str, i := range keywords { + setTokenName(i, fmt.Sprintf(`"%s"`, str)) + } +} + +func setTokenName(token int, text string) { + // NB: this is based on logic in generated parse code that translates the + // int returned from the lexer into an internal token number. + var intern int8 + if token < len(protoTok1) { + intern = protoTok1[token] + } else { + if token >= protoPrivate { + if token < protoPrivate+len(protoTok2) { + intern = protoTok2[token-protoPrivate] + } + } + if intern == 0 { + for i := 0; i+1 < len(protoTok3); i += 2 { + if int(protoTok3[i]) == token { + intern = protoTok3[i+1] + break + } + } + } + } + + if intern >= 1 && int(intern-1) < len(protoToknames) { + protoToknames[intern-1] = text + return + } + + panic(fmt.Sprintf("Unknown token value: %d", token)) +} + +// Parse parses the given source code info and returns an AST. The given filename +// is used to construct error messages and position information. The given reader +// supplies the source code. The given handler is used to report errors and +// warnings encountered while parsing. If any errors are reported, this function +// returns a non-nil error. +// +// If the error returned is due to a syntax error in the source, then a non-nil +// AST is also returned. If the handler chooses to not abort the parse (e.g. the +// underlying error reporter returns nil instead of an error), the parser will +// attempt to recover and keep going. This allows multiple syntax errors to be +// reported in a single pass. And it also means that more of the AST can be +// populated (erroneous productions around the syntax error will of course be +// absent). +// +// The degree to which the parser can recover from errors and populate the AST +// depends on the nature of the syntax error and if there are any tokens after the +// syntax error that can help the parser recover. This error recovery and partial +// AST production is best effort. +func Parse(filename string, r io.Reader, handler *reporter.Handler) (*ast.FileNode, error) { + lx, err := newLexer(r, filename, handler) + if err != nil { + return nil, err + } + protoParse(lx) + if lx.res == nil { + // nil AST means there was an error that prevented any parsing + // or the file was empty; synthesize empty non-nil AST + lx.res = ast.NewEmptyFileNode(filename) + } + return lx.res, handler.Error() +} + +// Result is the result of constructing a descriptor proto from a parsed AST. +// From this result, the AST and the file descriptor proto can be had. This +// also contains numerous lookup functions, for looking up AST nodes that +// correspond to various elements of the descriptor hierarchy. +// +// Results can be created without AST information, using the ResultWithoutAST() +// function. All functions other than AST() will still return non-nil values, +// allowing compile operations to work with files that have only intermediate +// descriptor protos and no source code. For such results, the function that +// return AST nodes will return placeholder nodes. The position information for +// placeholder nodes contains only the filename. 
+type Result interface { + // AST returns the parsed abstract syntax tree. This returns nil if the + // Result was created without an AST. + AST() *ast.FileNode + // FileDescriptorProto returns the file descriptor proto. + FileDescriptorProto() *descriptorpb.FileDescriptorProto + + // FileNode returns the root of the AST. If this result has no AST then a + // placeholder node is returned. + FileNode() ast.FileDeclNode + // Node returns the AST node from which the given message was created. This + // can return nil, such as if the given message is not part of the + // FileDescriptorProto hierarchy. If this result has no AST, this returns a + // placeholder node. + Node(proto.Message) ast.Node + // OptionNode returns the AST node corresponding to the given uninterpreted + // option. This can return nil, such as if the given option is not part of + // the FileDescriptorProto hierarchy. If this result has no AST, this + // returns a placeholder node. + OptionNode(*descriptorpb.UninterpretedOption) ast.OptionDeclNode + // OptionNamePartNode returns the AST node corresponding to the given name + // part for an uninterpreted option. This can return nil, such as if the + // given name part is not part of the FileDescriptorProto hierarchy. If this + // result has no AST, this returns a placeholder node. + OptionNamePartNode(*descriptorpb.UninterpretedOption_NamePart) ast.Node + // MessageNode returns the AST node corresponding to the given message. This + // can return nil, such as if the given message is not part of the + // FileDescriptorProto hierarchy. If this result has no AST, this returns a + // placeholder node. + MessageNode(*descriptorpb.DescriptorProto) ast.MessageDeclNode + // FieldNode returns the AST node corresponding to the given field. This can + // return nil, such as if the given field is not part of the + // FileDescriptorProto hierarchy. If this result has no AST, this returns a + // placeholder node. + FieldNode(*descriptorpb.FieldDescriptorProto) ast.FieldDeclNode + // OneofNode returns the AST node corresponding to the given oneof. This can + // return nil, such as if the given oneof is not part of the + // FileDescriptorProto hierarchy. If this result has no AST, this returns a + // placeholder node. + OneofNode(*descriptorpb.OneofDescriptorProto) ast.OneofDeclNode + // ExtensionRangeNode returns the AST node corresponding to the given + // extension range. This can return nil, such as if the given range is not + // part of the FileDescriptorProto hierarchy. If this result has no AST, + // this returns a placeholder node. + ExtensionRangeNode(*descriptorpb.DescriptorProto_ExtensionRange) ast.RangeDeclNode + + // ExtensionsNode returns the AST node corresponding to the "extensions" + // statement in a message that corresponds to the given range. This will be + // the parent of the node returned by ExtensionRangeNode, which contains the + // options that apply to all child ranges. + ExtensionsNode(*descriptorpb.DescriptorProto_ExtensionRange) ast.NodeWithOptions + + // MessageReservedRangeNode returns the AST node corresponding to the given + // reserved range. This can return nil, such as if the given range is not + // part of the FileDescriptorProto hierarchy. If this result has no AST, + // this returns a placeholder node. + MessageReservedRangeNode(*descriptorpb.DescriptorProto_ReservedRange) ast.RangeDeclNode + // EnumNode returns the AST node corresponding to the given enum. 
This can + // return nil, such as if the given enum is not part of the + // FileDescriptorProto hierarchy. If this result has no AST, this returns a + // placeholder node. + EnumNode(*descriptorpb.EnumDescriptorProto) ast.NodeWithOptions + // EnumValueNode returns the AST node corresponding to the given enum. This + // can return nil, such as if the given enum value is not part of the + // FileDescriptorProto hierarchy. If this result has no AST, this returns a + // placeholder node. + EnumValueNode(*descriptorpb.EnumValueDescriptorProto) ast.EnumValueDeclNode + // EnumReservedRangeNode returns the AST node corresponding to the given + // reserved range. This can return nil, such as if the given range is not + // part of the FileDescriptorProto hierarchy. If this result has no AST, + // this returns a placeholder node. + EnumReservedRangeNode(*descriptorpb.EnumDescriptorProto_EnumReservedRange) ast.RangeDeclNode + // ServiceNode returns the AST node corresponding to the given service. This + // can return nil, such as if the given service is not part of the + // FileDescriptorProto hierarchy. If this result has no AST, this returns a + // placeholder node. + ServiceNode(*descriptorpb.ServiceDescriptorProto) ast.NodeWithOptions + // MethodNode returns the AST node corresponding to the given method. This + // can return nil, such as if the given method is not part of the + // FileDescriptorProto hierarchy. If this result has no AST, this returns a + // placeholder node. + MethodNode(*descriptorpb.MethodDescriptorProto) ast.RPCDeclNode +} diff --git a/vendor/github.com/bufbuild/protocompile/parser/proto.y b/vendor/github.com/bufbuild/protocompile/parser/proto.y new file mode 100644 index 00000000..e66cabda --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/parser/proto.y @@ -0,0 +1,1498 @@ +%{ +package parser + +//lint:file-ignore SA4006 generated parser has unused values + +import ( + "math" + "strings" + + "github.com/bufbuild/protocompile/ast" +) + +%} + +// fields inside this union end up as the fields in a structure known +// as ${PREFIX}SymType, of which a reference is passed to the lexer. 
+%union{ + file *ast.FileNode + syn *ast.SyntaxNode + ed *ast.EditionNode + fileElements []ast.FileElement + pkg nodeWithRunes[*ast.PackageNode] + imprt nodeWithRunes[*ast.ImportNode] + msg nodeWithRunes[*ast.MessageNode] + msgElements []ast.MessageElement + fld *ast.FieldNode + msgFld nodeWithRunes[*ast.FieldNode] + mapFld nodeWithRunes[*ast.MapFieldNode] + mapType *ast.MapTypeNode + grp *ast.GroupNode + msgGrp nodeWithRunes[*ast.GroupNode] + oo nodeWithRunes[*ast.OneofNode] + ooElement ast.OneofElement + ooElements []ast.OneofElement + ext nodeWithRunes[*ast.ExtensionRangeNode] + resvd nodeWithRunes[*ast.ReservedNode] + en nodeWithRunes[*ast.EnumNode] + enElements []ast.EnumElement + env nodeWithRunes[*ast.EnumValueNode] + extend nodeWithRunes[*ast.ExtendNode] + extElement ast.ExtendElement + extElements []ast.ExtendElement + svc nodeWithRunes[*ast.ServiceNode] + svcElements []ast.ServiceElement + mtd nodeWithRunes[*ast.RPCNode] + mtdMsgType *ast.RPCTypeNode + mtdElements []ast.RPCElement + optRaw *ast.OptionNode + opt nodeWithRunes[*ast.OptionNode] + opts *compactOptionSlices + refRaw *ast.FieldReferenceNode + ref nodeWithRunes[*ast.FieldReferenceNode] + optNms *fieldRefSlices + cmpctOpts *ast.CompactOptionsNode + rng *ast.RangeNode + rngs *rangeSlices + names *nameSlices + cidPart nodeWithRunes[*ast.IdentNode] + cid *identSlices + tid ast.IdentValueNode + sl *valueSlices + msgLitFlds *messageFieldList + msgLitFld *ast.MessageFieldNode + v ast.ValueNode + il ast.IntValueNode + str []*ast.StringLiteralNode + s *ast.StringLiteralNode + i *ast.UintLiteralNode + f *ast.FloatLiteralNode + id *ast.IdentNode + b *ast.RuneNode + bs []*ast.RuneNode + err error +} + +// any non-terminal which returns a value needs a type, which is +// really a field name in the above union struct +%type file +%type syntaxDecl +%type editionDecl +%type fileBody fileElement fileElements +%type importDecl +%type packageDecl +%type compactOption oneofOptionDecl +%type optionDecl compactOptionEntry compactOptionFinal +%type compactOptionDecls compactOptionLeadingDecls +%type extensionName messageLiteralFieldName optionNamePart +%type optionNameEntry optionNameFinal +%type optionName optionNameLeading +%type compactOptions +%type fieldValue optionValue scalarValue fieldScalarValue messageLiteralWithBraces messageLiteral numLit specialFloatLit listLiteral listElement listOfMessagesLiteral messageValue +%type enumValueNumber +%type identifier mapKeyType msgElementName extElementName oneofElementName notGroupElementName mtdElementName enumValueName fieldCardinality +%type qualifiedIdentifierEntry qualifiedIdentifierFinal mtdElementIdentEntry mtdElementIdentFinal +%type qualifiedIdentifier msgElementIdent extElementIdent oneofElementIdent notGroupElementIdent mtdElementIdent qualifiedIdentifierDot qualifiedIdentifierLeading mtdElementIdentLeading +%type typeName msgElementTypeIdent extElementTypeIdent oneofElementTypeIdent notGroupElementTypeIdent mtdElementTypeIdent +%type listElements messageLiterals +%type messageLiteralFieldEntry messageLiteralFields messageTextFormat +%type messageLiteralField +%type messageFieldDecl +%type oneofFieldDecl extensionFieldDecl +%type oneofDecl +%type groupDecl oneofGroupDecl +%type messageGroupDecl +%type mapFieldDecl +%type mapType +%type messageDecl +%type messageElement messageElements messageBody +%type oneofElement +%type oneofElements oneofBody +%type fieldNameStrings fieldNameIdents +%type msgReserved enumReserved reservedNames +%type tagRange enumValueRange +%type tagRanges 
enumValueRanges +%type extensionRangeDecl +%type enumDecl +%type enumElement enumElements enumBody +%type enumValueDecl +%type extensionDecl +%type extensionElement +%type extensionElements extensionBody +%type stringLit +%type serviceDecl +%type serviceElement serviceElements serviceBody +%type methodDecl +%type methodElement methodElements methodBody +%type methodMessageType +%type semicolon +%type semicolons semicolonList + +// same for terminals +%token _STRING_LIT +%token _INT_LIT +%token _FLOAT_LIT +%token _NAME +%token _SYNTAX _EDITION _IMPORT _WEAK _PUBLIC _PACKAGE _OPTION _TRUE _FALSE _INF _NAN _REPEATED _OPTIONAL _REQUIRED +%token _DOUBLE _FLOAT _INT32 _INT64 _UINT32 _UINT64 _SINT32 _SINT64 _FIXED32 _FIXED64 _SFIXED32 _SFIXED64 +%token _BOOL _STRING _BYTES _GROUP _ONEOF _MAP _EXTENSIONS _TO _MAX _RESERVED _ENUM _MESSAGE _EXTEND +%token _SERVICE _RPC _STREAM _RETURNS +%token _ERROR +// we define all of these, even ones that aren't used, to improve error messages +// so it shows the unexpected symbol instead of showing "$unk" +%token '=' ';' ':' '{' '}' '\\' '/' '?' '.' ',' '>' '<' '+' '-' '(' ')' '[' ']' '*' '&' '^' '%' '$' '#' '@' '!' '~' '`' + +%% + +file : syntaxDecl { + lex := protolex.(*protoLex) + $$ = ast.NewFileNode(lex.info, $1, nil, lex.eof) + lex.res = $$ + } + | editionDecl { + lex := protolex.(*protoLex) + $$ = ast.NewFileNodeWithEdition(lex.info, $1, nil, lex.eof) + lex.res = $$ + } + | fileBody { + lex := protolex.(*protoLex) + $$ = ast.NewFileNode(lex.info, nil, $1, lex.eof) + lex.res = $$ + } + | syntaxDecl fileBody { + lex := protolex.(*protoLex) + $$ = ast.NewFileNode(lex.info, $1, $2, lex.eof) + lex.res = $$ + } + | editionDecl fileBody { + lex := protolex.(*protoLex) + $$ = ast.NewFileNodeWithEdition(lex.info, $1, $2, lex.eof) + lex.res = $$ + } + | { + lex := protolex.(*protoLex) + $$ = ast.NewFileNode(lex.info, nil, nil, lex.eof) + lex.res = $$ + } + +fileBody : semicolons fileElements { + $$ = prependRunes(toFileElement, $1, $2) + } + +fileElements : fileElements fileElement { + $$ = append($1, $2...) + } + | fileElement { + $$ = $1 + } + +fileElement : importDecl { + $$ = toElements[ast.FileElement](toFileElement, $1.Node, $1.Runes) + } + | packageDecl { + $$ = toElements[ast.FileElement](toFileElement, $1.Node, $1.Runes) + } + | optionDecl { + $$ = toElements[ast.FileElement](toFileElement, $1.Node, $1.Runes) + } + | messageDecl { + $$ = toElements[ast.FileElement](toFileElement, $1.Node, $1.Runes) + } + | enumDecl { + $$ = toElements[ast.FileElement](toFileElement, $1.Node, $1.Runes) + } + | extensionDecl { + $$ = toElements[ast.FileElement](toFileElement, $1.Node, $1.Runes) + } + | serviceDecl { + $$ = toElements[ast.FileElement](toFileElement, $1.Node, $1.Runes) + } + | error { + $$ = nil + } + +semicolonList : ';' { + $$ = []*ast.RuneNode{$1} + } + | semicolonList ';' { + $$ = append($1, $2) + } + +semicolons : semicolonList { + $$ = $1 + } + | { + $$ = nil + } + +semicolon : ';' { + $$ = $1 + } | + { + protolex.(*protoLex).Error("syntax error: expecting ';'") + $$ = nil + } + +syntaxDecl : _SYNTAX '=' stringLit ';' { + $$ = ast.NewSyntaxNode($1.ToKeyword(), $2, toStringValueNode($3), $4) + } + +editionDecl : _EDITION '=' stringLit ';' { + $$ = ast.NewEditionNode($1.ToKeyword(), $2, toStringValueNode($3), $4) + } + +importDecl : _IMPORT stringLit semicolons { + semi, extra := protolex.(*protoLex).requireSemicolon($3) + $$ = newNodeWithRunes(ast.NewImportNode($1.ToKeyword(), nil, nil, toStringValueNode($2), semi), extra...) 
+ } + | _IMPORT _WEAK stringLit semicolons { + semi, extra := protolex.(*protoLex).requireSemicolon($4) + $$ = newNodeWithRunes(ast.NewImportNode($1.ToKeyword(), nil, $2.ToKeyword(), toStringValueNode($3), semi), extra...) + } + | _IMPORT _PUBLIC stringLit semicolons { + semi, extra := protolex.(*protoLex).requireSemicolon($4) + $$ = newNodeWithRunes(ast.NewImportNode($1.ToKeyword(), $2.ToKeyword(), nil, toStringValueNode($3), semi), extra...) + } + +packageDecl : _PACKAGE qualifiedIdentifier semicolons { + semi, extra := protolex.(*protoLex).requireSemicolon($3) + $$ = newNodeWithRunes(ast.NewPackageNode($1.ToKeyword(), $2.toIdentValueNode(nil), semi), extra...) + } + +qualifiedIdentifier : identifier { + $$ = &identSlices{idents: []*ast.IdentNode{$1}} + } + | qualifiedIdentifier '.' identifier { + $1.idents = append($1.idents, $3) + $1.dots = append($1.dots, $2) + $$ = $1 + } + +qualifiedIdentifierDot : qualifiedIdentifierFinal { + $$ = &identSlices{idents: []*ast.IdentNode{$1.Node}, dots: $1.Runes} + } + | qualifiedIdentifierLeading qualifiedIdentifierFinal { + $1.idents = append($1.idents, $2.Node) + $1.dots = append($1.dots, $2.Runes...) + $$ = $1 + } + +qualifiedIdentifierLeading : qualifiedIdentifierEntry { + $$ = &identSlices{idents: []*ast.IdentNode{$1.Node}, dots: $1.Runes} + } + | qualifiedIdentifierLeading qualifiedIdentifierEntry { + $1.idents = append($1.idents, $2.Node) + $1.dots = append($1.dots, $2.Runes...) + $$ = $1 + } + +qualifiedIdentifierFinal : identifier { + $$ = newNodeWithRunes($1) + } + | qualifiedIdentifierEntry { + protolex.(*protoLex).Error("syntax error: unexpected '.'") + $$ = $1 + } + +qualifiedIdentifierEntry : identifier '.' { + $$ = newNodeWithRunes($1, $2) + } + +// to mimic limitations of protoc recursive-descent parser, +// we don't allowed message statement keywords as identifiers +// (or oneof statement keywords [e.g. "option"] below) + +msgElementIdent : msgElementName { + $$ = &identSlices{idents: []*ast.IdentNode{$1}} + } + | msgElementIdent '.' identifier { + $1.idents = append($1.idents, $3) + $1.dots = append($1.dots, $2) + $$ = $1 + } + +extElementIdent : extElementName { + $$ = &identSlices{idents: []*ast.IdentNode{$1}} + } + | extElementIdent '.' identifier { + $1.idents = append($1.idents, $3) + $1.dots = append($1.dots, $2) + $$ = $1 + } + +oneofElementIdent : oneofElementName { + $$ = &identSlices{idents: []*ast.IdentNode{$1}} + } + | oneofElementIdent '.' identifier { + $1.idents = append($1.idents, $3) + $1.dots = append($1.dots, $2) + $$ = $1 + } + +notGroupElementIdent : notGroupElementName { + $$ = &identSlices{idents: []*ast.IdentNode{$1}} + } + | notGroupElementIdent '.' identifier { + $1.idents = append($1.idents, $3) + $1.dots = append($1.dots, $2) + $$ = $1 + } + +mtdElementIdent : mtdElementIdentFinal { + $$ = &identSlices{idents: []*ast.IdentNode{$1.Node}, dots: $1.Runes} + } + | mtdElementIdentLeading mtdElementIdentFinal { + $1.idents = append($1.idents, $2.Node) + $1.dots = append($1.dots, $2.Runes...) + $$ = $1 + } + +mtdElementIdentLeading : mtdElementIdentEntry { + $$ = &identSlices{idents: []*ast.IdentNode{$1.Node}, dots: $1.Runes} + } + | mtdElementIdentLeading mtdElementIdentEntry { + $1.idents = append($1.idents, $2.Node) + $1.dots = append($1.dots, $2.Runes...) + $$ = $1 + } + +mtdElementIdentFinal : mtdElementName { + $$ = newNodeWithRunes($1) + } + | mtdElementIdentEntry { + protolex.(*protoLex).Error("syntax error: unexpected '.'") + $$ = $1 + } + +mtdElementIdentEntry : mtdElementName '.' 
{ + $$ = newNodeWithRunes($1, $2) + } + +oneofOptionDecl : _OPTION optionName '=' optionValue semicolon { + optName := ast.NewOptionNameNode($2.refs, $2.dots) + $$ = ast.NewOptionNode($1.ToKeyword(), optName, $3, $4, $5) + } + +optionDecl : _OPTION optionName '=' optionValue semicolons { + optName := ast.NewOptionNameNode($2.refs, $2.dots) + semi, extra := protolex.(*protoLex).requireSemicolon($5) + $$ = newNodeWithRunes(ast.NewOptionNode($1.ToKeyword(), optName, $3, $4, semi), extra...) + } + +optionNamePart : identifier { + $$ = ast.NewFieldReferenceNode($1) + } + | extensionName { + $$ = $1 + } + +optionNameEntry : optionNamePart '.' { + $$ = newNodeWithRunes($1, $2) + } + +optionNameFinal : optionNamePart { + $$ = newNodeWithRunes($1) + } + | optionNameEntry { + protolex.(*protoLex).Error("syntax error: unexpected '.'") + $$ = $1 + } + +optionNameLeading : optionNameEntry { + $$ = &fieldRefSlices{refs: []*ast.FieldReferenceNode{$1.Node}, dots: $1.Runes} + } + | optionNameLeading optionNameEntry { + $1.refs = append($1.refs, $2.Node) + $1.dots = append($1.dots, $2.Runes...) + $$ = $1 + } + +optionName : optionNameFinal { + $$ = &fieldRefSlices{refs: []*ast.FieldReferenceNode{$1.Node}, dots: $1.Runes} + } + | optionNameLeading optionNameFinal { + $1.refs = append($1.refs, $2.Node) + $1.dots = append($1.dots, $2.Runes...) + $$ = $1 + } + +extensionName : '(' typeName ')' { + $$ = ast.NewExtensionFieldReferenceNode($1, $2, $3) + } + +optionValue : scalarValue + | messageLiteralWithBraces + +scalarValue : stringLit { + $$ = toStringValueNode($1) + } + | numLit + | specialFloatLit + | identifier { + $$ = $1 + } + +numLit : _FLOAT_LIT { + $$ = $1 + } + | '-' _FLOAT_LIT { + $$ = ast.NewSignedFloatLiteralNode($1, $2) + } + | _INT_LIT { + $$ = $1 + } + | '-' _INT_LIT { + if $2.Val > math.MaxInt64 + 1 { + // can't represent as int so treat as float literal + $$ = ast.NewSignedFloatLiteralNode($1, $2) + } else { + $$ = ast.NewNegativeIntLiteralNode($1, $2) + } + } + +specialFloatLit : '-' _INF { + f := ast.NewSpecialFloatLiteralNode($2.ToKeyword()) + $$ = ast.NewSignedFloatLiteralNode($1, f) + } + | '-' _NAN { + f := ast.NewSpecialFloatLiteralNode($2.ToKeyword()) + $$ = ast.NewSignedFloatLiteralNode($1, f) + } + +stringLit : _STRING_LIT { + $$ = []*ast.StringLiteralNode{$1} + } + | stringLit _STRING_LIT { + $$ = append($1, $2) + } + +messageLiteralWithBraces : '{' messageTextFormat '}' { + if $2 == nil { + $$ = ast.NewMessageLiteralNode($1, nil, nil, $3) + } else { + fields, delimiters := $2.toNodes() + $$ = ast.NewMessageLiteralNode($1, fields, delimiters, $3) + } + } + | '{' '}' { + $$ = ast.NewMessageLiteralNode($1, nil, nil, $2) + } + +messageTextFormat : messageLiteralFields + +messageLiteralFields : messageLiteralFieldEntry + | messageLiteralFieldEntry messageLiteralFields { + if $1 != nil { + $1.next = $2 + $$ = $1 + } else { + $$ = $2 + } + } + +messageLiteralFieldEntry : messageLiteralField { + if $1 != nil { + $$ = &messageFieldList{field: $1} + } else { + $$ = nil + } + } + | messageLiteralField ',' { + if $1 != nil { + $$ = &messageFieldList{field: $1, delimiter: $2} + } else { + $$ = nil + } + } + | messageLiteralField ';' { + if $1 != nil { + $$ = &messageFieldList{field: $1, delimiter: $2} + } else { + $$ = nil + } + } + | error ',' { + $$ = nil + } + | error ';' { + $$ = nil + } + | error { + $$ = nil + } + +messageLiteralField : messageLiteralFieldName ':' fieldValue { + if $1 != nil && $2 != nil { + $$ = ast.NewMessageFieldNode($1, $2, $3) + } else { + $$ = nil + } + } + | 
messageLiteralFieldName messageValue { + if $1 != nil && $2 != nil { + $$ = ast.NewMessageFieldNode($1, nil, $2) + } else { + $$ = nil + } + } + | error ':' fieldValue { + $$ = nil + } + +messageLiteralFieldName : identifier { + $$ = ast.NewFieldReferenceNode($1) + } + | '[' qualifiedIdentifierDot ']' { + $$ = ast.NewExtensionFieldReferenceNode($1, $2.toIdentValueNode(nil), $3) + } + | '[' qualifiedIdentifierDot '/' qualifiedIdentifierDot ']' { + $$ = ast.NewAnyTypeReferenceNode($1, $2.toIdentValueNode(nil), $3, $4.toIdentValueNode(nil), $5) + } + | '[' error ']' { + $$ = nil + } + +fieldValue : fieldScalarValue + | messageLiteral + | listLiteral + +fieldScalarValue : stringLit { + $$ = toStringValueNode($1) + } + | numLit + | '-' identifier { + kw := $2.ToKeyword() + switch strings.ToLower(kw.Val) { + case "inf", "infinity", "nan": + // these are acceptable + default: + // anything else is not + protolex.(*protoLex).Error(`only identifiers "inf", "infinity", or "nan" may appear after negative sign`) + } + // we'll validate the identifier later + f := ast.NewSpecialFloatLiteralNode(kw) + $$ = ast.NewSignedFloatLiteralNode($1, f) + } + | identifier { + $$ = $1 + } + +messageValue : messageLiteral + | listOfMessagesLiteral + +messageLiteral : messageLiteralWithBraces + | '<' messageTextFormat '>' { + if $2 == nil { + $$ = ast.NewMessageLiteralNode($1, nil, nil, $3) + } else { + fields, delimiters := $2.toNodes() + $$ = ast.NewMessageLiteralNode($1, fields, delimiters, $3) + } + } + | '<' '>' { + $$ = ast.NewMessageLiteralNode($1, nil, nil, $2) + } + +listLiteral : '[' listElements ']' { + if $2 == nil { + $$ = ast.NewArrayLiteralNode($1, nil, nil, $3) + } else { + $$ = ast.NewArrayLiteralNode($1, $2.vals, $2.commas, $3) + } + } + | '[' ']' { + $$ = ast.NewArrayLiteralNode($1, nil, nil, $2) + } + | '[' error ']' { + $$ = ast.NewArrayLiteralNode($1, nil, nil, $3) + } + +listElements : listElement { + $$ = &valueSlices{vals: []ast.ValueNode{$1}} + } + | listElements ',' listElement { + $1.vals = append($1.vals, $3) + $1.commas = append($1.commas, $2) + $$ = $1 + } + +listElement : fieldScalarValue + | messageLiteral + +listOfMessagesLiteral : '[' messageLiterals ']' { + if $2 == nil { + $$ = ast.NewArrayLiteralNode($1, nil, nil, $3) + } else { + $$ = ast.NewArrayLiteralNode($1, $2.vals, $2.commas, $3) + } + } + | '[' ']' { + $$ = ast.NewArrayLiteralNode($1, nil, nil, $2) + } + | '[' error ']' { + $$ = ast.NewArrayLiteralNode($1, nil, nil, $3) + } + +messageLiterals : messageLiteral { + $$ = &valueSlices{vals: []ast.ValueNode{$1}} + } + | messageLiterals ',' messageLiteral { + $1.vals = append($1.vals, $3) + $1.commas = append($1.commas, $2) + $$ = $1 + } + +typeName : qualifiedIdentifierDot { + $$ = $1.toIdentValueNode(nil) + } + | '.' qualifiedIdentifierDot { + $$ = $2.toIdentValueNode($1) + } + +msgElementTypeIdent : msgElementIdent { + $$ = $1.toIdentValueNode(nil) + } + | '.' qualifiedIdentifier { + $$ = $2.toIdentValueNode($1) + } + +extElementTypeIdent : extElementIdent { + $$ = $1.toIdentValueNode(nil) + } + | '.' qualifiedIdentifier { + $$ = $2.toIdentValueNode($1) + } + +oneofElementTypeIdent : oneofElementIdent { + $$ = $1.toIdentValueNode(nil) + } + | '.' qualifiedIdentifier { + $$ = $2.toIdentValueNode($1) + } + +notGroupElementTypeIdent : notGroupElementIdent { + $$ = $1.toIdentValueNode(nil) + } + | '.' qualifiedIdentifier { + $$ = $2.toIdentValueNode($1) + } + +mtdElementTypeIdent : mtdElementIdent { + $$ = $1.toIdentValueNode(nil) + } + | '.' 
qualifiedIdentifierDot { + $$ = $2.toIdentValueNode($1) + } + +fieldCardinality : _REQUIRED + | _OPTIONAL + | _REPEATED + +compactOptions : '[' compactOptionDecls ']' { + $$ = ast.NewCompactOptionsNode($1, $2.options, $2.commas, $3) + } + | '[' ']' { + protolex.(*protoLex).Error("compact options must have at least one option") + $$ = ast.NewCompactOptionsNode($1, nil, nil, $2) + } + +compactOptionDecls : compactOptionFinal { + $$ = &compactOptionSlices{options: []*ast.OptionNode{$1.Node}, commas: $1.Runes} + } + | compactOptionLeadingDecls compactOptionFinal { + $1.options = append($1.options, $2.Node) + $1.commas = append($1.commas, $2.Runes...) + $$ = $1 + } + +compactOptionLeadingDecls : compactOptionEntry { + $$ = &compactOptionSlices{options: []*ast.OptionNode{$1.Node}, commas: $1.Runes} + } + | compactOptionLeadingDecls compactOptionEntry { + $1.options = append($1.options, $2.Node) + $1.commas = append($1.commas, $2.Runes...) + $$ = $1 + } + +compactOptionFinal : compactOption { + $$ = newNodeWithRunes($1) + } + | compactOptionEntry { + protolex.(*protoLex).Error("syntax error: unexpected ','") + $$ = $1 + } + +compactOptionEntry : compactOption ',' { + $$ = newNodeWithRunes($1, $2) + } + +compactOption : optionName '=' optionValue { + optName := ast.NewOptionNameNode($1.refs, $1.dots) + $$ = ast.NewCompactOptionNode(optName, $2, $3) + } + | optionName { + optName := ast.NewOptionNameNode($1.refs, $1.dots) + protolex.(*protoLex).Error("compact option must have a value") + $$ = ast.NewCompactOptionNode(optName, nil, nil) + } + + +groupDecl : fieldCardinality _GROUP identifier '=' _INT_LIT '{' messageBody '}' { + $$ = ast.NewGroupNode($1.ToKeyword(), $2.ToKeyword(), $3, $4, $5, nil, $6, $7, $8) + } + | fieldCardinality _GROUP identifier '=' _INT_LIT compactOptions '{' messageBody '}' { + $$ = ast.NewGroupNode($1.ToKeyword(), $2.ToKeyword(), $3, $4, $5, $6, $7, $8, $9) + } + +messageGroupDecl : fieldCardinality _GROUP identifier '=' _INT_LIT '{' messageBody '}' semicolons { + $$ = newNodeWithRunes(ast.NewGroupNode($1.ToKeyword(), $2.ToKeyword(), $3, $4, $5, nil, $6, $7, $8), $9...) + } + | fieldCardinality _GROUP identifier '=' _INT_LIT compactOptions '{' messageBody '}' semicolons { + $$ = newNodeWithRunes(ast.NewGroupNode($1.ToKeyword(), $2.ToKeyword(), $3, $4, $5, $6, $7, $8, $9), $10...) + } + | fieldCardinality _GROUP identifier '{' messageBody '}' semicolons { + $$ = newNodeWithRunes(ast.NewGroupNode($1.ToKeyword(), $2.ToKeyword(), $3, nil, nil, nil, $4, $5, $6), $7...) + } + | fieldCardinality _GROUP identifier compactOptions '{' messageBody '}' semicolons { + $$ = newNodeWithRunes(ast.NewGroupNode($1.ToKeyword(), $2.ToKeyword(), $3, nil, nil, $4, $5, $6, $7), $8...) + } + +oneofDecl : _ONEOF identifier '{' oneofBody '}' semicolons { + $$ = newNodeWithRunes(ast.NewOneofNode($1.ToKeyword(), $2, $3, $4, $5), $6...) 
+ } + +oneofBody : { + $$ = nil + } + | oneofElements + +oneofElements : oneofElements oneofElement { + if $2 != nil { + $$ = append($1, $2) + } else { + $$ = $1 + } + } + | oneofElement { + if $1 != nil { + $$ = []ast.OneofElement{$1} + } else { + $$ = nil + } + } + +oneofElement : oneofOptionDecl { + $$ = $1 + } + | oneofFieldDecl { + $$ = $1 + } + | oneofGroupDecl { + $$ = $1 + } + | error ';' { + $$ = nil + } + | error { + $$ = nil + } + +oneofFieldDecl : oneofElementTypeIdent identifier '=' _INT_LIT semicolon { + $$ = ast.NewFieldNode(nil, $1, $2, $3, $4, nil, $5) + } + | oneofElementTypeIdent identifier '=' _INT_LIT compactOptions semicolon { + $$ = ast.NewFieldNode(nil, $1, $2, $3, $4, $5, $6) + } + | oneofElementTypeIdent identifier semicolon { + $$ = ast.NewFieldNode(nil, $1, $2, nil, nil, nil, $3) + } + | oneofElementTypeIdent identifier compactOptions semicolon { + $$ = ast.NewFieldNode(nil, $1, $2, nil, nil, $3, $4) + } + +oneofGroupDecl : _GROUP identifier '=' _INT_LIT '{' messageBody '}' { + $$ = ast.NewGroupNode(nil, $1.ToKeyword(), $2, $3, $4, nil, $5, $6, $7) + } + | _GROUP identifier '=' _INT_LIT compactOptions '{' messageBody '}' { + $$ = ast.NewGroupNode(nil, $1.ToKeyword(), $2, $3, $4, $5, $6, $7, $8) + } + | _GROUP identifier '{' messageBody '}' { + $$ = ast.NewGroupNode(nil, $1.ToKeyword(), $2, nil, nil, nil, $3, $4, $5) + } + | _GROUP identifier compactOptions '{' messageBody '}' { + $$ = ast.NewGroupNode(nil, $1.ToKeyword(), $2, nil, nil, $3, $4, $5, $6) + } + + +mapFieldDecl : mapType identifier '=' _INT_LIT semicolons { + semi, extra := protolex.(*protoLex).requireSemicolon($5) + $$ = newNodeWithRunes(ast.NewMapFieldNode($1, $2, $3, $4, nil, semi), extra...) + } + | mapType identifier '=' _INT_LIT compactOptions semicolons { + semi, extra := protolex.(*protoLex).requireSemicolon($6) + $$ = newNodeWithRunes(ast.NewMapFieldNode($1, $2, $3, $4, $5, semi), extra...) + } + | mapType identifier semicolons { + semi, extra := protolex.(*protoLex).requireSemicolon($3) + $$ = newNodeWithRunes(ast.NewMapFieldNode($1, $2, nil, nil, nil, semi), extra...) + } + | mapType identifier compactOptions semicolons { + semi, extra := protolex.(*protoLex).requireSemicolon($4) + $$ = newNodeWithRunes(ast.NewMapFieldNode($1, $2, nil, nil, $3, semi), extra...) + } + +mapType : _MAP '<' mapKeyType ',' typeName '>' { + $$ = ast.NewMapTypeNode($1.ToKeyword(), $2, $3, $4, $5, $6) + } + +mapKeyType : _INT32 + | _INT64 + | _UINT32 + | _UINT64 + | _SINT32 + | _SINT64 + | _FIXED32 + | _FIXED64 + | _SFIXED32 + | _SFIXED64 + | _BOOL + | _STRING + +extensionRangeDecl : _EXTENSIONS tagRanges ';' semicolons { + // TODO: Tolerate a missing semicolon here. This currnelty creates a shift/reduce conflict + // between `extensions 1 to 10` and `extensions 1` followed by `to = 10`. + $$ = newNodeWithRunes(ast.NewExtensionRangeNode($1.ToKeyword(), $2.ranges, $2.commas, nil, $3), $4...) + } + | _EXTENSIONS tagRanges compactOptions semicolons { + semi, extra := protolex.(*protoLex).requireSemicolon($4) + $$ = newNodeWithRunes(ast.NewExtensionRangeNode($1.ToKeyword(), $2.ranges, $2.commas, $3, semi), extra...) 
+ } + +tagRanges : tagRange { + $$ = &rangeSlices{ranges: []*ast.RangeNode{$1}} + } + | tagRanges ',' tagRange { + $1.ranges = append($1.ranges, $3) + $1.commas = append($1.commas, $2) + $$ = $1 + } + +tagRange : _INT_LIT { + $$ = ast.NewRangeNode($1, nil, nil, nil) + } + | _INT_LIT _TO _INT_LIT { + $$ = ast.NewRangeNode($1, $2.ToKeyword(), $3, nil) + } + | _INT_LIT _TO _MAX { + $$ = ast.NewRangeNode($1, $2.ToKeyword(), nil, $3.ToKeyword()) + } + +enumValueRanges : enumValueRange { + $$ = &rangeSlices{ranges: []*ast.RangeNode{$1}} + } + | enumValueRanges ',' enumValueRange { + $1.ranges = append($1.ranges, $3) + $1.commas = append($1.commas, $2) + $$ = $1 + } + +enumValueRange : enumValueNumber { + $$ = ast.NewRangeNode($1, nil, nil, nil) + } + | enumValueNumber _TO enumValueNumber { + $$ = ast.NewRangeNode($1, $2.ToKeyword(), $3, nil) + } + | enumValueNumber _TO _MAX { + $$ = ast.NewRangeNode($1, $2.ToKeyword(), nil, $3.ToKeyword()) + } + +enumValueNumber : _INT_LIT { + $$ = $1 + } + | '-' _INT_LIT { + $$ = ast.NewNegativeIntLiteralNode($1, $2) + } + +msgReserved : _RESERVED tagRanges ';' semicolons { + // TODO: Tolerate a missing semicolon here. This currnelty creates a shift/reduce conflict + // between `reserved 1 to 10` and `reserved 1` followed by `to = 10`. + $$ = newNodeWithRunes(ast.NewReservedRangesNode($1.ToKeyword(), $2.ranges, $2.commas, $3), $4...) + } + | reservedNames + +enumReserved : _RESERVED enumValueRanges ';' semicolons { + // TODO: Tolerate a missing semicolon here. This currnelty creates a shift/reduce conflict + // between `reserved 1 to 10` and `reserved 1` followed by `to = 10`. + $$ = newNodeWithRunes(ast.NewReservedRangesNode($1.ToKeyword(), $2.ranges, $2.commas, $3), $4...) + } + | reservedNames + +reservedNames : _RESERVED fieldNameStrings semicolons { + semi, extra := protolex.(*protoLex).requireSemicolon($3) + $$ = newNodeWithRunes(ast.NewReservedNamesNode($1.ToKeyword(), $2.names, $2.commas, semi), extra...) + } + | _RESERVED fieldNameIdents semicolons { + semi, extra := protolex.(*protoLex).requireSemicolon($3) + $$ = newNodeWithRunes(ast.NewReservedIdentifiersNode($1.ToKeyword(), $2.idents, $2.commas, semi), extra...) + } + +fieldNameStrings : stringLit { + $$ = &nameSlices{names: []ast.StringValueNode{toStringValueNode($1)}} + } + | fieldNameStrings ',' stringLit { + $1.names = append($1.names, toStringValueNode($3)) + $1.commas = append($1.commas, $2) + $$ = $1 + } + +fieldNameIdents : identifier { + $$ = &nameSlices{idents: []*ast.IdentNode{$1}} + } + | fieldNameIdents ',' identifier { + $1.idents = append($1.idents, $3) + $1.commas = append($1.commas, $2) + $$ = $1 + } + +enumDecl : _ENUM identifier '{' enumBody '}' semicolons { + $$ = newNodeWithRunes(ast.NewEnumNode($1.ToKeyword(), $2, $3, $4, $5), $6...) + } + +enumBody : semicolons { + $$ = prependRunes(toEnumElement, $1, nil) + } + | semicolons enumElements { + $$ = prependRunes(toEnumElement, $1, $2) + } + +enumElements : enumElements enumElement { + $$ = append($1, $2...) 
+ } + | enumElement { + $$ = $1 + } + +enumElement : optionDecl { + $$ = toElements[ast.EnumElement](toEnumElement, $1.Node, $1.Runes) + } + | enumValueDecl { + $$ = toElements[ast.EnumElement](toEnumElement, $1.Node, $1.Runes) + } + | enumReserved { + $$ = toElements[ast.EnumElement](toEnumElement, $1.Node, $1.Runes) + } + | error { + $$ = nil + } + +enumValueDecl : enumValueName '=' enumValueNumber semicolons { + semi, extra := protolex.(*protoLex).requireSemicolon($4) + $$ = newNodeWithRunes(ast.NewEnumValueNode($1, $2, $3, nil, semi), extra...) + } + | enumValueName '=' enumValueNumber compactOptions semicolons { + semi, extra := protolex.(*protoLex).requireSemicolon($5) + $$ = newNodeWithRunes(ast.NewEnumValueNode($1, $2, $3, $4, semi), extra...) + } + +messageDecl : _MESSAGE identifier '{' messageBody '}' semicolons { + $$ = newNodeWithRunes(ast.NewMessageNode($1.ToKeyword(), $2, $3, $4, $5), $6...) + } + +messageBody : semicolons { + $$ = prependRunes(toMessageElement, $1, nil) + } + | semicolons messageElements { + $$ = prependRunes(toMessageElement, $1, $2) + } + +messageElements : messageElements messageElement { + $$ = append($1, $2...) + } + | messageElement { + $$ = $1 + } + +messageElement : messageFieldDecl { + $$ = toElements[ast.MessageElement](toMessageElement, $1.Node, $1.Runes) + } + | enumDecl { + $$ = toElements[ast.MessageElement](toMessageElement, $1.Node, $1.Runes) + } + | messageDecl { + $$ = toElements[ast.MessageElement](toMessageElement, $1.Node, $1.Runes) + } + | extensionDecl { + $$ = toElements[ast.MessageElement](toMessageElement, $1.Node, $1.Runes) + } + | extensionRangeDecl { + $$ = toElements[ast.MessageElement](toMessageElement, $1.Node, $1.Runes) + } + | messageGroupDecl { + $$ = toElements[ast.MessageElement](toMessageElement, $1.Node, $1.Runes) + } + | optionDecl { + $$ = toElements[ast.MessageElement](toMessageElement, $1.Node, $1.Runes) + } + | oneofDecl { + $$ = toElements[ast.MessageElement](toMessageElement, $1.Node, $1.Runes) + } + | mapFieldDecl { + $$ = toElements[ast.MessageElement](toMessageElement, $1.Node, $1.Runes) + } + | msgReserved { + $$ = toElements[ast.MessageElement](toMessageElement, $1.Node, $1.Runes) + } + | error { + $$ = nil + } + +messageFieldDecl : fieldCardinality notGroupElementTypeIdent identifier '=' _INT_LIT semicolons { + semis, extra := protolex.(*protoLex).requireSemicolon($6) + $$ = newNodeWithRunes(ast.NewFieldNode($1.ToKeyword(), $2, $3, $4, $5, nil, semis), extra...) + } + | fieldCardinality notGroupElementTypeIdent identifier '=' _INT_LIT compactOptions semicolons { + semis, extra := protolex.(*protoLex).requireSemicolon($7) + $$ = newNodeWithRunes(ast.NewFieldNode($1.ToKeyword(), $2, $3, $4, $5, $6, semis), extra...) + } + | msgElementTypeIdent identifier '=' _INT_LIT semicolons { + semis, extra := protolex.(*protoLex).requireSemicolon($5) + $$ = newNodeWithRunes(ast.NewFieldNode(nil, $1, $2, $3, $4, nil, semis), extra...) + } + | msgElementTypeIdent identifier '=' _INT_LIT compactOptions semicolons { + semis, extra := protolex.(*protoLex).requireSemicolon($6) + $$ = newNodeWithRunes(ast.NewFieldNode(nil, $1, $2, $3, $4, $5, semis), extra...) + } + | fieldCardinality notGroupElementTypeIdent identifier semicolons { + semis, extra := protolex.(*protoLex).requireSemicolon($4) + $$ = newNodeWithRunes(ast.NewFieldNode($1.ToKeyword(), $2, $3, nil, nil, nil, semis), extra...) 
+ } + | fieldCardinality notGroupElementTypeIdent identifier compactOptions semicolons { + semis, extra := protolex.(*protoLex).requireSemicolon($5) + $$ = newNodeWithRunes(ast.NewFieldNode($1.ToKeyword(), $2, $3, nil, nil, $4, semis), extra...) + } + | msgElementTypeIdent identifier semicolons { + semis, extra := protolex.(*protoLex).requireSemicolon($3) + $$ = newNodeWithRunes(ast.NewFieldNode(nil, $1, $2, nil, nil, nil, semis), extra...) + } + | msgElementTypeIdent identifier compactOptions semicolons { + semis, extra := protolex.(*protoLex).requireSemicolon($4) + $$ = newNodeWithRunes(ast.NewFieldNode(nil, $1, $2, nil, nil, $3, semis), extra...) + } + + +extensionDecl : _EXTEND typeName '{' extensionBody '}' semicolons { + $$ = newNodeWithRunes(ast.NewExtendNode($1.ToKeyword(), $2, $3, $4, $5), $6...) + } + +extensionBody : { + $$ = nil + } + | extensionElements + +extensionElements : extensionElements extensionElement { + if $2 != nil { + $$ = append($1, $2) + } else { + $$ = $1 + } + } + | extensionElement { + if $1 != nil { + $$ = []ast.ExtendElement{$1} + } else { + $$ = nil + } + } + +extensionElement : extensionFieldDecl { + $$ = $1 + } + | groupDecl { + $$ = $1 + } + | error ';' { + $$ = nil + } + | error { + $$ = nil + } + +extensionFieldDecl : fieldCardinality notGroupElementTypeIdent identifier '=' _INT_LIT semicolon { + $$ = ast.NewFieldNode($1.ToKeyword(), $2, $3, $4, $5, nil, $6) + } + | fieldCardinality notGroupElementTypeIdent identifier '=' _INT_LIT compactOptions semicolon { + $$ = ast.NewFieldNode($1.ToKeyword(), $2, $3, $4, $5, $6, $7) + } + | extElementTypeIdent identifier '=' _INT_LIT semicolon { + $$ = ast.NewFieldNode(nil, $1, $2, $3, $4, nil, $5) + } + | extElementTypeIdent identifier '=' _INT_LIT compactOptions semicolon { + $$ = ast.NewFieldNode(nil, $1, $2, $3, $4, $5, $6) + } + +serviceDecl : _SERVICE identifier '{' serviceBody '}' semicolons { + $$ = newNodeWithRunes(ast.NewServiceNode($1.ToKeyword(), $2, $3, $4, $5), $6...) + } + +serviceBody : semicolons { + $$ = prependRunes(toServiceElement, $1, nil) + } + | semicolons serviceElements { + $$ = prependRunes(toServiceElement, $1, $2) + } + +serviceElements : serviceElements serviceElement { + $$ = append($1, $2...) + } + | serviceElement { + $$ = $1 + } + +// NB: doc suggests support for "stream" declaration, separate from "rpc", but +// it does not appear to be supported in protoc (doc is likely from grammar for +// Google-internal version of protoc, with support for streaming stubby) +serviceElement : optionDecl { + $$ = toElements[ast.ServiceElement](toServiceElement, $1.Node, $1.Runes) + } + | methodDecl { + $$ = toElements[ast.ServiceElement](toServiceElement, $1.Node, $1.Runes) + } + | error { + $$ = nil + } + +methodDecl : _RPC identifier methodMessageType _RETURNS methodMessageType semicolons { + semi, extra := protolex.(*protoLex).requireSemicolon($6) + $$ = newNodeWithRunes(ast.NewRPCNode($1.ToKeyword(), $2, $3, $4.ToKeyword(), $5, semi), extra...) + } + | _RPC identifier methodMessageType _RETURNS methodMessageType '{' methodBody '}' semicolons { + $$ = newNodeWithRunes(ast.NewRPCNodeWithBody($1.ToKeyword(), $2, $3, $4.ToKeyword(), $5, $6, $7, $8), $9...) 
+ } + +methodMessageType : '(' _STREAM typeName ')' { + $$ = ast.NewRPCTypeNode($1, $2.ToKeyword(), $3, $4) + } + | '(' mtdElementTypeIdent ')' { + $$ = ast.NewRPCTypeNode($1, nil, $2, $3) + } + +methodBody : semicolons { + $$ = prependRunes(toMethodElement, $1, nil) + } + | semicolons methodElements { + $$ = prependRunes(toMethodElement, $1, $2) + } + +methodElements : methodElements methodElement { + $$ = append($1, $2...) + } + | methodElement { + $$ = $1 + } + +methodElement : optionDecl { + $$ = toElements[ast.RPCElement](toMethodElement, $1.Node, $1.Runes) + } + | error { + $$ = nil + } + +// excludes message, enum, oneof, extensions, reserved, extend, +// option, group, optional, required, and repeated +msgElementName : _NAME + | _SYNTAX + | _EDITION + | _IMPORT + | _WEAK + | _PUBLIC + | _PACKAGE + | _TRUE + | _FALSE + | _INF + | _NAN + | _DOUBLE + | _FLOAT + | _INT32 + | _INT64 + | _UINT32 + | _UINT64 + | _SINT32 + | _SINT64 + | _FIXED32 + | _FIXED64 + | _SFIXED32 + | _SFIXED64 + | _BOOL + | _STRING + | _BYTES + | _MAP + | _TO + | _MAX + | _SERVICE + | _RPC + | _STREAM + | _RETURNS + +// excludes group, optional, required, and repeated +extElementName : _NAME + | _SYNTAX + | _EDITION + | _IMPORT + | _WEAK + | _PUBLIC + | _PACKAGE + | _OPTION + | _TRUE + | _FALSE + | _INF + | _NAN + | _DOUBLE + | _FLOAT + | _INT32 + | _INT64 + | _UINT32 + | _UINT64 + | _SINT32 + | _SINT64 + | _FIXED32 + | _FIXED64 + | _SFIXED32 + | _SFIXED64 + | _BOOL + | _STRING + | _BYTES + | _ONEOF + | _MAP + | _EXTENSIONS + | _TO + | _MAX + | _RESERVED + | _ENUM + | _MESSAGE + | _EXTEND + | _SERVICE + | _RPC + | _STREAM + | _RETURNS + +// excludes reserved, option +enumValueName : _NAME + | _SYNTAX + | _EDITION + | _IMPORT + | _WEAK + | _PUBLIC + | _PACKAGE + | _TRUE + | _FALSE + | _INF + | _NAN + | _REPEATED + | _OPTIONAL + | _REQUIRED + | _DOUBLE + | _FLOAT + | _INT32 + | _INT64 + | _UINT32 + | _UINT64 + | _SINT32 + | _SINT64 + | _FIXED32 + | _FIXED64 + | _SFIXED32 + | _SFIXED64 + | _BOOL + | _STRING + | _BYTES + | _GROUP + | _ONEOF + | _MAP + | _EXTENSIONS + | _TO + | _MAX + | _ENUM + | _MESSAGE + | _EXTEND + | _SERVICE + | _RPC + | _STREAM + | _RETURNS + +// excludes group, option, optional, required, and repeated +oneofElementName : _NAME + | _SYNTAX + | _EDITION + | _IMPORT + | _WEAK + | _PUBLIC + | _PACKAGE + | _TRUE + | _FALSE + | _INF + | _NAN + | _DOUBLE + | _FLOAT + | _INT32 + | _INT64 + | _UINT32 + | _UINT64 + | _SINT32 + | _SINT64 + | _FIXED32 + | _FIXED64 + | _SFIXED32 + | _SFIXED64 + | _BOOL + | _STRING + | _BYTES + | _ONEOF + | _MAP + | _EXTENSIONS + | _TO + | _MAX + | _RESERVED + | _ENUM + | _MESSAGE + | _EXTEND + | _SERVICE + | _RPC + | _STREAM + | _RETURNS + +// excludes group +notGroupElementName : _NAME + | _SYNTAX + | _EDITION + | _IMPORT + | _WEAK + | _PUBLIC + | _PACKAGE + | _OPTION + | _TRUE + | _FALSE + | _INF + | _NAN + | _REPEATED + | _OPTIONAL + | _REQUIRED + | _DOUBLE + | _FLOAT + | _INT32 + | _INT64 + | _UINT32 + | _UINT64 + | _SINT32 + | _SINT64 + | _FIXED32 + | _FIXED64 + | _SFIXED32 + | _SFIXED64 + | _BOOL + | _STRING + | _BYTES + | _ONEOF + | _MAP + | _EXTENSIONS + | _TO + | _MAX + | _RESERVED + | _ENUM + | _MESSAGE + | _EXTEND + | _SERVICE + | _RPC + | _STREAM + | _RETURNS + +// excludes stream +mtdElementName : _NAME + | _SYNTAX + | _EDITION + | _IMPORT + | _WEAK + | _PUBLIC + | _PACKAGE + | _OPTION + | _TRUE + | _FALSE + | _INF + | _NAN + | _REPEATED + | _OPTIONAL + | _REQUIRED + | _DOUBLE + | _FLOAT + | _INT32 + | _INT64 + | _UINT32 + | _UINT64 + | _SINT32 + | _SINT64 + | 
_FIXED32 + | _FIXED64 + | _SFIXED32 + | _SFIXED64 + | _BOOL + | _STRING + | _BYTES + | _GROUP + | _ONEOF + | _MAP + | _EXTENSIONS + | _TO + | _MAX + | _RESERVED + | _ENUM + | _MESSAGE + | _EXTEND + | _SERVICE + | _RPC + | _RETURNS + +identifier : _NAME + | _SYNTAX + | _EDITION + | _IMPORT + | _WEAK + | _PUBLIC + | _PACKAGE + | _OPTION + | _TRUE + | _FALSE + | _INF + | _NAN + | _REPEATED + | _OPTIONAL + | _REQUIRED + | _DOUBLE + | _FLOAT + | _INT32 + | _INT64 + | _UINT32 + | _UINT64 + | _SINT32 + | _SINT64 + | _FIXED32 + | _FIXED64 + | _SFIXED32 + | _SFIXED64 + | _BOOL + | _STRING + | _BYTES + | _GROUP + | _ONEOF + | _MAP + | _EXTENSIONS + | _TO + | _MAX + | _RESERVED + | _ENUM + | _MESSAGE + | _EXTEND + | _SERVICE + | _RPC + | _STREAM + | _RETURNS + +%% diff --git a/vendor/github.com/bufbuild/protocompile/parser/proto.y.go b/vendor/github.com/bufbuild/protocompile/parser/proto.y.go new file mode 100644 index 00000000..048e5ccc --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/parser/proto.y.go @@ -0,0 +1,2659 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by goyacc -o proto.y.go -l -p proto proto.y. DO NOT EDIT. +package parser + +import __yyfmt__ "fmt" + +//lint:file-ignore SA4006 generated parser has unused values + +import ( + "math" + "strings" + + "github.com/bufbuild/protocompile/ast" +) + +type protoSymType struct { + yys int + file *ast.FileNode + syn *ast.SyntaxNode + ed *ast.EditionNode + fileElements []ast.FileElement + pkg nodeWithRunes[*ast.PackageNode] + imprt nodeWithRunes[*ast.ImportNode] + msg nodeWithRunes[*ast.MessageNode] + msgElements []ast.MessageElement + fld *ast.FieldNode + msgFld nodeWithRunes[*ast.FieldNode] + mapFld nodeWithRunes[*ast.MapFieldNode] + mapType *ast.MapTypeNode + grp *ast.GroupNode + msgGrp nodeWithRunes[*ast.GroupNode] + oo nodeWithRunes[*ast.OneofNode] + ooElement ast.OneofElement + ooElements []ast.OneofElement + ext nodeWithRunes[*ast.ExtensionRangeNode] + resvd nodeWithRunes[*ast.ReservedNode] + en nodeWithRunes[*ast.EnumNode] + enElements []ast.EnumElement + env nodeWithRunes[*ast.EnumValueNode] + extend nodeWithRunes[*ast.ExtendNode] + extElement ast.ExtendElement + extElements []ast.ExtendElement + svc nodeWithRunes[*ast.ServiceNode] + svcElements []ast.ServiceElement + mtd nodeWithRunes[*ast.RPCNode] + mtdMsgType *ast.RPCTypeNode + mtdElements []ast.RPCElement + optRaw *ast.OptionNode + opt nodeWithRunes[*ast.OptionNode] + opts *compactOptionSlices + refRaw *ast.FieldReferenceNode + ref nodeWithRunes[*ast.FieldReferenceNode] + optNms *fieldRefSlices + cmpctOpts *ast.CompactOptionsNode + rng *ast.RangeNode + rngs *rangeSlices + names *nameSlices + cidPart nodeWithRunes[*ast.IdentNode] + cid *identSlices + tid ast.IdentValueNode + sl *valueSlices + msgLitFlds *messageFieldList + msgLitFld *ast.MessageFieldNode + v ast.ValueNode + il ast.IntValueNode + str []*ast.StringLiteralNode + s *ast.StringLiteralNode + i *ast.UintLiteralNode + f 
*ast.FloatLiteralNode + id *ast.IdentNode + b *ast.RuneNode + bs []*ast.RuneNode + err error +} + +const _STRING_LIT = 57346 +const _INT_LIT = 57347 +const _FLOAT_LIT = 57348 +const _NAME = 57349 +const _SYNTAX = 57350 +const _EDITION = 57351 +const _IMPORT = 57352 +const _WEAK = 57353 +const _PUBLIC = 57354 +const _PACKAGE = 57355 +const _OPTION = 57356 +const _TRUE = 57357 +const _FALSE = 57358 +const _INF = 57359 +const _NAN = 57360 +const _REPEATED = 57361 +const _OPTIONAL = 57362 +const _REQUIRED = 57363 +const _DOUBLE = 57364 +const _FLOAT = 57365 +const _INT32 = 57366 +const _INT64 = 57367 +const _UINT32 = 57368 +const _UINT64 = 57369 +const _SINT32 = 57370 +const _SINT64 = 57371 +const _FIXED32 = 57372 +const _FIXED64 = 57373 +const _SFIXED32 = 57374 +const _SFIXED64 = 57375 +const _BOOL = 57376 +const _STRING = 57377 +const _BYTES = 57378 +const _GROUP = 57379 +const _ONEOF = 57380 +const _MAP = 57381 +const _EXTENSIONS = 57382 +const _TO = 57383 +const _MAX = 57384 +const _RESERVED = 57385 +const _ENUM = 57386 +const _MESSAGE = 57387 +const _EXTEND = 57388 +const _SERVICE = 57389 +const _RPC = 57390 +const _STREAM = 57391 +const _RETURNS = 57392 +const _ERROR = 57393 + +var protoToknames = [...]string{ + "$end", + "error", + "$unk", + "_STRING_LIT", + "_INT_LIT", + "_FLOAT_LIT", + "_NAME", + "_SYNTAX", + "_EDITION", + "_IMPORT", + "_WEAK", + "_PUBLIC", + "_PACKAGE", + "_OPTION", + "_TRUE", + "_FALSE", + "_INF", + "_NAN", + "_REPEATED", + "_OPTIONAL", + "_REQUIRED", + "_DOUBLE", + "_FLOAT", + "_INT32", + "_INT64", + "_UINT32", + "_UINT64", + "_SINT32", + "_SINT64", + "_FIXED32", + "_FIXED64", + "_SFIXED32", + "_SFIXED64", + "_BOOL", + "_STRING", + "_BYTES", + "_GROUP", + "_ONEOF", + "_MAP", + "_EXTENSIONS", + "_TO", + "_MAX", + "_RESERVED", + "_ENUM", + "_MESSAGE", + "_EXTEND", + "_SERVICE", + "_RPC", + "_STREAM", + "_RETURNS", + "_ERROR", + "'='", + "';'", + "':'", + "'{'", + "'}'", + "'\\\\'", + "'/'", + "'?'", + "'.'", + "','", + "'>'", + "'<'", + "'+'", + "'-'", + "'('", + "')'", + "'['", + "']'", + "'*'", + "'&'", + "'^'", + "'%'", + "'$'", + "'#'", + "'@'", + "'!'", + "'~'", + "'`'", +} + +var protoStatenames = [...]string{} + +const protoEofCode = 1 +const protoErrCode = 2 +const protoInitialStackSize = 16 + +var protoExca = [...]int16{ + -1, 0, + 1, 6, + -2, 21, + -1, 1, + 1, -1, + -2, 0, + -1, 2, + 1, 1, + -2, 21, + -1, 3, + 1, 2, + -2, 21, + -1, 14, + 1, 7, + -2, 0, + -1, 89, + 52, 60, + 61, 60, + 69, 60, + -2, 61, + -1, 101, + 55, 37, + 58, 37, + 62, 37, + 67, 37, + 69, 37, + -2, 34, + -1, 112, + 52, 60, + 61, 60, + 69, 60, + -2, 62, + -1, 118, + 56, 249, + -2, 0, + -1, 121, + 55, 37, + 58, 37, + 62, 37, + 67, 37, + 69, 37, + -2, 35, + -1, 140, + 56, 225, + -2, 0, + -1, 142, + 56, 214, + -2, 0, + -1, 144, + 56, 250, + -2, 0, + -1, 198, + 56, 262, + -2, 0, + -1, 203, + 56, 83, + 62, 83, + -2, 0, + -1, 214, + 56, 226, + -2, 0, + -1, 271, + 56, 215, + -2, 0, + -1, 377, + 56, 263, + -2, 0, + -1, 464, + 56, 155, + -2, 0, + -1, 523, + 69, 144, + -2, 141, + -1, 531, + 56, 156, + -2, 0, + -1, 607, + 67, 52, + -2, 49, + -1, 665, + 69, 144, + -2, 142, + -1, 690, + 67, 52, + -2, 50, + -1, 732, + 56, 273, + -2, 0, + -1, 745, + 56, 274, + -2, 0, +} + +const protoPrivate = 57344 + +const protoLast = 2053 + +var protoAct = [...]int16{ + 140, 7, 746, 7, 7, 100, 139, 18, 440, 394, + 604, 436, 607, 439, 502, 39, 524, 596, 95, 532, + 496, 127, 437, 422, 520, 200, 32, 34, 523, 233, + 421, 40, 90, 93, 94, 405, 102, 106, 36, 96, + 109, 435, 272, 85, 378, 458, 326, 404, 21, 20, + 19, 107, 
108, 149, 215, 202, 145, 98, 101, 86, + 663, 89, 449, 390, 134, 706, 703, 598, 707, 513, + 9, 652, 395, 510, 465, 9, 511, 396, 717, 651, + 507, 459, 459, 460, 452, 459, 456, 9, 506, 459, + 459, 462, 739, 90, 693, 451, 655, 598, 459, 9, + 680, 653, 459, 687, 508, 459, 423, 459, 124, 125, + 453, 115, 459, 459, 459, 134, 126, 133, 142, 138, + 131, 129, 497, 395, 198, 130, 423, 134, 199, 448, + 416, 388, 389, 711, 489, 395, 505, 119, 9, 387, + 207, 666, 488, 593, 9, 468, 472, 113, 222, 112, + 273, 386, 470, 462, 587, 9, 373, 120, 121, 385, + 110, 40, 110, 691, 674, 428, 424, 414, 374, 122, + 114, 375, 279, 760, 758, 754, 750, 104, 744, 743, + 741, 733, 729, 721, 695, 9, 424, 716, 753, 219, + 217, 218, 668, 383, 227, 376, 322, 270, 213, 728, + 719, 323, 713, 658, 464, 123, 379, 118, 117, 207, + 116, 5, 6, 104, 399, 9, 598, 104, 670, 324, + 31, 702, 222, 667, 493, 490, 9, 492, 430, 392, + 419, 111, 13, 12, 403, 599, 407, 408, 413, 528, + 463, 40, 381, 748, 726, 8, 412, 724, 397, 659, + 33, 415, 15, 656, 26, 26, 9, 37, 38, 384, + 210, 209, 105, 219, 217, 218, 103, 35, 227, 400, + 595, 417, 211, 212, 402, 23, 529, 594, 104, 273, + 409, 582, 406, 24, 413, 516, 25, 26, 382, 495, + 491, 4, 412, 33, 10, 11, 731, 745, 380, 197, + 377, 279, 475, 476, 477, 478, 479, 480, 481, 482, + 483, 484, 485, 486, 418, 22, 143, 28, 27, 29, + 30, 144, 274, 425, 141, 271, 220, 420, 275, 225, + 411, 426, 427, 410, 40, 530, 531, 214, 231, 224, + 221, 535, 147, 223, 429, 146, 534, 216, 204, 203, + 447, 499, 601, 538, 150, 228, 605, 99, 602, 327, + 540, 154, 234, 277, 606, 329, 542, 156, 237, 474, + 391, 393, 438, 132, 128, 87, 88, 432, 206, 91, + 431, 521, 518, 533, 522, 379, 17, 16, 434, 14, + 3, 2, 1, 0, 0, 442, 442, 0, 0, 0, + 0, 207, 0, 0, 457, 0, 0, 454, 455, 466, + 0, 469, 471, 0, 0, 0, 0, 0, 0, 450, + 473, 445, 433, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 444, 0, 494, 0, 0, 0, 0, 0, + 0, 0, 0, 487, 0, 0, 0, 498, 0, 442, + 461, 0, 0, 0, 467, 503, 514, 0, 0, 517, + 0, 525, 526, 0, 0, 90, 504, 0, 583, 584, + 0, 0, 0, 0, 0, 0, 0, 0, 586, 0, + 0, 0, 0, 0, 585, 0, 0, 0, 588, 0, + 591, 0, 509, 0, 0, 0, 0, 0, 527, 0, + 512, 515, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 592, 0, 660, 661, 657, 590, + 0, 0, 0, 0, 0, 0, 0, 90, 0, 0, + 654, 0, 0, 589, 0, 0, 0, 0, 0, 0, + 0, 597, 0, 90, 672, 673, 664, 40, 0, 0, + 665, 669, 0, 0, 671, 0, 0, 675, 0, 0, + 0, 0, 662, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 676, 0, 0, 0, 0, 0, 0, + 679, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 678, 0, 0, 0, + 0, 0, 0, 682, 0, 684, 689, 0, 690, 686, + 685, 0, 0, 0, 0, 0, 0, 0, 677, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 681, 683, 0, 688, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 442, 0, + 697, 0, 0, 699, 503, 696, 0, 692, 0, 701, + 0, 0, 0, 133, 0, 504, 131, 129, 710, 0, + 709, 130, 0, 0, 0, 0, 715, 712, 0, 700, + 704, 0, 0, 0, 0, 0, 720, 0, 0, 722, + 718, 714, 694, 0, 0, 698, 0, 0, 133, 0, + 0, 131, 129, 0, 727, 0, 130, 732, 705, 708, + 730, 0, 735, 725, 723, 0, 734, 0, 0, 0, + 0, 0, 0, 0, 749, 742, 0, 0, 0, 0, + 747, 736, 737, 0, 0, 755, 752, 0, 756, 0, + 0, 757, 0, 747, 0, 0, 751, 0, 0, 0, + 759, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 738, 501, 740, 33, 137, 135, 41, 42, 43, 44, + 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, + 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, + 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, + 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, + 0, 0, 0, 0, 134, 0, 0, 0, 0, 0, + 0, 0, 395, 0, 441, 0, 0, 0, 500, 33, + 137, 135, 41, 42, 43, 44, 45, 46, 47, 48, + 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, + 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 
+ 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, + 79, 80, 81, 82, 83, 84, 0, 0, 0, 0, + 134, 0, 0, 0, 0, 0, 0, 0, 395, 0, + 441, 0, 0, 443, 33, 137, 135, 41, 42, 43, + 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, + 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, + 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, + 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, + 84, 0, 0, 0, 0, 134, 0, 0, 0, 0, + 0, 0, 0, 395, 0, 441, 41, 42, 43, 44, + 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, + 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, + 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, + 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 205, 92, 0, 0, 519, 41, + 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, + 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, + 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, + 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, + 82, 83, 84, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 446, 0, 205, 0, 0, 0, + 208, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 0, 0, 0, 0, 0, + 201, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 208, 33, 137, 135, 41, 42, 43, 44, + 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, + 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, + 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, + 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, + 0, 0, 0, 0, 134, 0, 0, 0, 0, 0, + 205, 0, 0, 0, 136, 41, 42, 43, 44, 45, + 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, + 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, + 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, + 76, 77, 78, 79, 80, 81, 82, 83, 84, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 33, 423, 208, 41, 42, 43, + 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, + 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, + 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, + 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, + 84, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 424, 41, 42, 43, 44, + 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, + 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, + 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, + 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 92, 41, 42, 43, 44, + 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, + 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, + 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, + 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 97, + 608, 609, 610, 611, 612, 613, 614, 615, 616, 617, + 618, 619, 620, 621, 622, 623, 624, 625, 626, 627, + 628, 629, 630, 631, 632, 633, 634, 635, 636, 637, + 638, 639, 640, 641, 642, 643, 644, 645, 646, 647, + 648, 649, 600, 650, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 603, 330, 331, 332, 333, 334, 335, + 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, + 346, 347, 348, 349, 350, 351, 352, 353, 354, 355, + 356, 357, 358, 359, 401, 360, 361, 362, 363, 364, + 365, 366, 367, 368, 369, 370, 371, 372, 0, 0, + 0, 0, 0, 226, 0, 0, 0, 328, 238, 239, + 240, 241, 242, 243, 244, 26, 245, 246, 247, 248, + 153, 152, 151, 249, 250, 251, 252, 253, 254, 255, + 256, 257, 258, 259, 260, 261, 262, 263, 0, 230, + 236, 229, 264, 265, 232, 28, 27, 29, 266, 267, + 268, 269, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 235, 330, 331, 332, 333, 334, 335, 336, 337, + 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, + 348, 349, 350, 351, 352, 353, 354, 355, 356, 357, + 358, 359, 325, 360, 361, 362, 363, 364, 365, 366, + 367, 368, 369, 370, 371, 372, 0, 0, 0, 0, + 0, 148, 0, 0, 0, 328, 157, 158, 159, 160, + 161, 162, 163, 164, 165, 166, 167, 168, 153, 152, + 151, 169, 170, 171, 172, 173, 174, 175, 176, 177, + 178, 179, 180, 181, 182, 183, 0, 184, 185, 186, + 187, 
188, 189, 190, 191, 192, 193, 194, 195, 196, + 0, 0, 0, 0, 0, 536, 0, 0, 0, 155, + 543, 544, 545, 546, 547, 548, 549, 537, 550, 551, + 552, 553, 0, 0, 0, 554, 555, 556, 557, 558, + 559, 560, 561, 562, 563, 564, 565, 566, 567, 568, + 539, 569, 570, 571, 572, 573, 574, 575, 576, 577, + 578, 579, 580, 581, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 541, 210, 209, 41, 42, 43, 44, + 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, + 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, + 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, + 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, + 33, 406, 0, 41, 42, 43, 44, 45, 46, 47, + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, + 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, + 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, + 78, 79, 80, 81, 82, 83, 84, 276, 0, 0, + 0, 0, 280, 281, 282, 283, 284, 285, 286, 26, + 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, + 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, + 307, 308, 309, 310, 311, 312, 313, 314, 278, 315, + 316, 317, 318, 319, 320, 321, 398, 0, 0, 0, + 0, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 41, 42, 43, 44, 45, + 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, + 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, + 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, + 76, 77, 78, 79, 80, 81, 82, 83, 84, 608, + 609, 610, 611, 612, 613, 614, 615, 616, 617, 618, + 619, 620, 621, 622, 623, 624, 625, 626, 627, 628, + 629, 630, 631, 632, 633, 634, 635, 636, 637, 638, + 639, 640, 641, 642, 643, 644, 645, 646, 647, 648, + 649, 0, 650, +} + +var protoPact = [...]int16{ + 203, -1000, 162, 162, -1000, 181, 180, 273, 167, -1000, + -1000, -1000, 289, 289, 273, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, 246, 1958, 1329, 1958, 1958, 1389, + 1958, -1000, 213, -1000, 209, -1000, 173, 289, 289, 102, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, 179, -1000, 1329, 110, -1000, + -1000, -1000, 1389, 155, 153, 152, -1000, 1958, -1000, 1958, + 109, -1000, 150, -1000, -1000, -1000, -1000, 173, 173, -1000, + 1958, 1149, -1000, -1000, -1000, 52, 162, 162, 1659, -1000, + -1000, -1000, -1000, 162, -1000, -1000, -1000, 162, -1000, -1000, + 274, -1000, -1000, -1000, 1084, -1000, 255, -1000, -1000, 142, + 1551, 141, 1865, 140, 1659, -1000, -1000, -1000, 166, 1605, + 1958, -1000, -1000, -1000, 108, 1958, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, 139, 240, -1000, + 137, -1000, -1000, 1208, 98, 78, 9, -1000, 1914, -1000, + -1000, -1000, -1000, 162, 1551, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, 1497, 1958, 277, + 1958, 1958, 1816, -1000, 107, 1958, 67, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + 162, 1865, -1000, -1000, -1000, -1000, -1000, 178, 1270, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, 
-1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, 162, -1000, -1000, 1958, 1958, 105, 1958, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, 176, 1958, 100, 162, 240, -1000, -1000, + -1000, -1000, 1958, -1000, -1000, -1000, -1000, -1000, -1000, 835, + 835, -1000, -1000, -1000, -1000, 1022, 60, 26, 41, -1000, + -1000, 1958, 1958, 34, 30, -1000, 199, 149, 22, 92, + 91, 85, 274, -1000, 1958, 100, 278, -1000, -1000, 121, + 81, -1000, 184, -1000, 285, -1000, 175, 172, 1958, 100, + 284, -1000, -1000, -1000, 56, -1000, -1000, -1000, -1000, 274, + -1000, 1769, -1000, 769, -1000, 74, -1000, 19, -1000, 35, + -1000, -1000, 1958, -1000, 21, 17, 280, -1000, 162, 959, + 162, 162, 277, 234, 1713, 276, -1000, 162, 162, -1000, + 289, -1000, 1958, -1000, 93, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, 46, 121, 162, + 101, -1000, 272, 265, -1000, 44, 185, 1443, -1000, 10, + -1000, 32, -1000, -1000, -1000, -1000, -1000, 72, -1000, 27, + 248, 162, 148, 244, -1000, 162, 46, -1000, -9, -1000, + -1000, 1329, 80, -1000, 171, -1000, -1000, -1000, -1000, -1000, + 136, 1713, -1000, -1000, -1000, -1000, 165, 1329, 1958, 1958, + 104, 1958, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, 46, -1000, -1000, 274, -1000, 1389, -1000, 162, + -1000, -1000, -1000, -1000, 45, 44, -1000, 163, -1000, 56, + 1389, 36, -1000, 1958, -1000, 2002, 103, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, 900, -1000, -1000, -1000, 39, 128, 162, 46, + -1000, -1000, 162, -1000, -1000, -1000, -1000, 1149, 162, -1000, + -1000, 169, 14, 13, 1958, 100, -1000, 162, 71, -1000, + 162, 147, -1000, 163, -1000, 132, 11, -1000, -1000, -1000, + -1000, -1000, -1000, 162, 145, 162, 127, -1000, 162, -1000, + -1000, -1000, 1149, 242, -1000, 163, 239, 162, 144, -1000, + -1000, -1000, 126, 162, -1000, -1000, 162, -1000, 125, 162, + -1000, 162, -1000, 163, 44, -1000, 37, 124, 162, -1000, + 123, 122, 241, 162, 120, -1000, -1000, -1000, 163, 162, + 133, -1000, 119, -1000, 162, 241, -1000, -1000, -1000, -1000, + 162, -1000, 118, 162, -1000, -1000, -1000, -1000, -1000, 117, + -1000, +} + +var protoPgo = [...]int16{ + 0, 392, 391, 390, 291, 252, 389, 387, 386, 384, + 383, 7, 28, 24, 382, 381, 379, 378, 376, 61, + 59, 16, 375, 45, 41, 21, 374, 11, 9, 22, + 8, 373, 372, 14, 371, 370, 23, 5, 369, 368, + 367, 366, 365, 364, 363, 53, 58, 57, 12, 10, + 15, 362, 361, 360, 359, 358, 39, 357, 356, 18, + 355, 354, 353, 46, 352, 351, 350, 349, 55, 25, + 348, 347, 346, 345, 343, 342, 341, 340, 339, 338, + 50, 54, 337, 6, 19, 336, 335, 333, 330, 329, + 328, 29, 
35, 30, 47, 327, 326, 49, 42, 325, + 324, 322, 48, 56, 321, 316, 13, 315, 44, 300, + 299, 298, 2, 297, 296, 20, 17, 0, 245, +} + +var protoR1 = [...]int8{ + 0, 1, 1, 1, 1, 1, 1, 4, 6, 6, + 5, 5, 5, 5, 5, 5, 5, 5, 118, 118, + 117, 117, 116, 116, 2, 3, 7, 7, 7, 8, + 50, 50, 56, 56, 57, 57, 47, 47, 46, 51, + 51, 52, 52, 53, 53, 54, 54, 55, 55, 58, + 58, 49, 49, 48, 10, 11, 18, 18, 19, 20, + 20, 22, 22, 21, 21, 16, 25, 25, 26, 26, + 26, 26, 30, 30, 30, 30, 31, 31, 106, 106, + 28, 28, 69, 68, 68, 67, 67, 67, 67, 67, + 67, 70, 70, 70, 17, 17, 17, 17, 24, 24, + 24, 27, 27, 27, 27, 35, 35, 29, 29, 29, + 32, 32, 32, 65, 65, 33, 33, 34, 34, 34, + 66, 66, 59, 59, 60, 60, 61, 61, 62, 62, + 63, 63, 64, 64, 45, 45, 45, 23, 23, 14, + 14, 15, 15, 13, 13, 12, 9, 9, 75, 75, + 77, 77, 77, 77, 74, 86, 86, 85, 85, 84, + 84, 84, 84, 84, 72, 72, 72, 72, 76, 76, + 76, 76, 78, 78, 78, 78, 79, 38, 38, 38, + 38, 38, 38, 38, 38, 38, 38, 38, 38, 96, + 96, 94, 94, 92, 92, 92, 95, 95, 93, 93, + 93, 36, 36, 89, 89, 90, 90, 91, 91, 87, + 87, 88, 88, 97, 100, 100, 99, 99, 98, 98, + 98, 98, 101, 101, 80, 83, 83, 82, 82, 81, + 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, + 71, 71, 71, 71, 71, 71, 71, 71, 102, 105, + 105, 104, 104, 103, 103, 103, 103, 73, 73, 73, + 73, 107, 110, 110, 109, 109, 108, 108, 108, 111, + 111, 115, 115, 114, 114, 113, 113, 112, 112, 39, + 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, + 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, + 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, + 39, 39, 40, 40, 40, 40, 40, 40, 40, 40, + 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, + 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, + 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, + 40, 40, 44, 44, 44, 44, 44, 44, 44, 44, + 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, + 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, + 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, + 44, 44, 44, 44, 41, 41, 41, 41, 41, 41, + 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, + 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, + 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, + 41, 41, 41, 42, 42, 42, 42, 42, 42, 42, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, + 42, 42, 42, 42, 42, 42, 43, 43, 43, 43, + 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, + 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, + 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, + 43, 43, 43, 43, 43, 43, 43, 43, 43, 37, + 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, + 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, + 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, + 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, + 37, 37, 37, +} + +var protoR2 = [...]int8{ + 0, 1, 1, 1, 2, 2, 0, 2, 2, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, + 1, 0, 1, 0, 4, 4, 3, 4, 4, 3, + 1, 3, 1, 2, 1, 2, 1, 1, 2, 1, + 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, + 2, 1, 1, 2, 5, 5, 1, 1, 2, 1, + 1, 1, 2, 1, 2, 3, 1, 1, 1, 1, + 1, 1, 1, 2, 1, 2, 2, 2, 1, 2, + 3, 2, 1, 1, 2, 1, 2, 2, 2, 2, + 1, 3, 2, 3, 1, 3, 5, 3, 1, 1, + 1, 1, 1, 2, 1, 1, 1, 1, 3, 2, + 3, 2, 3, 1, 3, 1, 1, 3, 2, 3, + 1, 3, 1, 2, 1, 2, 1, 2, 1, 2, + 1, 2, 1, 2, 1, 1, 1, 3, 2, 1, + 2, 1, 2, 1, 1, 2, 3, 1, 8, 9, + 9, 10, 7, 8, 6, 0, 1, 2, 1, 1, + 1, 1, 2, 1, 5, 6, 3, 4, 7, 8, + 5, 6, 5, 6, 3, 4, 6, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 4, + 4, 1, 3, 1, 3, 3, 1, 3, 1, 3, + 3, 1, 2, 4, 1, 4, 1, 3, 3, 1, + 3, 1, 3, 6, 1, 2, 2, 1, 1, 1, + 1, 1, 4, 5, 6, 1, 2, 2, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 6, 7, 5, 6, 4, 5, 3, 4, 6, 0, + 1, 2, 1, 1, 1, 2, 1, 6, 7, 5, + 6, 6, 1, 2, 2, 1, 1, 1, 1, 6, + 9, 4, 3, 1, 2, 2, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, +} + +var protoChk = [...]int16{ + -1000, -1, -2, -3, -4, 8, 9, -117, -118, 53, + -4, -4, 52, 52, -6, -5, -7, -8, -11, -80, + -97, -102, -107, 2, 10, 13, 14, 45, 44, 46, + 47, 53, -106, 4, -106, -5, -106, 11, 12, -50, + -37, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, + 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, + 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, + 46, 47, 48, 49, 50, -21, -20, -22, -18, -19, + -37, -16, 66, -37, -37, -59, -56, 60, -47, -57, + -37, -46, -37, 53, 4, 53, -117, -106, -106, -117, + 60, 52, -19, -20, 60, -59, 55, 55, 55, -56, + -47, -46, 60, 55, -117, -117, -37, -25, -26, -28, + -106, -30, -31, -37, 55, 6, 65, 5, 67, -83, + -117, -100, -117, -105, -104, -103, -73, -75, 2, -45, + -61, 21, 20, 19, -52, 60, -40, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 22, + 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, + 33, 34, 35, 36, 38, 39, 40, 41, 42, 43, + 44, 45, 46, 47, 48, 49, 50, -110, -117, -117, + -69, 56, -68, -67, -70, 2, -17, -37, 68, 6, + 5, 17, 18, 56, -82, -81, -71, -97, -80, -102, + -96, -77, -11, -74, -78, -89, 2, -45, -60, 40, + 38, -79, 43, -91, -51, 60, 39, -39, 7, 8, + 9, 10, 11, 12, 13, 15, 16, 17, 18, 22, + 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, + 33, 34, 35, 36, 41, 42, 47, 48, 49, 50, + 56, -99, -98, -11, -101, -90, 2, -44, 43, -91, + 7, 8, 9, 10, 11, 12, 13, 15, 16, 17, + 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, + 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, + 38, 39, 40, 41, 42, 44, 45, 46, 47, 48, + 49, 50, 56, -103, 53, 37, -63, -54, 60, -42, + 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, + 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, + 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, + 48, 49, 50, -37, 60, -50, 56, -109, -108, -11, + -111, 2, 48, 56, -68, 61, 53, 61, 53, 54, + 54, -35, -29, -34, -28, 63, 68, -56, 2, -117, + -81, 37, -63, -37, -94, -92, 5, -37, -37, -94, + -87, -88, -106, -37, 60, -50, 63, -117, -98, 52, + -95, -93, -36, 5, 65, -117, -37, -37, 60, -50, + 52, -37, -117, -108, -37, -24, -27, -29, -32, -106, + -30, 65, -37, 68, -24, -69, 62, -66, 69, 2, + -29, 69, 58, 69, -37, -37, 52, -117, -23, 68, + 53, -23, 61, 41, 55, 52, -117, -23, 53, -117, + 61, -117, 61, -37, -38, 24, 25, 26, 27, 28, + 29, 30, 31, 32, 33, 34, 35, -36, 61, 53, + 41, 5, 52, 52, -37, 5, -115, 66, -37, -65, + 69, 2, -33, -27, -29, 62, 69, 61, 69, -56, + 52, 55, -23, 52, -117, -23, 5, -117, -14, 69, + -13, -15, -9, -12, -21, -117, -117, -92, 5, 42, + -86, -85, -84, -10, -72, -76, 2, 14, -62, 37, + -53, 60, -41, 7, 8, 9, 10, 11, 12, 13, + 15, 16, 17, 18, 22, 23, 24, 25, 26, 27, + 28, 29, 30, 31, 32, 33, 34, 35, 36, 38, + 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, + 49, 50, 5, -117, -117, -106, -37, 61, -117, -23, + -93, -117, -36, 42, 5, 
5, -116, -23, 53, 50, + 49, -64, -55, 60, -49, -58, -43, -48, 7, 8, + 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, + 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, + 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, + 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, + 50, 69, 61, 69, -29, 69, 5, -83, 55, 5, + -117, -117, -23, 69, -13, -12, 61, 52, 56, -84, + 53, -21, -37, -37, 60, -50, -117, -23, -59, -117, + 55, -23, -116, -23, -116, -115, -59, 67, -56, -49, + -48, 60, -33, 55, -23, 56, -83, -117, -23, -117, + -25, -117, 52, 52, -116, -23, 52, 55, -23, -37, + -117, 62, -83, 55, -116, -117, 55, 67, -83, 55, + -117, 56, -117, -25, 5, -116, 5, -83, 55, 56, + -83, -114, -117, 56, -83, -117, -116, -116, -23, 55, + -23, 56, -83, 56, 56, -113, -112, -11, 2, -117, + 56, -116, -83, 55, 56, -117, -112, -117, 56, -83, + 56, +} + +var protoDef = [...]int16{ + -2, -2, -2, -2, 3, 0, 0, 0, 20, 18, + 4, 5, 0, 0, -2, 9, 10, 11, 12, 13, + 14, 15, 16, 17, 0, 0, 0, 0, 0, 0, + 0, 19, 0, 78, 0, 8, 21, 0, 0, 21, + 30, 519, 520, 521, 522, 523, 524, 525, 526, 527, + 528, 529, 530, 531, 532, 533, 534, 535, 536, 537, + 538, 539, 540, 541, 542, 543, 544, 545, 546, 547, + 548, 549, 550, 551, 552, 553, 554, 555, 556, 557, + 558, 559, 560, 561, 562, 0, 63, 0, 59, -2, + 56, 57, 0, 0, 0, 0, 122, 0, 32, 0, + 36, -2, 0, 24, 79, 25, 26, 21, 21, 29, + 0, 0, -2, 64, 58, 0, 21, 21, -2, 123, + 33, -2, 38, 21, 27, 28, 31, 21, 66, 67, + 68, 69, 70, 71, 0, 72, 0, 74, 65, 0, + -2, 0, -2, 0, -2, 252, 253, 254, 256, 0, + 0, 134, 135, 136, 126, 0, 41, 312, 313, 314, + 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, + 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, + 335, 336, 337, 338, 339, 340, 341, 342, 343, 344, + 345, 346, 347, 348, 349, 350, 351, 0, -2, 55, + 0, 81, 82, -2, 85, 90, 0, 94, 0, 73, + 75, 76, 77, 21, -2, 228, 229, 230, 231, 232, + 233, 234, 235, 236, 237, 238, 239, 0, 0, 0, + 0, 0, 0, 204, 124, 0, 305, 39, 279, 280, + 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, + 291, 292, 293, 294, 295, 296, 297, 298, 299, 300, + 301, 302, 303, 304, 306, 307, 308, 309, 310, 311, + 21, -2, 217, 218, 219, 220, 221, 0, 0, 206, + 352, 353, 354, 355, 356, 357, 358, 359, 360, 361, + 362, 363, 364, 365, 366, 367, 368, 369, 370, 371, + 372, 373, 374, 375, 376, 377, 378, 379, 380, 381, + 382, 383, 384, 385, 386, 387, 388, 389, 390, 391, + 392, 393, 21, 251, 255, 0, 0, 130, 0, 45, + 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, + 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, + 453, 454, 455, 456, 457, 458, 459, 460, 461, 462, + 463, 464, 465, 466, 467, 468, 469, 470, 471, 472, + 473, 474, 475, 0, 0, 127, 21, -2, 265, 266, + 267, 268, 0, 80, 84, 86, 87, 88, 89, 0, + 0, 92, 105, 106, 107, 0, 0, 0, 0, 224, + 227, 0, 0, 21, 0, 191, 193, 0, 21, 0, + 21, 21, 209, 211, 0, 125, 0, 213, 216, 0, + 0, 196, 198, 201, 0, 248, 0, 0, 0, 131, + 0, 42, 261, 264, 0, 93, 98, 99, 100, 101, + 102, 0, 104, 0, 91, 0, 109, 0, 118, 0, + 120, 95, 0, 97, 0, 21, 0, 246, 21, 0, + 21, 21, 0, 0, -2, 0, 174, 21, 21, 207, + 0, 208, 0, 40, 0, 177, 178, 179, 180, 181, + 182, 183, 184, 185, 186, 187, 188, 21, 0, 21, + 0, 202, 0, 0, 46, 23, 0, 0, 103, 0, + 111, 0, 113, 115, 116, 108, 117, 0, 119, 0, + 0, 21, 0, 0, 244, 21, 21, 247, 0, 138, + 139, 0, 143, -2, 147, 189, 190, 192, 194, 195, + 0, -2, 158, 159, 160, 161, 163, 0, 0, 0, + 128, 0, 43, 394, 395, 396, 397, 398, 399, 400, + 401, 402, 403, 404, 405, 406, 407, 408, 409, 410, + 411, 412, 413, 414, 415, 416, 417, 418, 419, 420, + 421, 422, 423, 424, 425, 426, 427, 428, 429, 430, + 431, 432, 21, 175, 203, 210, 212, 0, 222, 21, + 197, 205, 
199, 200, 0, 23, 259, 23, 22, 0, + 0, 0, 132, 0, 47, 0, 51, -2, 476, 477, + 478, 479, 480, 481, 482, 483, 484, 485, 486, 487, + 488, 489, 490, 491, 492, 493, 494, 495, 496, 497, + 498, 499, 500, 501, 502, 503, 504, 505, 506, 507, + 508, 509, 510, 511, 512, 513, 514, 515, 516, 517, + 518, 110, 0, 112, 121, 96, 0, 0, 21, 21, + 245, 242, 21, 137, 140, -2, 145, 0, 21, 157, + 162, 0, 23, 0, 0, 129, 172, 21, 0, 223, + 21, 0, 257, 23, 260, 21, 0, 272, 133, 48, + -2, 53, 114, 21, 0, 21, 0, 240, 21, 243, + 146, 154, 0, 0, 166, 23, 0, 21, 0, 44, + 173, 176, 0, 21, 258, 269, 21, 271, 0, 21, + 152, 21, 241, 23, 23, 167, 0, 0, 21, 148, + 0, 0, -2, 21, 0, 153, 54, 164, 23, 21, + 0, 170, 0, 149, 21, -2, 276, 277, 278, 150, + 21, 165, 0, 21, 171, 270, 275, 151, 168, 0, + 169, +} + +var protoTok1 = [...]int8{ + 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 77, 3, 75, 74, 73, 71, 3, + 66, 67, 70, 64, 61, 65, 60, 58, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 54, 53, + 63, 52, 62, 59, 76, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 68, 57, 69, 72, 3, 79, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 55, 3, 56, 78, +} + +var protoTok2 = [...]int8{ + 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, + 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, + 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, +} + +var protoTok3 = [...]int8{ + 0, +} + +var protoErrorMessages = [...]struct { + state int + token int + msg string +}{} + +/* parser for yacc output */ + +var ( + protoDebug = 0 + protoErrorVerbose = false +) + +type protoLexer interface { + Lex(lval *protoSymType) int + Error(s string) +} + +type protoParser interface { + Parse(protoLexer) int + Lookahead() int +} + +type protoParserImpl struct { + lval protoSymType + stack [protoInitialStackSize]protoSymType + char int +} + +func (p *protoParserImpl) Lookahead() int { + return p.char +} + +func protoNewParser() protoParser { + return &protoParserImpl{} +} + +const protoFlag = -1000 + +func protoTokname(c int) string { + if c >= 1 && c-1 < len(protoToknames) { + if protoToknames[c-1] != "" { + return protoToknames[c-1] + } + } + return __yyfmt__.Sprintf("tok-%v", c) +} + +func protoStatname(s int) string { + if s >= 0 && s < len(protoStatenames) { + if protoStatenames[s] != "" { + return protoStatenames[s] + } + } + return __yyfmt__.Sprintf("state-%v", s) +} + +func protoErrorMessage(state, lookAhead int) string { + const TOKSTART = 4 + + if !protoErrorVerbose { + return "syntax error" + } + + for _, e := range protoErrorMessages { + if e.state == state && e.token == lookAhead { + return "syntax error: " + e.msg + } + } + + res := "syntax error: unexpected " + protoTokname(lookAhead) + + // To match Bison, suggest at most four expected tokens. + expected := make([]int, 0, 4) + + // Look for shiftable tokens. + base := int(protoPact[state]) + for tok := TOKSTART; tok-1 < len(protoToknames); tok++ { + if n := base + tok; n >= 0 && n < protoLast && int(protoChk[int(protoAct[n])]) == tok { + if len(expected) == cap(expected) { + return res + } + expected = append(expected, tok) + } + } + + if protoDef[state] == -2 { + i := 0 + for protoExca[i] != -1 || int(protoExca[i+1]) != state { + i += 2 + } + + // Look for tokens that we accept or reduce. 
+ for i += 2; protoExca[i] >= 0; i += 2 { + tok := int(protoExca[i]) + if tok < TOKSTART || protoExca[i+1] == 0 { + continue + } + if len(expected) == cap(expected) { + return res + } + expected = append(expected, tok) + } + + // If the default action is to accept or reduce, give up. + if protoExca[i+1] != 0 { + return res + } + } + + for i, tok := range expected { + if i == 0 { + res += ", expecting " + } else { + res += " or " + } + res += protoTokname(tok) + } + return res +} + +func protolex1(lex protoLexer, lval *protoSymType) (char, token int) { + token = 0 + char = lex.Lex(lval) + if char <= 0 { + token = int(protoTok1[0]) + goto out + } + if char < len(protoTok1) { + token = int(protoTok1[char]) + goto out + } + if char >= protoPrivate { + if char < protoPrivate+len(protoTok2) { + token = int(protoTok2[char-protoPrivate]) + goto out + } + } + for i := 0; i < len(protoTok3); i += 2 { + token = int(protoTok3[i+0]) + if token == char { + token = int(protoTok3[i+1]) + goto out + } + } + +out: + if token == 0 { + token = int(protoTok2[1]) /* unknown char */ + } + if protoDebug >= 3 { + __yyfmt__.Printf("lex %s(%d)\n", protoTokname(token), uint(char)) + } + return char, token +} + +func protoParse(protolex protoLexer) int { + return protoNewParser().Parse(protolex) +} + +func (protorcvr *protoParserImpl) Parse(protolex protoLexer) int { + var proton int + var protoVAL protoSymType + var protoDollar []protoSymType + _ = protoDollar // silence set and not used + protoS := protorcvr.stack[:] + + Nerrs := 0 /* number of errors */ + Errflag := 0 /* error recovery flag */ + protostate := 0 + protorcvr.char = -1 + prototoken := -1 // protorcvr.char translated into internal numbering + defer func() { + // Make sure we report no lookahead when not parsing. + protostate = -1 + protorcvr.char = -1 + prototoken = -1 + }() + protop := -1 + goto protostack + +ret0: + return 0 + +ret1: + return 1 + +protostack: + /* put a state and value onto the stack */ + if protoDebug >= 4 { + __yyfmt__.Printf("char %v in %v\n", protoTokname(prototoken), protoStatname(protostate)) + } + + protop++ + if protop >= len(protoS) { + nyys := make([]protoSymType, len(protoS)*2) + copy(nyys, protoS) + protoS = nyys + } + protoS[protop] = protoVAL + protoS[protop].yys = protostate + +protonewstate: + proton = int(protoPact[protostate]) + if proton <= protoFlag { + goto protodefault /* simple state */ + } + if protorcvr.char < 0 { + protorcvr.char, prototoken = protolex1(protolex, &protorcvr.lval) + } + proton += prototoken + if proton < 0 || proton >= protoLast { + goto protodefault + } + proton = int(protoAct[proton]) + if int(protoChk[proton]) == prototoken { /* valid shift */ + protorcvr.char = -1 + prototoken = -1 + protoVAL = protorcvr.lval + protostate = proton + if Errflag > 0 { + Errflag-- + } + goto protostack + } + +protodefault: + /* default state action */ + proton = int(protoDef[protostate]) + if proton == -2 { + if protorcvr.char < 0 { + protorcvr.char, prototoken = protolex1(protolex, &protorcvr.lval) + } + + /* look through exception table */ + xi := 0 + for { + if protoExca[xi+0] == -1 && int(protoExca[xi+1]) == protostate { + break + } + xi += 2 + } + for xi += 2; ; xi += 2 { + proton = int(protoExca[xi+0]) + if proton < 0 || proton == prototoken { + break + } + } + proton = int(protoExca[xi+1]) + if proton < 0 { + goto ret0 + } + } + if proton == 0 { + /* error ... 
attempt to resume parsing */ + switch Errflag { + case 0: /* brand new error */ + protolex.Error(protoErrorMessage(protostate, prototoken)) + Nerrs++ + if protoDebug >= 1 { + __yyfmt__.Printf("%s", protoStatname(protostate)) + __yyfmt__.Printf(" saw %s\n", protoTokname(prototoken)) + } + fallthrough + + case 1, 2: /* incompletely recovered error ... try again */ + Errflag = 3 + + /* find a state where "error" is a legal shift action */ + for protop >= 0 { + proton = int(protoPact[protoS[protop].yys]) + protoErrCode + if proton >= 0 && proton < protoLast { + protostate = int(protoAct[proton]) /* simulate a shift of "error" */ + if int(protoChk[protostate]) == protoErrCode { + goto protostack + } + } + + /* the current p has no shift on "error", pop stack */ + if protoDebug >= 2 { + __yyfmt__.Printf("error recovery pops state %d\n", protoS[protop].yys) + } + protop-- + } + /* there is no state on the stack with an error shift ... abort */ + goto ret1 + + case 3: /* no shift yet; clobber input char */ + if protoDebug >= 2 { + __yyfmt__.Printf("error recovery discards %s\n", protoTokname(prototoken)) + } + if prototoken == protoEofCode { + goto ret1 + } + protorcvr.char = -1 + prototoken = -1 + goto protonewstate /* try again in the same state */ + } + } + + /* reduction by production proton */ + if protoDebug >= 2 { + __yyfmt__.Printf("reduce %v in:\n\t%v\n", proton, protoStatname(protostate)) + } + + protont := proton + protopt := protop + _ = protopt // guard against "declared and not used" + + protop -= int(protoR2[proton]) + // protop is now the index of $0. Perform the default action. Iff the + // reduced production is ε, $1 is possibly out of range. + if protop+1 >= len(protoS) { + nyys := make([]protoSymType, len(protoS)*2) + copy(nyys, protoS) + protoS = nyys + } + protoVAL = protoS[protop+1] + + /* consult goto table to find next state */ + proton = int(protoR1[proton]) + protog := int(protoPgo[proton]) + protoj := protog + protoS[protop].yys + 1 + + if protoj >= protoLast { + protostate = int(protoAct[protog]) + } else { + protostate = int(protoAct[protoj]) + if int(protoChk[protostate]) != -proton { + protostate = int(protoAct[protog]) + } + } + // dummy call; replaced with literal code + switch protont { + + case 1: + protoDollar = protoS[protopt-1 : protopt+1] + { + lex := protolex.(*protoLex) + protoVAL.file = ast.NewFileNode(lex.info, protoDollar[1].syn, nil, lex.eof) + lex.res = protoVAL.file + } + case 2: + protoDollar = protoS[protopt-1 : protopt+1] + { + lex := protolex.(*protoLex) + protoVAL.file = ast.NewFileNodeWithEdition(lex.info, protoDollar[1].ed, nil, lex.eof) + lex.res = protoVAL.file + } + case 3: + protoDollar = protoS[protopt-1 : protopt+1] + { + lex := protolex.(*protoLex) + protoVAL.file = ast.NewFileNode(lex.info, nil, protoDollar[1].fileElements, lex.eof) + lex.res = protoVAL.file + } + case 4: + protoDollar = protoS[protopt-2 : protopt+1] + { + lex := protolex.(*protoLex) + protoVAL.file = ast.NewFileNode(lex.info, protoDollar[1].syn, protoDollar[2].fileElements, lex.eof) + lex.res = protoVAL.file + } + case 5: + protoDollar = protoS[protopt-2 : protopt+1] + { + lex := protolex.(*protoLex) + protoVAL.file = ast.NewFileNodeWithEdition(lex.info, protoDollar[1].ed, protoDollar[2].fileElements, lex.eof) + lex.res = protoVAL.file + } + case 6: + protoDollar = protoS[protopt-0 : protopt+1] + { + lex := protolex.(*protoLex) + protoVAL.file = ast.NewFileNode(lex.info, nil, nil, lex.eof) + lex.res = protoVAL.file + } + case 7: + protoDollar = protoS[protopt-2 : 
protopt+1] + { + protoVAL.fileElements = prependRunes(toFileElement, protoDollar[1].bs, protoDollar[2].fileElements) + } + case 8: + protoDollar = protoS[protopt-2 : protopt+1] + { + protoVAL.fileElements = append(protoDollar[1].fileElements, protoDollar[2].fileElements...) + } + case 9: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.fileElements = protoDollar[1].fileElements + } + case 10: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.fileElements = toElements[ast.FileElement](toFileElement, protoDollar[1].imprt.Node, protoDollar[1].imprt.Runes) + } + case 11: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.fileElements = toElements[ast.FileElement](toFileElement, protoDollar[1].pkg.Node, protoDollar[1].pkg.Runes) + } + case 12: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.fileElements = toElements[ast.FileElement](toFileElement, protoDollar[1].opt.Node, protoDollar[1].opt.Runes) + } + case 13: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.fileElements = toElements[ast.FileElement](toFileElement, protoDollar[1].msg.Node, protoDollar[1].msg.Runes) + } + case 14: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.fileElements = toElements[ast.FileElement](toFileElement, protoDollar[1].en.Node, protoDollar[1].en.Runes) + } + case 15: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.fileElements = toElements[ast.FileElement](toFileElement, protoDollar[1].extend.Node, protoDollar[1].extend.Runes) + } + case 16: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.fileElements = toElements[ast.FileElement](toFileElement, protoDollar[1].svc.Node, protoDollar[1].svc.Runes) + } + case 17: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.fileElements = nil + } + case 18: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.bs = []*ast.RuneNode{protoDollar[1].b} + } + case 19: + protoDollar = protoS[protopt-2 : protopt+1] + { + protoVAL.bs = append(protoDollar[1].bs, protoDollar[2].b) + } + case 20: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.bs = protoDollar[1].bs + } + case 21: + protoDollar = protoS[protopt-0 : protopt+1] + { + protoVAL.bs = nil + } + case 22: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.b = protoDollar[1].b + } + case 23: + protoDollar = protoS[protopt-0 : protopt+1] + { + protolex.(*protoLex).Error("syntax error: expecting ';'") + protoVAL.b = nil + } + case 24: + protoDollar = protoS[protopt-4 : protopt+1] + { + protoVAL.syn = ast.NewSyntaxNode(protoDollar[1].id.ToKeyword(), protoDollar[2].b, toStringValueNode(protoDollar[3].str), protoDollar[4].b) + } + case 25: + protoDollar = protoS[protopt-4 : protopt+1] + { + protoVAL.ed = ast.NewEditionNode(protoDollar[1].id.ToKeyword(), protoDollar[2].b, toStringValueNode(protoDollar[3].str), protoDollar[4].b) + } + case 26: + protoDollar = protoS[protopt-3 : protopt+1] + { + semi, extra := protolex.(*protoLex).requireSemicolon(protoDollar[3].bs) + protoVAL.imprt = newNodeWithRunes(ast.NewImportNode(protoDollar[1].id.ToKeyword(), nil, nil, toStringValueNode(protoDollar[2].str), semi), extra...) + } + case 27: + protoDollar = protoS[protopt-4 : protopt+1] + { + semi, extra := protolex.(*protoLex).requireSemicolon(protoDollar[4].bs) + protoVAL.imprt = newNodeWithRunes(ast.NewImportNode(protoDollar[1].id.ToKeyword(), nil, protoDollar[2].id.ToKeyword(), toStringValueNode(protoDollar[3].str), semi), extra...) 
+ } + case 28: + protoDollar = protoS[protopt-4 : protopt+1] + { + semi, extra := protolex.(*protoLex).requireSemicolon(protoDollar[4].bs) + protoVAL.imprt = newNodeWithRunes(ast.NewImportNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id.ToKeyword(), nil, toStringValueNode(protoDollar[3].str), semi), extra...) + } + case 29: + protoDollar = protoS[protopt-3 : protopt+1] + { + semi, extra := protolex.(*protoLex).requireSemicolon(protoDollar[3].bs) + protoVAL.pkg = newNodeWithRunes(ast.NewPackageNode(protoDollar[1].id.ToKeyword(), protoDollar[2].cid.toIdentValueNode(nil), semi), extra...) + } + case 30: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.cid = &identSlices{idents: []*ast.IdentNode{protoDollar[1].id}} + } + case 31: + protoDollar = protoS[protopt-3 : protopt+1] + { + protoDollar[1].cid.idents = append(protoDollar[1].cid.idents, protoDollar[3].id) + protoDollar[1].cid.dots = append(protoDollar[1].cid.dots, protoDollar[2].b) + protoVAL.cid = protoDollar[1].cid + } + case 32: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.cid = &identSlices{idents: []*ast.IdentNode{protoDollar[1].cidPart.Node}, dots: protoDollar[1].cidPart.Runes} + } + case 33: + protoDollar = protoS[protopt-2 : protopt+1] + { + protoDollar[1].cid.idents = append(protoDollar[1].cid.idents, protoDollar[2].cidPart.Node) + protoDollar[1].cid.dots = append(protoDollar[1].cid.dots, protoDollar[2].cidPart.Runes...) + protoVAL.cid = protoDollar[1].cid + } + case 34: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.cid = &identSlices{idents: []*ast.IdentNode{protoDollar[1].cidPart.Node}, dots: protoDollar[1].cidPart.Runes} + } + case 35: + protoDollar = protoS[protopt-2 : protopt+1] + { + protoDollar[1].cid.idents = append(protoDollar[1].cid.idents, protoDollar[2].cidPart.Node) + protoDollar[1].cid.dots = append(protoDollar[1].cid.dots, protoDollar[2].cidPart.Runes...) 
+ protoVAL.cid = protoDollar[1].cid + } + case 36: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.cidPart = newNodeWithRunes(protoDollar[1].id) + } + case 37: + protoDollar = protoS[protopt-1 : protopt+1] + { + protolex.(*protoLex).Error("syntax error: unexpected '.'") + protoVAL.cidPart = protoDollar[1].cidPart + } + case 38: + protoDollar = protoS[protopt-2 : protopt+1] + { + protoVAL.cidPart = newNodeWithRunes(protoDollar[1].id, protoDollar[2].b) + } + case 39: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.cid = &identSlices{idents: []*ast.IdentNode{protoDollar[1].id}} + } + case 40: + protoDollar = protoS[protopt-3 : protopt+1] + { + protoDollar[1].cid.idents = append(protoDollar[1].cid.idents, protoDollar[3].id) + protoDollar[1].cid.dots = append(protoDollar[1].cid.dots, protoDollar[2].b) + protoVAL.cid = protoDollar[1].cid + } + case 41: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.cid = &identSlices{idents: []*ast.IdentNode{protoDollar[1].id}} + } + case 42: + protoDollar = protoS[protopt-3 : protopt+1] + { + protoDollar[1].cid.idents = append(protoDollar[1].cid.idents, protoDollar[3].id) + protoDollar[1].cid.dots = append(protoDollar[1].cid.dots, protoDollar[2].b) + protoVAL.cid = protoDollar[1].cid + } + case 43: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.cid = &identSlices{idents: []*ast.IdentNode{protoDollar[1].id}} + } + case 44: + protoDollar = protoS[protopt-3 : protopt+1] + { + protoDollar[1].cid.idents = append(protoDollar[1].cid.idents, protoDollar[3].id) + protoDollar[1].cid.dots = append(protoDollar[1].cid.dots, protoDollar[2].b) + protoVAL.cid = protoDollar[1].cid + } + case 45: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.cid = &identSlices{idents: []*ast.IdentNode{protoDollar[1].id}} + } + case 46: + protoDollar = protoS[protopt-3 : protopt+1] + { + protoDollar[1].cid.idents = append(protoDollar[1].cid.idents, protoDollar[3].id) + protoDollar[1].cid.dots = append(protoDollar[1].cid.dots, protoDollar[2].b) + protoVAL.cid = protoDollar[1].cid + } + case 47: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.cid = &identSlices{idents: []*ast.IdentNode{protoDollar[1].cidPart.Node}, dots: protoDollar[1].cidPart.Runes} + } + case 48: + protoDollar = protoS[protopt-2 : protopt+1] + { + protoDollar[1].cid.idents = append(protoDollar[1].cid.idents, protoDollar[2].cidPart.Node) + protoDollar[1].cid.dots = append(protoDollar[1].cid.dots, protoDollar[2].cidPart.Runes...) + protoVAL.cid = protoDollar[1].cid + } + case 49: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.cid = &identSlices{idents: []*ast.IdentNode{protoDollar[1].cidPart.Node}, dots: protoDollar[1].cidPart.Runes} + } + case 50: + protoDollar = protoS[protopt-2 : protopt+1] + { + protoDollar[1].cid.idents = append(protoDollar[1].cid.idents, protoDollar[2].cidPart.Node) + protoDollar[1].cid.dots = append(protoDollar[1].cid.dots, protoDollar[2].cidPart.Runes...) 
+ protoVAL.cid = protoDollar[1].cid + } + case 51: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.cidPart = newNodeWithRunes(protoDollar[1].id) + } + case 52: + protoDollar = protoS[protopt-1 : protopt+1] + { + protolex.(*protoLex).Error("syntax error: unexpected '.'") + protoVAL.cidPart = protoDollar[1].cidPart + } + case 53: + protoDollar = protoS[protopt-2 : protopt+1] + { + protoVAL.cidPart = newNodeWithRunes(protoDollar[1].id, protoDollar[2].b) + } + case 54: + protoDollar = protoS[protopt-5 : protopt+1] + { + optName := ast.NewOptionNameNode(protoDollar[2].optNms.refs, protoDollar[2].optNms.dots) + protoVAL.optRaw = ast.NewOptionNode(protoDollar[1].id.ToKeyword(), optName, protoDollar[3].b, protoDollar[4].v, protoDollar[5].b) + } + case 55: + protoDollar = protoS[protopt-5 : protopt+1] + { + optName := ast.NewOptionNameNode(protoDollar[2].optNms.refs, protoDollar[2].optNms.dots) + semi, extra := protolex.(*protoLex).requireSemicolon(protoDollar[5].bs) + protoVAL.opt = newNodeWithRunes(ast.NewOptionNode(protoDollar[1].id.ToKeyword(), optName, protoDollar[3].b, protoDollar[4].v, semi), extra...) + } + case 56: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.refRaw = ast.NewFieldReferenceNode(protoDollar[1].id) + } + case 57: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.refRaw = protoDollar[1].refRaw + } + case 58: + protoDollar = protoS[protopt-2 : protopt+1] + { + protoVAL.ref = newNodeWithRunes(protoDollar[1].refRaw, protoDollar[2].b) + } + case 59: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.ref = newNodeWithRunes(protoDollar[1].refRaw) + } + case 60: + protoDollar = protoS[protopt-1 : protopt+1] + { + protolex.(*protoLex).Error("syntax error: unexpected '.'") + protoVAL.ref = protoDollar[1].ref + } + case 61: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.optNms = &fieldRefSlices{refs: []*ast.FieldReferenceNode{protoDollar[1].ref.Node}, dots: protoDollar[1].ref.Runes} + } + case 62: + protoDollar = protoS[protopt-2 : protopt+1] + { + protoDollar[1].optNms.refs = append(protoDollar[1].optNms.refs, protoDollar[2].ref.Node) + protoDollar[1].optNms.dots = append(protoDollar[1].optNms.dots, protoDollar[2].ref.Runes...) + protoVAL.optNms = protoDollar[1].optNms + } + case 63: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.optNms = &fieldRefSlices{refs: []*ast.FieldReferenceNode{protoDollar[1].ref.Node}, dots: protoDollar[1].ref.Runes} + } + case 64: + protoDollar = protoS[protopt-2 : protopt+1] + { + protoDollar[1].optNms.refs = append(protoDollar[1].optNms.refs, protoDollar[2].ref.Node) + protoDollar[1].optNms.dots = append(protoDollar[1].optNms.dots, protoDollar[2].ref.Runes...) 
+ protoVAL.optNms = protoDollar[1].optNms + } + case 65: + protoDollar = protoS[protopt-3 : protopt+1] + { + protoVAL.refRaw = ast.NewExtensionFieldReferenceNode(protoDollar[1].b, protoDollar[2].tid, protoDollar[3].b) + } + case 68: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.v = toStringValueNode(protoDollar[1].str) + } + case 71: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.v = protoDollar[1].id + } + case 72: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.v = protoDollar[1].f + } + case 73: + protoDollar = protoS[protopt-2 : protopt+1] + { + protoVAL.v = ast.NewSignedFloatLiteralNode(protoDollar[1].b, protoDollar[2].f) + } + case 74: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.v = protoDollar[1].i + } + case 75: + protoDollar = protoS[protopt-2 : protopt+1] + { + if protoDollar[2].i.Val > math.MaxInt64+1 { + // can't represent as int so treat as float literal + protoVAL.v = ast.NewSignedFloatLiteralNode(protoDollar[1].b, protoDollar[2].i) + } else { + protoVAL.v = ast.NewNegativeIntLiteralNode(protoDollar[1].b, protoDollar[2].i) + } + } + case 76: + protoDollar = protoS[protopt-2 : protopt+1] + { + f := ast.NewSpecialFloatLiteralNode(protoDollar[2].id.ToKeyword()) + protoVAL.v = ast.NewSignedFloatLiteralNode(protoDollar[1].b, f) + } + case 77: + protoDollar = protoS[protopt-2 : protopt+1] + { + f := ast.NewSpecialFloatLiteralNode(protoDollar[2].id.ToKeyword()) + protoVAL.v = ast.NewSignedFloatLiteralNode(protoDollar[1].b, f) + } + case 78: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.str = []*ast.StringLiteralNode{protoDollar[1].s} + } + case 79: + protoDollar = protoS[protopt-2 : protopt+1] + { + protoVAL.str = append(protoDollar[1].str, protoDollar[2].s) + } + case 80: + protoDollar = protoS[protopt-3 : protopt+1] + { + if protoDollar[2].msgLitFlds == nil { + protoVAL.v = ast.NewMessageLiteralNode(protoDollar[1].b, nil, nil, protoDollar[3].b) + } else { + fields, delimiters := protoDollar[2].msgLitFlds.toNodes() + protoVAL.v = ast.NewMessageLiteralNode(protoDollar[1].b, fields, delimiters, protoDollar[3].b) + } + } + case 81: + protoDollar = protoS[protopt-2 : protopt+1] + { + protoVAL.v = ast.NewMessageLiteralNode(protoDollar[1].b, nil, nil, protoDollar[2].b) + } + case 84: + protoDollar = protoS[protopt-2 : protopt+1] + { + if protoDollar[1].msgLitFlds != nil { + protoDollar[1].msgLitFlds.next = protoDollar[2].msgLitFlds + protoVAL.msgLitFlds = protoDollar[1].msgLitFlds + } else { + protoVAL.msgLitFlds = protoDollar[2].msgLitFlds + } + } + case 85: + protoDollar = protoS[protopt-1 : protopt+1] + { + if protoDollar[1].msgLitFld != nil { + protoVAL.msgLitFlds = &messageFieldList{field: protoDollar[1].msgLitFld} + } else { + protoVAL.msgLitFlds = nil + } + } + case 86: + protoDollar = protoS[protopt-2 : protopt+1] + { + if protoDollar[1].msgLitFld != nil { + protoVAL.msgLitFlds = &messageFieldList{field: protoDollar[1].msgLitFld, delimiter: protoDollar[2].b} + } else { + protoVAL.msgLitFlds = nil + } + } + case 87: + protoDollar = protoS[protopt-2 : protopt+1] + { + if protoDollar[1].msgLitFld != nil { + protoVAL.msgLitFlds = &messageFieldList{field: protoDollar[1].msgLitFld, delimiter: protoDollar[2].b} + } else { + protoVAL.msgLitFlds = nil + } + } + case 88: + protoDollar = protoS[protopt-2 : protopt+1] + { + protoVAL.msgLitFlds = nil + } + case 89: + protoDollar = protoS[protopt-2 : protopt+1] + { + protoVAL.msgLitFlds = nil + } + case 90: + protoDollar = protoS[protopt-1 : protopt+1] + { + 
protoVAL.msgLitFlds = nil + } + case 91: + protoDollar = protoS[protopt-3 : protopt+1] + { + if protoDollar[1].refRaw != nil && protoDollar[2].b != nil { + protoVAL.msgLitFld = ast.NewMessageFieldNode(protoDollar[1].refRaw, protoDollar[2].b, protoDollar[3].v) + } else { + protoVAL.msgLitFld = nil + } + } + case 92: + protoDollar = protoS[protopt-2 : protopt+1] + { + if protoDollar[1].refRaw != nil && protoDollar[2].v != nil { + protoVAL.msgLitFld = ast.NewMessageFieldNode(protoDollar[1].refRaw, nil, protoDollar[2].v) + } else { + protoVAL.msgLitFld = nil + } + } + case 93: + protoDollar = protoS[protopt-3 : protopt+1] + { + protoVAL.msgLitFld = nil + } + case 94: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.refRaw = ast.NewFieldReferenceNode(protoDollar[1].id) + } + case 95: + protoDollar = protoS[protopt-3 : protopt+1] + { + protoVAL.refRaw = ast.NewExtensionFieldReferenceNode(protoDollar[1].b, protoDollar[2].cid.toIdentValueNode(nil), protoDollar[3].b) + } + case 96: + protoDollar = protoS[protopt-5 : protopt+1] + { + protoVAL.refRaw = ast.NewAnyTypeReferenceNode(protoDollar[1].b, protoDollar[2].cid.toIdentValueNode(nil), protoDollar[3].b, protoDollar[4].cid.toIdentValueNode(nil), protoDollar[5].b) + } + case 97: + protoDollar = protoS[protopt-3 : protopt+1] + { + protoVAL.refRaw = nil + } + case 101: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.v = toStringValueNode(protoDollar[1].str) + } + case 103: + protoDollar = protoS[protopt-2 : protopt+1] + { + kw := protoDollar[2].id.ToKeyword() + switch strings.ToLower(kw.Val) { + case "inf", "infinity", "nan": + // these are acceptable + default: + // anything else is not + protolex.(*protoLex).Error(`only identifiers "inf", "infinity", or "nan" may appear after negative sign`) + } + // we'll validate the identifier later + f := ast.NewSpecialFloatLiteralNode(kw) + protoVAL.v = ast.NewSignedFloatLiteralNode(protoDollar[1].b, f) + } + case 104: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.v = protoDollar[1].id + } + case 108: + protoDollar = protoS[protopt-3 : protopt+1] + { + if protoDollar[2].msgLitFlds == nil { + protoVAL.v = ast.NewMessageLiteralNode(protoDollar[1].b, nil, nil, protoDollar[3].b) + } else { + fields, delimiters := protoDollar[2].msgLitFlds.toNodes() + protoVAL.v = ast.NewMessageLiteralNode(protoDollar[1].b, fields, delimiters, protoDollar[3].b) + } + } + case 109: + protoDollar = protoS[protopt-2 : protopt+1] + { + protoVAL.v = ast.NewMessageLiteralNode(protoDollar[1].b, nil, nil, protoDollar[2].b) + } + case 110: + protoDollar = protoS[protopt-3 : protopt+1] + { + if protoDollar[2].sl == nil { + protoVAL.v = ast.NewArrayLiteralNode(protoDollar[1].b, nil, nil, protoDollar[3].b) + } else { + protoVAL.v = ast.NewArrayLiteralNode(protoDollar[1].b, protoDollar[2].sl.vals, protoDollar[2].sl.commas, protoDollar[3].b) + } + } + case 111: + protoDollar = protoS[protopt-2 : protopt+1] + { + protoVAL.v = ast.NewArrayLiteralNode(protoDollar[1].b, nil, nil, protoDollar[2].b) + } + case 112: + protoDollar = protoS[protopt-3 : protopt+1] + { + protoVAL.v = ast.NewArrayLiteralNode(protoDollar[1].b, nil, nil, protoDollar[3].b) + } + case 113: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.sl = &valueSlices{vals: []ast.ValueNode{protoDollar[1].v}} + } + case 114: + protoDollar = protoS[protopt-3 : protopt+1] + { + protoDollar[1].sl.vals = append(protoDollar[1].sl.vals, protoDollar[3].v) + protoDollar[1].sl.commas = append(protoDollar[1].sl.commas, protoDollar[2].b) + protoVAL.sl 
= protoDollar[1].sl + } + case 117: + protoDollar = protoS[protopt-3 : protopt+1] + { + if protoDollar[2].sl == nil { + protoVAL.v = ast.NewArrayLiteralNode(protoDollar[1].b, nil, nil, protoDollar[3].b) + } else { + protoVAL.v = ast.NewArrayLiteralNode(protoDollar[1].b, protoDollar[2].sl.vals, protoDollar[2].sl.commas, protoDollar[3].b) + } + } + case 118: + protoDollar = protoS[protopt-2 : protopt+1] + { + protoVAL.v = ast.NewArrayLiteralNode(protoDollar[1].b, nil, nil, protoDollar[2].b) + } + case 119: + protoDollar = protoS[protopt-3 : protopt+1] + { + protoVAL.v = ast.NewArrayLiteralNode(protoDollar[1].b, nil, nil, protoDollar[3].b) + } + case 120: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.sl = &valueSlices{vals: []ast.ValueNode{protoDollar[1].v}} + } + case 121: + protoDollar = protoS[protopt-3 : protopt+1] + { + protoDollar[1].sl.vals = append(protoDollar[1].sl.vals, protoDollar[3].v) + protoDollar[1].sl.commas = append(protoDollar[1].sl.commas, protoDollar[2].b) + protoVAL.sl = protoDollar[1].sl + } + case 122: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.tid = protoDollar[1].cid.toIdentValueNode(nil) + } + case 123: + protoDollar = protoS[protopt-2 : protopt+1] + { + protoVAL.tid = protoDollar[2].cid.toIdentValueNode(protoDollar[1].b) + } + case 124: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.tid = protoDollar[1].cid.toIdentValueNode(nil) + } + case 125: + protoDollar = protoS[protopt-2 : protopt+1] + { + protoVAL.tid = protoDollar[2].cid.toIdentValueNode(protoDollar[1].b) + } + case 126: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.tid = protoDollar[1].cid.toIdentValueNode(nil) + } + case 127: + protoDollar = protoS[protopt-2 : protopt+1] + { + protoVAL.tid = protoDollar[2].cid.toIdentValueNode(protoDollar[1].b) + } + case 128: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.tid = protoDollar[1].cid.toIdentValueNode(nil) + } + case 129: + protoDollar = protoS[protopt-2 : protopt+1] + { + protoVAL.tid = protoDollar[2].cid.toIdentValueNode(protoDollar[1].b) + } + case 130: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.tid = protoDollar[1].cid.toIdentValueNode(nil) + } + case 131: + protoDollar = protoS[protopt-2 : protopt+1] + { + protoVAL.tid = protoDollar[2].cid.toIdentValueNode(protoDollar[1].b) + } + case 132: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.tid = protoDollar[1].cid.toIdentValueNode(nil) + } + case 133: + protoDollar = protoS[protopt-2 : protopt+1] + { + protoVAL.tid = protoDollar[2].cid.toIdentValueNode(protoDollar[1].b) + } + case 137: + protoDollar = protoS[protopt-3 : protopt+1] + { + protoVAL.cmpctOpts = ast.NewCompactOptionsNode(protoDollar[1].b, protoDollar[2].opts.options, protoDollar[2].opts.commas, protoDollar[3].b) + } + case 138: + protoDollar = protoS[protopt-2 : protopt+1] + { + protolex.(*protoLex).Error("compact options must have at least one option") + protoVAL.cmpctOpts = ast.NewCompactOptionsNode(protoDollar[1].b, nil, nil, protoDollar[2].b) + } + case 139: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.opts = &compactOptionSlices{options: []*ast.OptionNode{protoDollar[1].opt.Node}, commas: protoDollar[1].opt.Runes} + } + case 140: + protoDollar = protoS[protopt-2 : protopt+1] + { + protoDollar[1].opts.options = append(protoDollar[1].opts.options, protoDollar[2].opt.Node) + protoDollar[1].opts.commas = append(protoDollar[1].opts.commas, protoDollar[2].opt.Runes...) 
+ protoVAL.opts = protoDollar[1].opts + } + case 141: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.opts = &compactOptionSlices{options: []*ast.OptionNode{protoDollar[1].opt.Node}, commas: protoDollar[1].opt.Runes} + } + case 142: + protoDollar = protoS[protopt-2 : protopt+1] + { + protoDollar[1].opts.options = append(protoDollar[1].opts.options, protoDollar[2].opt.Node) + protoDollar[1].opts.commas = append(protoDollar[1].opts.commas, protoDollar[2].opt.Runes...) + protoVAL.opts = protoDollar[1].opts + } + case 143: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.opt = newNodeWithRunes(protoDollar[1].optRaw) + } + case 144: + protoDollar = protoS[protopt-1 : protopt+1] + { + protolex.(*protoLex).Error("syntax error: unexpected ','") + protoVAL.opt = protoDollar[1].opt + } + case 145: + protoDollar = protoS[protopt-2 : protopt+1] + { + protoVAL.opt = newNodeWithRunes(protoDollar[1].optRaw, protoDollar[2].b) + } + case 146: + protoDollar = protoS[protopt-3 : protopt+1] + { + optName := ast.NewOptionNameNode(protoDollar[1].optNms.refs, protoDollar[1].optNms.dots) + protoVAL.optRaw = ast.NewCompactOptionNode(optName, protoDollar[2].b, protoDollar[3].v) + } + case 147: + protoDollar = protoS[protopt-1 : protopt+1] + { + optName := ast.NewOptionNameNode(protoDollar[1].optNms.refs, protoDollar[1].optNms.dots) + protolex.(*protoLex).Error("compact option must have a value") + protoVAL.optRaw = ast.NewCompactOptionNode(optName, nil, nil) + } + case 148: + protoDollar = protoS[protopt-8 : protopt+1] + { + protoVAL.grp = ast.NewGroupNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id.ToKeyword(), protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, nil, protoDollar[6].b, protoDollar[7].msgElements, protoDollar[8].b) + } + case 149: + protoDollar = protoS[protopt-9 : protopt+1] + { + protoVAL.grp = ast.NewGroupNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id.ToKeyword(), protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, protoDollar[6].cmpctOpts, protoDollar[7].b, protoDollar[8].msgElements, protoDollar[9].b) + } + case 150: + protoDollar = protoS[protopt-9 : protopt+1] + { + protoVAL.msgGrp = newNodeWithRunes(ast.NewGroupNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id.ToKeyword(), protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, nil, protoDollar[6].b, protoDollar[7].msgElements, protoDollar[8].b), protoDollar[9].bs...) + } + case 151: + protoDollar = protoS[protopt-10 : protopt+1] + { + protoVAL.msgGrp = newNodeWithRunes(ast.NewGroupNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id.ToKeyword(), protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, protoDollar[6].cmpctOpts, protoDollar[7].b, protoDollar[8].msgElements, protoDollar[9].b), protoDollar[10].bs...) + } + case 152: + protoDollar = protoS[protopt-7 : protopt+1] + { + protoVAL.msgGrp = newNodeWithRunes(ast.NewGroupNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id.ToKeyword(), protoDollar[3].id, nil, nil, nil, protoDollar[4].b, protoDollar[5].msgElements, protoDollar[6].b), protoDollar[7].bs...) + } + case 153: + protoDollar = protoS[protopt-8 : protopt+1] + { + protoVAL.msgGrp = newNodeWithRunes(ast.NewGroupNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id.ToKeyword(), protoDollar[3].id, nil, nil, protoDollar[4].cmpctOpts, protoDollar[5].b, protoDollar[6].msgElements, protoDollar[7].b), protoDollar[8].bs...) 
+ } + case 154: + protoDollar = protoS[protopt-6 : protopt+1] + { + protoVAL.oo = newNodeWithRunes(ast.NewOneofNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id, protoDollar[3].b, protoDollar[4].ooElements, protoDollar[5].b), protoDollar[6].bs...) + } + case 155: + protoDollar = protoS[protopt-0 : protopt+1] + { + protoVAL.ooElements = nil + } + case 157: + protoDollar = protoS[protopt-2 : protopt+1] + { + if protoDollar[2].ooElement != nil { + protoVAL.ooElements = append(protoDollar[1].ooElements, protoDollar[2].ooElement) + } else { + protoVAL.ooElements = protoDollar[1].ooElements + } + } + case 158: + protoDollar = protoS[protopt-1 : protopt+1] + { + if protoDollar[1].ooElement != nil { + protoVAL.ooElements = []ast.OneofElement{protoDollar[1].ooElement} + } else { + protoVAL.ooElements = nil + } + } + case 159: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.ooElement = protoDollar[1].optRaw + } + case 160: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.ooElement = protoDollar[1].fld + } + case 161: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.ooElement = protoDollar[1].grp + } + case 162: + protoDollar = protoS[protopt-2 : protopt+1] + { + protoVAL.ooElement = nil + } + case 163: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.ooElement = nil + } + case 164: + protoDollar = protoS[protopt-5 : protopt+1] + { + protoVAL.fld = ast.NewFieldNode(nil, protoDollar[1].tid, protoDollar[2].id, protoDollar[3].b, protoDollar[4].i, nil, protoDollar[5].b) + } + case 165: + protoDollar = protoS[protopt-6 : protopt+1] + { + protoVAL.fld = ast.NewFieldNode(nil, protoDollar[1].tid, protoDollar[2].id, protoDollar[3].b, protoDollar[4].i, protoDollar[5].cmpctOpts, protoDollar[6].b) + } + case 166: + protoDollar = protoS[protopt-3 : protopt+1] + { + protoVAL.fld = ast.NewFieldNode(nil, protoDollar[1].tid, protoDollar[2].id, nil, nil, nil, protoDollar[3].b) + } + case 167: + protoDollar = protoS[protopt-4 : protopt+1] + { + protoVAL.fld = ast.NewFieldNode(nil, protoDollar[1].tid, protoDollar[2].id, nil, nil, protoDollar[3].cmpctOpts, protoDollar[4].b) + } + case 168: + protoDollar = protoS[protopt-7 : protopt+1] + { + protoVAL.grp = ast.NewGroupNode(nil, protoDollar[1].id.ToKeyword(), protoDollar[2].id, protoDollar[3].b, protoDollar[4].i, nil, protoDollar[5].b, protoDollar[6].msgElements, protoDollar[7].b) + } + case 169: + protoDollar = protoS[protopt-8 : protopt+1] + { + protoVAL.grp = ast.NewGroupNode(nil, protoDollar[1].id.ToKeyword(), protoDollar[2].id, protoDollar[3].b, protoDollar[4].i, protoDollar[5].cmpctOpts, protoDollar[6].b, protoDollar[7].msgElements, protoDollar[8].b) + } + case 170: + protoDollar = protoS[protopt-5 : protopt+1] + { + protoVAL.grp = ast.NewGroupNode(nil, protoDollar[1].id.ToKeyword(), protoDollar[2].id, nil, nil, nil, protoDollar[3].b, protoDollar[4].msgElements, protoDollar[5].b) + } + case 171: + protoDollar = protoS[protopt-6 : protopt+1] + { + protoVAL.grp = ast.NewGroupNode(nil, protoDollar[1].id.ToKeyword(), protoDollar[2].id, nil, nil, protoDollar[3].cmpctOpts, protoDollar[4].b, protoDollar[5].msgElements, protoDollar[6].b) + } + case 172: + protoDollar = protoS[protopt-5 : protopt+1] + { + semi, extra := protolex.(*protoLex).requireSemicolon(protoDollar[5].bs) + protoVAL.mapFld = newNodeWithRunes(ast.NewMapFieldNode(protoDollar[1].mapType, protoDollar[2].id, protoDollar[3].b, protoDollar[4].i, nil, semi), extra...) 
+ } + case 173: + protoDollar = protoS[protopt-6 : protopt+1] + { + semi, extra := protolex.(*protoLex).requireSemicolon(protoDollar[6].bs) + protoVAL.mapFld = newNodeWithRunes(ast.NewMapFieldNode(protoDollar[1].mapType, protoDollar[2].id, protoDollar[3].b, protoDollar[4].i, protoDollar[5].cmpctOpts, semi), extra...) + } + case 174: + protoDollar = protoS[protopt-3 : protopt+1] + { + semi, extra := protolex.(*protoLex).requireSemicolon(protoDollar[3].bs) + protoVAL.mapFld = newNodeWithRunes(ast.NewMapFieldNode(protoDollar[1].mapType, protoDollar[2].id, nil, nil, nil, semi), extra...) + } + case 175: + protoDollar = protoS[protopt-4 : protopt+1] + { + semi, extra := protolex.(*protoLex).requireSemicolon(protoDollar[4].bs) + protoVAL.mapFld = newNodeWithRunes(ast.NewMapFieldNode(protoDollar[1].mapType, protoDollar[2].id, nil, nil, protoDollar[3].cmpctOpts, semi), extra...) + } + case 176: + protoDollar = protoS[protopt-6 : protopt+1] + { + protoVAL.mapType = ast.NewMapTypeNode(protoDollar[1].id.ToKeyword(), protoDollar[2].b, protoDollar[3].id, protoDollar[4].b, protoDollar[5].tid, protoDollar[6].b) + } + case 189: + protoDollar = protoS[protopt-4 : protopt+1] + { + // TODO: Tolerate a missing semicolon here. This currnelty creates a shift/reduce conflict + // between `extensions 1 to 10` and `extensions 1` followed by `to = 10`. + protoVAL.ext = newNodeWithRunes(ast.NewExtensionRangeNode(protoDollar[1].id.ToKeyword(), protoDollar[2].rngs.ranges, protoDollar[2].rngs.commas, nil, protoDollar[3].b), protoDollar[4].bs...) + } + case 190: + protoDollar = protoS[protopt-4 : protopt+1] + { + semi, extra := protolex.(*protoLex).requireSemicolon(protoDollar[4].bs) + protoVAL.ext = newNodeWithRunes(ast.NewExtensionRangeNode(protoDollar[1].id.ToKeyword(), protoDollar[2].rngs.ranges, protoDollar[2].rngs.commas, protoDollar[3].cmpctOpts, semi), extra...) 
+ }
+ case 191:
+ protoDollar = protoS[protopt-1 : protopt+1]
+ {
+ protoVAL.rngs = &rangeSlices{ranges: []*ast.RangeNode{protoDollar[1].rng}}
+ }
+ case 192:
+ protoDollar = protoS[protopt-3 : protopt+1]
+ {
+ protoDollar[1].rngs.ranges = append(protoDollar[1].rngs.ranges, protoDollar[3].rng)
+ protoDollar[1].rngs.commas = append(protoDollar[1].rngs.commas, protoDollar[2].b)
+ protoVAL.rngs = protoDollar[1].rngs
+ }
+ case 193:
+ protoDollar = protoS[protopt-1 : protopt+1]
+ {
+ protoVAL.rng = ast.NewRangeNode(protoDollar[1].i, nil, nil, nil)
+ }
+ case 194:
+ protoDollar = protoS[protopt-3 : protopt+1]
+ {
+ protoVAL.rng = ast.NewRangeNode(protoDollar[1].i, protoDollar[2].id.ToKeyword(), protoDollar[3].i, nil)
+ }
+ case 195:
+ protoDollar = protoS[protopt-3 : protopt+1]
+ {
+ protoVAL.rng = ast.NewRangeNode(protoDollar[1].i, protoDollar[2].id.ToKeyword(), nil, protoDollar[3].id.ToKeyword())
+ }
+ case 196:
+ protoDollar = protoS[protopt-1 : protopt+1]
+ {
+ protoVAL.rngs = &rangeSlices{ranges: []*ast.RangeNode{protoDollar[1].rng}}
+ }
+ case 197:
+ protoDollar = protoS[protopt-3 : protopt+1]
+ {
+ protoDollar[1].rngs.ranges = append(protoDollar[1].rngs.ranges, protoDollar[3].rng)
+ protoDollar[1].rngs.commas = append(protoDollar[1].rngs.commas, protoDollar[2].b)
+ protoVAL.rngs = protoDollar[1].rngs
+ }
+ case 198:
+ protoDollar = protoS[protopt-1 : protopt+1]
+ {
+ protoVAL.rng = ast.NewRangeNode(protoDollar[1].il, nil, nil, nil)
+ }
+ case 199:
+ protoDollar = protoS[protopt-3 : protopt+1]
+ {
+ protoVAL.rng = ast.NewRangeNode(protoDollar[1].il, protoDollar[2].id.ToKeyword(), protoDollar[3].il, nil)
+ }
+ case 200:
+ protoDollar = protoS[protopt-3 : protopt+1]
+ {
+ protoVAL.rng = ast.NewRangeNode(protoDollar[1].il, protoDollar[2].id.ToKeyword(), nil, protoDollar[3].id.ToKeyword())
+ }
+ case 201:
+ protoDollar = protoS[protopt-1 : protopt+1]
+ {
+ protoVAL.il = protoDollar[1].i
+ }
+ case 202:
+ protoDollar = protoS[protopt-2 : protopt+1]
+ {
+ protoVAL.il = ast.NewNegativeIntLiteralNode(protoDollar[1].b, protoDollar[2].i)
+ }
+ case 203:
+ protoDollar = protoS[protopt-4 : protopt+1]
+ {
+ // TODO: Tolerate a missing semicolon here. This currently creates a shift/reduce conflict
+ // between `reserved 1 to 10` and `reserved 1` followed by `to = 10`.
+ protoVAL.resvd = newNodeWithRunes(ast.NewReservedRangesNode(protoDollar[1].id.ToKeyword(), protoDollar[2].rngs.ranges, protoDollar[2].rngs.commas, protoDollar[3].b), protoDollar[4].bs...)
+ }
+ case 205:
+ protoDollar = protoS[protopt-4 : protopt+1]
+ {
+ // TODO: Tolerate a missing semicolon here. This currently creates a shift/reduce conflict
+ // between `reserved 1 to 10` and `reserved 1` followed by `to = 10`.
+ protoVAL.resvd = newNodeWithRunes(ast.NewReservedRangesNode(protoDollar[1].id.ToKeyword(), protoDollar[2].rngs.ranges, protoDollar[2].rngs.commas, protoDollar[3].b), protoDollar[4].bs...)
+ }
+ case 207:
+ protoDollar = protoS[protopt-3 : protopt+1]
+ {
+ semi, extra := protolex.(*protoLex).requireSemicolon(protoDollar[3].bs)
+ protoVAL.resvd = newNodeWithRunes(ast.NewReservedNamesNode(protoDollar[1].id.ToKeyword(), protoDollar[2].names.names, protoDollar[2].names.commas, semi), extra...)
+ }
+ case 208:
+ protoDollar = protoS[protopt-3 : protopt+1]
+ {
+ semi, extra := protolex.(*protoLex).requireSemicolon(protoDollar[3].bs)
+ protoVAL.resvd = newNodeWithRunes(ast.NewReservedIdentifiersNode(protoDollar[1].id.ToKeyword(), protoDollar[2].names.idents, protoDollar[2].names.commas, semi), extra...)
+ } + case 209: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.names = &nameSlices{names: []ast.StringValueNode{toStringValueNode(protoDollar[1].str)}} + } + case 210: + protoDollar = protoS[protopt-3 : protopt+1] + { + protoDollar[1].names.names = append(protoDollar[1].names.names, toStringValueNode(protoDollar[3].str)) + protoDollar[1].names.commas = append(protoDollar[1].names.commas, protoDollar[2].b) + protoVAL.names = protoDollar[1].names + } + case 211: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.names = &nameSlices{idents: []*ast.IdentNode{protoDollar[1].id}} + } + case 212: + protoDollar = protoS[protopt-3 : protopt+1] + { + protoDollar[1].names.idents = append(protoDollar[1].names.idents, protoDollar[3].id) + protoDollar[1].names.commas = append(protoDollar[1].names.commas, protoDollar[2].b) + protoVAL.names = protoDollar[1].names + } + case 213: + protoDollar = protoS[protopt-6 : protopt+1] + { + protoVAL.en = newNodeWithRunes(ast.NewEnumNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id, protoDollar[3].b, protoDollar[4].enElements, protoDollar[5].b), protoDollar[6].bs...) + } + case 214: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.enElements = prependRunes(toEnumElement, protoDollar[1].bs, nil) + } + case 215: + protoDollar = protoS[protopt-2 : protopt+1] + { + protoVAL.enElements = prependRunes(toEnumElement, protoDollar[1].bs, protoDollar[2].enElements) + } + case 216: + protoDollar = protoS[protopt-2 : protopt+1] + { + protoVAL.enElements = append(protoDollar[1].enElements, protoDollar[2].enElements...) + } + case 217: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.enElements = protoDollar[1].enElements + } + case 218: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.enElements = toElements[ast.EnumElement](toEnumElement, protoDollar[1].opt.Node, protoDollar[1].opt.Runes) + } + case 219: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.enElements = toElements[ast.EnumElement](toEnumElement, protoDollar[1].env.Node, protoDollar[1].env.Runes) + } + case 220: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.enElements = toElements[ast.EnumElement](toEnumElement, protoDollar[1].resvd.Node, protoDollar[1].resvd.Runes) + } + case 221: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.enElements = nil + } + case 222: + protoDollar = protoS[protopt-4 : protopt+1] + { + semi, extra := protolex.(*protoLex).requireSemicolon(protoDollar[4].bs) + protoVAL.env = newNodeWithRunes(ast.NewEnumValueNode(protoDollar[1].id, protoDollar[2].b, protoDollar[3].il, nil, semi), extra...) + } + case 223: + protoDollar = protoS[protopt-5 : protopt+1] + { + semi, extra := protolex.(*protoLex).requireSemicolon(protoDollar[5].bs) + protoVAL.env = newNodeWithRunes(ast.NewEnumValueNode(protoDollar[1].id, protoDollar[2].b, protoDollar[3].il, protoDollar[4].cmpctOpts, semi), extra...) + } + case 224: + protoDollar = protoS[protopt-6 : protopt+1] + { + protoVAL.msg = newNodeWithRunes(ast.NewMessageNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id, protoDollar[3].b, protoDollar[4].msgElements, protoDollar[5].b), protoDollar[6].bs...) 
+ } + case 225: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.msgElements = prependRunes(toMessageElement, protoDollar[1].bs, nil) + } + case 226: + protoDollar = protoS[protopt-2 : protopt+1] + { + protoVAL.msgElements = prependRunes(toMessageElement, protoDollar[1].bs, protoDollar[2].msgElements) + } + case 227: + protoDollar = protoS[protopt-2 : protopt+1] + { + protoVAL.msgElements = append(protoDollar[1].msgElements, protoDollar[2].msgElements...) + } + case 228: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.msgElements = protoDollar[1].msgElements + } + case 229: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.msgElements = toElements[ast.MessageElement](toMessageElement, protoDollar[1].msgFld.Node, protoDollar[1].msgFld.Runes) + } + case 230: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.msgElements = toElements[ast.MessageElement](toMessageElement, protoDollar[1].en.Node, protoDollar[1].en.Runes) + } + case 231: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.msgElements = toElements[ast.MessageElement](toMessageElement, protoDollar[1].msg.Node, protoDollar[1].msg.Runes) + } + case 232: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.msgElements = toElements[ast.MessageElement](toMessageElement, protoDollar[1].extend.Node, protoDollar[1].extend.Runes) + } + case 233: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.msgElements = toElements[ast.MessageElement](toMessageElement, protoDollar[1].ext.Node, protoDollar[1].ext.Runes) + } + case 234: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.msgElements = toElements[ast.MessageElement](toMessageElement, protoDollar[1].msgGrp.Node, protoDollar[1].msgGrp.Runes) + } + case 235: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.msgElements = toElements[ast.MessageElement](toMessageElement, protoDollar[1].opt.Node, protoDollar[1].opt.Runes) + } + case 236: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.msgElements = toElements[ast.MessageElement](toMessageElement, protoDollar[1].oo.Node, protoDollar[1].oo.Runes) + } + case 237: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.msgElements = toElements[ast.MessageElement](toMessageElement, protoDollar[1].mapFld.Node, protoDollar[1].mapFld.Runes) + } + case 238: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.msgElements = toElements[ast.MessageElement](toMessageElement, protoDollar[1].resvd.Node, protoDollar[1].resvd.Runes) + } + case 239: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.msgElements = nil + } + case 240: + protoDollar = protoS[protopt-6 : protopt+1] + { + semis, extra := protolex.(*protoLex).requireSemicolon(protoDollar[6].bs) + protoVAL.msgFld = newNodeWithRunes(ast.NewFieldNode(protoDollar[1].id.ToKeyword(), protoDollar[2].tid, protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, nil, semis), extra...) + } + case 241: + protoDollar = protoS[protopt-7 : protopt+1] + { + semis, extra := protolex.(*protoLex).requireSemicolon(protoDollar[7].bs) + protoVAL.msgFld = newNodeWithRunes(ast.NewFieldNode(protoDollar[1].id.ToKeyword(), protoDollar[2].tid, protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, protoDollar[6].cmpctOpts, semis), extra...) 
+ } + case 242: + protoDollar = protoS[protopt-5 : protopt+1] + { + semis, extra := protolex.(*protoLex).requireSemicolon(protoDollar[5].bs) + protoVAL.msgFld = newNodeWithRunes(ast.NewFieldNode(nil, protoDollar[1].tid, protoDollar[2].id, protoDollar[3].b, protoDollar[4].i, nil, semis), extra...) + } + case 243: + protoDollar = protoS[protopt-6 : protopt+1] + { + semis, extra := protolex.(*protoLex).requireSemicolon(protoDollar[6].bs) + protoVAL.msgFld = newNodeWithRunes(ast.NewFieldNode(nil, protoDollar[1].tid, protoDollar[2].id, protoDollar[3].b, protoDollar[4].i, protoDollar[5].cmpctOpts, semis), extra...) + } + case 244: + protoDollar = protoS[protopt-4 : protopt+1] + { + semis, extra := protolex.(*protoLex).requireSemicolon(protoDollar[4].bs) + protoVAL.msgFld = newNodeWithRunes(ast.NewFieldNode(protoDollar[1].id.ToKeyword(), protoDollar[2].tid, protoDollar[3].id, nil, nil, nil, semis), extra...) + } + case 245: + protoDollar = protoS[protopt-5 : protopt+1] + { + semis, extra := protolex.(*protoLex).requireSemicolon(protoDollar[5].bs) + protoVAL.msgFld = newNodeWithRunes(ast.NewFieldNode(protoDollar[1].id.ToKeyword(), protoDollar[2].tid, protoDollar[3].id, nil, nil, protoDollar[4].cmpctOpts, semis), extra...) + } + case 246: + protoDollar = protoS[protopt-3 : protopt+1] + { + semis, extra := protolex.(*protoLex).requireSemicolon(protoDollar[3].bs) + protoVAL.msgFld = newNodeWithRunes(ast.NewFieldNode(nil, protoDollar[1].tid, protoDollar[2].id, nil, nil, nil, semis), extra...) + } + case 247: + protoDollar = protoS[protopt-4 : protopt+1] + { + semis, extra := protolex.(*protoLex).requireSemicolon(protoDollar[4].bs) + protoVAL.msgFld = newNodeWithRunes(ast.NewFieldNode(nil, protoDollar[1].tid, protoDollar[2].id, nil, nil, protoDollar[3].cmpctOpts, semis), extra...) + } + case 248: + protoDollar = protoS[protopt-6 : protopt+1] + { + protoVAL.extend = newNodeWithRunes(ast.NewExtendNode(protoDollar[1].id.ToKeyword(), protoDollar[2].tid, protoDollar[3].b, protoDollar[4].extElements, protoDollar[5].b), protoDollar[6].bs...) 
+ } + case 249: + protoDollar = protoS[protopt-0 : protopt+1] + { + protoVAL.extElements = nil + } + case 251: + protoDollar = protoS[protopt-2 : protopt+1] + { + if protoDollar[2].extElement != nil { + protoVAL.extElements = append(protoDollar[1].extElements, protoDollar[2].extElement) + } else { + protoVAL.extElements = protoDollar[1].extElements + } + } + case 252: + protoDollar = protoS[protopt-1 : protopt+1] + { + if protoDollar[1].extElement != nil { + protoVAL.extElements = []ast.ExtendElement{protoDollar[1].extElement} + } else { + protoVAL.extElements = nil + } + } + case 253: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.extElement = protoDollar[1].fld + } + case 254: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.extElement = protoDollar[1].grp + } + case 255: + protoDollar = protoS[protopt-2 : protopt+1] + { + protoVAL.extElement = nil + } + case 256: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.extElement = nil + } + case 257: + protoDollar = protoS[protopt-6 : protopt+1] + { + protoVAL.fld = ast.NewFieldNode(protoDollar[1].id.ToKeyword(), protoDollar[2].tid, protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, nil, protoDollar[6].b) + } + case 258: + protoDollar = protoS[protopt-7 : protopt+1] + { + protoVAL.fld = ast.NewFieldNode(protoDollar[1].id.ToKeyword(), protoDollar[2].tid, protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, protoDollar[6].cmpctOpts, protoDollar[7].b) + } + case 259: + protoDollar = protoS[protopt-5 : protopt+1] + { + protoVAL.fld = ast.NewFieldNode(nil, protoDollar[1].tid, protoDollar[2].id, protoDollar[3].b, protoDollar[4].i, nil, protoDollar[5].b) + } + case 260: + protoDollar = protoS[protopt-6 : protopt+1] + { + protoVAL.fld = ast.NewFieldNode(nil, protoDollar[1].tid, protoDollar[2].id, protoDollar[3].b, protoDollar[4].i, protoDollar[5].cmpctOpts, protoDollar[6].b) + } + case 261: + protoDollar = protoS[protopt-6 : protopt+1] + { + protoVAL.svc = newNodeWithRunes(ast.NewServiceNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id, protoDollar[3].b, protoDollar[4].svcElements, protoDollar[5].b), protoDollar[6].bs...) + } + case 262: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.svcElements = prependRunes(toServiceElement, protoDollar[1].bs, nil) + } + case 263: + protoDollar = protoS[protopt-2 : protopt+1] + { + protoVAL.svcElements = prependRunes(toServiceElement, protoDollar[1].bs, protoDollar[2].svcElements) + } + case 264: + protoDollar = protoS[protopt-2 : protopt+1] + { + protoVAL.svcElements = append(protoDollar[1].svcElements, protoDollar[2].svcElements...) + } + case 265: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.svcElements = protoDollar[1].svcElements + } + case 266: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.svcElements = toElements[ast.ServiceElement](toServiceElement, protoDollar[1].opt.Node, protoDollar[1].opt.Runes) + } + case 267: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.svcElements = toElements[ast.ServiceElement](toServiceElement, protoDollar[1].mtd.Node, protoDollar[1].mtd.Runes) + } + case 268: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.svcElements = nil + } + case 269: + protoDollar = protoS[protopt-6 : protopt+1] + { + semi, extra := protolex.(*protoLex).requireSemicolon(protoDollar[6].bs) + protoVAL.mtd = newNodeWithRunes(ast.NewRPCNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id, protoDollar[3].mtdMsgType, protoDollar[4].id.ToKeyword(), protoDollar[5].mtdMsgType, semi), extra...) 
+ } + case 270: + protoDollar = protoS[protopt-9 : protopt+1] + { + protoVAL.mtd = newNodeWithRunes(ast.NewRPCNodeWithBody(protoDollar[1].id.ToKeyword(), protoDollar[2].id, protoDollar[3].mtdMsgType, protoDollar[4].id.ToKeyword(), protoDollar[5].mtdMsgType, protoDollar[6].b, protoDollar[7].mtdElements, protoDollar[8].b), protoDollar[9].bs...) + } + case 271: + protoDollar = protoS[protopt-4 : protopt+1] + { + protoVAL.mtdMsgType = ast.NewRPCTypeNode(protoDollar[1].b, protoDollar[2].id.ToKeyword(), protoDollar[3].tid, protoDollar[4].b) + } + case 272: + protoDollar = protoS[protopt-3 : protopt+1] + { + protoVAL.mtdMsgType = ast.NewRPCTypeNode(protoDollar[1].b, nil, protoDollar[2].tid, protoDollar[3].b) + } + case 273: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.mtdElements = prependRunes(toMethodElement, protoDollar[1].bs, nil) + } + case 274: + protoDollar = protoS[protopt-2 : protopt+1] + { + protoVAL.mtdElements = prependRunes(toMethodElement, protoDollar[1].bs, protoDollar[2].mtdElements) + } + case 275: + protoDollar = protoS[protopt-2 : protopt+1] + { + protoVAL.mtdElements = append(protoDollar[1].mtdElements, protoDollar[2].mtdElements...) + } + case 276: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.mtdElements = protoDollar[1].mtdElements + } + case 277: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.mtdElements = toElements[ast.RPCElement](toMethodElement, protoDollar[1].opt.Node, protoDollar[1].opt.Runes) + } + case 278: + protoDollar = protoS[protopt-1 : protopt+1] + { + protoVAL.mtdElements = nil + } + } + goto protostack /* stack new state and value */ +} diff --git a/vendor/github.com/bufbuild/protocompile/parser/result.go b/vendor/github.com/bufbuild/protocompile/parser/result.go new file mode 100644 index 00000000..4aa83e7d --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/parser/result.go @@ -0,0 +1,1012 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package parser + +import ( + "bytes" + "fmt" + "math" + "sort" + "strings" + "unicode" + + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/types/descriptorpb" + + "github.com/bufbuild/protocompile/ast" + "github.com/bufbuild/protocompile/internal" + "github.com/bufbuild/protocompile/internal/editions" + "github.com/bufbuild/protocompile/reporter" +) + +type result struct { + file *ast.FileNode + proto *descriptorpb.FileDescriptorProto + + nodes map[proto.Message]ast.Node + ifNoAST *ast.NoSourceNode +} + +// ResultWithoutAST returns a parse result that has no AST. All methods for +// looking up AST nodes return a placeholder node that contains only the filename +// in position information. +func ResultWithoutAST(proto *descriptorpb.FileDescriptorProto) Result { + return &result{proto: proto, ifNoAST: ast.NewNoSourceNode(proto.GetName())} +} + +// ResultFromAST constructs a descriptor proto from the given AST. 
The returned +// result includes the descriptor proto and also contains an index that can be +// used to lookup AST node information for elements in the descriptor proto +// hierarchy. +// +// If validate is true, some basic validation is performed, to make sure the +// resulting descriptor proto is valid per protobuf rules and semantics. Only +// some language elements can be validated since some rules and semantics can +// only be checked after all symbols are all resolved, which happens in the +// linking step. +// +// The given handler is used to report any errors or warnings encountered. If any +// errors are reported, this function returns a non-nil error. +func ResultFromAST(file *ast.FileNode, validate bool, handler *reporter.Handler) (Result, error) { + filename := file.Name() + r := &result{file: file, nodes: map[proto.Message]ast.Node{}} + r.createFileDescriptor(filename, file, handler) + if validate { + validateBasic(r, handler) + } + // Now that we're done validating, we can set any missing labels to optional + // (we leave them absent in first pass if label was missing in source, so we + // can do validation on presence of label, but final descriptors are expected + // to always have them present). + fillInMissingLabels(r.proto) + return r, handler.Error() +} + +func (r *result) AST() *ast.FileNode { + return r.file +} + +func (r *result) FileDescriptorProto() *descriptorpb.FileDescriptorProto { + return r.proto +} + +func (r *result) createFileDescriptor(filename string, file *ast.FileNode, handler *reporter.Handler) { + fd := &descriptorpb.FileDescriptorProto{Name: proto.String(filename)} + r.proto = fd + + r.putFileNode(fd, file) + + var syntax protoreflect.Syntax + switch { + case file.Syntax != nil: + switch file.Syntax.Syntax.AsString() { + case "proto3": + syntax = protoreflect.Proto3 + case "proto2": + syntax = protoreflect.Proto2 + default: + nodeInfo := file.NodeInfo(file.Syntax.Syntax) + if handler.HandleErrorf(nodeInfo, `syntax value must be "proto2" or "proto3"`) != nil { + return + } + } + + // proto2 is the default, so no need to set for that value + if syntax != protoreflect.Proto2 { + fd.Syntax = proto.String(file.Syntax.Syntax.AsString()) + } + case file.Edition != nil: + edition := file.Edition.Edition.AsString() + syntax = protoreflect.Editions + + fd.Syntax = proto.String("editions") + editionEnum, ok := editions.SupportedEditions[edition] + if !ok { + nodeInfo := file.NodeInfo(file.Edition.Edition) + editionStrs := make([]string, 0, len(editions.SupportedEditions)) + for supportedEdition := range editions.SupportedEditions { + editionStrs = append(editionStrs, fmt.Sprintf("%q", supportedEdition)) + } + sort.Strings(editionStrs) + if handler.HandleErrorf(nodeInfo, `edition value %q not recognized; should be one of [%s]`, edition, strings.Join(editionStrs, ",")) != nil { + return + } + } + fd.Edition = editionEnum.Enum() + default: + syntax = protoreflect.Proto2 + nodeInfo := file.NodeInfo(file) + handler.HandleWarningWithPos(nodeInfo, ErrNoSyntax) + } + + for _, decl := range file.Decls { + if handler.ReporterError() != nil { + return + } + switch decl := decl.(type) { + case *ast.EnumNode: + fd.EnumType = append(fd.EnumType, r.asEnumDescriptor(decl, syntax, handler)) + case *ast.ExtendNode: + r.addExtensions(decl, &fd.Extension, &fd.MessageType, syntax, handler, 0) + case *ast.ImportNode: + index := len(fd.Dependency) + fd.Dependency = append(fd.Dependency, decl.Name.AsString()) + if decl.Public != nil { + fd.PublicDependency = 
append(fd.PublicDependency, int32(index)) + } else if decl.Weak != nil { + fd.WeakDependency = append(fd.WeakDependency, int32(index)) + } + case *ast.MessageNode: + fd.MessageType = append(fd.MessageType, r.asMessageDescriptor(decl, syntax, handler, 1)) + case *ast.OptionNode: + if fd.Options == nil { + fd.Options = &descriptorpb.FileOptions{} + } + fd.Options.UninterpretedOption = append(fd.Options.UninterpretedOption, r.asUninterpretedOption(decl)) + case *ast.ServiceNode: + fd.Service = append(fd.Service, r.asServiceDescriptor(decl)) + case *ast.PackageNode: + if fd.Package != nil { + nodeInfo := file.NodeInfo(decl) + if handler.HandleErrorf(nodeInfo, "files should have only one package declaration") != nil { + return + } + } + pkgName := string(decl.Name.AsIdentifier()) + if len(pkgName) >= 512 { + nodeInfo := file.NodeInfo(decl.Name) + if handler.HandleErrorf(nodeInfo, "package name (with whitespace removed) must be less than 512 characters long") != nil { + return + } + } + if strings.Count(pkgName, ".") > 100 { + nodeInfo := file.NodeInfo(decl.Name) + if handler.HandleErrorf(nodeInfo, "package name may not contain more than 100 periods") != nil { + return + } + } + fd.Package = proto.String(string(decl.Name.AsIdentifier())) + } + } +} + +func (r *result) asUninterpretedOptions(nodes []*ast.OptionNode) []*descriptorpb.UninterpretedOption { + if len(nodes) == 0 { + return nil + } + opts := make([]*descriptorpb.UninterpretedOption, len(nodes)) + for i, n := range nodes { + opts[i] = r.asUninterpretedOption(n) + } + return opts +} + +func (r *result) asUninterpretedOption(node *ast.OptionNode) *descriptorpb.UninterpretedOption { + opt := &descriptorpb.UninterpretedOption{Name: r.asUninterpretedOptionName(node.Name.Parts)} + r.putOptionNode(opt, node) + + switch val := node.Val.Value().(type) { + case bool: + if val { + opt.IdentifierValue = proto.String("true") + } else { + opt.IdentifierValue = proto.String("false") + } + case int64: + opt.NegativeIntValue = proto.Int64(val) + case uint64: + opt.PositiveIntValue = proto.Uint64(val) + case float64: + opt.DoubleValue = proto.Float64(val) + case string: + opt.StringValue = []byte(val) + case ast.Identifier: + opt.IdentifierValue = proto.String(string(val)) + default: + // the grammar does not allow arrays here, so the only possible case + // left should be []*ast.MessageFieldNode, which corresponds to an + // *ast.MessageLiteralNode + if n, ok := node.Val.(*ast.MessageLiteralNode); ok { + var buf bytes.Buffer + for i, el := range n.Elements { + flattenNode(r.file, el, &buf) + if len(n.Seps) > i && n.Seps[i] != nil { + buf.WriteRune(' ') + buf.WriteRune(n.Seps[i].Rune) + } + } + aggStr := buf.String() + opt.AggregateValue = proto.String(aggStr) + } + // TODO: else that reports an error or panics?? 
+ } + return opt +} + +func flattenNode(f *ast.FileNode, n ast.Node, buf *bytes.Buffer) { + if cn, ok := n.(ast.CompositeNode); ok { + for _, ch := range cn.Children() { + flattenNode(f, ch, buf) + } + return + } + + if buf.Len() > 0 { + buf.WriteRune(' ') + } + buf.WriteString(f.NodeInfo(n).RawText()) +} + +func (r *result) asUninterpretedOptionName(parts []*ast.FieldReferenceNode) []*descriptorpb.UninterpretedOption_NamePart { + ret := make([]*descriptorpb.UninterpretedOption_NamePart, len(parts)) + for i, part := range parts { + np := &descriptorpb.UninterpretedOption_NamePart{ + NamePart: proto.String(string(part.Name.AsIdentifier())), + IsExtension: proto.Bool(part.IsExtension()), + } + r.putOptionNamePartNode(np, part) + ret[i] = np + } + return ret +} + +func (r *result) addExtensions(ext *ast.ExtendNode, flds *[]*descriptorpb.FieldDescriptorProto, msgs *[]*descriptorpb.DescriptorProto, syntax protoreflect.Syntax, handler *reporter.Handler, depth int) { + extendee := string(ext.Extendee.AsIdentifier()) + count := 0 + for _, decl := range ext.Decls { + switch decl := decl.(type) { + case *ast.FieldNode: + count++ + // use higher limit since we don't know yet whether extendee is messageset wire format + fd := r.asFieldDescriptor(decl, internal.MaxTag, syntax, handler) + fd.Extendee = proto.String(extendee) + *flds = append(*flds, fd) + case *ast.GroupNode: + count++ + // ditto: use higher limit right now + fd, md := r.asGroupDescriptors(decl, syntax, internal.MaxTag, handler, depth+1) + fd.Extendee = proto.String(extendee) + *flds = append(*flds, fd) + *msgs = append(*msgs, md) + } + } + if count == 0 { + nodeInfo := r.file.NodeInfo(ext) + _ = handler.HandleErrorf(nodeInfo, "extend sections must define at least one extension") + } +} + +func asLabel(lbl *ast.FieldLabel) *descriptorpb.FieldDescriptorProto_Label { + if !lbl.IsPresent() { + return nil + } + switch { + case lbl.Repeated: + return descriptorpb.FieldDescriptorProto_LABEL_REPEATED.Enum() + case lbl.Required: + return descriptorpb.FieldDescriptorProto_LABEL_REQUIRED.Enum() + default: + return descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL.Enum() + } +} + +func (r *result) asFieldDescriptor(node *ast.FieldNode, maxTag int32, syntax protoreflect.Syntax, handler *reporter.Handler) *descriptorpb.FieldDescriptorProto { + var tag *int32 + if node.Tag != nil { + if err := r.checkTag(node.Tag, node.Tag.Val, maxTag); err != nil { + _ = handler.HandleError(err) + } + tag = proto.Int32(int32(node.Tag.Val)) + } + fd := newFieldDescriptor(node.Name.Val, string(node.FldType.AsIdentifier()), tag, asLabel(&node.Label)) + r.putFieldNode(fd, node) + if opts := node.Options.GetElements(); len(opts) > 0 { + fd.Options = &descriptorpb.FieldOptions{UninterpretedOption: r.asUninterpretedOptions(opts)} + } + if syntax == protoreflect.Proto3 && fd.Label != nil && fd.GetLabel() == descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL { + fd.Proto3Optional = proto.Bool(true) + } + return fd +} + +var fieldTypes = map[string]descriptorpb.FieldDescriptorProto_Type{ + "double": descriptorpb.FieldDescriptorProto_TYPE_DOUBLE, + "float": descriptorpb.FieldDescriptorProto_TYPE_FLOAT, + "int32": descriptorpb.FieldDescriptorProto_TYPE_INT32, + "int64": descriptorpb.FieldDescriptorProto_TYPE_INT64, + "uint32": descriptorpb.FieldDescriptorProto_TYPE_UINT32, + "uint64": descriptorpb.FieldDescriptorProto_TYPE_UINT64, + "sint32": descriptorpb.FieldDescriptorProto_TYPE_SINT32, + "sint64": descriptorpb.FieldDescriptorProto_TYPE_SINT64, + "fixed32": 
descriptorpb.FieldDescriptorProto_TYPE_FIXED32, + "fixed64": descriptorpb.FieldDescriptorProto_TYPE_FIXED64, + "sfixed32": descriptorpb.FieldDescriptorProto_TYPE_SFIXED32, + "sfixed64": descriptorpb.FieldDescriptorProto_TYPE_SFIXED64, + "bool": descriptorpb.FieldDescriptorProto_TYPE_BOOL, + "string": descriptorpb.FieldDescriptorProto_TYPE_STRING, + "bytes": descriptorpb.FieldDescriptorProto_TYPE_BYTES, +} + +func newFieldDescriptor(name string, fieldType string, tag *int32, lbl *descriptorpb.FieldDescriptorProto_Label) *descriptorpb.FieldDescriptorProto { + fd := &descriptorpb.FieldDescriptorProto{ + Name: proto.String(name), + JsonName: proto.String(internal.JSONName(name)), + Number: tag, + Label: lbl, + } + t, ok := fieldTypes[fieldType] + if ok { + fd.Type = t.Enum() + } else { + // NB: we don't have enough info to determine whether this is an enum + // or a message type, so we'll leave Type nil and set it later + // (during linking) + fd.TypeName = proto.String(fieldType) + } + return fd +} + +func (r *result) asGroupDescriptors(group *ast.GroupNode, syntax protoreflect.Syntax, maxTag int32, handler *reporter.Handler, depth int) (*descriptorpb.FieldDescriptorProto, *descriptorpb.DescriptorProto) { + var tag *int32 + if group.Tag != nil { + if err := r.checkTag(group.Tag, group.Tag.Val, maxTag); err != nil { + _ = handler.HandleError(err) + } + tag = proto.Int32(int32(group.Tag.Val)) + } + if !unicode.IsUpper(rune(group.Name.Val[0])) { + nameNodeInfo := r.file.NodeInfo(group.Name) + _ = handler.HandleErrorf(nameNodeInfo, "group %s should have a name that starts with a capital letter", group.Name.Val) + } + fieldName := strings.ToLower(group.Name.Val) + fd := &descriptorpb.FieldDescriptorProto{ + Name: proto.String(fieldName), + JsonName: proto.String(internal.JSONName(fieldName)), + Number: tag, + Label: asLabel(&group.Label), + Type: descriptorpb.FieldDescriptorProto_TYPE_GROUP.Enum(), + TypeName: proto.String(group.Name.Val), + } + r.putFieldNode(fd, group) + if opts := group.Options.GetElements(); len(opts) > 0 { + fd.Options = &descriptorpb.FieldOptions{UninterpretedOption: r.asUninterpretedOptions(opts)} + } + md := &descriptorpb.DescriptorProto{Name: proto.String(group.Name.Val)} + groupMsg := group.AsMessage() + r.putMessageNode(md, groupMsg) + // don't bother processing body if we've exceeded depth + if r.checkDepth(depth, groupMsg, handler) { + r.addMessageBody(md, &group.MessageBody, syntax, handler, depth) + } + return fd, md +} + +func (r *result) asMapDescriptors(mapField *ast.MapFieldNode, syntax protoreflect.Syntax, maxTag int32, handler *reporter.Handler, depth int) (*descriptorpb.FieldDescriptorProto, *descriptorpb.DescriptorProto) { + var tag *int32 + if mapField.Tag != nil { + if err := r.checkTag(mapField.Tag, mapField.Tag.Val, maxTag); err != nil { + _ = handler.HandleError(err) + } + tag = proto.Int32(int32(mapField.Tag.Val)) + } + mapEntry := mapField.AsMessage() + r.checkDepth(depth, mapEntry, handler) + var lbl *descriptorpb.FieldDescriptorProto_Label + if syntax == protoreflect.Proto2 { + lbl = descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL.Enum() + } + keyFd := newFieldDescriptor("key", mapField.MapType.KeyType.Val, proto.Int32(1), lbl) + r.putFieldNode(keyFd, mapField.KeyField()) + valFd := newFieldDescriptor("value", string(mapField.MapType.ValueType.AsIdentifier()), proto.Int32(2), lbl) + r.putFieldNode(valFd, mapField.ValueField()) + entryName := internal.InitCap(internal.JSONName(mapField.Name.Val)) + "Entry" + fd := 
newFieldDescriptor(mapField.Name.Val, entryName, tag, descriptorpb.FieldDescriptorProto_LABEL_REPEATED.Enum()) + if opts := mapField.Options.GetElements(); len(opts) > 0 { + fd.Options = &descriptorpb.FieldOptions{UninterpretedOption: r.asUninterpretedOptions(opts)} + } + r.putFieldNode(fd, mapField) + md := &descriptorpb.DescriptorProto{ + Name: proto.String(entryName), + Options: &descriptorpb.MessageOptions{MapEntry: proto.Bool(true)}, + Field: []*descriptorpb.FieldDescriptorProto{keyFd, valFd}, + } + r.putMessageNode(md, mapEntry) + return fd, md +} + +func (r *result) asExtensionRanges(node *ast.ExtensionRangeNode, maxTag int32, handler *reporter.Handler) []*descriptorpb.DescriptorProto_ExtensionRange { + opts := r.asUninterpretedOptions(node.Options.GetElements()) + ers := make([]*descriptorpb.DescriptorProto_ExtensionRange, len(node.Ranges)) + for i, rng := range node.Ranges { + start, end := r.getRangeBounds(rng, 1, maxTag, handler) + er := &descriptorpb.DescriptorProto_ExtensionRange{ + Start: proto.Int32(start), + End: proto.Int32(end + 1), + } + if len(opts) > 0 { + er.Options = &descriptorpb.ExtensionRangeOptions{UninterpretedOption: opts} + } + r.putExtensionRangeNode(er, node, rng) + ers[i] = er + } + return ers +} + +func (r *result) asEnumValue(ev *ast.EnumValueNode, handler *reporter.Handler) *descriptorpb.EnumValueDescriptorProto { + num, ok := ast.AsInt32(ev.Number, math.MinInt32, math.MaxInt32) + if !ok { + numberNodeInfo := r.file.NodeInfo(ev.Number) + _ = handler.HandleErrorf(numberNodeInfo, "value %d is out of range: should be between %d and %d", ev.Number.Value(), math.MinInt32, math.MaxInt32) + } + evd := &descriptorpb.EnumValueDescriptorProto{Name: proto.String(ev.Name.Val), Number: proto.Int32(num)} + r.putEnumValueNode(evd, ev) + if opts := ev.Options.GetElements(); len(opts) > 0 { + evd.Options = &descriptorpb.EnumValueOptions{UninterpretedOption: r.asUninterpretedOptions(opts)} + } + return evd +} + +func (r *result) asMethodDescriptor(node *ast.RPCNode) *descriptorpb.MethodDescriptorProto { + md := &descriptorpb.MethodDescriptorProto{ + Name: proto.String(node.Name.Val), + InputType: proto.String(string(node.Input.MessageType.AsIdentifier())), + OutputType: proto.String(string(node.Output.MessageType.AsIdentifier())), + } + r.putMethodNode(md, node) + if node.Input.Stream != nil { + md.ClientStreaming = proto.Bool(true) + } + if node.Output.Stream != nil { + md.ServerStreaming = proto.Bool(true) + } + // protoc always adds a MethodOptions if there are brackets + // We do the same to match protoc as closely as possible + // https://github.com/protocolbuffers/protobuf/blob/0c3f43a6190b77f1f68b7425d1b7e1a8257a8d0c/src/google/protobuf/compiler/parser.cc#L2152 + if node.OpenBrace != nil { + md.Options = &descriptorpb.MethodOptions{} + for _, decl := range node.Decls { + if option, ok := decl.(*ast.OptionNode); ok { + md.Options.UninterpretedOption = append(md.Options.UninterpretedOption, r.asUninterpretedOption(option)) + } + } + } + return md +} + +func (r *result) asEnumDescriptor(en *ast.EnumNode, syntax protoreflect.Syntax, handler *reporter.Handler) *descriptorpb.EnumDescriptorProto { + ed := &descriptorpb.EnumDescriptorProto{Name: proto.String(en.Name.Val)} + r.putEnumNode(ed, en) + rsvdNames := map[string]ast.SourcePos{} + for _, decl := range en.Decls { + switch decl := decl.(type) { + case *ast.OptionNode: + if ed.Options == nil { + ed.Options = &descriptorpb.EnumOptions{} + } + ed.Options.UninterpretedOption = append(ed.Options.UninterpretedOption, 
r.asUninterpretedOption(decl)) + case *ast.EnumValueNode: + ed.Value = append(ed.Value, r.asEnumValue(decl, handler)) + case *ast.ReservedNode: + r.addReservedNames(&ed.ReservedName, decl, syntax, handler, rsvdNames) + for _, rng := range decl.Ranges { + ed.ReservedRange = append(ed.ReservedRange, r.asEnumReservedRange(rng, handler)) + } + } + } + return ed +} + +func (r *result) asEnumReservedRange(rng *ast.RangeNode, handler *reporter.Handler) *descriptorpb.EnumDescriptorProto_EnumReservedRange { + start, end := r.getRangeBounds(rng, math.MinInt32, math.MaxInt32, handler) + rr := &descriptorpb.EnumDescriptorProto_EnumReservedRange{ + Start: proto.Int32(start), + End: proto.Int32(end), + } + r.putEnumReservedRangeNode(rr, rng) + return rr +} + +func (r *result) asMessageDescriptor(node *ast.MessageNode, syntax protoreflect.Syntax, handler *reporter.Handler, depth int) *descriptorpb.DescriptorProto { + msgd := &descriptorpb.DescriptorProto{Name: proto.String(node.Name.Val)} + r.putMessageNode(msgd, node) + // don't bother processing body if we've exceeded depth + if r.checkDepth(depth, node, handler) { + r.addMessageBody(msgd, &node.MessageBody, syntax, handler, depth) + } + return msgd +} + +func (r *result) addReservedNames(names *[]string, node *ast.ReservedNode, syntax protoreflect.Syntax, handler *reporter.Handler, alreadyReserved map[string]ast.SourcePos) { + if syntax == protoreflect.Editions { + if len(node.Names) > 0 { + nameNodeInfo := r.file.NodeInfo(node.Names[0]) + _ = handler.HandleErrorf(nameNodeInfo, `must use identifiers, not string literals, to reserved names with editions`) + } + for _, n := range node.Identifiers { + name := string(n.AsIdentifier()) + nameNodeInfo := r.file.NodeInfo(n) + if existing, ok := alreadyReserved[name]; ok { + _ = handler.HandleErrorf(nameNodeInfo, "name %q is already reserved at %s", name, existing) + continue + } + alreadyReserved[name] = nameNodeInfo.Start() + *names = append(*names, name) + } + return + } + + if len(node.Identifiers) > 0 { + nameNodeInfo := r.file.NodeInfo(node.Identifiers[0]) + _ = handler.HandleErrorf(nameNodeInfo, `must use string literals, not identifiers, to reserved names with proto2 and proto3`) + } + for _, n := range node.Names { + name := n.AsString() + nameNodeInfo := r.file.NodeInfo(n) + if existing, ok := alreadyReserved[name]; ok { + _ = handler.HandleErrorf(nameNodeInfo, "name %q is already reserved at %s", name, existing) + continue + } + alreadyReserved[name] = nameNodeInfo.Start() + *names = append(*names, name) + } +} + +func (r *result) checkDepth(depth int, node ast.MessageDeclNode, handler *reporter.Handler) bool { + if depth < 32 { + return true + } + n := ast.Node(node) + if grp, ok := n.(*ast.SyntheticGroupMessageNode); ok { + // pinpoint the group keyword if the source is a group + n = grp.Keyword + } + _ = handler.HandleErrorf(r.file.NodeInfo(n), "message nesting depth must be less than 32") + return false +} + +func (r *result) addMessageBody(msgd *descriptorpb.DescriptorProto, body *ast.MessageBody, syntax protoreflect.Syntax, handler *reporter.Handler, depth int) { + // first process any options + for _, decl := range body.Decls { + if opt, ok := decl.(*ast.OptionNode); ok { + if msgd.Options == nil { + msgd.Options = &descriptorpb.MessageOptions{} + } + msgd.Options.UninterpretedOption = append(msgd.Options.UninterpretedOption, r.asUninterpretedOption(opt)) + } + } + + // now that we have options, we can see if this uses messageset wire format, which + // impacts how we validate tag numbers in 
any fields in the message + maxTag := int32(internal.MaxNormalTag) + messageSetOpt, err := r.isMessageSetWireFormat("message "+msgd.GetName(), msgd, handler) + if err != nil { + return + } else if messageSetOpt != nil { + if syntax == protoreflect.Proto3 { + node := r.OptionNode(messageSetOpt) + nodeInfo := r.file.NodeInfo(node) + _ = handler.HandleErrorf(nodeInfo, "messages with message-set wire format are not allowed with proto3 syntax") + } + maxTag = internal.MaxTag // higher limit for messageset wire format + } + + rsvdNames := map[string]ast.SourcePos{} + + // now we can process the rest + for _, decl := range body.Decls { + switch decl := decl.(type) { + case *ast.EnumNode: + msgd.EnumType = append(msgd.EnumType, r.asEnumDescriptor(decl, syntax, handler)) + case *ast.ExtendNode: + r.addExtensions(decl, &msgd.Extension, &msgd.NestedType, syntax, handler, depth) + case *ast.ExtensionRangeNode: + msgd.ExtensionRange = append(msgd.ExtensionRange, r.asExtensionRanges(decl, maxTag, handler)...) + case *ast.FieldNode: + fd := r.asFieldDescriptor(decl, maxTag, syntax, handler) + msgd.Field = append(msgd.Field, fd) + case *ast.MapFieldNode: + fd, md := r.asMapDescriptors(decl, syntax, maxTag, handler, depth+1) + msgd.Field = append(msgd.Field, fd) + msgd.NestedType = append(msgd.NestedType, md) + case *ast.GroupNode: + fd, md := r.asGroupDescriptors(decl, syntax, maxTag, handler, depth+1) + msgd.Field = append(msgd.Field, fd) + msgd.NestedType = append(msgd.NestedType, md) + case *ast.OneofNode: + oodIndex := len(msgd.OneofDecl) + ood := &descriptorpb.OneofDescriptorProto{Name: proto.String(decl.Name.Val)} + r.putOneofNode(ood, decl) + msgd.OneofDecl = append(msgd.OneofDecl, ood) + ooFields := 0 + for _, oodecl := range decl.Decls { + switch oodecl := oodecl.(type) { + case *ast.OptionNode: + if ood.Options == nil { + ood.Options = &descriptorpb.OneofOptions{} + } + ood.Options.UninterpretedOption = append(ood.Options.UninterpretedOption, r.asUninterpretedOption(oodecl)) + case *ast.FieldNode: + fd := r.asFieldDescriptor(oodecl, maxTag, syntax, handler) + fd.OneofIndex = proto.Int32(int32(oodIndex)) + msgd.Field = append(msgd.Field, fd) + ooFields++ + case *ast.GroupNode: + fd, md := r.asGroupDescriptors(oodecl, syntax, maxTag, handler, depth+1) + fd.OneofIndex = proto.Int32(int32(oodIndex)) + msgd.Field = append(msgd.Field, fd) + msgd.NestedType = append(msgd.NestedType, md) + ooFields++ + } + } + if ooFields == 0 { + declNodeInfo := r.file.NodeInfo(decl) + _ = handler.HandleErrorf(declNodeInfo, "oneof must contain at least one field") + } + case *ast.MessageNode: + msgd.NestedType = append(msgd.NestedType, r.asMessageDescriptor(decl, syntax, handler, depth+1)) + case *ast.ReservedNode: + r.addReservedNames(&msgd.ReservedName, decl, syntax, handler, rsvdNames) + for _, rng := range decl.Ranges { + msgd.ReservedRange = append(msgd.ReservedRange, r.asMessageReservedRange(rng, maxTag, handler)) + } + } + } + + if messageSetOpt != nil { + if len(msgd.Field) > 0 { + node := r.FieldNode(msgd.Field[0]) + nodeInfo := r.file.NodeInfo(node) + _ = handler.HandleErrorf(nodeInfo, "messages with message-set wire format cannot contain non-extension fields") + } + if len(msgd.ExtensionRange) == 0 { + node := r.OptionNode(messageSetOpt) + nodeInfo := r.file.NodeInfo(node) + _ = handler.HandleErrorf(nodeInfo, "messages with message-set wire format must contain at least one extension range") + } + } + + // process any proto3_optional fields + if syntax == protoreflect.Proto3 { + 
r.processProto3OptionalFields(msgd) + } +} + +func (r *result) isMessageSetWireFormat(scope string, md *descriptorpb.DescriptorProto, handler *reporter.Handler) (*descriptorpb.UninterpretedOption, error) { + uo := md.GetOptions().GetUninterpretedOption() + index, err := internal.FindOption(r, handler.HandleErrorf, scope, uo, "message_set_wire_format") + if err != nil { + return nil, err + } + if index == -1 { + // no such option + return nil, nil + } + + opt := uo[index] + + switch opt.GetIdentifierValue() { + case "true": + return opt, nil + case "false": + return nil, nil + default: + optNode := r.OptionNode(opt) + optNodeInfo := r.file.NodeInfo(optNode.GetValue()) + return nil, handler.HandleErrorf(optNodeInfo, "%s: expecting bool value for message_set_wire_format option", scope) + } +} + +func (r *result) asMessageReservedRange(rng *ast.RangeNode, maxTag int32, handler *reporter.Handler) *descriptorpb.DescriptorProto_ReservedRange { + start, end := r.getRangeBounds(rng, 1, maxTag, handler) + rr := &descriptorpb.DescriptorProto_ReservedRange{ + Start: proto.Int32(start), + End: proto.Int32(end + 1), + } + r.putMessageReservedRangeNode(rr, rng) + return rr +} + +func (r *result) getRangeBounds(rng *ast.RangeNode, minVal, maxVal int32, handler *reporter.Handler) (int32, int32) { + checkOrder := true + start, ok := rng.StartValueAsInt32(minVal, maxVal) + if !ok { + checkOrder = false + startValNodeInfo := r.file.NodeInfo(rng.StartVal) + _ = handler.HandleErrorf(startValNodeInfo, "range start %d is out of range: should be between %d and %d", rng.StartValue(), minVal, maxVal) + } + + end, ok := rng.EndValueAsInt32(minVal, maxVal) + if !ok { + checkOrder = false + if rng.EndVal != nil { + endValNodeInfo := r.file.NodeInfo(rng.EndVal) + _ = handler.HandleErrorf(endValNodeInfo, "range end %d is out of range: should be between %d and %d", rng.EndValue(), minVal, maxVal) + } + } + + if checkOrder && start > end { + rangeStartNodeInfo := r.file.NodeInfo(rng.RangeStart()) + _ = handler.HandleErrorf(rangeStartNodeInfo, "range, %d to %d, is invalid: start must be <= end", start, end) + } + + return start, end +} + +func (r *result) asServiceDescriptor(svc *ast.ServiceNode) *descriptorpb.ServiceDescriptorProto { + sd := &descriptorpb.ServiceDescriptorProto{Name: proto.String(svc.Name.Val)} + r.putServiceNode(sd, svc) + for _, decl := range svc.Decls { + switch decl := decl.(type) { + case *ast.OptionNode: + if sd.Options == nil { + sd.Options = &descriptorpb.ServiceOptions{} + } + sd.Options.UninterpretedOption = append(sd.Options.UninterpretedOption, r.asUninterpretedOption(decl)) + case *ast.RPCNode: + sd.Method = append(sd.Method, r.asMethodDescriptor(decl)) + } + } + return sd +} + +func (r *result) checkTag(n ast.Node, v uint64, maxTag int32) error { + switch { + case v < 1: + return reporter.Errorf(r.file.NodeInfo(n), "tag number %d must be greater than zero", v) + case v > uint64(maxTag): + return reporter.Errorf(r.file.NodeInfo(n), "tag number %d is higher than max allowed tag number (%d)", v, maxTag) + case v >= internal.SpecialReservedStart && v <= internal.SpecialReservedEnd: + return reporter.Errorf(r.file.NodeInfo(n), "tag number %d is in disallowed reserved range %d-%d", v, internal.SpecialReservedStart, internal.SpecialReservedEnd) + default: + return nil + } +} + +// processProto3OptionalFields adds synthetic oneofs to the given message descriptor +// for each proto3 optional field. It also updates the fields to have the correct +// oneof index reference. 
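+//
+// Illustrative example (not from the upstream source): for a proto3 field
+// declared as `optional string foo = 1;`, this pass appends a synthetic oneof
+// named "_foo" and sets the field's oneof_index to point at it; if "_foo" is
+// already used by another field or oneof, the name is prefixed with "X"
+// ("X_foo", "XX_foo", ...) until it is unique.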
+func (r *result) processProto3OptionalFields(msgd *descriptorpb.DescriptorProto) { + // add synthetic oneofs to the given message descriptor for each proto3 + // optional field, and update each field to have correct oneof index + var allNames map[string]struct{} + for _, fd := range msgd.Field { + if fd.GetProto3Optional() { + // lazy init the set of all names + if allNames == nil { + allNames = map[string]struct{}{} + for _, fd := range msgd.Field { + allNames[fd.GetName()] = struct{}{} + } + for _, od := range msgd.OneofDecl { + allNames[od.GetName()] = struct{}{} + } + // NB: protoc only considers names of other fields and oneofs + // when computing the synthetic oneof name. But that feels like + // a bug, since it means it could generate a name that conflicts + // with some other symbol defined in the message. If it's decided + // that's NOT a bug and is desirable, then we should remove the + // following four loops to mimic protoc's behavior. + for _, fd := range msgd.Extension { + allNames[fd.GetName()] = struct{}{} + } + for _, ed := range msgd.EnumType { + allNames[ed.GetName()] = struct{}{} + for _, evd := range ed.Value { + allNames[evd.GetName()] = struct{}{} + } + } + for _, fd := range msgd.NestedType { + allNames[fd.GetName()] = struct{}{} + } + } + + // Compute a name for the synthetic oneof. This uses the same + // algorithm as used in protoc: + // https://github.com/protocolbuffers/protobuf/blob/74ad62759e0a9b5a21094f3fb9bb4ebfaa0d1ab8/src/google/protobuf/compiler/parser.cc#L785-L803 + ooName := fd.GetName() + if !strings.HasPrefix(ooName, "_") { + ooName = "_" + ooName + } + for { + _, ok := allNames[ooName] + if !ok { + // found a unique name + allNames[ooName] = struct{}{} + break + } + ooName = "X" + ooName + } + + fd.OneofIndex = proto.Int32(int32(len(msgd.OneofDecl))) + ood := &descriptorpb.OneofDescriptorProto{Name: proto.String(ooName)} + msgd.OneofDecl = append(msgd.OneofDecl, ood) + ooident := r.FieldNode(fd).(*ast.FieldNode) //nolint:errcheck + r.putOneofNode(ood, ast.NewSyntheticOneof(ooident)) + } + } +} + +func (r *result) Node(m proto.Message) ast.Node { + if r.nodes == nil { + return r.ifNoAST + } + return r.nodes[m] +} + +func (r *result) FileNode() ast.FileDeclNode { + if r.nodes == nil { + return r.ifNoAST + } + return r.nodes[r.proto].(ast.FileDeclNode) +} + +func (r *result) OptionNode(o *descriptorpb.UninterpretedOption) ast.OptionDeclNode { + if r.nodes == nil { + return r.ifNoAST + } + return r.nodes[o].(ast.OptionDeclNode) +} + +func (r *result) OptionNamePartNode(o *descriptorpb.UninterpretedOption_NamePart) ast.Node { + if r.nodes == nil { + return r.ifNoAST + } + return r.nodes[o] +} + +func (r *result) MessageNode(m *descriptorpb.DescriptorProto) ast.MessageDeclNode { + if r.nodes == nil { + return r.ifNoAST + } + return r.nodes[m].(ast.MessageDeclNode) +} + +func (r *result) FieldNode(f *descriptorpb.FieldDescriptorProto) ast.FieldDeclNode { + if r.nodes == nil { + return r.ifNoAST + } + return r.nodes[f].(ast.FieldDeclNode) +} + +func (r *result) OneofNode(o *descriptorpb.OneofDescriptorProto) ast.OneofDeclNode { + if r.nodes == nil { + return r.ifNoAST + } + return r.nodes[o].(ast.OneofDeclNode) +} + +func (r *result) ExtensionsNode(e *descriptorpb.DescriptorProto_ExtensionRange) ast.NodeWithOptions { + if r.nodes == nil { + return r.ifNoAST + } + return r.nodes[asExtsNode(e)].(ast.NodeWithOptions) +} + +func (r *result) ExtensionRangeNode(e *descriptorpb.DescriptorProto_ExtensionRange) ast.RangeDeclNode { + if r.nodes == nil { + return 
r.ifNoAST + } + return r.nodes[e].(ast.RangeDeclNode) +} + +func (r *result) MessageReservedRangeNode(rr *descriptorpb.DescriptorProto_ReservedRange) ast.RangeDeclNode { + if r.nodes == nil { + return r.ifNoAST + } + return r.nodes[rr].(ast.RangeDeclNode) +} + +func (r *result) EnumNode(e *descriptorpb.EnumDescriptorProto) ast.NodeWithOptions { + if r.nodes == nil { + return r.ifNoAST + } + return r.nodes[e].(ast.NodeWithOptions) +} + +func (r *result) EnumValueNode(e *descriptorpb.EnumValueDescriptorProto) ast.EnumValueDeclNode { + if r.nodes == nil { + return r.ifNoAST + } + return r.nodes[e].(ast.EnumValueDeclNode) +} + +func (r *result) EnumReservedRangeNode(rr *descriptorpb.EnumDescriptorProto_EnumReservedRange) ast.RangeDeclNode { + if r.nodes == nil { + return r.ifNoAST + } + return r.nodes[rr].(ast.RangeDeclNode) +} + +func (r *result) ServiceNode(s *descriptorpb.ServiceDescriptorProto) ast.NodeWithOptions { + if r.nodes == nil { + return r.ifNoAST + } + return r.nodes[s].(ast.NodeWithOptions) +} + +func (r *result) MethodNode(m *descriptorpb.MethodDescriptorProto) ast.RPCDeclNode { + if r.nodes == nil { + return r.ifNoAST + } + return r.nodes[m].(ast.RPCDeclNode) +} + +func (r *result) putFileNode(f *descriptorpb.FileDescriptorProto, n *ast.FileNode) { + r.nodes[f] = n +} + +func (r *result) putOptionNode(o *descriptorpb.UninterpretedOption, n *ast.OptionNode) { + r.nodes[o] = n +} + +func (r *result) putOptionNamePartNode(o *descriptorpb.UninterpretedOption_NamePart, n *ast.FieldReferenceNode) { + r.nodes[o] = n +} + +func (r *result) putMessageNode(m *descriptorpb.DescriptorProto, n ast.MessageDeclNode) { + r.nodes[m] = n +} + +func (r *result) putFieldNode(f *descriptorpb.FieldDescriptorProto, n ast.FieldDeclNode) { + r.nodes[f] = n +} + +func (r *result) putOneofNode(o *descriptorpb.OneofDescriptorProto, n ast.OneofDeclNode) { + r.nodes[o] = n +} + +func (r *result) putExtensionRangeNode(e *descriptorpb.DescriptorProto_ExtensionRange, er *ast.ExtensionRangeNode, n *ast.RangeNode) { + r.nodes[asExtsNode(e)] = er + r.nodes[e] = n +} + +func (r *result) putMessageReservedRangeNode(rr *descriptorpb.DescriptorProto_ReservedRange, n *ast.RangeNode) { + r.nodes[rr] = n +} + +func (r *result) putEnumNode(e *descriptorpb.EnumDescriptorProto, n *ast.EnumNode) { + r.nodes[e] = n +} + +func (r *result) putEnumValueNode(e *descriptorpb.EnumValueDescriptorProto, n *ast.EnumValueNode) { + r.nodes[e] = n +} + +func (r *result) putEnumReservedRangeNode(rr *descriptorpb.EnumDescriptorProto_EnumReservedRange, n *ast.RangeNode) { + r.nodes[rr] = n +} + +func (r *result) putServiceNode(s *descriptorpb.ServiceDescriptorProto, n *ast.ServiceNode) { + r.nodes[s] = n +} + +func (r *result) putMethodNode(m *descriptorpb.MethodDescriptorProto, n *ast.RPCNode) { + r.nodes[m] = n +} + +// NB: If we ever add other put*Node methods, to index other kinds of elements in the descriptor +// proto hierarchy, we need to update the index recreation logic in clone.go, too. + +func asExtsNode(er *descriptorpb.DescriptorProto_ExtensionRange) proto.Message { + return extsParent{er} +} + +// a simple marker type that allows us to have two distinct keys in a map for +// the same ExtensionRange proto -- one for the range itself and another to +// associate with the enclosing/parent AST node. 
+type extsParent struct { + *descriptorpb.DescriptorProto_ExtensionRange +} diff --git a/vendor/github.com/bufbuild/protocompile/parser/validate.go b/vendor/github.com/bufbuild/protocompile/parser/validate.go new file mode 100644 index 00000000..64ebdaa3 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/parser/validate.go @@ -0,0 +1,568 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package parser + +import ( + "fmt" + "sort" + + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/types/descriptorpb" + + "github.com/bufbuild/protocompile/ast" + "github.com/bufbuild/protocompile/internal" + "github.com/bufbuild/protocompile/reporter" + "github.com/bufbuild/protocompile/walk" +) + +func validateBasic(res *result, handler *reporter.Handler) { + fd := res.proto + var syntax protoreflect.Syntax + switch fd.GetSyntax() { + case "", "proto2": + syntax = protoreflect.Proto2 + case "proto3": + syntax = protoreflect.Proto3 + case "editions": + syntax = protoreflect.Editions + // TODO: default: error? + } + + if err := validateImports(res, handler); err != nil { + return + } + + if err := validateNoFeatures(res, syntax, "file options", fd.Options.GetUninterpretedOption(), handler); err != nil { + return + } + + _ = walk.DescriptorProtos(fd, + func(name protoreflect.FullName, d proto.Message) error { + switch d := d.(type) { + case *descriptorpb.DescriptorProto: + if err := validateMessage(res, syntax, name, d, handler); err != nil { + // exit func is not called when enter returns error + return err + } + case *descriptorpb.FieldDescriptorProto: + if err := validateField(res, syntax, name, d, handler); err != nil { + return err + } + case *descriptorpb.OneofDescriptorProto: + if err := validateNoFeatures(res, syntax, fmt.Sprintf("oneof %s", name), d.Options.GetUninterpretedOption(), handler); err != nil { + return err + } + case *descriptorpb.EnumDescriptorProto: + if err := validateEnum(res, syntax, name, d, handler); err != nil { + return err + } + case *descriptorpb.EnumValueDescriptorProto: + if err := validateNoFeatures(res, syntax, fmt.Sprintf("enum value %s", name), d.Options.GetUninterpretedOption(), handler); err != nil { + return err + } + case *descriptorpb.ServiceDescriptorProto: + if err := validateNoFeatures(res, syntax, fmt.Sprintf("service %s", name), d.Options.GetUninterpretedOption(), handler); err != nil { + return err + } + case *descriptorpb.MethodDescriptorProto: + if err := validateNoFeatures(res, syntax, fmt.Sprintf("method %s", name), d.Options.GetUninterpretedOption(), handler); err != nil { + return err + } + } + return nil + }) +} + +func validateImports(res *result, handler *reporter.Handler) error { + fileNode := res.file + if fileNode == nil { + return nil + } + imports := make(map[string]ast.SourcePos) + for _, decl := range fileNode.Decls { + imp, ok := decl.(*ast.ImportNode) + if !ok { + continue + } + info := fileNode.NodeInfo(decl) 
+ name := imp.Name.AsString() + if prev, ok := imports[name]; ok { + return handler.HandleErrorf(info, "%q was already imported at %v", name, prev) + } + imports[name] = info.Start() + } + return nil +} + +func validateNoFeatures(res *result, syntax protoreflect.Syntax, scope string, opts []*descriptorpb.UninterpretedOption, handler *reporter.Handler) error { + if syntax == protoreflect.Editions { + // Editions is allowed to use features + return nil + } + if index, err := internal.FindFirstOption(res, handler.HandleErrorf, scope, opts, "features"); err != nil { + return err + } else if index >= 0 { + optNode := res.OptionNode(opts[index]) + optNameNodeInfo := res.file.NodeInfo(optNode.GetName()) + if err := handler.HandleErrorf(optNameNodeInfo, "%s: option 'features' may only be used with editions but file uses %s syntax", scope, syntax); err != nil { + return err + } + } + return nil +} + +func validateMessage(res *result, syntax protoreflect.Syntax, name protoreflect.FullName, md *descriptorpb.DescriptorProto, handler *reporter.Handler) error { + scope := fmt.Sprintf("message %s", name) + + if syntax == protoreflect.Proto3 && len(md.ExtensionRange) > 0 { + n := res.ExtensionRangeNode(md.ExtensionRange[0]) + nInfo := res.file.NodeInfo(n) + if err := handler.HandleErrorf(nInfo, "%s: extension ranges are not allowed in proto3", scope); err != nil { + return err + } + } + + if index, err := internal.FindOption(res, handler.HandleErrorf, scope, md.Options.GetUninterpretedOption(), "map_entry"); err != nil { + return err + } else if index >= 0 { + optNode := res.OptionNode(md.Options.GetUninterpretedOption()[index]) + optNameNodeInfo := res.file.NodeInfo(optNode.GetName()) + if err := handler.HandleErrorf(optNameNodeInfo, "%s: map_entry option should not be set explicitly; use map type instead", scope); err != nil { + return err + } + } + + if err := validateNoFeatures(res, syntax, scope, md.Options.GetUninterpretedOption(), handler); err != nil { + return err + } + + // reserved ranges should not overlap + rsvd := make(tagRanges, len(md.ReservedRange)) + for i, r := range md.ReservedRange { + n := res.MessageReservedRangeNode(r) + rsvd[i] = tagRange{start: r.GetStart(), end: r.GetEnd(), node: n} + } + sort.Sort(rsvd) + for i := 1; i < len(rsvd); i++ { + if rsvd[i].start < rsvd[i-1].end { + rangeNodeInfo := res.file.NodeInfo(rsvd[i].node) + if err := handler.HandleErrorf(rangeNodeInfo, "%s: reserved ranges overlap: %d to %d and %d to %d", scope, rsvd[i-1].start, rsvd[i-1].end-1, rsvd[i].start, rsvd[i].end-1); err != nil { + return err + } + } + } + + // extensions ranges should not overlap + exts := make(tagRanges, len(md.ExtensionRange)) + for i, r := range md.ExtensionRange { + if err := validateNoFeatures(res, syntax, scope, r.Options.GetUninterpretedOption(), handler); err != nil { + return err + } + n := res.ExtensionRangeNode(r) + exts[i] = tagRange{start: r.GetStart(), end: r.GetEnd(), node: n} + } + sort.Sort(exts) + for i := 1; i < len(exts); i++ { + if exts[i].start < exts[i-1].end { + rangeNodeInfo := res.file.NodeInfo(exts[i].node) + if err := handler.HandleErrorf(rangeNodeInfo, "%s: extension ranges overlap: %d to %d and %d to %d", scope, exts[i-1].start, exts[i-1].end-1, exts[i].start, exts[i].end-1); err != nil { + return err + } + } + } + + // see if any extension range overlaps any reserved range + var i, j int // i indexes rsvd; j indexes exts + for i < len(rsvd) && j < len(exts) { + if rsvd[i].start >= exts[j].start && rsvd[i].start < exts[j].end || + exts[j].start >= 
rsvd[i].start && exts[j].start < rsvd[i].end { + var span ast.SourceSpan + if rsvd[i].start >= exts[j].start && rsvd[i].start < exts[j].end { + rangeNodeInfo := res.file.NodeInfo(rsvd[i].node) + span = rangeNodeInfo + } else { + rangeNodeInfo := res.file.NodeInfo(exts[j].node) + span = rangeNodeInfo + } + // ranges overlap + if err := handler.HandleErrorf(span, "%s: extension range %d to %d overlaps reserved range %d to %d", scope, exts[j].start, exts[j].end-1, rsvd[i].start, rsvd[i].end-1); err != nil { + return err + } + } + if rsvd[i].start < exts[j].start { + i++ + } else { + j++ + } + } + + // now, check that fields don't re-use tags and don't try to use extension + // or reserved ranges or reserved names + rsvdNames := map[string]struct{}{} + for _, n := range md.ReservedName { + // validate reserved name while we're here + if !isIdentifier(n) { + node := findMessageReservedNameNode(res.MessageNode(md), n) + nodeInfo := res.file.NodeInfo(node) + if err := handler.HandleErrorf(nodeInfo, "%s: reserved name %q is not a valid identifier", scope, n); err != nil { + return err + } + } + rsvdNames[n] = struct{}{} + } + fieldTags := map[int32]string{} + for _, fld := range md.Field { + fn := res.FieldNode(fld) + if _, ok := rsvdNames[fld.GetName()]; ok { + fieldNameNodeInfo := res.file.NodeInfo(fn.FieldName()) + if err := handler.HandleErrorf(fieldNameNodeInfo, "%s: field %s is using a reserved name", scope, fld.GetName()); err != nil { + return err + } + } + if existing := fieldTags[fld.GetNumber()]; existing != "" { + fieldTagNodeInfo := res.file.NodeInfo(fn.FieldTag()) + if err := handler.HandleErrorf(fieldTagNodeInfo, "%s: fields %s and %s both have the same tag %d", scope, existing, fld.GetName(), fld.GetNumber()); err != nil { + return err + } + } + fieldTags[fld.GetNumber()] = fld.GetName() + // check reserved ranges + r := sort.Search(len(rsvd), func(index int) bool { return rsvd[index].end > fld.GetNumber() }) + if r < len(rsvd) && rsvd[r].start <= fld.GetNumber() { + fieldTagNodeInfo := res.file.NodeInfo(fn.FieldTag()) + if err := handler.HandleErrorf(fieldTagNodeInfo, "%s: field %s is using tag %d which is in reserved range %d to %d", scope, fld.GetName(), fld.GetNumber(), rsvd[r].start, rsvd[r].end-1); err != nil { + return err + } + } + // and check extension ranges + e := sort.Search(len(exts), func(index int) bool { return exts[index].end > fld.GetNumber() }) + if e < len(exts) && exts[e].start <= fld.GetNumber() { + fieldTagNodeInfo := res.file.NodeInfo(fn.FieldTag()) + if err := handler.HandleErrorf(fieldTagNodeInfo, "%s: field %s is using tag %d which is in extension range %d to %d", scope, fld.GetName(), fld.GetNumber(), exts[e].start, exts[e].end-1); err != nil { + return err + } + } + } + + return nil +} + +func isIdentifier(s string) bool { + if len(s) == 0 { + return false + } + for i, r := range s { + if i == 0 && r >= '0' && r <= '9' { + // can't start with number + return false + } + // alphanumeric and underscore ok; everything else bad + switch { + case r >= '0' && r <= '9': + case r >= 'a' && r <= 'z': + case r >= 'A' && r <= 'Z': + case r == '_': + default: + return false + } + } + return true +} + +func findMessageReservedNameNode(msgNode ast.MessageDeclNode, name string) ast.Node { + var decls []ast.MessageElement + switch msgNode := msgNode.(type) { + case *ast.MessageNode: + decls = msgNode.Decls + case *ast.SyntheticGroupMessageNode: + decls = msgNode.Decls + default: + // leave decls empty + } + return findReservedNameNode(msgNode, decls, name) +} + +func 
findReservedNameNode[T ast.Node](parent ast.Node, decls []T, name string) ast.Node { + for _, decl := range decls { + // NB: We have to convert to empty interface first, before we can do a type + // assertion because type assertions on type parameters aren't allowed. (The + // compiler cannot yet know whether T is an interface type or not.) + rsvd, ok := any(decl).(*ast.ReservedNode) + if !ok { + continue + } + for _, rsvdName := range rsvd.Names { + if rsvdName.AsString() == name { + return rsvdName + } + } + } + // couldn't find it? Instead of puking, report position of the parent. + return parent +} + +func validateEnum(res *result, syntax protoreflect.Syntax, name protoreflect.FullName, ed *descriptorpb.EnumDescriptorProto, handler *reporter.Handler) error { + scope := fmt.Sprintf("enum %s", name) + + if len(ed.Value) == 0 { + enNode := res.EnumNode(ed) + enNodeInfo := res.file.NodeInfo(enNode) + if err := handler.HandleErrorf(enNodeInfo, "%s: enums must define at least one value", scope); err != nil { + return err + } + } + + if err := validateNoFeatures(res, syntax, scope, ed.Options.GetUninterpretedOption(), handler); err != nil { + return err + } + + allowAlias := false + var allowAliasOpt *descriptorpb.UninterpretedOption + if index, err := internal.FindOption(res, handler.HandleErrorf, scope, ed.Options.GetUninterpretedOption(), "allow_alias"); err != nil { + return err + } else if index >= 0 { + allowAliasOpt = ed.Options.UninterpretedOption[index] + valid := false + if allowAliasOpt.IdentifierValue != nil { + if allowAliasOpt.GetIdentifierValue() == "true" { + allowAlias = true + valid = true + } else if allowAliasOpt.GetIdentifierValue() == "false" { + valid = true + } + } + if !valid { + optNode := res.OptionNode(allowAliasOpt) + optNodeInfo := res.file.NodeInfo(optNode.GetValue()) + if err := handler.HandleErrorf(optNodeInfo, "%s: expecting bool value for allow_alias option", scope); err != nil { + return err + } + } + } + + if syntax == protoreflect.Proto3 && len(ed.Value) > 0 && ed.Value[0].GetNumber() != 0 { + evNode := res.EnumValueNode(ed.Value[0]) + evNodeInfo := res.file.NodeInfo(evNode.GetNumber()) + if err := handler.HandleErrorf(evNodeInfo, "%s: proto3 requires that first value of enum have numeric value zero", scope); err != nil { + return err + } + } + + // check for aliases + vals := map[int32]string{} + hasAlias := false + for _, evd := range ed.Value { + existing := vals[evd.GetNumber()] + if existing != "" { + if allowAlias { + hasAlias = true + } else { + evNode := res.EnumValueNode(evd) + evNodeInfo := res.file.NodeInfo(evNode.GetNumber()) + if err := handler.HandleErrorf(evNodeInfo, "%s: values %s and %s both have the same numeric value %d; use allow_alias option if intentional", scope, existing, evd.GetName(), evd.GetNumber()); err != nil { + return err + } + } + } + vals[evd.GetNumber()] = evd.GetName() + } + if allowAlias && !hasAlias { + optNode := res.OptionNode(allowAliasOpt) + optNodeInfo := res.file.NodeInfo(optNode.GetValue()) + if err := handler.HandleErrorf(optNodeInfo, "%s: allow_alias is true but no values are aliases", scope); err != nil { + return err + } + } + + // reserved ranges should not overlap + rsvd := make(tagRanges, len(ed.ReservedRange)) + for i, r := range ed.ReservedRange { + n := res.EnumReservedRangeNode(r) + rsvd[i] = tagRange{start: r.GetStart(), end: r.GetEnd(), node: n} + } + sort.Sort(rsvd) + for i := 1; i < len(rsvd); i++ { + if rsvd[i].start <= rsvd[i-1].end { + rangeNodeInfo := res.file.NodeInfo(rsvd[i].node) + if err 
:= handler.HandleErrorf(rangeNodeInfo, "%s: reserved ranges overlap: %d to %d and %d to %d", scope, rsvd[i-1].start, rsvd[i-1].end, rsvd[i].start, rsvd[i].end); err != nil { + return err + } + } + } + + // now, check that fields don't re-use tags and don't try to use extension + // or reserved ranges or reserved names + rsvdNames := map[string]struct{}{} + for _, n := range ed.ReservedName { + // validate reserved name while we're here + if !isIdentifier(n) { + node := findEnumReservedNameNode(res.EnumNode(ed), n) + nodeInfo := res.file.NodeInfo(node) + if err := handler.HandleErrorf(nodeInfo, "%s: reserved name %q is not a valid identifier", scope, n); err != nil { + return err + } + } + rsvdNames[n] = struct{}{} + } + for _, ev := range ed.Value { + evn := res.EnumValueNode(ev) + if _, ok := rsvdNames[ev.GetName()]; ok { + enumValNodeInfo := res.file.NodeInfo(evn.GetName()) + if err := handler.HandleErrorf(enumValNodeInfo, "%s: value %s is using a reserved name", scope, ev.GetName()); err != nil { + return err + } + } + // check reserved ranges + r := sort.Search(len(rsvd), func(index int) bool { return rsvd[index].end >= ev.GetNumber() }) + if r < len(rsvd) && rsvd[r].start <= ev.GetNumber() { + enumValNodeInfo := res.file.NodeInfo(evn.GetNumber()) + if err := handler.HandleErrorf(enumValNodeInfo, "%s: value %s is using number %d which is in reserved range %d to %d", scope, ev.GetName(), ev.GetNumber(), rsvd[r].start, rsvd[r].end); err != nil { + return err + } + } + } + + return nil +} + +func findEnumReservedNameNode(enumNode ast.Node, name string) ast.Node { + var decls []ast.EnumElement + if enumNode, ok := enumNode.(*ast.EnumNode); ok { + decls = enumNode.Decls + // if not the right type, we leave decls empty + } + return findReservedNameNode(enumNode, decls, name) +} + +func validateField(res *result, syntax protoreflect.Syntax, name protoreflect.FullName, fld *descriptorpb.FieldDescriptorProto, handler *reporter.Handler) error { + var scope string + if fld.Extendee != nil { + scope = fmt.Sprintf("extension %s", name) + } else { + scope = fmt.Sprintf("field %s", name) + } + + node := res.FieldNode(fld) + if fld.Number == nil { + fieldTagNodeInfo := res.file.NodeInfo(node) + if err := handler.HandleErrorf(fieldTagNodeInfo, "%s: missing field tag number", scope); err != nil { + return err + } + } + if syntax != protoreflect.Proto2 { + if fld.GetType() == descriptorpb.FieldDescriptorProto_TYPE_GROUP { + groupNodeInfo := res.file.NodeInfo(node.GetGroupKeyword()) + if err := handler.HandleErrorf(groupNodeInfo, "%s: groups are not allowed in proto3 or editions", scope); err != nil { + return err + } + } else if fld.Label != nil && fld.GetLabel() == descriptorpb.FieldDescriptorProto_LABEL_REQUIRED { + fieldLabelNodeInfo := res.file.NodeInfo(node.FieldLabel()) + if err := handler.HandleErrorf(fieldLabelNodeInfo, "%s: label 'required' is not allowed in proto3 or editions", scope); err != nil { + return err + } + } + if syntax == protoreflect.Editions { + if fld.Label != nil && fld.GetLabel() == descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL { + fieldLabelNodeInfo := res.file.NodeInfo(node.FieldLabel()) + if err := handler.HandleErrorf(fieldLabelNodeInfo, "%s: label 'optional' is not allowed in editions; use option features.field_presence instead", scope); err != nil { + return err + } + } + if index, err := internal.FindOption(res, handler.HandleErrorf, scope, fld.Options.GetUninterpretedOption(), "packed"); err != nil { + return err + } else if index >= 0 { + optNode := 
res.OptionNode(fld.Options.GetUninterpretedOption()[index]) + optNameNodeInfo := res.file.NodeInfo(optNode.GetName()) + if err := handler.HandleErrorf(optNameNodeInfo, "%s: packed option is not allowed in editions; use option features.repeated_field_encoding instead", scope); err != nil { + return err + } + } + } else if syntax == protoreflect.Proto3 { + if index, err := internal.FindOption(res, handler.HandleErrorf, scope, fld.Options.GetUninterpretedOption(), "default"); err != nil { + return err + } else if index >= 0 { + optNode := res.OptionNode(fld.Options.GetUninterpretedOption()[index]) + optNameNodeInfo := res.file.NodeInfo(optNode.GetName()) + if err := handler.HandleErrorf(optNameNodeInfo, "%s: default values are not allowed in proto3", scope); err != nil { + return err + } + } + } + } else { + if fld.Label == nil && fld.OneofIndex == nil { + fieldNameNodeInfo := res.file.NodeInfo(node.FieldName()) + if err := handler.HandleErrorf(fieldNameNodeInfo, "%s: field has no label; proto2 requires explicit 'optional' label", scope); err != nil { + return err + } + } + if fld.GetExtendee() != "" && fld.Label != nil && fld.GetLabel() == descriptorpb.FieldDescriptorProto_LABEL_REQUIRED { + fieldLabelNodeInfo := res.file.NodeInfo(node.FieldLabel()) + if err := handler.HandleErrorf(fieldLabelNodeInfo, "%s: extension fields cannot be 'required'", scope); err != nil { + return err + } + } + } + + return validateNoFeatures(res, syntax, scope, fld.Options.GetUninterpretedOption(), handler) +} + +type tagRange struct { + start int32 + end int32 + node ast.RangeDeclNode +} + +type tagRanges []tagRange + +func (r tagRanges) Len() int { + return len(r) +} + +func (r tagRanges) Less(i, j int) bool { + return r[i].start < r[j].start || + (r[i].start == r[j].start && r[i].end < r[j].end) +} + +func (r tagRanges) Swap(i, j int) { + r[i], r[j] = r[j], r[i] +} + +func fillInMissingLabels(fd *descriptorpb.FileDescriptorProto) { + for _, md := range fd.MessageType { + fillInMissingLabelsInMsg(md) + } + for _, extd := range fd.Extension { + fillInMissingLabel(extd) + } +} + +func fillInMissingLabelsInMsg(md *descriptorpb.DescriptorProto) { + for _, fld := range md.Field { + fillInMissingLabel(fld) + } + for _, nmd := range md.NestedType { + fillInMissingLabelsInMsg(nmd) + } + for _, extd := range md.Extension { + fillInMissingLabel(extd) + } +} + +func fillInMissingLabel(fld *descriptorpb.FieldDescriptorProto) { + if fld.Label == nil { + fld.Label = descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL.Enum() + } +} diff --git a/vendor/github.com/bufbuild/protocompile/protoutil/editions.go b/vendor/github.com/bufbuild/protocompile/protoutil/editions.go new file mode 100644 index 00000000..fb21dff6 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/protoutil/editions.go @@ -0,0 +1,140 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
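Editor's note: the reserved- and extension-range validation in parser/validate.go above boils down to a sort-then-scan: order the ranges by start (the tagRanges ordering) and compare each entry only with its predecessor. Below is a minimal, self-contained sketch of that idea, with invented names and example values; the real validator works on descriptorpb range protos and reports through the handler.

// Sketch only: sort-then-scan overlap detection, as used by validateMessage
// and validateEnum above. Not the library's code.
package main

import (
	"fmt"
	"sort"
)

type span struct{ start, end int32 } // end is exclusive, as in the message validator

func findOverlap(spans []span) (a, b span, ok bool) {
	sort.Slice(spans, func(i, j int) bool {
		return spans[i].start < spans[j].start ||
			(spans[i].start == spans[j].start && spans[i].end < spans[j].end)
	})
	for i := 1; i < len(spans); i++ {
		// After sorting by start, any overlap shows up between some pair of
		// neighbors, so a single linear pass suffices.
		if spans[i].start < spans[i-1].end {
			return spans[i-1], spans[i], true
		}
	}
	return span{}, span{}, false
}

func main() {
	if a, b, ok := findOverlap([]span{{1, 10}, {20, 30}, {8, 12}}); ok {
		fmt.Printf("ranges %d to %d and %d to %d overlap\n", a.start, a.end-1, b.start, b.end-1)
	}
}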
+ +package protoutil + +import ( + "fmt" + + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/types/descriptorpb" + "google.golang.org/protobuf/types/dynamicpb" + + "github.com/bufbuild/protocompile/internal/editions" +) + +// GetFeatureDefault gets the default value for the given feature and the given +// edition. The given feature must represent a field of the google.protobuf.FeatureSet +// message and must not be an extension. +// +// If the given field is from a dynamically built descriptor (i.e. it's containing +// message descriptor is different from the linked-in descriptor for +// [*descriptorpb.FeatureSet]), the returned value may be a dynamic value. In such +// cases, the value may not be directly usable using [protoreflect.Message.Set] with +// an instance of [*descriptorpb.FeatureSet] and must instead be used with a +// [*dynamicpb.Message]. +// +// To get the default value of a custom feature, use [GetCustomFeatureDefault] +// instead. +func GetFeatureDefault(edition descriptorpb.Edition, feature protoreflect.FieldDescriptor) (protoreflect.Value, error) { + if feature.ContainingMessage().FullName() != editions.FeatureSetDescriptor.FullName() { + return protoreflect.Value{}, fmt.Errorf("feature %s is a field of %s but should be a field of %s", + feature.Name(), feature.ContainingMessage().FullName(), editions.FeatureSetDescriptor.FullName()) + } + var msgType protoreflect.MessageType + if feature.ContainingMessage() == editions.FeatureSetDescriptor { + msgType = editions.FeatureSetType + } else { + msgType = dynamicpb.NewMessageType(feature.ContainingMessage()) + } + return editions.GetFeatureDefault(edition, msgType, feature) +} + +// GetCustomFeatureDefault gets the default value for the given custom feature +// and given edition. A custom feature is a field whose containing message is the +// type of an extension field of google.protobuf.FeatureSet. The given extension +// describes that extension field and message type. The given feature must be a +// field of that extension's message type. 
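Editor's note: a hedged usage sketch for GetFeatureDefault as defined above. It looks up the default of the standard field_presence feature for edition 2023, taking the field descriptor from the linked-in descriptorpb.FeatureSet message; the expected result is noted only in a comment.

package main

import (
	"fmt"

	"github.com/bufbuild/protocompile/protoutil"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	// Field descriptor for google.protobuf.FeatureSet.field_presence from
	// the generated descriptorpb package.
	featureSet := (&descriptorpb.FeatureSet{}).ProtoReflect().Descriptor()
	fieldPresence := featureSet.Fields().ByName("field_presence")

	val, err := protoutil.GetFeatureDefault(descriptorpb.Edition_EDITION_2023, fieldPresence)
	if err != nil {
		panic(err)
	}
	// For edition 2023 this should be the enum number for EXPLICIT presence.
	fmt.Println(val.Enum())
}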
+func GetCustomFeatureDefault(edition descriptorpb.Edition, extension protoreflect.ExtensionType, feature protoreflect.FieldDescriptor) (protoreflect.Value, error) { + extDesc := extension.TypeDescriptor() + if extDesc.ContainingMessage().FullName() != editions.FeatureSetDescriptor.FullName() { + return protoreflect.Value{}, fmt.Errorf("extension %s does not extend %s", extDesc.FullName(), editions.FeatureSetDescriptor.FullName()) + } + if extDesc.Message() == nil { + return protoreflect.Value{}, fmt.Errorf("extensions of %s should be messages; %s is instead %s", + editions.FeatureSetDescriptor.FullName(), extDesc.FullName(), extDesc.Kind().String()) + } + if feature.IsExtension() { + return protoreflect.Value{}, fmt.Errorf("feature %s is an extension, but feature extension %s may not itself have extensions", + feature.FullName(), extDesc.FullName()) + } + if feature.ContainingMessage().FullName() != extDesc.Message().FullName() { + return protoreflect.Value{}, fmt.Errorf("feature %s is a field of %s but should be a field of %s", + feature.Name(), feature.ContainingMessage().FullName(), extDesc.Message().FullName()) + } + if feature.ContainingMessage() != extDesc.Message() { + return protoreflect.Value{}, fmt.Errorf("feature %s has a different message descriptor from the given extension type for %s", + feature.Name(), extDesc.Message().FullName()) + } + return editions.GetFeatureDefault(edition, extension.Zero().Message().Type(), feature) +} + +// ResolveFeature resolves a feature for the given descriptor. +// +// If the given element is in a proto2 or proto3 syntax file, this skips +// resolution and just returns the relevant default (since such files are not +// allowed to override features). If neither the given element nor any of its +// ancestors override the given feature, the relevant default is returned. +// +// This has the same caveat as GetFeatureDefault if the given feature is from a +// dynamically built descriptor. +func ResolveFeature(element protoreflect.Descriptor, feature protoreflect.FieldDescriptor) (protoreflect.Value, error) { + edition := editions.GetEdition(element) + defaultVal, err := GetFeatureDefault(edition, feature) + if err != nil { + return protoreflect.Value{}, err + } + return resolveFeature(edition, defaultVal, element, feature) +} + +// ResolveCustomFeature resolves a custom feature for the given extension and +// field descriptor. +// +// The given extension must be an extension of google.protobuf.FeatureSet that +// represents a non-repeated message value. The given feature is a field in +// that extension's message type. +// +// If the given element is in a proto2 or proto3 syntax file, this skips +// resolution and just returns the relevant default (since such files are not +// allowed to override features). If neither the given element nor any of its +// ancestors override the given feature, the relevant default is returned. 
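Editor's note: a companion sketch for ResolveFeature as defined above. For an element in a proto2 file, such as the generated descriptor for descriptor.proto, resolution short-circuits to the edition default, as the comment above describes.

package main

import (
	"fmt"

	"github.com/bufbuild/protocompile/protoutil"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	fieldPresence := (&descriptorpb.FeatureSet{}).ProtoReflect().
		Descriptor().Fields().ByName("field_presence")

	// descriptor.proto is a proto2 file, so no overrides are possible and the
	// proto2 default for the feature is returned.
	fd := descriptorpb.File_google_protobuf_descriptor_proto
	val, err := protoutil.ResolveFeature(fd, fieldPresence)
	if err != nil {
		panic(err)
	}
	fmt.Println(val.Enum())
}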
+func ResolveCustomFeature(element protoreflect.Descriptor, extension protoreflect.ExtensionType, feature protoreflect.FieldDescriptor) (protoreflect.Value, error) { + edition := editions.GetEdition(element) + defaultVal, err := GetCustomFeatureDefault(edition, extension, feature) + if err != nil { + return protoreflect.Value{}, err + } + return resolveFeature(edition, defaultVal, element, extension.TypeDescriptor(), feature) +} + +func resolveFeature( + edition descriptorpb.Edition, + defaultVal protoreflect.Value, + element protoreflect.Descriptor, + fields ...protoreflect.FieldDescriptor, +) (protoreflect.Value, error) { + if edition == descriptorpb.Edition_EDITION_PROTO2 || edition == descriptorpb.Edition_EDITION_PROTO3 { + // these syntax levels can't specify features, so we can short-circuit the search + // through the descriptor hierarchy for feature overrides + return defaultVal, nil + } + val, err := editions.ResolveFeature(element, fields...) + if err != nil { + return protoreflect.Value{}, err + } + if val.IsValid() { + return val, nil + } + return defaultVal, nil +} diff --git a/vendor/github.com/bufbuild/protocompile/protoutil/protos.go b/vendor/github.com/bufbuild/protocompile/protoutil/protos.go new file mode 100644 index 00000000..9c559993 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/protoutil/protos.go @@ -0,0 +1,262 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package protoutil contains useful functions for interacting with descriptors. +// For now these include only functions for efficiently converting descriptors +// produced by the compiler to descriptor protos and functions for resolving +// "features" (a core concept of Protobuf Editions). +// +// Despite the fact that descriptor protos are mutable, calling code should NOT +// mutate any of the protos returned from this package. For efficiency, some +// values returned from this package may reference internal state of a compiler +// result, and mutating the proto could corrupt or invalidate parts of that +// result. +package protoutil + +import ( + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protodesc" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/types/descriptorpb" +) + +// DescriptorProtoWrapper is a protoreflect.Descriptor that wraps an +// underlying descriptor proto. It provides the same interface as +// Descriptor but with one extra operation, to efficiently query for +// the underlying descriptor proto. +// +// Descriptors that implement this will also implement another method +// whose specified return type is the concrete type returned by the +// AsProto method. 
The name of this method varies by the type of this +// descriptor: +// +// Descriptor Type Other Method Name +// ---------------------+------------------------------------ +// FileDescriptor | FileDescriptorProto() +// MessageDescriptor | MessageDescriptorProto() +// FieldDescriptor | FieldDescriptorProto() +// OneofDescriptor | OneofDescriptorProto() +// EnumDescriptor | EnumDescriptorProto() +// EnumValueDescriptor | EnumValueDescriptorProto() +// ServiceDescriptor | ServiceDescriptorProto() +// MethodDescriptor | MethodDescriptorProto() +// +// For example, a DescriptorProtoWrapper that implements FileDescriptor +// returns a *descriptorpb.FileDescriptorProto value from its AsProto +// method and also provides a method with the following signature: +// +// FileDescriptorProto() *descriptorpb.FileDescriptorProto +type DescriptorProtoWrapper interface { + protoreflect.Descriptor + // AsProto returns the underlying descriptor proto. The concrete + // type of the proto message depends on the type of this + // descriptor: + // Descriptor Type Proto Message Type + // ---------------------+------------------------------------ + // FileDescriptor | *descriptorpb.FileDescriptorProto + // MessageDescriptor | *descriptorpb.DescriptorProto + // FieldDescriptor | *descriptorpb.FieldDescriptorProto + // OneofDescriptor | *descriptorpb.OneofDescriptorProto + // EnumDescriptor | *descriptorpb.EnumDescriptorProto + // EnumValueDescriptor | *descriptorpb.EnumValueDescriptorProto + // ServiceDescriptor | *descriptorpb.ServiceDescriptorProto + // MethodDescriptor | *descriptorpb.MethodDescriptorProto + AsProto() proto.Message +} + +// ProtoFromDescriptor extracts a descriptor proto from the given "rich" +// descriptor. For descriptors generated by the compiler, this is an +// inexpensive and non-lossy operation. Descriptors from other sources +// however may be expensive (to re-create a proto) and even lossy. +func ProtoFromDescriptor(d protoreflect.Descriptor) proto.Message { + switch d := d.(type) { + case protoreflect.FileDescriptor: + return ProtoFromFileDescriptor(d) + case protoreflect.MessageDescriptor: + return ProtoFromMessageDescriptor(d) + case protoreflect.FieldDescriptor: + return ProtoFromFieldDescriptor(d) + case protoreflect.OneofDescriptor: + return ProtoFromOneofDescriptor(d) + case protoreflect.EnumDescriptor: + return ProtoFromEnumDescriptor(d) + case protoreflect.EnumValueDescriptor: + return ProtoFromEnumValueDescriptor(d) + case protoreflect.ServiceDescriptor: + return ProtoFromServiceDescriptor(d) + case protoreflect.MethodDescriptor: + return ProtoFromMethodDescriptor(d) + default: + // WTF?? + if res, ok := d.(DescriptorProtoWrapper); ok { + return res.AsProto() + } + return nil + } +} + +// ProtoFromFileDescriptor extracts a descriptor proto from the given "rich" +// descriptor. For file descriptors generated by the compiler, this is an +// inexpensive and non-lossy operation. File descriptors from other sources +// however may be expensive (to re-create a proto) and even lossy. 
+func ProtoFromFileDescriptor(d protoreflect.FileDescriptor) *descriptorpb.FileDescriptorProto { + if imp, ok := d.(protoreflect.FileImport); ok { + d = imp.FileDescriptor + } + type canProto interface { + FileDescriptorProto() *descriptorpb.FileDescriptorProto + } + if res, ok := d.(canProto); ok { + return res.FileDescriptorProto() + } + if res, ok := d.(DescriptorProtoWrapper); ok { + if fd, ok := res.AsProto().(*descriptorpb.FileDescriptorProto); ok { + return fd + } + } + return protodesc.ToFileDescriptorProto(d) +} + +// ProtoFromMessageDescriptor extracts a descriptor proto from the given "rich" +// descriptor. For message descriptors generated by the compiler, this is an +// inexpensive and non-lossy operation. Message descriptors from other sources +// however may be expensive (to re-create a proto) and even lossy. +func ProtoFromMessageDescriptor(d protoreflect.MessageDescriptor) *descriptorpb.DescriptorProto { + type canProto interface { + MessageDescriptorProto() *descriptorpb.DescriptorProto + } + if res, ok := d.(canProto); ok { + return res.MessageDescriptorProto() + } + if res, ok := d.(DescriptorProtoWrapper); ok { + if md, ok := res.AsProto().(*descriptorpb.DescriptorProto); ok { + return md + } + } + return protodesc.ToDescriptorProto(d) +} + +// ProtoFromFieldDescriptor extracts a descriptor proto from the given "rich" +// descriptor. For field descriptors generated by the compiler, this is an +// inexpensive and non-lossy operation. Field descriptors from other sources +// however may be expensive (to re-create a proto) and even lossy. +func ProtoFromFieldDescriptor(d protoreflect.FieldDescriptor) *descriptorpb.FieldDescriptorProto { + type canProto interface { + FieldDescriptorProto() *descriptorpb.FieldDescriptorProto + } + if res, ok := d.(canProto); ok { + return res.FieldDescriptorProto() + } + if res, ok := d.(DescriptorProtoWrapper); ok { + if fd, ok := res.AsProto().(*descriptorpb.FieldDescriptorProto); ok { + return fd + } + } + return protodesc.ToFieldDescriptorProto(d) +} + +// ProtoFromOneofDescriptor extracts a descriptor proto from the given "rich" +// descriptor. For oneof descriptors generated by the compiler, this is an +// inexpensive and non-lossy operation. Oneof descriptors from other sources +// however may be expensive (to re-create a proto) and even lossy. +func ProtoFromOneofDescriptor(d protoreflect.OneofDescriptor) *descriptorpb.OneofDescriptorProto { + type canProto interface { + OneofDescriptorProto() *descriptorpb.OneofDescriptorProto + } + if res, ok := d.(canProto); ok { + return res.OneofDescriptorProto() + } + if res, ok := d.(DescriptorProtoWrapper); ok { + if ood, ok := res.AsProto().(*descriptorpb.OneofDescriptorProto); ok { + return ood + } + } + return protodesc.ToOneofDescriptorProto(d) +} + +// ProtoFromEnumDescriptor extracts a descriptor proto from the given "rich" +// descriptor. For enum descriptors generated by the compiler, this is an +// inexpensive and non-lossy operation. Enum descriptors from other sources +// however may be expensive (to re-create a proto) and even lossy. 
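Editor's note: a small usage sketch for these ProtoFrom* helpers, using descriptors from generated code (which take the protodesc fallback path; descriptors produced by this module's compiler would instead take the cheap DescriptorProtoWrapper path).

package main

import (
	"fmt"

	"github.com/bufbuild/protocompile/protoutil"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	// A descriptor from generated code: the helper falls back to
	// protodesc.ToFileDescriptorProto.
	fdProto := protoutil.ProtoFromFileDescriptor(descriptorpb.File_google_protobuf_descriptor_proto)
	fmt.Println(fdProto.GetName()) // "google/protobuf/descriptor.proto"

	// Per the package comment above, treat the returned proto as read-only:
	// it may share state with the descriptor it came from.
	msgProto := protoutil.ProtoFromMessageDescriptor(
		(&descriptorpb.FileDescriptorProto{}).ProtoReflect().Descriptor(),
	)
	fmt.Println(msgProto.GetName()) // "FileDescriptorProto"
}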
+func ProtoFromEnumDescriptor(d protoreflect.EnumDescriptor) *descriptorpb.EnumDescriptorProto { + type canProto interface { + EnumDescriptorProto() *descriptorpb.EnumDescriptorProto + } + if res, ok := d.(canProto); ok { + return res.EnumDescriptorProto() + } + if res, ok := d.(DescriptorProtoWrapper); ok { + if ed, ok := res.AsProto().(*descriptorpb.EnumDescriptorProto); ok { + return ed + } + } + return protodesc.ToEnumDescriptorProto(d) +} + +// ProtoFromEnumValueDescriptor extracts a descriptor proto from the given "rich" +// descriptor. For enum value descriptors generated by the compiler, this is an +// inexpensive and non-lossy operation. Enum value descriptors from other sources +// however may be expensive (to re-create a proto) and even lossy. +func ProtoFromEnumValueDescriptor(d protoreflect.EnumValueDescriptor) *descriptorpb.EnumValueDescriptorProto { + type canProto interface { + EnumValueDescriptorProto() *descriptorpb.EnumValueDescriptorProto + } + if res, ok := d.(canProto); ok { + return res.EnumValueDescriptorProto() + } + if res, ok := d.(DescriptorProtoWrapper); ok { + if ed, ok := res.AsProto().(*descriptorpb.EnumValueDescriptorProto); ok { + return ed + } + } + return protodesc.ToEnumValueDescriptorProto(d) +} + +// ProtoFromServiceDescriptor extracts a descriptor proto from the given "rich" +// descriptor. For service descriptors generated by the compiler, this is an +// inexpensive and non-lossy operation. Service descriptors from other sources +// however may be expensive (to re-create a proto) and even lossy. +func ProtoFromServiceDescriptor(d protoreflect.ServiceDescriptor) *descriptorpb.ServiceDescriptorProto { + type canProto interface { + ServiceDescriptorProto() *descriptorpb.ServiceDescriptorProto + } + if res, ok := d.(canProto); ok { + return res.ServiceDescriptorProto() + } + if res, ok := d.(DescriptorProtoWrapper); ok { + if sd, ok := res.AsProto().(*descriptorpb.ServiceDescriptorProto); ok { + return sd + } + } + return protodesc.ToServiceDescriptorProto(d) +} + +// ProtoFromMethodDescriptor extracts a descriptor proto from the given "rich" +// descriptor. For method descriptors generated by the compiler, this is an +// inexpensive and non-lossy operation. Method descriptors from other sources +// however may be expensive (to re-create a proto) and even lossy. +func ProtoFromMethodDescriptor(d protoreflect.MethodDescriptor) *descriptorpb.MethodDescriptorProto { + type canProto interface { + MethodDescriptorProto() *descriptorpb.MethodDescriptorProto + } + if res, ok := d.(canProto); ok { + return res.MethodDescriptorProto() + } + if res, ok := d.(DescriptorProtoWrapper); ok { + if md, ok := res.AsProto().(*descriptorpb.MethodDescriptorProto); ok { + return md + } + } + return protodesc.ToMethodDescriptorProto(d) +} diff --git a/vendor/github.com/bufbuild/protocompile/reporter/errors.go b/vendor/github.com/bufbuild/protocompile/reporter/errors.go new file mode 100644 index 00000000..3a70a43e --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/reporter/errors.go @@ -0,0 +1,74 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package reporter + +import ( + "errors" + "fmt" + + "github.com/bufbuild/protocompile/ast" +) + +// ErrInvalidSource is a sentinel error that is returned by compilation and +// stand-alone compilation steps (such as parsing, linking) when one or more +// errors is reported but the configured ErrorReporter always returns nil. +var ErrInvalidSource = errors.New("parse failed: invalid proto source") + +// ErrorWithPos is an error about a proto source file that adds information +// about the location in the file that caused the error. +type ErrorWithPos interface { + error + ast.SourceSpan + // GetPosition returns the start source position that caused the underlying error. + GetPosition() ast.SourcePos + // Unwrap returns the underlying error. + Unwrap() error +} + +// Error creates a new ErrorWithPos from the given error and source position. +func Error(span ast.SourceSpan, err error) ErrorWithPos { + var ewp ErrorWithPos + if errors.As(err, &ewp) { + // replace existing position with given one + return &errorWithSpan{SourceSpan: span, underlying: ewp.Unwrap()} + } + return &errorWithSpan{SourceSpan: span, underlying: err} +} + +// Errorf creates a new ErrorWithPos whose underlying error is created using the +// given message format and arguments (via fmt.Errorf). +func Errorf(span ast.SourceSpan, format string, args ...interface{}) ErrorWithPos { + return Error(span, fmt.Errorf(format, args...)) +} + +type errorWithSpan struct { + ast.SourceSpan + underlying error +} + +func (e *errorWithSpan) Error() string { + sourcePos := e.GetPosition() + return fmt.Sprintf("%s: %v", sourcePos, e.underlying) +} + +func (e *errorWithSpan) GetPosition() ast.SourcePos { + return e.Start() +} + +func (e *errorWithSpan) Unwrap() error { + return e.underlying +} + +var _ ErrorWithPos = (*errorWithSpan)(nil) diff --git a/vendor/github.com/bufbuild/protocompile/reporter/reporter.go b/vendor/github.com/bufbuild/protocompile/reporter/reporter.go new file mode 100644 index 00000000..8e906406 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/reporter/reporter.go @@ -0,0 +1,219 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package reporter contains the types used for reporting errors from +// protocompile operations. It contains error types as well as interfaces +// for reporting and handling errors and warnings. +package reporter + +import ( + "sync" + + "github.com/bufbuild/protocompile/ast" +) + +// ErrorReporter is responsible for reporting the given error. 
If the reporter +// returns a non-nil error, parsing/linking will abort with that error. If the +// reporter returns nil, parsing will continue, allowing the parser to try to +// report as many syntax and/or link errors as it can find. +type ErrorReporter func(err ErrorWithPos) error + +// WarningReporter is responsible for reporting the given warning. This is used +// for indicating non-error messages to the calling program for things that do +// not cause the parse to fail but are considered bad practice. Though they are +// just warnings, the details are supplied to the reporter via an error type. +type WarningReporter func(ErrorWithPos) + +// Reporter is a type that handles reporting both errors and warnings. +// A reporter does not need to be thread-safe. Safe concurrent access is +// managed by a Handler. +type Reporter interface { + // Error is called when the given error is encountered and needs to be + // reported to the calling program. This signature matches ErrorReporter + // because it has the same semantics. If this function returns non-nil + // then the operation will abort immediately with the given error. But + // if it returns nil, the operation will continue, reporting more errors + // as they are encountered. If the reporter never returns non-nil then + // the operation will eventually fail with ErrInvalidSource. + Error(ErrorWithPos) error + // Warning is called when the given warnings is encountered and needs to be + // reported to the calling program. Despite the argument being an error + // type, a warning will never cause the operation to abort or fail (unless + // the reporter's implementation of this method panics). + Warning(ErrorWithPos) +} + +// NewReporter creates a new reporter that invokes the given functions on error +// or warning. +func NewReporter(errs ErrorReporter, warnings WarningReporter) Reporter { + return reporterFuncs{errs: errs, warnings: warnings} +} + +type reporterFuncs struct { + errs ErrorReporter + warnings WarningReporter +} + +func (r reporterFuncs) Error(err ErrorWithPos) error { + if r.errs == nil { + return err + } + return r.errs(err) +} + +func (r reporterFuncs) Warning(err ErrorWithPos) { + if r.warnings != nil { + r.warnings(err) + } +} + +// Handler is used by protocompile operations for handling errors and warnings. +// This type is thread-safe. It uses a mutex to serialize calls to its reporter +// so that reporter instances do not have to be thread-safe (unless re-used +// across multiple handlers). +type Handler struct { + parent *Handler + mu sync.Mutex + reporter Reporter + errsReported bool + err error +} + +// NewHandler creates a new Handler that reports errors and warnings using the +// given reporter. +func NewHandler(rep Reporter) *Handler { + if rep == nil { + rep = NewReporter(nil, nil) + } + return &Handler{reporter: rep} +} + +// SubHandler returns a "child" of h. Use of a child handler is the same as use +// of the parent, except that the Error() and ReporterError() functions only +// report non-nil for errors that were reported using the child handler. So +// errors reported directly to the parent or to a different child handler won't +// be returned. This is useful for making concurrent access to the handler more +// deterministic: if a child handler is only used from one goroutine, its view +// of reported errors is consistent and unimpacted by concurrent operations. +func (h *Handler) SubHandler() *Handler { + return &Handler{parent: h} +} + +// HandleError handles the given error. 
If the given err is an ErrorWithPos, it +// is reported, and this function returns the error returned by the reporter. If +// the given err is NOT an ErrorWithPos, the current operation will abort +// immediately. +// +// If the handler has already aborted (by returning a non-nil error from a prior +// call to HandleError or HandleErrorf), that same error is returned and the +// given error is not reported. +func (h *Handler) HandleError(err error) error { + if h.parent != nil { + _, isErrWithPos := err.(ErrorWithPos) + err = h.parent.HandleError(err) + + // update child state + h.mu.Lock() + defer h.mu.Unlock() + if isErrWithPos { + h.errsReported = true + } + h.err = err + return err + } + + h.mu.Lock() + defer h.mu.Unlock() + + if h.err != nil { + return h.err + } + if ewp, ok := err.(ErrorWithPos); ok { + h.errsReported = true + err = h.reporter.Error(ewp) + } + h.err = err + return err +} + +// HandleErrorWithPos handles an error with the given source position. +// +// If the handler has already aborted (by returning a non-nil error from a prior +// call to HandleError or HandleErrorf), that same error is returned and the +// given error is not reported. +func (h *Handler) HandleErrorWithPos(span ast.SourceSpan, err error) error { + return h.HandleError(Error(span, err)) +} + +// HandleErrorf handles an error with the given source position, creating the +// error using the given message format and arguments. +// +// If the handler has already aborted (by returning a non-nil error from a call +// to HandleError or HandleErrorf), that same error is returned and the given +// error is not reported. +func (h *Handler) HandleErrorf(span ast.SourceSpan, format string, args ...interface{}) error { + return h.HandleError(Errorf(span, format, args...)) +} + +// HandleWarning handles the given warning. This will delegate to the handler's +// configured reporter. +func (h *Handler) HandleWarning(err ErrorWithPos) { + if h.parent != nil { + h.parent.HandleWarning(err) + return + } + + // even though we aren't touching mutable fields, we acquire lock anyway so + // that underlying reporter does not have to be thread-safe + h.mu.Lock() + defer h.mu.Unlock() + + h.reporter.Warning(err) +} + +// HandleWarningWithPos handles a warning with the given source position. This will +// delegate to the handler's configured reporter. +func (h *Handler) HandleWarningWithPos(span ast.SourceSpan, err error) { + h.HandleWarning(Error(span, err)) +} + +// HandleWarningf handles a warning with the given source position, creating the +// actual error value using the given message format and arguments. +func (h *Handler) HandleWarningf(span ast.SourceSpan, format string, args ...interface{}) { + h.HandleWarning(Errorf(span, format, args...)) +} + +// Error returns the handler result. If any errors have been reported then this +// returns a non-nil error. If the reporter never returned a non-nil error then +// ErrInvalidSource is returned. Otherwise, this returns the error returned by +// the handler's reporter (the same value returned by ReporterError). +func (h *Handler) Error() error { + h.mu.Lock() + defer h.mu.Unlock() + + if h.errsReported && h.err == nil { + return ErrInvalidSource + } + return h.err +} + +// ReporterError returns the error returned by the handler's reporter. If +// the reporter has either not been invoked (no errors handled) or has not +// returned any non-nil value, then this returns nil. 
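Editor's note: a hedged sketch of plugging a custom Reporter into a compile through this module's Compiler type so that every error is collected rather than aborting on the first. The file name broken.proto is a placeholder assumed to exist but contain invalid syntax; because the error callback always returns nil, the overall result is ErrInvalidSource, as described above.

package main

import (
	"context"
	"fmt"

	"github.com/bufbuild/protocompile"
	"github.com/bufbuild/protocompile/reporter"
)

func main() {
	var collected []reporter.ErrorWithPos
	rep := reporter.NewReporter(
		func(errWithPos reporter.ErrorWithPos) error {
			collected = append(collected, errWithPos)
			return nil // keep going so later errors are reported too
		},
		func(warn reporter.ErrorWithPos) {
			fmt.Println("warning:", warn)
		},
	)

	compiler := protocompile.Compiler{
		Resolver: protocompile.WithStandardImports(&protocompile.SourceResolver{}),
		Reporter: rep,
	}
	// "broken.proto" is a placeholder path for this sketch.
	if _, err := compiler.Compile(context.Background(), "broken.proto"); err != nil {
		// With a callback that never returns non-nil, err is
		// reporter.ErrInvalidSource once at least one error was reported.
		fmt.Println(err, "- collected", len(collected), "errors")
	}
}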
+func (h *Handler) ReporterError() error { + h.mu.Lock() + defer h.mu.Unlock() + + return h.err +} diff --git a/vendor/github.com/bufbuild/protocompile/resolver.go b/vendor/github.com/bufbuild/protocompile/resolver.go new file mode 100644 index 00000000..400d554b --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/resolver.go @@ -0,0 +1,215 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package protocompile + +import ( + "errors" + "io" + "io/fs" + "os" + "path/filepath" + "strings" + + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/types/descriptorpb" + + "github.com/bufbuild/protocompile/ast" + "github.com/bufbuild/protocompile/parser" +) + +// Resolver is used by the compiler to resolve a proto source file name +// into some unit that is usable by the compiler. The result could be source +// for a proto file or it could be an already-parsed AST or descriptor. +// +// Resolver implementations must be thread-safe as a single compilation +// operation could invoke FindFileByPath from multiple goroutines. +type Resolver interface { + // FindFileByPath searches for information for the given file path. If no + // result is available, it should return a non-nil error, such as + // protoregistry.NotFound. + FindFileByPath(path string) (SearchResult, error) +} + +// SearchResult represents information about a proto source file. Only one of +// the various fields must be set, based on what is available for a file. If +// multiple fields are set, the compiler prefers them in opposite order listed: +// so it uses a descriptor if present and only falls back to source if nothing +// else is available. +type SearchResult struct { + // Represents source code for the file. This should be nil if source code + // is not available. If no field below is set, then the compiler will parse + // the source code into an AST. + Source io.Reader + // Represents the abstract syntax tree for the file. If no field below is + // set, then the compiler will convert the AST into a descriptor proto. + AST *ast.FileNode + // A descriptor proto that represents the file. If the field below is not + // set, then the compiler will link this proto with its dependencies to + // produce a linked descriptor. + Proto *descriptorpb.FileDescriptorProto + // A parse result for the file. This packages both an AST and a descriptor + // proto in one. When a parser result is available, it is more efficient + // than using an AST search result, since the descriptor proto need not be + // re-created. And it provides better error messages than a descriptor proto + // search result, since the AST has greater fidelity with regard to source + // positions (even if the descriptor proto includes source code info). + ParseResult parser.Result + // A fully linked descriptor that represents the file. 
If this field is set, + // then the compiler has little or no additional work to do for this file as + // it is already compiled. If this value implements linker.File, there is no + // additional work. Otherwise, the additional work is to compute an index of + // symbols in the file, for efficient lookup. + Desc protoreflect.FileDescriptor +} + +// ResolverFunc is a simple function type that implements Resolver. +type ResolverFunc func(string) (SearchResult, error) + +var _ Resolver = ResolverFunc(nil) + +func (f ResolverFunc) FindFileByPath(path string) (SearchResult, error) { + return f(path) +} + +// CompositeResolver is a slice of resolvers, which are consulted in order +// until one can supply a result. If none of the constituent resolvers can +// supply a result, the error returned by the first resolver is returned. If +// the slice of resolvers is empty, all operations return +// protoregistry.NotFound. +type CompositeResolver []Resolver + +var _ Resolver = CompositeResolver(nil) + +func (f CompositeResolver) FindFileByPath(path string) (SearchResult, error) { + if len(f) == 0 { + return SearchResult{}, protoregistry.NotFound + } + var firstErr error + for _, res := range f { + r, err := res.FindFileByPath(path) + if err == nil { + return r, nil + } + if firstErr == nil { + firstErr = err + } + } + return SearchResult{}, firstErr +} + +// SourceResolver can resolve file names by returning source code. It uses +// an optional list of import paths to search. By default, it searches the +// file system. +type SourceResolver struct { + // Optional list of import paths. If present and not empty, then all + // file paths to find are assumed to be relative to one of these paths. + // If nil or empty, all file paths to find are assumed to be relative to + // the current working directory. + ImportPaths []string + // Optional function for returning a file's contents. If nil, then + // os.Open is used to open files on the file system. + // + // This function must be thread-safe as a single compilation operation + // could result in concurrent invocations of this function from + // multiple goroutines. + Accessor func(path string) (io.ReadCloser, error) +} + +var _ Resolver = (*SourceResolver)(nil) + +func (r *SourceResolver) FindFileByPath(path string) (SearchResult, error) { + if len(r.ImportPaths) == 0 { + reader, err := r.accessFile(path) + if err != nil { + return SearchResult{}, err + } + return SearchResult{Source: reader}, nil + } + + var e error + for _, importPath := range r.ImportPaths { + reader, err := r.accessFile(filepath.Join(importPath, path)) + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + e = err + continue + } + return SearchResult{}, err + } + return SearchResult{Source: reader}, nil + } + return SearchResult{}, e +} + +func (r *SourceResolver) accessFile(path string) (io.ReadCloser, error) { + if r.Accessor != nil { + return r.Accessor(path) + } + return os.Open(path) +} + +// SourceAccessorFromMap returns a function that can be used as the Accessor +// field of a SourceResolver that uses the given map to load source. The map +// keys are file names and the values are the corresponding file contents. +// +// The given map is used directly and not copied. Since accessor functions +// must be thread-safe, this means that the provided map must not be mutated +// once this accessor is provided to a compile operation. 
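Editor's note: a usage sketch for SourceAccessorFromMap (documented above, defined just below), compiling a file held entirely in memory. The file name and message are invented for the example; WithStandardImports is layered on so standard imports would also resolve.

package main

import (
	"context"
	"fmt"

	"github.com/bufbuild/protocompile"
)

func main() {
	srcs := map[string]string{
		// Hypothetical in-memory source for this sketch.
		"echo.proto": `syntax = "proto3";
package demo;
message EchoRequest { string msg = 1; }
`,
	}
	compiler := protocompile.Compiler{
		Resolver: protocompile.WithStandardImports(&protocompile.SourceResolver{
			Accessor: protocompile.SourceAccessorFromMap(srcs),
		}),
	}
	files, err := compiler.Compile(context.Background(), "echo.proto")
	if err != nil {
		panic(err)
	}
	fmt.Println(files[0].Path()) // "echo.proto"
}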
+func SourceAccessorFromMap(srcs map[string]string) func(string) (io.ReadCloser, error) { + return func(path string) (io.ReadCloser, error) { + src, ok := srcs[path] + if !ok { + return nil, os.ErrNotExist + } + return io.NopCloser(strings.NewReader(src)), nil + } +} + +// WithStandardImports returns a new resolver that knows about the same standard +// imports that are included with protoc. +// +// Note that this uses the descriptors embedded in generated code in the packages +// of the Protobuf Go module, except for "google/protobuf/cpp_features.proto" and +// "google/protobuf/java_features.proto". For those two files, compiled descriptors +// are embedded in this module because there is no package in the Protobuf Go module +// that contains generated code for those files. This resolver also provides results +// for the "google/protobuf/go_features.proto", which is technically not a standard +// file (it is not included with protoc) but is included in generated code in the +// Protobuf Go module. +// +// As of v0.14.0 of this module (and v1.34.2 of the Protobuf Go module and v27.0 of +// Protobuf), the contents of the standard import "google/protobuf/descriptor.proto" +// contain extension declarations which are *absent* from the descriptors that this +// resolver returns. That is because extension declarations are only retained in +// source, not at runtime, which means they are not available in the embedded +// descriptors in generated code. +// +// To use versions of the standard imports that *do* include these extension +// declarations, see wellknownimports.WithStandardImports instead. As of this +// writing, the declarations are only needed to prevent source files from +// illegally re-defining the custom features for C++, Java, and Go. +func WithStandardImports(r Resolver) Resolver { + return ResolverFunc(func(name string) (SearchResult, error) { + res, err := r.FindFileByPath(name) + if err != nil { + // error from given resolver? see if it's a known standard file + if d, ok := standardImports[name]; ok { + return SearchResult{Desc: d}, nil + } + } + return res, err + }) +} diff --git a/vendor/github.com/bufbuild/protocompile/sourceinfo/source_code_info.go b/vendor/github.com/bufbuild/protocompile/sourceinfo/source_code_info.go new file mode 100644 index 00000000..3b0ae657 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/sourceinfo/source_code_info.go @@ -0,0 +1,962 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package sourceinfo contains the logic for computing source code info for a +// file descriptor. +// +// The inputs to the computation are an AST for a file as well as the index of +// interpreted options for that file. 
+package sourceinfo + +import ( + "bytes" + "fmt" + "strings" + + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/descriptorpb" + + "github.com/bufbuild/protocompile/ast" + "github.com/bufbuild/protocompile/internal" +) + +// OptionIndex is a mapping of AST nodes that define options to corresponding +// paths into the containing file descriptor. The path is a sequence of field +// tags and indexes that define a traversal path from the root (the file +// descriptor) to the resolved option field. The info also includes similar +// information about child elements, for options whose values are composite +// (like a list or message literal). +type OptionIndex map[*ast.OptionNode]*OptionSourceInfo + +// OptionSourceInfo describes the source info path for an option value and +// contains information about the value's descendants in the AST. +type OptionSourceInfo struct { + // The source info path to this element. If this element represents a + // declaration with an array-literal value, the last element of the + // path is the index of the first item in the array. + // + // This path is relative to the options message. So the first element + // is a field number of the options message. + // + // If the first element is negative, it indicates the number of path + // components to remove from the path to the relevant options. This is + // used for field pseudo-options, so that the path indicates a field on + // the descriptor, which is a parent of the options message (since that + // is how the pseudo-options are actually stored). + Path []int32 + // Children can be an *ArrayLiteralSourceInfo, a *MessageLiteralSourceInfo, + // or nil, depending on whether the option's value is an + // [*ast.ArrayLiteralNode], an [*ast.MessageLiteralNode], or neither. + // For [*ast.ArrayLiteralNode] values, this is only populated if the + // value is a non-empty array of messages. (Empty arrays and arrays + // of scalar values do not need any additional info.) + Children OptionChildrenSourceInfo +} + +// OptionChildrenSourceInfo represents source info paths for child elements of +// an option value. +type OptionChildrenSourceInfo interface { + isChildSourceInfo() +} + +// ArrayLiteralSourceInfo represents source info paths for the child +// elements of an [*ast.ArrayLiteralNode]. This value is only useful for +// non-empty array literals that contain messages. +type ArrayLiteralSourceInfo struct { + Elements []OptionSourceInfo +} + +func (*ArrayLiteralSourceInfo) isChildSourceInfo() {} + +// MessageLiteralSourceInfo represents source info paths for the child +// elements of an [*ast.MessageLiteralNode]. +type MessageLiteralSourceInfo struct { + Fields map[*ast.MessageFieldNode]*OptionSourceInfo +} + +func (*MessageLiteralSourceInfo) isChildSourceInfo() {} + +// GenerateSourceInfo generates source code info for the given AST. If the given +// opts is present, it can generate source code info for interpreted options. +// Otherwise, any options in the AST will get source code info as uninterpreted +// options. 
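Editor's note: a hedged sketch of driving GenerateSourceInfo (documented above, defined below) by hand: parse a small, invented file to an AST using this module's parser.Parse (assumed to take a file name, reader, and handler), then compute locations with the extra-comments option described below. Passing a nil OptionIndex means any options would be recorded as uninterpreted.

package main

import (
	"fmt"
	"strings"

	"github.com/bufbuild/protocompile/parser"
	"github.com/bufbuild/protocompile/reporter"
	"github.com/bufbuild/protocompile/sourceinfo"
)

func main() {
	src := `syntax = "proto3";
// A greeting.
message Hello { string name = 1; }
`
	handler := reporter.NewHandler(nil)
	root, err := parser.Parse("hello.proto", strings.NewReader(src), handler)
	if err != nil {
		panic(err)
	}
	// nil OptionIndex: options (none here) would be described as uninterpreted.
	info := sourceinfo.GenerateSourceInfo(root, nil, sourceinfo.WithExtraComments())
	fmt.Println("locations:", len(info.GetLocation()))
}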
+func GenerateSourceInfo(file *ast.FileNode, opts OptionIndex, genOpts ...GenerateOption) *descriptorpb.SourceCodeInfo { + if file == nil { + return nil + } + sci := sourceCodeInfo{file: file, commentsUsed: map[ast.SourcePos]struct{}{}} + for _, sourceInfoOpt := range genOpts { + sourceInfoOpt.apply(&sci) + } + generateSourceInfoForFile(opts, &sci, file) + return &descriptorpb.SourceCodeInfo{Location: sci.locs} +} + +// GenerateOption represents an option for how source code info is generated. +type GenerateOption interface { + apply(*sourceCodeInfo) +} + +// WithExtraComments will result in source code info that contains extra comments. +// By default, comments are only generated for full declarations. Inline comments +// around elements of a declaration are not included in source code info. This option +// changes that behavior so that as many comments as possible are described in the +// source code info. +func WithExtraComments() GenerateOption { + return extraCommentsOption{} +} + +// WithExtraOptionLocations will result in source code info that contains extra +// locations to describe elements inside of a message literal. By default, option +// values are treated as opaque, so the only locations included are for the entire +// option value. But with this option, paths to the various fields set inside a +// message literal will also have locations. This makes it possible for usages of +// the source code info to report precise locations for specific fields inside the +// value. +func WithExtraOptionLocations() GenerateOption { + return extraOptionLocationsOption{} +} + +type extraCommentsOption struct{} + +func (e extraCommentsOption) apply(info *sourceCodeInfo) { + info.extraComments = true +} + +type extraOptionLocationsOption struct{} + +func (e extraOptionLocationsOption) apply(info *sourceCodeInfo) { + info.extraOptionLocs = true +} + +func generateSourceInfoForFile(opts OptionIndex, sci *sourceCodeInfo, file *ast.FileNode) { + path := make([]int32, 0, 16) + + sci.newLocWithoutComments(file, nil) + + if file.Syntax != nil { + sci.newLocWithComments(file.Syntax, append(path, internal.FileSyntaxTag)) + } + if file.Edition != nil { + sci.newLocWithComments(file.Edition, append(path, internal.FileEditionTag)) + } + + var depIndex, pubDepIndex, weakDepIndex, optIndex, msgIndex, enumIndex, extendIndex, svcIndex int32 + + for _, child := range file.Decls { + switch child := child.(type) { + case *ast.ImportNode: + sci.newLocWithComments(child, append(path, internal.FileDependencyTag, depIndex)) + depIndex++ + if child.Public != nil { + sci.newLoc(child.Public, append(path, internal.FilePublicDependencyTag, pubDepIndex)) + pubDepIndex++ + } else if child.Weak != nil { + sci.newLoc(child.Weak, append(path, internal.FileWeakDependencyTag, weakDepIndex)) + weakDepIndex++ + } + case *ast.PackageNode: + sci.newLocWithComments(child, append(path, internal.FilePackageTag)) + case *ast.OptionNode: + generateSourceCodeInfoForOption(opts, sci, child, false, &optIndex, append(path, internal.FileOptionsTag)) + case *ast.MessageNode: + generateSourceCodeInfoForMessage(opts, sci, child, nil, append(path, internal.FileMessagesTag, msgIndex)) + msgIndex++ + case *ast.EnumNode: + generateSourceCodeInfoForEnum(opts, sci, child, append(path, internal.FileEnumsTag, enumIndex)) + enumIndex++ + case *ast.ExtendNode: + extsPath := append(path, internal.FileExtensionsTag) //nolint:gocritic // intentionally creating new slice var + // we clone the path here so that append can't mutate extsPath, since they may 
share storage + msgsPath := append(internal.ClonePath(path), internal.FileMessagesTag) + generateSourceCodeInfoForExtensions(opts, sci, child, &extendIndex, &msgIndex, extsPath, msgsPath) + case *ast.ServiceNode: + generateSourceCodeInfoForService(opts, sci, child, append(path, internal.FileServicesTag, svcIndex)) + svcIndex++ + } + } +} + +func generateSourceCodeInfoForOption(opts OptionIndex, sci *sourceCodeInfo, n *ast.OptionNode, compact bool, uninterpIndex *int32, path []int32) { + if !compact { + sci.newLocWithoutComments(n, path) + } + optInfo := opts[n] + if optInfo != nil { + fullPath := combinePathsForOption(path, optInfo.Path) + if compact { + sci.newLoc(n, fullPath) + } else { + sci.newLocWithComments(n, fullPath) + } + if sci.extraOptionLocs { + generateSourceInfoForOptionChildren(sci, n.Val, path, fullPath, optInfo.Children) + } + return + } + + // it's an uninterpreted option + optPath := path + optPath = append(optPath, internal.UninterpretedOptionsTag, *uninterpIndex) + *uninterpIndex++ + sci.newLoc(n, optPath) + var valTag int32 + switch n.Val.(type) { + case ast.IdentValueNode: + valTag = internal.UninterpretedIdentTag + case *ast.NegativeIntLiteralNode: + valTag = internal.UninterpretedNegIntTag + case ast.IntValueNode: + valTag = internal.UninterpretedPosIntTag + case ast.FloatValueNode: + valTag = internal.UninterpretedDoubleTag + case ast.StringValueNode: + valTag = internal.UninterpretedStringTag + case *ast.MessageLiteralNode: + valTag = internal.UninterpretedAggregateTag + } + if valTag != 0 { + sci.newLoc(n.Val, append(optPath, valTag)) + } + for j, nn := range n.Name.Parts { + optNmPath := optPath + optNmPath = append(optNmPath, internal.UninterpretedNameTag, int32(j)) + sci.newLoc(nn, optNmPath) + sci.newLoc(nn.Name, append(optNmPath, internal.UninterpretedNameNameTag)) + } +} + +func combinePathsForOption(prefix, optionPath []int32) []int32 { + fullPath := make([]int32, len(prefix), len(prefix)+len(optionPath)) + copy(fullPath, prefix) + if optionPath[0] == -1 { + // used by "default" and "json_name" field pseudo-options + // to attribute path to parent element (since those are + // stored directly on the descriptor, not its options) + optionPath = optionPath[1:] + fullPath = fullPath[:len(prefix)-1] + } + return append(fullPath, optionPath...) +} + +func generateSourceInfoForOptionChildren(sci *sourceCodeInfo, n ast.ValueNode, pathPrefix, path []int32, childInfo OptionChildrenSourceInfo) { + switch childInfo := childInfo.(type) { + case *ArrayLiteralSourceInfo: + if arrayLiteral, ok := n.(*ast.ArrayLiteralNode); ok { + for i, val := range arrayLiteral.Elements { + elementInfo := childInfo.Elements[i] + fullPath := combinePathsForOption(pathPrefix, elementInfo.Path) + sci.newLoc(val, fullPath) + generateSourceInfoForOptionChildren(sci, val, pathPrefix, fullPath, elementInfo.Children) + } + } + case *MessageLiteralSourceInfo: + if msgLiteral, ok := n.(*ast.MessageLiteralNode); ok { + for _, fieldNode := range msgLiteral.Elements { + fieldInfo, ok := childInfo.Fields[fieldNode] + if !ok { + continue + } + fullPath := combinePathsForOption(pathPrefix, fieldInfo.Path) + locationNode := ast.Node(fieldNode) + if fieldNode.Name.IsAnyTypeReference() && fullPath[len(fullPath)-1] == internal.AnyValueTag { + // This is a special expanded Any. So also insert a location + // for the type URL field. 
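+ // (In google.protobuf.Any, type_url is field 1 and value is field 2,
+ // which is what internal.AnyTypeURLTag and internal.AnyValueTag denote.)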
+ typeURLPath := make([]int32, len(fullPath)) + copy(typeURLPath, fullPath) + typeURLPath[len(typeURLPath)-1] = internal.AnyTypeURLTag + sci.newLoc(fieldNode.Name, fullPath) + // And create the next location so it's just the value, + // not the full field definition. + locationNode = fieldNode.Val + } + _, isArrayLiteral := fieldNode.Val.(*ast.ArrayLiteralNode) + if !isArrayLiteral { + // We don't include this with an array literal since the path + // is to the first element of the array. If we added it here, + // it would be redundant with the child info we add next, and + // it wouldn't be entirely correct since it only indicates the + // index of the first element in the array (and not the others). + sci.newLoc(locationNode, fullPath) + } + generateSourceInfoForOptionChildren(sci, fieldNode.Val, pathPrefix, fullPath, fieldInfo.Children) + } + } + case nil: + if arrayLiteral, ok := n.(*ast.ArrayLiteralNode); ok { + // an array literal without child source info is an array of scalars + for i, val := range arrayLiteral.Elements { + // last element of path is starting index for array literal + elementPath := append(([]int32)(nil), path...) + elementPath[len(elementPath)-1] += int32(i) + sci.newLoc(val, elementPath) + } + } + } +} + +func generateSourceCodeInfoForMessage(opts OptionIndex, sci *sourceCodeInfo, n ast.MessageDeclNode, fieldPath []int32, path []int32) { + var openBrace ast.Node + + var decls []ast.MessageElement + switch n := n.(type) { + case *ast.MessageNode: + openBrace = n.OpenBrace + decls = n.Decls + case *ast.SyntheticGroupMessageNode: + openBrace = n.OpenBrace + decls = n.Decls + case *ast.SyntheticMapEntryNode: + sci.newLoc(n, path) + // map entry so nothing else to do + return + } + sci.newBlockLocWithComments(n, openBrace, path) + + sci.newLoc(n.MessageName(), append(path, internal.MessageNameTag)) + // matching protoc, which emits the corresponding field type name (for group fields) + // right after the source location for the group message name + if fieldPath != nil { + sci.newLoc(n.MessageName(), append(fieldPath, internal.FieldTypeNameTag)) + } + + var optIndex, fieldIndex, oneofIndex, extendIndex, nestedMsgIndex int32 + var nestedEnumIndex, extRangeIndex, reservedRangeIndex, reservedNameIndex int32 + for _, child := range decls { + switch child := child.(type) { + case *ast.OptionNode: + generateSourceCodeInfoForOption(opts, sci, child, false, &optIndex, append(path, internal.MessageOptionsTag)) + case *ast.FieldNode: + generateSourceCodeInfoForField(opts, sci, child, append(path, internal.MessageFieldsTag, fieldIndex)) + fieldIndex++ + case *ast.GroupNode: + fldPath := append(path, internal.MessageFieldsTag, fieldIndex) //nolint:gocritic // intentionally creating new slice var + generateSourceCodeInfoForField(opts, sci, child, fldPath) + fieldIndex++ + // we clone the path here so that append can't mutate fldPath, since they may share storage + msgPath := append(internal.ClonePath(path), internal.MessageNestedMessagesTag, nestedMsgIndex) + generateSourceCodeInfoForMessage(opts, sci, child.AsMessage(), fldPath, msgPath) + nestedMsgIndex++ + case *ast.MapFieldNode: + generateSourceCodeInfoForField(opts, sci, child, append(path, internal.MessageFieldsTag, fieldIndex)) + fieldIndex++ + nestedMsgIndex++ + case *ast.OneofNode: + fldsPath := append(path, internal.MessageFieldsTag) //nolint:gocritic // intentionally creating new slice var + // we clone the path here and below so that append ops can't mutate + // fldPath or msgsPath, since they may otherwise share 
storage + msgsPath := append(internal.ClonePath(path), internal.MessageNestedMessagesTag) + ooPath := append(internal.ClonePath(path), internal.MessageOneofsTag, oneofIndex) + generateSourceCodeInfoForOneof(opts, sci, child, &fieldIndex, &nestedMsgIndex, fldsPath, msgsPath, ooPath) + oneofIndex++ + case *ast.MessageNode: + generateSourceCodeInfoForMessage(opts, sci, child, nil, append(path, internal.MessageNestedMessagesTag, nestedMsgIndex)) + nestedMsgIndex++ + case *ast.EnumNode: + generateSourceCodeInfoForEnum(opts, sci, child, append(path, internal.MessageEnumsTag, nestedEnumIndex)) + nestedEnumIndex++ + case *ast.ExtendNode: + extsPath := append(path, internal.MessageExtensionsTag) //nolint:gocritic // intentionally creating new slice var + // we clone the path here so that append can't mutate extsPath, since they may share storage + msgsPath := append(internal.ClonePath(path), internal.MessageNestedMessagesTag) + generateSourceCodeInfoForExtensions(opts, sci, child, &extendIndex, &nestedMsgIndex, extsPath, msgsPath) + case *ast.ExtensionRangeNode: + generateSourceCodeInfoForExtensionRanges(opts, sci, child, &extRangeIndex, append(path, internal.MessageExtensionRangesTag)) + case *ast.ReservedNode: + if len(child.Names) > 0 { + resPath := path + resPath = append(resPath, internal.MessageReservedNamesTag) + sci.newLocWithComments(child, resPath) + for _, rn := range child.Names { + sci.newLoc(rn, append(resPath, reservedNameIndex)) + reservedNameIndex++ + } + } + if len(child.Ranges) > 0 { + resPath := path + resPath = append(resPath, internal.MessageReservedRangesTag) + sci.newLocWithComments(child, resPath) + for _, rr := range child.Ranges { + generateSourceCodeInfoForReservedRange(sci, rr, append(resPath, reservedRangeIndex)) + reservedRangeIndex++ + } + } + } + } +} + +func generateSourceCodeInfoForEnum(opts OptionIndex, sci *sourceCodeInfo, n *ast.EnumNode, path []int32) { + sci.newBlockLocWithComments(n, n.OpenBrace, path) + sci.newLoc(n.Name, append(path, internal.EnumNameTag)) + + var optIndex, valIndex, reservedNameIndex, reservedRangeIndex int32 + for _, child := range n.Decls { + switch child := child.(type) { + case *ast.OptionNode: + generateSourceCodeInfoForOption(opts, sci, child, false, &optIndex, append(path, internal.EnumOptionsTag)) + case *ast.EnumValueNode: + generateSourceCodeInfoForEnumValue(opts, sci, child, append(path, internal.EnumValuesTag, valIndex)) + valIndex++ + case *ast.ReservedNode: + if len(child.Names) > 0 { + resPath := path + resPath = append(resPath, internal.EnumReservedNamesTag) + sci.newLocWithComments(child, resPath) + for _, rn := range child.Names { + sci.newLoc(rn, append(resPath, reservedNameIndex)) + reservedNameIndex++ + } + } + if len(child.Ranges) > 0 { + resPath := path + resPath = append(resPath, internal.EnumReservedRangesTag) + sci.newLocWithComments(child, resPath) + for _, rr := range child.Ranges { + generateSourceCodeInfoForReservedRange(sci, rr, append(resPath, reservedRangeIndex)) + reservedRangeIndex++ + } + } + } + } +} + +func generateSourceCodeInfoForEnumValue(opts OptionIndex, sci *sourceCodeInfo, n *ast.EnumValueNode, path []int32) { + sci.newLocWithComments(n, path) + sci.newLoc(n.Name, append(path, internal.EnumValNameTag)) + sci.newLoc(n.Number, append(path, internal.EnumValNumberTag)) + + // enum value options + if n.Options != nil { + optsPath := path + optsPath = append(optsPath, internal.EnumValOptionsTag) + sci.newLoc(n.Options, optsPath) + var optIndex int32 + for _, opt := range n.Options.GetElements() { + 
generateSourceCodeInfoForOption(opts, sci, opt, true, &optIndex, optsPath) + } + } +} + +func generateSourceCodeInfoForReservedRange(sci *sourceCodeInfo, n *ast.RangeNode, path []int32) { + sci.newLoc(n, path) + sci.newLoc(n.StartVal, append(path, internal.ReservedRangeStartTag)) + switch { + case n.EndVal != nil: + sci.newLoc(n.EndVal, append(path, internal.ReservedRangeEndTag)) + case n.Max != nil: + sci.newLoc(n.Max, append(path, internal.ReservedRangeEndTag)) + default: + sci.newLoc(n.StartVal, append(path, internal.ReservedRangeEndTag)) + } +} + +func generateSourceCodeInfoForExtensions(opts OptionIndex, sci *sourceCodeInfo, n *ast.ExtendNode, extendIndex, msgIndex *int32, extendPath, msgPath []int32) { + sci.newBlockLocWithComments(n, n.OpenBrace, extendPath) + for _, decl := range n.Decls { + switch decl := decl.(type) { + case *ast.FieldNode: + generateSourceCodeInfoForField(opts, sci, decl, append(extendPath, *extendIndex)) + *extendIndex++ + case *ast.GroupNode: + fldPath := extendPath + fldPath = append(fldPath, *extendIndex) + generateSourceCodeInfoForField(opts, sci, decl, fldPath) + *extendIndex++ + generateSourceCodeInfoForMessage(opts, sci, decl.AsMessage(), fldPath, append(msgPath, *msgIndex)) + *msgIndex++ + } + } +} + +func generateSourceCodeInfoForOneof(opts OptionIndex, sci *sourceCodeInfo, n *ast.OneofNode, fieldIndex, nestedMsgIndex *int32, fieldPath, nestedMsgPath, oneofPath []int32) { + sci.newBlockLocWithComments(n, n.OpenBrace, oneofPath) + sci.newLoc(n.Name, append(oneofPath, internal.OneofNameTag)) + + var optIndex int32 + for _, child := range n.Decls { + switch child := child.(type) { + case *ast.OptionNode: + generateSourceCodeInfoForOption(opts, sci, child, false, &optIndex, append(oneofPath, internal.OneofOptionsTag)) + case *ast.FieldNode: + generateSourceCodeInfoForField(opts, sci, child, append(fieldPath, *fieldIndex)) + *fieldIndex++ + case *ast.GroupNode: + fldPath := fieldPath + fldPath = append(fldPath, *fieldIndex) + generateSourceCodeInfoForField(opts, sci, child, fldPath) + *fieldIndex++ + generateSourceCodeInfoForMessage(opts, sci, child.AsMessage(), fldPath, append(nestedMsgPath, *nestedMsgIndex)) + *nestedMsgIndex++ + } + } +} + +func generateSourceCodeInfoForField(opts OptionIndex, sci *sourceCodeInfo, n ast.FieldDeclNode, path []int32) { + var fieldType string + if f, ok := n.(*ast.FieldNode); ok { + fieldType = string(f.FldType.AsIdentifier()) + } + + if n.GetGroupKeyword() != nil { + // comments will appear on group message + sci.newLocWithoutComments(n, path) + if n.FieldExtendee() != nil { + sci.newLoc(n.FieldExtendee(), append(path, internal.FieldExtendeeTag)) + } + if n.FieldLabel() != nil { + // no comments here either (label is first token for group, so we want + // to leave the comments to be associated with the group message instead) + sci.newLocWithoutComments(n.FieldLabel(), append(path, internal.FieldLabelTag)) + } + sci.newLoc(n.FieldType(), append(path, internal.FieldTypeTag)) + // let the name comments be attributed to the group name + sci.newLocWithoutComments(n.FieldName(), append(path, internal.FieldNameTag)) + } else { + sci.newLocWithComments(n, path) + if n.FieldExtendee() != nil { + sci.newLoc(n.FieldExtendee(), append(path, internal.FieldExtendeeTag)) + } + if n.FieldLabel() != nil { + sci.newLoc(n.FieldLabel(), append(path, internal.FieldLabelTag)) + } + var tag int32 + if _, isScalar := internal.FieldTypes[fieldType]; isScalar { + tag = internal.FieldTypeTag + } else { + // this is a message or an enum, so attribute 
type location + // to the type name field + tag = internal.FieldTypeNameTag + } + sci.newLoc(n.FieldType(), append(path, tag)) + sci.newLoc(n.FieldName(), append(path, internal.FieldNameTag)) + } + sci.newLoc(n.FieldTag(), append(path, internal.FieldNumberTag)) + + if n.GetOptions() != nil { + optsPath := path + optsPath = append(optsPath, internal.FieldOptionsTag) + sci.newLoc(n.GetOptions(), optsPath) + var optIndex int32 + for _, opt := range n.GetOptions().GetElements() { + generateSourceCodeInfoForOption(opts, sci, opt, true, &optIndex, optsPath) + } + } +} + +func generateSourceCodeInfoForExtensionRanges(opts OptionIndex, sci *sourceCodeInfo, n *ast.ExtensionRangeNode, extRangeIndex *int32, path []int32) { + sci.newLocWithComments(n, path) + startExtRangeIndex := *extRangeIndex + for _, child := range n.Ranges { + path := append(path, *extRangeIndex) + *extRangeIndex++ + sci.newLoc(child, path) + sci.newLoc(child.StartVal, append(path, internal.ExtensionRangeStartTag)) + switch { + case child.EndVal != nil: + sci.newLoc(child.EndVal, append(path, internal.ExtensionRangeEndTag)) + case child.Max != nil: + sci.newLoc(child.Max, append(path, internal.ExtensionRangeEndTag)) + default: + sci.newLoc(child.StartVal, append(path, internal.ExtensionRangeEndTag)) + } + } + // options for all ranges go after the start+end values + for range n.Ranges { + path := append(path, startExtRangeIndex) + startExtRangeIndex++ + if n.Options != nil { + optsPath := path + optsPath = append(optsPath, internal.ExtensionRangeOptionsTag) + sci.newLoc(n.Options, optsPath) + var optIndex int32 + for _, opt := range n.Options.GetElements() { + generateSourceCodeInfoForOption(opts, sci, opt, true, &optIndex, optsPath) + } + } + } +} + +func generateSourceCodeInfoForService(opts OptionIndex, sci *sourceCodeInfo, n *ast.ServiceNode, path []int32) { + sci.newBlockLocWithComments(n, n.OpenBrace, path) + sci.newLoc(n.Name, append(path, internal.ServiceNameTag)) + var optIndex, rpcIndex int32 + for _, child := range n.Decls { + switch child := child.(type) { + case *ast.OptionNode: + generateSourceCodeInfoForOption(opts, sci, child, false, &optIndex, append(path, internal.ServiceOptionsTag)) + case *ast.RPCNode: + generateSourceCodeInfoForMethod(opts, sci, child, append(path, internal.ServiceMethodsTag, rpcIndex)) + rpcIndex++ + } + } +} + +func generateSourceCodeInfoForMethod(opts OptionIndex, sci *sourceCodeInfo, n *ast.RPCNode, path []int32) { + if n.OpenBrace != nil { + sci.newBlockLocWithComments(n, n.OpenBrace, path) + } else { + sci.newLocWithComments(n, path) + } + sci.newLoc(n.Name, append(path, internal.MethodNameTag)) + if n.Input.Stream != nil { + sci.newLoc(n.Input.Stream, append(path, internal.MethodInputStreamTag)) + } + sci.newLoc(n.Input.MessageType, append(path, internal.MethodInputTag)) + if n.Output.Stream != nil { + sci.newLoc(n.Output.Stream, append(path, internal.MethodOutputStreamTag)) + } + sci.newLoc(n.Output.MessageType, append(path, internal.MethodOutputTag)) + + optsPath := path + optsPath = append(optsPath, internal.MethodOptionsTag) + var optIndex int32 + for _, decl := range n.Decls { + if opt, ok := decl.(*ast.OptionNode); ok { + generateSourceCodeInfoForOption(opts, sci, opt, false, &optIndex, optsPath) + } + } +} + +type sourceCodeInfo struct { + file *ast.FileNode + extraComments bool + extraOptionLocs bool + locs []*descriptorpb.SourceCodeInfo_Location + commentsUsed map[ast.SourcePos]struct{} +} + +func (sci *sourceCodeInfo) newLocWithoutComments(n ast.Node, path []int32) { + var 
start, end ast.SourcePos + if n == sci.file { + // For files, we don't want to consider trailing EOF token + // as part of the span. We want the span to only include + // actual lexical elements in the file (which also excludes + // whitespace and comments). + children := sci.file.Children() + if len(children) > 0 && isEOF(children[len(children)-1]) { + children = children[:len(children)-1] + } + if len(children) == 0 { + start = ast.SourcePos{Filename: sci.file.Name(), Line: 1, Col: 1} + end = start + } else { + start = sci.file.TokenInfo(n.Start()).Start() + end = sci.file.TokenInfo(children[len(children)-1].End()).End() + } + } else { + info := sci.file.NodeInfo(n) + start, end = info.Start(), info.End() + } + sci.locs = append(sci.locs, &descriptorpb.SourceCodeInfo_Location{ + Path: internal.ClonePath(path), + Span: makeSpan(start, end), + }) +} + +func (sci *sourceCodeInfo) newLoc(n ast.Node, path []int32) { + info := sci.file.NodeInfo(n) + if !sci.extraComments { + start, end := info.Start(), info.End() + sci.locs = append(sci.locs, &descriptorpb.SourceCodeInfo_Location{ + Path: internal.ClonePath(path), + Span: makeSpan(start, end), + }) + } else { + detachedComments, leadingComments := sci.getLeadingComments(n) + trailingComments := sci.getTrailingComments(n) + sci.newLocWithGivenComments(info, detachedComments, leadingComments, trailingComments, path) + } +} + +func isEOF(n ast.Node) bool { + r, ok := n.(*ast.RuneNode) + return ok && r.Rune == 0 +} + +func (sci *sourceCodeInfo) newBlockLocWithComments(n, openBrace ast.Node, path []int32) { + // Block definitions use trailing comments after the open brace "{" as the + // element's trailing comments. For example: + // + // message Foo { // this is a trailing comment for a message + // + // } // not this + // + nodeInfo := sci.file.NodeInfo(n) + detachedComments, leadingComments := sci.getLeadingComments(n) + trailingComments := sci.getTrailingComments(openBrace) + sci.newLocWithGivenComments(nodeInfo, detachedComments, leadingComments, trailingComments, path) +} + +func (sci *sourceCodeInfo) newLocWithComments(n ast.Node, path []int32) { + nodeInfo := sci.file.NodeInfo(n) + detachedComments, leadingComments := sci.getLeadingComments(n) + trailingComments := sci.getTrailingComments(n) + sci.newLocWithGivenComments(nodeInfo, detachedComments, leadingComments, trailingComments, path) +} + +func (sci *sourceCodeInfo) newLocWithGivenComments(nodeInfo ast.NodeInfo, detachedComments []comments, leadingComments comments, trailingComments comments, path []int32) { + if (len(detachedComments) > 0 && sci.commentUsed(detachedComments[0])) || + (len(detachedComments) == 0 && sci.commentUsed(leadingComments)) { + detachedComments = nil + leadingComments = ast.EmptyComments + } + if sci.commentUsed(trailingComments) { + trailingComments = ast.EmptyComments + } + + var trail *string + if trailingComments.Len() > 0 { + trail = proto.String(sci.combineComments(trailingComments)) + } + + var lead *string + if leadingComments.Len() > 0 { + lead = proto.String(sci.combineComments(leadingComments)) + } + + detached := make([]string, len(detachedComments)) + for i, cmts := range detachedComments { + detached[i] = sci.combineComments(cmts) + } + + sci.locs = append(sci.locs, &descriptorpb.SourceCodeInfo_Location{ + LeadingDetachedComments: detached, + LeadingComments: lead, + TrailingComments: trail, + Path: internal.ClonePath(path), + Span: makeSpan(nodeInfo.Start(), nodeInfo.End()), + }) +} + +type comments interface { + Len() int + Index(int) 
ast.Comment +} + +type subComments struct { + offs, n int + c ast.Comments +} + +func (s subComments) Len() int { + return s.n +} + +func (s subComments) Index(i int) ast.Comment { + if i < 0 || i >= s.n { + panic(fmt.Errorf("runtime error: index out of range [%d] with length %d", i, s.n)) + } + return s.c.Index(i + s.offs) +} + +func (sci *sourceCodeInfo) getLeadingComments(n ast.Node) ([]comments, comments) { + s := n.Start() + info := sci.file.TokenInfo(s) + var prevInfo ast.NodeInfo + if prev, ok := sci.file.Tokens().Previous(s); ok { + prevInfo = sci.file.TokenInfo(prev) + } + _, d, l := sci.attributeComments(prevInfo, info) + return d, l +} + +func (sci *sourceCodeInfo) getTrailingComments(n ast.Node) comments { + e := n.End() + next, ok := sci.file.Tokens().Next(e) + if !ok { + return ast.EmptyComments + } + info := sci.file.TokenInfo(e) + nextInfo := sci.file.TokenInfo(next) + t, _, _ := sci.attributeComments(info, nextInfo) + return t +} + +func (sci *sourceCodeInfo) attributeComments(prevInfo, info ast.NodeInfo) (t comments, d []comments, l comments) { + detached := groupComments(info.LeadingComments()) + var trail comments + if prevInfo.IsValid() { + trail = comments(prevInfo.TrailingComments()) + if trail.Len() == 0 { + trail, detached = sci.maybeDonate(prevInfo, info, detached) + } + } else { + trail = ast.EmptyComments + } + detached, lead := sci.maybeAttach(prevInfo, info, trail.Len() > 0, detached) + return trail, detached, lead +} + +func (sci *sourceCodeInfo) maybeDonate(prevInfo ast.NodeInfo, info ast.NodeInfo, lead []comments) (t comments, l []comments) { + if len(lead) == 0 { + // nothing to donate + return ast.EmptyComments, nil + } + firstCommentPos := lead[0].Index(0) + if firstCommentPos.Start().Line > prevInfo.End().Line+1 { + // first comment is detached from previous token, so can't be a trailing comment + return ast.EmptyComments, lead + } + if len(lead) > 1 { + // multiple groups? then donate first comment to previous token + return lead[0], lead[1:] + } + // there is only one element in lead + comment := lead[0] + lastCommentPos := comment.Index(comment.Len() - 1) + if lastCommentPos.End().Line < info.Start().Line-1 { + // there is a blank line between the comments and subsequent token, so + // we can donate the comment to previous token + return comment, nil + } + if txt := info.RawText(); txt == "" || (len(txt) == 1 && strings.ContainsAny(txt, "}]),;")) { + // token is a symbol for the end of a scope or EOF, which doesn't need a leading comment + if !sci.extraComments && txt != "" && + firstCommentPos.Start().Line == prevInfo.End().Line && + lastCommentPos.End().Line == info.Start().Line { + // protoc does not donate if prev and next token are on the same line since it's + // ambiguous which one should get the comment; so we mirror that here + return ast.EmptyComments, lead + } + // But with extra comments, we always donate in this situation in order to capture + // more comments. Because otherwise, these comments are lost since these symbols + // don't map to a location in source code info. + return comment, nil + } + // cannot donate + return ast.EmptyComments, lead +} + +func (sci *sourceCodeInfo) maybeAttach(prevInfo ast.NodeInfo, info ast.NodeInfo, hasTrail bool, lead []comments) (d []comments, l comments) { + if len(lead) == 0 { + return nil, ast.EmptyComments + } + + if len(lead) == 1 && !hasTrail && prevInfo.IsValid() { + // If the one comment appears attached to both previous and next tokens, + // don't attach to either. 
+ comment := lead[0] + attachedToPrevious := comment.Index(0).Start().Line == prevInfo.End().Line + attachedToNext := comment.Index(comment.Len()-1).End().Line == info.Start().Line + if attachedToPrevious && attachedToNext { + // Since attachment is ambiguous, leave it detached. + return lead, ast.EmptyComments + } + } + + lastComment := lead[len(lead)-1] + if lastComment.Index(lastComment.Len()-1).End().Line >= info.Start().Line-1 { + return lead[:len(lead)-1], lastComment + } + + return lead, ast.EmptyComments +} + +func makeSpan(start, end ast.SourcePos) []int32 { + if start.Line == end.Line { + return []int32{int32(start.Line) - 1, int32(start.Col) - 1, int32(end.Col) - 1} + } + return []int32{int32(start.Line) - 1, int32(start.Col) - 1, int32(end.Line) - 1, int32(end.Col) - 1} +} + +func (sci *sourceCodeInfo) commentUsed(c comments) bool { + if c.Len() == 0 { + return false + } + pos := c.Index(0).Start() + if _, ok := sci.commentsUsed[pos]; ok { + return true + } + + sci.commentsUsed[pos] = struct{}{} + return false +} + +func groupComments(cmts ast.Comments) []comments { + if cmts.Len() == 0 { + return nil + } + var groups []comments + singleLineStyle := cmts.Index(0).RawText()[:2] == "//" + line := cmts.Index(0).End().Line + start := 0 + for i := 1; i < cmts.Len(); i++ { + c := cmts.Index(i) + prevSingleLine := singleLineStyle + singleLineStyle = strings.HasPrefix(c.RawText(), "//") + if !singleLineStyle || prevSingleLine != singleLineStyle || c.Start().Line > line+1 { + // new group! + groups = append(groups, subComments{offs: start, n: i - start, c: cmts}) + start = i + } + line = c.End().Line + } + // don't forget last group + groups = append(groups, subComments{offs: start, n: cmts.Len() - start, c: cmts}) + return groups +} + +func (sci *sourceCodeInfo) combineComments(comments comments) string { + if comments.Len() == 0 { + return "" + } + var buf bytes.Buffer + for i, l := 0, comments.Len(); i < l; i++ { + c := comments.Index(i) + txt := c.RawText() + if txt[:2] == "//" { + buf.WriteString(txt[2:]) + // protoc includes trailing newline for line comments, + // but it's not present in the AST comment. So we need + // to add it if present. + if i, ok := sci.file.Items().Next(c.AsItem()); ok { + info := sci.file.ItemInfo(i) + if strings.HasPrefix(info.LeadingWhitespace(), "\n") { + buf.WriteRune('\n') + } + } + } else { + lines := strings.Split(txt[2:len(txt)-2], "\n") + first := true + for _, l := range lines { + if first { + first = false + buf.WriteString(l) + continue + } + buf.WriteByte('\n') + + // strip a prefix of whitespace followed by '*' + j := 0 + for j < len(l) { + if l[j] != ' ' && l[j] != '\t' { + break + } + j++ + } + switch { + case j == len(l): + l = "" + case l[j] == '*': + l = l[j+1:] + case j > 0: + l = l[j:] + } + + buf.WriteString(l) + } + } + } + return buf.String() +} diff --git a/vendor/github.com/bufbuild/protocompile/std_imports.go b/vendor/github.com/bufbuild/protocompile/std_imports.go new file mode 100644 index 00000000..a31232ac --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/std_imports.go @@ -0,0 +1,96 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package protocompile + +import ( + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + _ "google.golang.org/protobuf/types/gofeaturespb" // link in packages that include the standard protos included with protoc. + _ "google.golang.org/protobuf/types/known/anypb" + _ "google.golang.org/protobuf/types/known/apipb" + _ "google.golang.org/protobuf/types/known/durationpb" + _ "google.golang.org/protobuf/types/known/emptypb" + _ "google.golang.org/protobuf/types/known/fieldmaskpb" + _ "google.golang.org/protobuf/types/known/sourcecontextpb" + _ "google.golang.org/protobuf/types/known/structpb" + _ "google.golang.org/protobuf/types/known/timestamppb" + _ "google.golang.org/protobuf/types/known/typepb" + _ "google.golang.org/protobuf/types/known/wrapperspb" + _ "google.golang.org/protobuf/types/pluginpb" + + "github.com/bufbuild/protocompile/internal/featuresext" +) + +// All files that are included with protoc are also included with this package +// so that clients do not need to explicitly supply a copy of these protos (just +// like callers of protoc do not need to supply them). +var standardImports map[string]protoreflect.FileDescriptor + +func init() { + standardFilenames := []string{ + "google/protobuf/any.proto", + "google/protobuf/api.proto", + "google/protobuf/compiler/plugin.proto", + "google/protobuf/descriptor.proto", + "google/protobuf/duration.proto", + "google/protobuf/empty.proto", + "google/protobuf/field_mask.proto", + "google/protobuf/go_features.proto", + "google/protobuf/source_context.proto", + "google/protobuf/struct.proto", + "google/protobuf/timestamp.proto", + "google/protobuf/type.proto", + "google/protobuf/wrappers.proto", + } + + standardImports = map[string]protoreflect.FileDescriptor{} + for _, fn := range standardFilenames { + fd, err := protoregistry.GlobalFiles.FindFileByPath(fn) + if err != nil { + panic(err.Error()) + } + standardImports[fn] = fd + } + + otherFeatures := []struct { + Name string + GetDescriptor func() (protoreflect.FileDescriptor, error) + }{ + { + Name: "google/protobuf/cpp_features.proto", + GetDescriptor: featuresext.CppFeaturesDescriptor, + }, + { + Name: "google/protobuf/java_features.proto", + GetDescriptor: featuresext.JavaFeaturesDescriptor, + }, + } + for _, feature := range otherFeatures { + // First see if the program has generated Go code for this + // file linked in: + fd, err := protoregistry.GlobalFiles.FindFileByPath(feature.Name) + if err == nil { + standardImports[feature.Name] = fd + continue + } + fd, err = feature.GetDescriptor() + if err != nil { + // For these extensions to FeatureSet, we are lenient. If + // we can't load them, just ignore them. + continue + } + standardImports[feature.Name] = fd + } +} diff --git a/vendor/github.com/bufbuild/protocompile/supported_editions.go b/vendor/github.com/bufbuild/protocompile/supported_editions.go new file mode 100644 index 00000000..72bd51f1 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/supported_editions.go @@ -0,0 +1,30 @@ +// Copyright 2020-2024 Buf Technologies, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package protocompile + +import ( + "google.golang.org/protobuf/types/descriptorpb" + + "github.com/bufbuild/protocompile/internal/editions" +) + +// IsEditionSupported returns true if this module can compile sources for +// the given edition. This returns true for the special EDITION_PROTO2 and +// EDITION_PROTO3 as well as all actual editions supported. +func IsEditionSupported(edition descriptorpb.Edition) bool { + return edition == descriptorpb.Edition_EDITION_PROTO2 || + edition == descriptorpb.Edition_EDITION_PROTO3 || + (edition >= editions.MinSupportedEdition && edition <= editions.MaxSupportedEdition) +} diff --git a/vendor/github.com/bufbuild/protocompile/walk/walk.go b/vendor/github.com/bufbuild/protocompile/walk/walk.go new file mode 100644 index 00000000..244fa720 --- /dev/null +++ b/vendor/github.com/bufbuild/protocompile/walk/walk.go @@ -0,0 +1,446 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package walk provides helper functions for traversing all elements in a +// protobuf file descriptor. There are versions both for traversing "rich" +// descriptors (protoreflect.Descriptor) and for traversing the underlying +// "raw" descriptor protos. +// +// # Enter And Exit +// +// This package includes variants of the functions that accept two callback +// functions. These variants have names ending with "EnterAndExit". One function +// is called as each element is visited ("enter") and the other is called after +// the element and all of its descendants have been visited ("exit"). This +// can be useful when you need to track state that is scoped to the visitation +// of a single element. +// +// # Source Path +// +// When traversing raw descriptor protos, this package include variants whose +// callback accepts a protoreflect.SourcePath. These variants have names that +// include "WithPath". This path can be used to locate corresponding data in the +// file's source code info (if present). +package walk + +import ( + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/types/descriptorpb" + + "github.com/bufbuild/protocompile/internal" +) + +// Descriptors walks all descriptors in the given file using a depth-first +// traversal, calling the given function for each descriptor in the hierarchy. +// The walk ends when traversal is complete or when the function returns an +// error. 
If the function returns an error, that is returned as the result of the +// walk operation. +// +// Descriptors are visited using a pre-order traversal, where the function is +// called for a descriptor before it is called for any of its descendants. +func Descriptors(file protoreflect.FileDescriptor, fn func(protoreflect.Descriptor) error) error { + return DescriptorsEnterAndExit(file, fn, nil) +} + +// DescriptorsEnterAndExit walks all descriptors in the given file using a +// depth-first traversal, calling the given functions on entry and on exit +// for each descriptor in the hierarchy. The walk ends when traversal is +// complete or when a function returns an error. If a function returns an error, +// that is returned as the result of the walk operation. +// +// The enter function is called using a pre-order traversal, where the function +// is called for a descriptor before it is called for any of its descendants. +// The exit function is called using a post-order traversal, where the function +// is called for a descriptor only after it is called for any descendants. +func DescriptorsEnterAndExit(file protoreflect.FileDescriptor, enter, exit func(protoreflect.Descriptor) error) error { + if err := walkContainer(file, enter, exit); err != nil { + return err + } + services := file.Services() + for i, length := 0, services.Len(); i < length; i++ { + svc := services.Get(i) + if err := enter(svc); err != nil { + return err + } + methods := svc.Methods() + for i, length := 0, methods.Len(); i < length; i++ { + mtd := methods.Get(i) + if err := enter(mtd); err != nil { + return err + } + if exit != nil { + if err := exit(mtd); err != nil { + return err + } + } + } + if exit != nil { + if err := exit(svc); err != nil { + return err + } + } + } + return nil +} + +type container interface { + Messages() protoreflect.MessageDescriptors + Enums() protoreflect.EnumDescriptors + Extensions() protoreflect.ExtensionDescriptors +} + +func walkContainer(container container, enter, exit func(protoreflect.Descriptor) error) error { + messages := container.Messages() + for i, length := 0, messages.Len(); i < length; i++ { + msg := messages.Get(i) + if err := messageDescriptor(msg, enter, exit); err != nil { + return err + } + } + enums := container.Enums() + for i, length := 0, enums.Len(); i < length; i++ { + en := enums.Get(i) + if err := enumDescriptor(en, enter, exit); err != nil { + return err + } + } + exts := container.Extensions() + for i, length := 0, exts.Len(); i < length; i++ { + ext := exts.Get(i) + if err := enter(ext); err != nil { + return err + } + if exit != nil { + if err := exit(ext); err != nil { + return err + } + } + } + return nil +} + +func messageDescriptor(msg protoreflect.MessageDescriptor, enter, exit func(protoreflect.Descriptor) error) error { + if err := enter(msg); err != nil { + return err + } + fields := msg.Fields() + for i, length := 0, fields.Len(); i < length; i++ { + fld := fields.Get(i) + if err := enter(fld); err != nil { + return err + } + if exit != nil { + if err := exit(fld); err != nil { + return err + } + } + } + oneofs := msg.Oneofs() + for i, length := 0, oneofs.Len(); i < length; i++ { + oo := oneofs.Get(i) + if err := enter(oo); err != nil { + return err + } + if exit != nil { + if err := exit(oo); err != nil { + return err + } + } + } + if err := walkContainer(msg, enter, exit); err != nil { + return err + } + if exit != nil { + if err := exit(msg); err != nil { + return err + } + } + return nil +} + +func enumDescriptor(en 
protoreflect.EnumDescriptor, enter, exit func(protoreflect.Descriptor) error) error { + if err := enter(en); err != nil { + return err + } + vals := en.Values() + for i, length := 0, vals.Len(); i < length; i++ { + enVal := vals.Get(i) + if err := enter(enVal); err != nil { + return err + } + if exit != nil { + if err := exit(enVal); err != nil { + return err + } + } + } + if exit != nil { + if err := exit(en); err != nil { + return err + } + } + return nil +} + +// DescriptorProtosWithPath walks all descriptor protos in the given file using +// a depth-first traversal. This is the same as DescriptorProtos except that the +// callback function, fn, receives a protoreflect.SourcePath, that indicates the +// path for the element in the file's source code info. +func DescriptorProtosWithPath(file *descriptorpb.FileDescriptorProto, fn func(protoreflect.FullName, protoreflect.SourcePath, proto.Message) error) error { + return DescriptorProtosWithPathEnterAndExit(file, fn, nil) +} + +// DescriptorProtosWithPathEnterAndExit walks all descriptor protos in the given +// file using a depth-first traversal. This is the same as +// DescriptorProtosEnterAndExit except that the callback function, fn, receives +// a protoreflect.SourcePath, that indicates the path for the element in the +// file's source code info. +func DescriptorProtosWithPathEnterAndExit(file *descriptorpb.FileDescriptorProto, enter, exit func(protoreflect.FullName, protoreflect.SourcePath, proto.Message) error) error { + w := &protoWalker{usePath: true, enter: enter, exit: exit} + return w.walkDescriptorProtos(file) +} + +// DescriptorProtos walks all descriptor protos in the given file using a +// depth-first traversal, calling the given function for each descriptor proto +// in the hierarchy. The walk ends when traversal is complete or when the +// function returns an error. If the function returns an error, that is +// returned as the result of the walk operation. +// +// Descriptor protos are visited using a pre-order traversal, where the function +// is called for a descriptor before it is called for any of its descendants. +func DescriptorProtos(file *descriptorpb.FileDescriptorProto, fn func(protoreflect.FullName, proto.Message) error) error { + return DescriptorProtosEnterAndExit(file, fn, nil) +} + +// DescriptorProtosEnterAndExit walks all descriptor protos in the given file +// using a depth-first traversal, calling the given functions on entry and on +// exit for each descriptor in the hierarchy. The walk ends when traversal is +// complete or when a function returns an error. If a function returns an error, +// that is returned as the result of the walk operation. +// +// The enter function is called using a pre-order traversal, where the function +// is called for a descriptor proto before it is called for any of its +// descendants. The exit function is called using a post-order traversal, where +// the function is called for a descriptor proto only after it is called for any +// descendants. 
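+//
+// Illustrative sketch (not part of the upstream source): a hypothetical caller
+// holding a *descriptorpb.FileDescriptorProto in fdProto could use the
+// enter/exit pair to print an indented outline of the file's elements:
+//
+//	depth := 0
+//	err := walk.DescriptorProtosEnterAndExit(fdProto,
+//		func(name protoreflect.FullName, msg proto.Message) error {
+//			fmt.Printf("%*s%s\n", depth*2, "", name)
+//			depth++
+//			return nil
+//		},
+//		func(name protoreflect.FullName, msg proto.Message) error {
+//			depth--
+//			return nil
+//		})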
+func DescriptorProtosEnterAndExit(file *descriptorpb.FileDescriptorProto, enter, exit func(protoreflect.FullName, proto.Message) error) error { + enterWithPath := func(n protoreflect.FullName, _ protoreflect.SourcePath, m proto.Message) error { + return enter(n, m) + } + var exitWithPath func(protoreflect.FullName, protoreflect.SourcePath, proto.Message) error + if exit != nil { + exitWithPath = func(n protoreflect.FullName, _ protoreflect.SourcePath, m proto.Message) error { + return exit(n, m) + } + } + w := &protoWalker{ + enter: enterWithPath, + exit: exitWithPath, + } + return w.walkDescriptorProtos(file) +} + +type protoWalker struct { + usePath bool + enter, exit func(protoreflect.FullName, protoreflect.SourcePath, proto.Message) error +} + +func (w *protoWalker) walkDescriptorProtos(file *descriptorpb.FileDescriptorProto) error { + prefix := file.GetPackage() + if prefix != "" { + prefix += "." + } + var path protoreflect.SourcePath + for i, msg := range file.MessageType { + var p protoreflect.SourcePath + if w.usePath { + p = path + p = append(p, internal.FileMessagesTag, int32(i)) + } + if err := w.walkDescriptorProto(prefix, p, msg); err != nil { + return err + } + } + for i, en := range file.EnumType { + var p protoreflect.SourcePath + if w.usePath { + p = path + p = append(p, internal.FileEnumsTag, int32(i)) + } + if err := w.walkEnumDescriptorProto(prefix, p, en); err != nil { + return err + } + } + for i, ext := range file.Extension { + var p protoreflect.SourcePath + if w.usePath { + p = path + p = append(p, internal.FileExtensionsTag, int32(i)) + } + fqn := prefix + ext.GetName() + if err := w.enter(protoreflect.FullName(fqn), p, ext); err != nil { + return err + } + if w.exit != nil { + if err := w.exit(protoreflect.FullName(fqn), p, ext); err != nil { + return err + } + } + } + for i, svc := range file.Service { + var p protoreflect.SourcePath + if w.usePath { + p = path + p = append(p, internal.FileServicesTag, int32(i)) + } + fqn := prefix + svc.GetName() + if err := w.enter(protoreflect.FullName(fqn), p, svc); err != nil { + return err + } + for j, mtd := range svc.Method { + var mp protoreflect.SourcePath + if w.usePath { + mp = p + mp = append(mp, internal.ServiceMethodsTag, int32(j)) + } + mtdFqn := fqn + "." + mtd.GetName() + if err := w.enter(protoreflect.FullName(mtdFqn), mp, mtd); err != nil { + return err + } + if w.exit != nil { + if err := w.exit(protoreflect.FullName(mtdFqn), mp, mtd); err != nil { + return err + } + } + } + if w.exit != nil { + if err := w.exit(protoreflect.FullName(fqn), p, svc); err != nil { + return err + } + } + } + return nil +} + +func (w *protoWalker) walkDescriptorProto(prefix string, path protoreflect.SourcePath, msg *descriptorpb.DescriptorProto) error { + fqn := prefix + msg.GetName() + if err := w.enter(protoreflect.FullName(fqn), path, msg); err != nil { + return err + } + prefix = fqn + "." 
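+ // From here on, this message's children (fields, oneofs, nested messages,
+ // enums, and extensions) are named relative to its fully-qualified name.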
+ for i, fld := range msg.Field { + var p protoreflect.SourcePath + if w.usePath { + p = path + p = append(p, internal.MessageFieldsTag, int32(i)) + } + fqn := prefix + fld.GetName() + if err := w.enter(protoreflect.FullName(fqn), p, fld); err != nil { + return err + } + if w.exit != nil { + if err := w.exit(protoreflect.FullName(fqn), p, fld); err != nil { + return err + } + } + } + for i, oo := range msg.OneofDecl { + var p protoreflect.SourcePath + if w.usePath { + p = path + p = append(p, internal.MessageOneofsTag, int32(i)) + } + fqn := prefix + oo.GetName() + if err := w.enter(protoreflect.FullName(fqn), p, oo); err != nil { + return err + } + if w.exit != nil { + if err := w.exit(protoreflect.FullName(fqn), p, oo); err != nil { + return err + } + } + } + for i, nested := range msg.NestedType { + var p protoreflect.SourcePath + if w.usePath { + p = path + p = append(p, internal.MessageNestedMessagesTag, int32(i)) + } + if err := w.walkDescriptorProto(prefix, p, nested); err != nil { + return err + } + } + for i, en := range msg.EnumType { + var p protoreflect.SourcePath + if w.usePath { + p = path + p = append(p, internal.MessageEnumsTag, int32(i)) + } + if err := w.walkEnumDescriptorProto(prefix, p, en); err != nil { + return err + } + } + for i, ext := range msg.Extension { + var p protoreflect.SourcePath + if w.usePath { + p = path + p = append(p, internal.MessageExtensionsTag, int32(i)) + } + fqn := prefix + ext.GetName() + if err := w.enter(protoreflect.FullName(fqn), p, ext); err != nil { + return err + } + if w.exit != nil { + if err := w.exit(protoreflect.FullName(fqn), p, ext); err != nil { + return err + } + } + } + if w.exit != nil { + if err := w.exit(protoreflect.FullName(fqn), path, msg); err != nil { + return err + } + } + return nil +} + +func (w *protoWalker) walkEnumDescriptorProto(prefix string, path protoreflect.SourcePath, en *descriptorpb.EnumDescriptorProto) error { + fqn := prefix + en.GetName() + if err := w.enter(protoreflect.FullName(fqn), path, en); err != nil { + return err + } + for i, val := range en.Value { + var p protoreflect.SourcePath + if w.usePath { + p = path + p = append(p, internal.EnumValuesTag, int32(i)) + } + fqn := prefix + val.GetName() + if err := w.enter(protoreflect.FullName(fqn), p, val); err != nil { + return err + } + if w.exit != nil { + if err := w.exit(protoreflect.FullName(fqn), p, val); err != nil { + return err + } + } + } + if w.exit != nil { + if err := w.exit(protoreflect.FullName(fqn), path, en); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/golang/protobuf/jsonpb/decode.go b/vendor/github.com/golang/protobuf/jsonpb/decode.go new file mode 100644 index 00000000..c6f66f10 --- /dev/null +++ b/vendor/github.com/golang/protobuf/jsonpb/decode.go @@ -0,0 +1,531 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package jsonpb + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "math" + "reflect" + "strconv" + "strings" + "time" + + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/encoding/protojson" + protoV2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const wrapJSONUnmarshalV2 = false + +// UnmarshalNext unmarshals the next JSON object from d into m. 
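+//
+// Illustrative sketch (not part of the upstream source): given an io.Reader r
+// carrying a stream of concatenated JSON objects and a hypothetical generated
+// type mypb.MyMessage, the stream could be decoded one message at a time:
+//
+//	dec := json.NewDecoder(r)
+//	for dec.More() {
+//		msg := &mypb.MyMessage{}
+//		if err := jsonpb.UnmarshalNext(dec, msg); err != nil {
+//			return err
+//		}
+//		// use msg ...
+//	}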
+func UnmarshalNext(d *json.Decoder, m proto.Message) error { + return new(Unmarshaler).UnmarshalNext(d, m) +} + +// Unmarshal unmarshals a JSON object from r into m. +func Unmarshal(r io.Reader, m proto.Message) error { + return new(Unmarshaler).Unmarshal(r, m) +} + +// UnmarshalString unmarshals a JSON object from s into m. +func UnmarshalString(s string, m proto.Message) error { + return new(Unmarshaler).Unmarshal(strings.NewReader(s), m) +} + +// Unmarshaler is a configurable object for converting from a JSON +// representation to a protocol buffer object. +type Unmarshaler struct { + // AllowUnknownFields specifies whether to allow messages to contain + // unknown JSON fields, as opposed to failing to unmarshal. + AllowUnknownFields bool + + // AnyResolver is used to resolve the google.protobuf.Any well-known type. + // If unset, the global registry is used by default. + AnyResolver AnyResolver +} + +// JSONPBUnmarshaler is implemented by protobuf messages that customize the way +// they are unmarshaled from JSON. Messages that implement this should also +// implement JSONPBMarshaler so that the custom format can be produced. +// +// The JSON unmarshaling must follow the JSON to proto specification: +// +// https://developers.google.com/protocol-buffers/docs/proto3#json +// +// Deprecated: Custom types should implement protobuf reflection instead. +type JSONPBUnmarshaler interface { + UnmarshalJSONPB(*Unmarshaler, []byte) error +} + +// Unmarshal unmarshals a JSON object from r into m. +func (u *Unmarshaler) Unmarshal(r io.Reader, m proto.Message) error { + return u.UnmarshalNext(json.NewDecoder(r), m) +} + +// UnmarshalNext unmarshals the next JSON object from d into m. +func (u *Unmarshaler) UnmarshalNext(d *json.Decoder, m proto.Message) error { + if m == nil { + return errors.New("invalid nil message") + } + + // Parse the next JSON object from the stream. + raw := json.RawMessage{} + if err := d.Decode(&raw); err != nil { + return err + } + + // Check for custom unmarshalers first since they may not properly + // implement protobuf reflection that the logic below relies on. + if jsu, ok := m.(JSONPBUnmarshaler); ok { + return jsu.UnmarshalJSONPB(u, raw) + } + + mr := proto.MessageReflect(m) + + // NOTE: For historical reasons, a top-level null is treated as a noop. + // This is incorrect, but kept for compatibility. + if string(raw) == "null" && mr.Descriptor().FullName() != "google.protobuf.Value" { + return nil + } + + if wrapJSONUnmarshalV2 { + // NOTE: If input message is non-empty, we need to preserve merge semantics + // of the old jsonpb implementation. These semantics are not supported by + // the protobuf JSON specification. + isEmpty := true + mr.Range(func(protoreflect.FieldDescriptor, protoreflect.Value) bool { + isEmpty = false // at least one iteration implies non-empty + return false + }) + if !isEmpty { + // Perform unmarshaling into a newly allocated, empty message. + mr = mr.New() + + // Use a defer to copy all unmarshaled fields into the original message. + dst := proto.MessageReflect(m) + defer mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + dst.Set(fd, v) + return true + }) + } + + // Unmarshal using the v2 JSON unmarshaler. 
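+ // The legacy options are translated onto protojson: AllowUnknownFields
+ // becomes DiscardUnknown, and a custom AnyResolver (if any) is adapted to
+ // serve as the Resolver.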
+ opts := protojson.UnmarshalOptions{ + DiscardUnknown: u.AllowUnknownFields, + } + if u.AnyResolver != nil { + opts.Resolver = anyResolver{u.AnyResolver} + } + return opts.Unmarshal(raw, mr.Interface()) + } else { + if err := u.unmarshalMessage(mr, raw); err != nil { + return err + } + return protoV2.CheckInitialized(mr.Interface()) + } +} + +func (u *Unmarshaler) unmarshalMessage(m protoreflect.Message, in []byte) error { + md := m.Descriptor() + fds := md.Fields() + + if jsu, ok := proto.MessageV1(m.Interface()).(JSONPBUnmarshaler); ok { + return jsu.UnmarshalJSONPB(u, in) + } + + if string(in) == "null" && md.FullName() != "google.protobuf.Value" { + return nil + } + + switch wellKnownType(md.FullName()) { + case "Any": + var jsonObject map[string]json.RawMessage + if err := json.Unmarshal(in, &jsonObject); err != nil { + return err + } + + rawTypeURL, ok := jsonObject["@type"] + if !ok { + return errors.New("Any JSON doesn't have '@type'") + } + typeURL, err := unquoteString(string(rawTypeURL)) + if err != nil { + return fmt.Errorf("can't unmarshal Any's '@type': %q", rawTypeURL) + } + m.Set(fds.ByNumber(1), protoreflect.ValueOfString(typeURL)) + + var m2 protoreflect.Message + if u.AnyResolver != nil { + mi, err := u.AnyResolver.Resolve(typeURL) + if err != nil { + return err + } + m2 = proto.MessageReflect(mi) + } else { + mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL) + if err != nil { + if err == protoregistry.NotFound { + return fmt.Errorf("could not resolve Any message type: %v", typeURL) + } + return err + } + m2 = mt.New() + } + + if wellKnownType(m2.Descriptor().FullName()) != "" { + rawValue, ok := jsonObject["value"] + if !ok { + return errors.New("Any JSON doesn't have 'value'") + } + if err := u.unmarshalMessage(m2, rawValue); err != nil { + return fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err) + } + } else { + delete(jsonObject, "@type") + rawJSON, err := json.Marshal(jsonObject) + if err != nil { + return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", err) + } + if err = u.unmarshalMessage(m2, rawJSON); err != nil { + return fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err) + } + } + + rawWire, err := protoV2.Marshal(m2.Interface()) + if err != nil { + return fmt.Errorf("can't marshal proto %v into Any.Value: %v", typeURL, err) + } + m.Set(fds.ByNumber(2), protoreflect.ValueOfBytes(rawWire)) + return nil + case "BoolValue", "BytesValue", "StringValue", + "Int32Value", "UInt32Value", "FloatValue", + "Int64Value", "UInt64Value", "DoubleValue": + fd := fds.ByNumber(1) + v, err := u.unmarshalValue(m.NewField(fd), in, fd) + if err != nil { + return err + } + m.Set(fd, v) + return nil + case "Duration": + v, err := unquoteString(string(in)) + if err != nil { + return err + } + d, err := time.ParseDuration(v) + if err != nil { + return fmt.Errorf("bad Duration: %v", err) + } + + sec := d.Nanoseconds() / 1e9 + nsec := d.Nanoseconds() % 1e9 + m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec))) + m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec))) + return nil + case "Timestamp": + v, err := unquoteString(string(in)) + if err != nil { + return err + } + t, err := time.Parse(time.RFC3339Nano, v) + if err != nil { + return fmt.Errorf("bad Timestamp: %v", err) + } + + sec := t.Unix() + nsec := t.Nanosecond() + m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec))) + m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec))) + return nil + case "Value": + switch { + 
case string(in) == "null": + m.Set(fds.ByNumber(1), protoreflect.ValueOfEnum(0)) + case string(in) == "true": + m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(true)) + case string(in) == "false": + m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(false)) + case hasPrefixAndSuffix('"', in, '"'): + s, err := unquoteString(string(in)) + if err != nil { + return fmt.Errorf("unrecognized type for Value %q", in) + } + m.Set(fds.ByNumber(3), protoreflect.ValueOfString(s)) + case hasPrefixAndSuffix('[', in, ']'): + v := m.Mutable(fds.ByNumber(6)) + return u.unmarshalMessage(v.Message(), in) + case hasPrefixAndSuffix('{', in, '}'): + v := m.Mutable(fds.ByNumber(5)) + return u.unmarshalMessage(v.Message(), in) + default: + f, err := strconv.ParseFloat(string(in), 0) + if err != nil { + return fmt.Errorf("unrecognized type for Value %q", in) + } + m.Set(fds.ByNumber(2), protoreflect.ValueOfFloat64(f)) + } + return nil + case "ListValue": + var jsonArray []json.RawMessage + if err := json.Unmarshal(in, &jsonArray); err != nil { + return fmt.Errorf("bad ListValue: %v", err) + } + + lv := m.Mutable(fds.ByNumber(1)).List() + for _, raw := range jsonArray { + ve := lv.NewElement() + if err := u.unmarshalMessage(ve.Message(), raw); err != nil { + return err + } + lv.Append(ve) + } + return nil + case "Struct": + var jsonObject map[string]json.RawMessage + if err := json.Unmarshal(in, &jsonObject); err != nil { + return fmt.Errorf("bad StructValue: %v", err) + } + + mv := m.Mutable(fds.ByNumber(1)).Map() + for key, raw := range jsonObject { + kv := protoreflect.ValueOf(key).MapKey() + vv := mv.NewValue() + if err := u.unmarshalMessage(vv.Message(), raw); err != nil { + return fmt.Errorf("bad value in StructValue for key %q: %v", key, err) + } + mv.Set(kv, vv) + } + return nil + } + + var jsonObject map[string]json.RawMessage + if err := json.Unmarshal(in, &jsonObject); err != nil { + return err + } + + // Handle known fields. + for i := 0; i < fds.Len(); i++ { + fd := fds.Get(i) + if fd.IsWeak() && fd.Message().IsPlaceholder() { + continue // weak reference is not linked in + } + + // Search for any raw JSON value associated with this field. + var raw json.RawMessage + name := string(fd.Name()) + if fd.Kind() == protoreflect.GroupKind { + name = string(fd.Message().Name()) + } + if v, ok := jsonObject[name]; ok { + delete(jsonObject, name) + raw = v + } + name = string(fd.JSONName()) + if v, ok := jsonObject[name]; ok { + delete(jsonObject, name) + raw = v + } + + field := m.NewField(fd) + // Unmarshal the field value. + if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) { + continue + } + v, err := u.unmarshalValue(field, raw, fd) + if err != nil { + return err + } + m.Set(fd, v) + } + + // Handle extension fields. + for name, raw := range jsonObject { + if !strings.HasPrefix(name, "[") || !strings.HasSuffix(name, "]") { + continue + } + + // Resolve the extension field by name. 
+ xname := protoreflect.FullName(name[len("[") : len(name)-len("]")]) + xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname) + if xt == nil && isMessageSet(md) { + xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension")) + } + if xt == nil { + continue + } + delete(jsonObject, name) + fd := xt.TypeDescriptor() + if fd.ContainingMessage().FullName() != m.Descriptor().FullName() { + return fmt.Errorf("extension field %q does not extend message %q", xname, m.Descriptor().FullName()) + } + + field := m.NewField(fd) + // Unmarshal the field value. + if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) { + continue + } + v, err := u.unmarshalValue(field, raw, fd) + if err != nil { + return err + } + m.Set(fd, v) + } + + if !u.AllowUnknownFields && len(jsonObject) > 0 { + for name := range jsonObject { + return fmt.Errorf("unknown field %q in %v", name, md.FullName()) + } + } + return nil +} + +func isSingularWellKnownValue(fd protoreflect.FieldDescriptor) bool { + if fd.Cardinality() == protoreflect.Repeated { + return false + } + if md := fd.Message(); md != nil { + return md.FullName() == "google.protobuf.Value" + } + if ed := fd.Enum(); ed != nil { + return ed.FullName() == "google.protobuf.NullValue" + } + return false +} + +func isSingularJSONPBUnmarshaler(v protoreflect.Value, fd protoreflect.FieldDescriptor) bool { + if fd.Message() != nil && fd.Cardinality() != protoreflect.Repeated { + _, ok := proto.MessageV1(v.Interface()).(JSONPBUnmarshaler) + return ok + } + return false +} + +func (u *Unmarshaler) unmarshalValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { + switch { + case fd.IsList(): + var jsonArray []json.RawMessage + if err := json.Unmarshal(in, &jsonArray); err != nil { + return v, err + } + lv := v.List() + for _, raw := range jsonArray { + ve, err := u.unmarshalSingularValue(lv.NewElement(), raw, fd) + if err != nil { + return v, err + } + lv.Append(ve) + } + return v, nil + case fd.IsMap(): + var jsonObject map[string]json.RawMessage + if err := json.Unmarshal(in, &jsonObject); err != nil { + return v, err + } + kfd := fd.MapKey() + vfd := fd.MapValue() + mv := v.Map() + for key, raw := range jsonObject { + var kv protoreflect.MapKey + if kfd.Kind() == protoreflect.StringKind { + kv = protoreflect.ValueOf(key).MapKey() + } else { + v, err := u.unmarshalSingularValue(kfd.Default(), []byte(key), kfd) + if err != nil { + return v, err + } + kv = v.MapKey() + } + + vv, err := u.unmarshalSingularValue(mv.NewValue(), raw, vfd) + if err != nil { + return v, err + } + mv.Set(kv, vv) + } + return v, nil + default: + return u.unmarshalSingularValue(v, in, fd) + } +} + +var nonFinite = map[string]float64{ + `"NaN"`: math.NaN(), + `"Infinity"`: math.Inf(+1), + `"-Infinity"`: math.Inf(-1), +} + +func (u *Unmarshaler) unmarshalSingularValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { + switch fd.Kind() { + case protoreflect.BoolKind: + return unmarshalValue(in, new(bool)) + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + return unmarshalValue(trimQuote(in), new(int32)) + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + return unmarshalValue(trimQuote(in), new(int64)) + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + return unmarshalValue(trimQuote(in), new(uint32)) + case protoreflect.Uint64Kind, 
protoreflect.Fixed64Kind: + return unmarshalValue(trimQuote(in), new(uint64)) + case protoreflect.FloatKind: + if f, ok := nonFinite[string(in)]; ok { + return protoreflect.ValueOfFloat32(float32(f)), nil + } + return unmarshalValue(trimQuote(in), new(float32)) + case protoreflect.DoubleKind: + if f, ok := nonFinite[string(in)]; ok { + return protoreflect.ValueOfFloat64(float64(f)), nil + } + return unmarshalValue(trimQuote(in), new(float64)) + case protoreflect.StringKind: + return unmarshalValue(in, new(string)) + case protoreflect.BytesKind: + return unmarshalValue(in, new([]byte)) + case protoreflect.EnumKind: + if hasPrefixAndSuffix('"', in, '"') { + vd := fd.Enum().Values().ByName(protoreflect.Name(trimQuote(in))) + if vd == nil { + return v, fmt.Errorf("unknown value %q for enum %s", in, fd.Enum().FullName()) + } + return protoreflect.ValueOfEnum(vd.Number()), nil + } + return unmarshalValue(in, new(protoreflect.EnumNumber)) + case protoreflect.MessageKind, protoreflect.GroupKind: + err := u.unmarshalMessage(v.Message(), in) + return v, err + default: + panic(fmt.Sprintf("invalid kind %v", fd.Kind())) + } +} + +func unmarshalValue(in []byte, v interface{}) (protoreflect.Value, error) { + err := json.Unmarshal(in, v) + return protoreflect.ValueOf(reflect.ValueOf(v).Elem().Interface()), err +} + +func unquoteString(in string) (out string, err error) { + err = json.Unmarshal([]byte(in), &out) + return out, err +} + +func hasPrefixAndSuffix(prefix byte, in []byte, suffix byte) bool { + if len(in) >= 2 && in[0] == prefix && in[len(in)-1] == suffix { + return true + } + return false +} + +// trimQuote is like unquoteString but simply strips surrounding quotes. +// This is incorrect, but is behavior done by the legacy implementation. +func trimQuote(in []byte) []byte { + if len(in) >= 2 && in[0] == '"' && in[len(in)-1] == '"' { + in = in[1 : len(in)-1] + } + return in +} diff --git a/vendor/github.com/golang/protobuf/jsonpb/encode.go b/vendor/github.com/golang/protobuf/jsonpb/encode.go new file mode 100644 index 00000000..e9438a93 --- /dev/null +++ b/vendor/github.com/golang/protobuf/jsonpb/encode.go @@ -0,0 +1,560 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package jsonpb + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "math" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/encoding/protojson" + protoV2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const wrapJSONMarshalV2 = false + +// Marshaler is a configurable object for marshaling protocol buffer messages +// to the specified JSON representation. +type Marshaler struct { + // OrigName specifies whether to use the original protobuf name for fields. + OrigName bool + + // EnumsAsInts specifies whether to render enum values as integers, + // as opposed to string values. + EnumsAsInts bool + + // EmitDefaults specifies whether to render fields with zero values. + EmitDefaults bool + + // Indent controls whether the output is compact or not. + // If empty, the output is compact JSON. Otherwise, every JSON object + // entry and JSON array value will be on its own line. + // Each line will be preceded by repeated copies of Indent, where the + // number of copies is the current indentation depth. 
+ Indent string + + // AnyResolver is used to resolve the google.protobuf.Any well-known type. + // If unset, the global registry is used by default. + AnyResolver AnyResolver +} + +// JSONPBMarshaler is implemented by protobuf messages that customize the +// way they are marshaled to JSON. Messages that implement this should also +// implement JSONPBUnmarshaler so that the custom format can be parsed. +// +// The JSON marshaling must follow the proto to JSON specification: +// +// https://developers.google.com/protocol-buffers/docs/proto3#json +// +// Deprecated: Custom types should implement protobuf reflection instead. +type JSONPBMarshaler interface { + MarshalJSONPB(*Marshaler) ([]byte, error) +} + +// Marshal serializes a protobuf message as JSON into w. +func (jm *Marshaler) Marshal(w io.Writer, m proto.Message) error { + b, err := jm.marshal(m) + if len(b) > 0 { + if _, err := w.Write(b); err != nil { + return err + } + } + return err +} + +// MarshalToString serializes a protobuf message as JSON in string form. +func (jm *Marshaler) MarshalToString(m proto.Message) (string, error) { + b, err := jm.marshal(m) + if err != nil { + return "", err + } + return string(b), nil +} + +func (jm *Marshaler) marshal(m proto.Message) ([]byte, error) { + v := reflect.ValueOf(m) + if m == nil || (v.Kind() == reflect.Ptr && v.IsNil()) { + return nil, errors.New("Marshal called with nil") + } + + // Check for custom marshalers first since they may not properly + // implement protobuf reflection that the logic below relies on. + if jsm, ok := m.(JSONPBMarshaler); ok { + return jsm.MarshalJSONPB(jm) + } + + if wrapJSONMarshalV2 { + opts := protojson.MarshalOptions{ + UseProtoNames: jm.OrigName, + UseEnumNumbers: jm.EnumsAsInts, + EmitUnpopulated: jm.EmitDefaults, + Indent: jm.Indent, + } + if jm.AnyResolver != nil { + opts.Resolver = anyResolver{jm.AnyResolver} + } + return opts.Marshal(proto.MessageReflect(m).Interface()) + } else { + // Check for unpopulated required fields first. + m2 := proto.MessageReflect(m) + if err := protoV2.CheckInitialized(m2.Interface()); err != nil { + return nil, err + } + + w := jsonWriter{Marshaler: jm} + err := w.marshalMessage(m2, "", "") + return w.buf, err + } +} + +type jsonWriter struct { + *Marshaler + buf []byte +} + +func (w *jsonWriter) write(s string) { + w.buf = append(w.buf, s...) +} + +func (w *jsonWriter) marshalMessage(m protoreflect.Message, indent, typeURL string) error { + if jsm, ok := proto.MessageV1(m.Interface()).(JSONPBMarshaler); ok { + b, err := jsm.MarshalJSONPB(w.Marshaler) + if err != nil { + return err + } + if typeURL != "" { + // we are marshaling this object to an Any type + var js map[string]*json.RawMessage + if err = json.Unmarshal(b, &js); err != nil { + return fmt.Errorf("type %T produced invalid JSON: %v", m.Interface(), err) + } + turl, err := json.Marshal(typeURL) + if err != nil { + return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err) + } + js["@type"] = (*json.RawMessage)(&turl) + if b, err = json.Marshal(js); err != nil { + return err + } + } + w.write(string(b)) + return nil + } + + md := m.Descriptor() + fds := md.Fields() + + // Handle well-known types. 
+ const secondInNanos = int64(time.Second / time.Nanosecond) + switch wellKnownType(md.FullName()) { + case "Any": + return w.marshalAny(m, indent) + case "BoolValue", "BytesValue", "StringValue", + "Int32Value", "UInt32Value", "FloatValue", + "Int64Value", "UInt64Value", "DoubleValue": + fd := fds.ByNumber(1) + return w.marshalValue(fd, m.Get(fd), indent) + case "Duration": + const maxSecondsInDuration = 315576000000 + // "Generated output always contains 0, 3, 6, or 9 fractional digits, + // depending on required precision." + s := m.Get(fds.ByNumber(1)).Int() + ns := m.Get(fds.ByNumber(2)).Int() + if s < -maxSecondsInDuration || s > maxSecondsInDuration { + return fmt.Errorf("seconds out of range %v", s) + } + if ns <= -secondInNanos || ns >= secondInNanos { + return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos) + } + if (s > 0 && ns < 0) || (s < 0 && ns > 0) { + return errors.New("signs of seconds and nanos do not match") + } + var sign string + if s < 0 || ns < 0 { + sign, s, ns = "-", -1*s, -1*ns + } + x := fmt.Sprintf("%s%d.%09d", sign, s, ns) + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + w.write(fmt.Sprintf(`"%vs"`, x)) + return nil + case "Timestamp": + // "RFC 3339, where generated output will always be Z-normalized + // and uses 0, 3, 6 or 9 fractional digits." + s := m.Get(fds.ByNumber(1)).Int() + ns := m.Get(fds.ByNumber(2)).Int() + if ns < 0 || ns >= secondInNanos { + return fmt.Errorf("ns out of range [0, %v)", secondInNanos) + } + t := time.Unix(s, ns).UTC() + // time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits). + x := t.Format("2006-01-02T15:04:05.000000000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + w.write(fmt.Sprintf(`"%vZ"`, x)) + return nil + case "Value": + // JSON value; which is a null, number, string, bool, object, or array. + od := md.Oneofs().Get(0) + fd := m.WhichOneof(od) + if fd == nil { + return errors.New("nil Value") + } + return w.marshalValue(fd, m.Get(fd), indent) + case "Struct", "ListValue": + // JSON object or array. + fd := fds.ByNumber(1) + return w.marshalValue(fd, m.Get(fd), indent) + } + + w.write("{") + if w.Indent != "" { + w.write("\n") + } + + firstField := true + if typeURL != "" { + if err := w.marshalTypeURL(indent, typeURL); err != nil { + return err + } + firstField = false + } + + for i := 0; i < fds.Len(); { + fd := fds.Get(i) + if od := fd.ContainingOneof(); od != nil { + fd = m.WhichOneof(od) + i += od.Fields().Len() + if fd == nil { + continue + } + } else { + i++ + } + + v := m.Get(fd) + + if !m.Has(fd) { + if !w.EmitDefaults || fd.ContainingOneof() != nil { + continue + } + if fd.Cardinality() != protoreflect.Repeated && (fd.Message() != nil || fd.Syntax() == protoreflect.Proto2) { + v = protoreflect.Value{} // use "null" for singular messages or proto2 scalars + } + } + + if !firstField { + w.writeComma() + } + if err := w.marshalField(fd, v, indent); err != nil { + return err + } + firstField = false + } + + // Handle proto2 extensions. + if md.ExtensionRanges().Len() > 0 { + // Collect a sorted list of all extension descriptor and values. 
+ type ext struct { + desc protoreflect.FieldDescriptor + val protoreflect.Value + } + var exts []ext + m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + if fd.IsExtension() { + exts = append(exts, ext{fd, v}) + } + return true + }) + sort.Slice(exts, func(i, j int) bool { + return exts[i].desc.Number() < exts[j].desc.Number() + }) + + for _, ext := range exts { + if !firstField { + w.writeComma() + } + if err := w.marshalField(ext.desc, ext.val, indent); err != nil { + return err + } + firstField = false + } + } + + if w.Indent != "" { + w.write("\n") + w.write(indent) + } + w.write("}") + return nil +} + +func (w *jsonWriter) writeComma() { + if w.Indent != "" { + w.write(",\n") + } else { + w.write(",") + } +} + +func (w *jsonWriter) marshalAny(m protoreflect.Message, indent string) error { + // "If the Any contains a value that has a special JSON mapping, + // it will be converted as follows: {"@type": xxx, "value": yyy}. + // Otherwise, the value will be converted into a JSON object, + // and the "@type" field will be inserted to indicate the actual data type." + md := m.Descriptor() + typeURL := m.Get(md.Fields().ByNumber(1)).String() + rawVal := m.Get(md.Fields().ByNumber(2)).Bytes() + + var m2 protoreflect.Message + if w.AnyResolver != nil { + mi, err := w.AnyResolver.Resolve(typeURL) + if err != nil { + return err + } + m2 = proto.MessageReflect(mi) + } else { + mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL) + if err != nil { + return err + } + m2 = mt.New() + } + + if err := protoV2.Unmarshal(rawVal, m2.Interface()); err != nil { + return err + } + + if wellKnownType(m2.Descriptor().FullName()) == "" { + return w.marshalMessage(m2, indent, typeURL) + } + + w.write("{") + if w.Indent != "" { + w.write("\n") + } + if err := w.marshalTypeURL(indent, typeURL); err != nil { + return err + } + w.writeComma() + if w.Indent != "" { + w.write(indent) + w.write(w.Indent) + w.write(`"value": `) + } else { + w.write(`"value":`) + } + if err := w.marshalMessage(m2, indent+w.Indent, ""); err != nil { + return err + } + if w.Indent != "" { + w.write("\n") + w.write(indent) + } + w.write("}") + return nil +} + +func (w *jsonWriter) marshalTypeURL(indent, typeURL string) error { + if w.Indent != "" { + w.write(indent) + w.write(w.Indent) + } + w.write(`"@type":`) + if w.Indent != "" { + w.write(" ") + } + b, err := json.Marshal(typeURL) + if err != nil { + return err + } + w.write(string(b)) + return nil +} + +// marshalField writes field description and value to the Writer. +func (w *jsonWriter) marshalField(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error { + if w.Indent != "" { + w.write(indent) + w.write(w.Indent) + } + w.write(`"`) + switch { + case fd.IsExtension(): + // For message set, use the fname of the message as the extension name. 
+ name := string(fd.FullName()) + if isMessageSet(fd.ContainingMessage()) { + name = strings.TrimSuffix(name, ".message_set_extension") + } + + w.write("[" + name + "]") + case w.OrigName: + name := string(fd.Name()) + if fd.Kind() == protoreflect.GroupKind { + name = string(fd.Message().Name()) + } + w.write(name) + default: + w.write(string(fd.JSONName())) + } + w.write(`":`) + if w.Indent != "" { + w.write(" ") + } + return w.marshalValue(fd, v, indent) +} + +func (w *jsonWriter) marshalValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error { + switch { + case fd.IsList(): + w.write("[") + comma := "" + lv := v.List() + for i := 0; i < lv.Len(); i++ { + w.write(comma) + if w.Indent != "" { + w.write("\n") + w.write(indent) + w.write(w.Indent) + w.write(w.Indent) + } + if err := w.marshalSingularValue(fd, lv.Get(i), indent+w.Indent); err != nil { + return err + } + comma = "," + } + if w.Indent != "" { + w.write("\n") + w.write(indent) + w.write(w.Indent) + } + w.write("]") + return nil + case fd.IsMap(): + kfd := fd.MapKey() + vfd := fd.MapValue() + mv := v.Map() + + // Collect a sorted list of all map keys and values. + type entry struct{ key, val protoreflect.Value } + var entries []entry + mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { + entries = append(entries, entry{k.Value(), v}) + return true + }) + sort.Slice(entries, func(i, j int) bool { + switch kfd.Kind() { + case protoreflect.BoolKind: + return !entries[i].key.Bool() && entries[j].key.Bool() + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + return entries[i].key.Int() < entries[j].key.Int() + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + return entries[i].key.Uint() < entries[j].key.Uint() + case protoreflect.StringKind: + return entries[i].key.String() < entries[j].key.String() + default: + panic("invalid kind") + } + }) + + w.write(`{`) + comma := "" + for _, entry := range entries { + w.write(comma) + if w.Indent != "" { + w.write("\n") + w.write(indent) + w.write(w.Indent) + w.write(w.Indent) + } + + s := fmt.Sprint(entry.key.Interface()) + b, err := json.Marshal(s) + if err != nil { + return err + } + w.write(string(b)) + + w.write(`:`) + if w.Indent != "" { + w.write(` `) + } + + if err := w.marshalSingularValue(vfd, entry.val, indent+w.Indent); err != nil { + return err + } + comma = "," + } + if w.Indent != "" { + w.write("\n") + w.write(indent) + w.write(w.Indent) + } + w.write(`}`) + return nil + default: + return w.marshalSingularValue(fd, v, indent) + } +} + +func (w *jsonWriter) marshalSingularValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error { + switch { + case !v.IsValid(): + w.write("null") + return nil + case fd.Message() != nil: + return w.marshalMessage(v.Message(), indent+w.Indent, "") + case fd.Enum() != nil: + if fd.Enum().FullName() == "google.protobuf.NullValue" { + w.write("null") + return nil + } + + vd := fd.Enum().Values().ByNumber(v.Enum()) + if vd == nil || w.EnumsAsInts { + w.write(strconv.Itoa(int(v.Enum()))) + } else { + w.write(`"` + string(vd.Name()) + `"`) + } + return nil + default: + switch v.Interface().(type) { + case float32, float64: + switch { + case math.IsInf(v.Float(), +1): + w.write(`"Infinity"`) + return nil + case math.IsInf(v.Float(), -1): + w.write(`"-Infinity"`) + return nil + case math.IsNaN(v.Float()): + w.write(`"NaN"`) + 
return nil + } + case int64, uint64: + w.write(fmt.Sprintf(`"%d"`, v.Interface())) + return nil + } + + b, err := json.Marshal(v.Interface()) + if err != nil { + return err + } + w.write(string(b)) + return nil + } +} diff --git a/vendor/github.com/golang/protobuf/jsonpb/json.go b/vendor/github.com/golang/protobuf/jsonpb/json.go new file mode 100644 index 00000000..480e2448 --- /dev/null +++ b/vendor/github.com/golang/protobuf/jsonpb/json.go @@ -0,0 +1,69 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package jsonpb provides functionality to marshal and unmarshal between a +// protocol buffer message and JSON. It follows the specification at +// https://developers.google.com/protocol-buffers/docs/proto3#json. +// +// Do not rely on the default behavior of the standard encoding/json package +// when called on generated message types as it does not operate correctly. +// +// Deprecated: Use the "google.golang.org/protobuf/encoding/protojson" +// package instead. +package jsonpb + +import ( + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoimpl" +) + +// AnyResolver takes a type URL, present in an Any message, +// and resolves it into an instance of the associated message. +type AnyResolver interface { + Resolve(typeURL string) (proto.Message, error) +} + +type anyResolver struct{ AnyResolver } + +func (r anyResolver) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) { + return r.FindMessageByURL(string(message)) +} + +func (r anyResolver) FindMessageByURL(url string) (protoreflect.MessageType, error) { + m, err := r.Resolve(url) + if err != nil { + return nil, err + } + return protoimpl.X.MessageTypeOf(m), nil +} + +func (r anyResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { + return protoregistry.GlobalTypes.FindExtensionByName(field) +} + +func (r anyResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { + return protoregistry.GlobalTypes.FindExtensionByNumber(message, field) +} + +func wellKnownType(s protoreflect.FullName) string { + if s.Parent() == "google.protobuf" { + switch s.Name() { + case "Empty", "Any", + "BoolValue", "BytesValue", "StringValue", + "Int32Value", "UInt32Value", "FloatValue", + "Int64Value", "UInt64Value", "DoubleValue", + "Duration", "Timestamp", + "NullValue", "Struct", "Value", "ListValue": + return string(s.Name()) + } + } + return "" +} + +func isMessageSet(md protoreflect.MessageDescriptor) bool { + ms, ok := md.(interface{ IsMessageSet() bool }) + return ok && ms.IsMessageSet() +} diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go new file mode 100644 index 00000000..fdff3fdb --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/any.go @@ -0,0 +1,180 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
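For reference, a minimal sketch of how the vendored jsonpb Marshaler and Unmarshaler above are typically used. This is illustrative only and not part of the vendored files; the Duration message is just a convenient well-known type to round-trip, and the option values are arbitrary.

```go
package main

import (
	"fmt"
	"os"
	"strings"

	"github.com/golang/protobuf/jsonpb"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	// Duration is a well-known type, so it marshals to its special JSON
	// string form (e.g. "1.500s") rather than an object with seconds/nanos.
	m := &jsonpb.Marshaler{OrigName: true, Indent: "  "}
	if err := m.Marshal(os.Stdout, &durpb.Duration{Seconds: 1, Nanos: 500000000}); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
	fmt.Println()

	// Unmarshal parses the same well-known JSON form back into the message.
	var d durpb.Duration
	u := &jsonpb.Unmarshaler{AllowUnknownFields: true}
	if err := u.Unmarshal(strings.NewReader(`"2.250s"`), &d); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
	fmt.Println(d.Seconds, d.Nanos) // 2 250000000
}
```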
+ +package ptypes + +import ( + "fmt" + "strings" + + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + + anypb "github.com/golang/protobuf/ptypes/any" +) + +const urlPrefix = "type.googleapis.com/" + +// AnyMessageName returns the message name contained in an anypb.Any message. +// Most type assertions should use the Is function instead. +// +// Deprecated: Call the any.MessageName method instead. +func AnyMessageName(any *anypb.Any) (string, error) { + name, err := anyMessageName(any) + return string(name), err +} +func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) { + if any == nil { + return "", fmt.Errorf("message is nil") + } + name := protoreflect.FullName(any.TypeUrl) + if i := strings.LastIndex(any.TypeUrl, "/"); i >= 0 { + name = name[i+len("/"):] + } + if !name.IsValid() { + return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl) + } + return name, nil +} + +// MarshalAny marshals the given message m into an anypb.Any message. +// +// Deprecated: Call the anypb.New function instead. +func MarshalAny(m proto.Message) (*anypb.Any, error) { + switch dm := m.(type) { + case DynamicAny: + m = dm.Message + case *DynamicAny: + if dm == nil { + return nil, proto.ErrNil + } + m = dm.Message + } + b, err := proto.Marshal(m) + if err != nil { + return nil, err + } + return &anypb.Any{TypeUrl: urlPrefix + proto.MessageName(m), Value: b}, nil +} + +// Empty returns a new message of the type specified in an anypb.Any message. +// It returns protoregistry.NotFound if the corresponding message type could not +// be resolved in the global registry. +// +// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead +// to resolve the message name and create a new instance of it. +func Empty(any *anypb.Any) (proto.Message, error) { + name, err := anyMessageName(any) + if err != nil { + return nil, err + } + mt, err := protoregistry.GlobalTypes.FindMessageByName(name) + if err != nil { + return nil, err + } + return proto.MessageV1(mt.New().Interface()), nil +} + +// UnmarshalAny unmarshals the encoded value contained in the anypb.Any message +// into the provided message m. It returns an error if the target message +// does not match the type in the Any message or if an unmarshal error occurs. +// +// The target message m may be a *DynamicAny message. If the underlying message +// type could not be resolved, then this returns protoregistry.NotFound. +// +// Deprecated: Call the any.UnmarshalTo method instead. +func UnmarshalAny(any *anypb.Any, m proto.Message) error { + if dm, ok := m.(*DynamicAny); ok { + if dm.Message == nil { + var err error + dm.Message, err = Empty(any) + if err != nil { + return err + } + } + m = dm.Message + } + + anyName, err := AnyMessageName(any) + if err != nil { + return err + } + msgName := proto.MessageName(m) + if anyName != msgName { + return fmt.Errorf("mismatched message type: got %q want %q", anyName, msgName) + } + return proto.Unmarshal(any.Value, m) +} + +// Is reports whether the Any message contains a message of the specified type. +// +// Deprecated: Call the any.MessageIs method instead. 
+func Is(any *anypb.Any, m proto.Message) bool { + if any == nil || m == nil { + return false + } + name := proto.MessageName(m) + if !strings.HasSuffix(any.TypeUrl, name) { + return false + } + return len(any.TypeUrl) == len(name) || any.TypeUrl[len(any.TypeUrl)-len(name)-1] == '/' +} + +// DynamicAny is a value that can be passed to UnmarshalAny to automatically +// allocate a proto.Message for the type specified in an anypb.Any message. +// The allocated message is stored in the embedded proto.Message. +// +// Example: +// +// var x ptypes.DynamicAny +// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } +// fmt.Printf("unmarshaled message: %v", x.Message) +// +// Deprecated: Use the any.UnmarshalNew method instead to unmarshal +// the any message contents into a new instance of the underlying message. +type DynamicAny struct{ proto.Message } + +func (m DynamicAny) String() string { + if m.Message == nil { + return "" + } + return m.Message.String() +} +func (m DynamicAny) Reset() { + if m.Message == nil { + return + } + m.Message.Reset() +} +func (m DynamicAny) ProtoMessage() { + return +} +func (m DynamicAny) ProtoReflect() protoreflect.Message { + if m.Message == nil { + return nil + } + return dynamicAny{proto.MessageReflect(m.Message)} +} + +type dynamicAny struct{ protoreflect.Message } + +func (m dynamicAny) Type() protoreflect.MessageType { + return dynamicAnyType{m.Message.Type()} +} +func (m dynamicAny) New() protoreflect.Message { + return dynamicAnyType{m.Message.Type()}.New() +} +func (m dynamicAny) Interface() protoreflect.ProtoMessage { + return DynamicAny{proto.MessageV1(m.Message.Interface())} +} + +type dynamicAnyType struct{ protoreflect.MessageType } + +func (t dynamicAnyType) New() protoreflect.Message { + return dynamicAny{t.MessageType.New()} +} +func (t dynamicAnyType) Zero() protoreflect.Message { + return dynamicAny{t.MessageType.Zero()} +} diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go new file mode 100644 index 00000000..0ef27d33 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go @@ -0,0 +1,62 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: github.com/golang/protobuf/ptypes/any/any.proto + +package any + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + reflect "reflect" +) + +// Symbols defined in public import of google/protobuf/any.proto. 
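A small usage sketch for the deprecated ptypes Any helpers vendored above (MarshalAny, Is, UnmarshalAny). Illustrative only; the Duration payload is just an example message, not something this patch requires.

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/ptypes"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	orig := &durpb.Duration{Seconds: 3}

	// MarshalAny wraps the message in a google.protobuf.Any with a type URL.
	packed, err := ptypes.MarshalAny(orig)
	if err != nil {
		panic(err)
	}
	fmt.Println(packed.TypeUrl) // type.googleapis.com/google.protobuf.Duration

	// Is checks the type URL before unpacking into a concrete message.
	if ptypes.Is(packed, &durpb.Duration{}) {
		var out durpb.Duration
		if err := ptypes.UnmarshalAny(packed, &out); err != nil {
			panic(err)
		}
		fmt.Println(out.Seconds) // 3
	}
}
```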
+ +type Any = anypb.Any + +var File_github_com_golang_protobuf_ptypes_any_any_proto protoreflect.FileDescriptor + +var file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = []byte{ + 0x0a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x2b, 0x5a, 0x29, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, + 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x3b, 0x61, 0x6e, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = []interface{}{} +var file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_github_com_golang_protobuf_ptypes_any_any_proto_init() } +func file_github_com_golang_protobuf_ptypes_any_any_proto_init() { + if File_github_com_golang_protobuf_ptypes_any_any_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes, + DependencyIndexes: file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs, + }.Build() + File_github_com_golang_protobuf_ptypes_any_any_proto = out.File + file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = nil + file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = nil + file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = nil +} diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go new file mode 100644 index 00000000..d3c33259 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/doc.go @@ -0,0 +1,10 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ptypes provides functionality for interacting with well-known types. +// +// Deprecated: Well-known types have specialized functionality directly +// injected into the generated packages for each message type. +// See the deprecation notice for each function for the suggested alternative. +package ptypes diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go new file mode 100644 index 00000000..b2b55dd8 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/duration.go @@ -0,0 +1,76 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ptypes + +import ( + "errors" + "fmt" + "time" + + durationpb "github.com/golang/protobuf/ptypes/duration" +) + +// Range of google.protobuf.Duration as specified in duration.proto. +// This is about 10,000 years in seconds. +const ( + maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) + minSeconds = -maxSeconds +) + +// Duration converts a durationpb.Duration to a time.Duration. +// Duration returns an error if dur is invalid or overflows a time.Duration. +// +// Deprecated: Call the dur.AsDuration and dur.CheckValid methods instead. +func Duration(dur *durationpb.Duration) (time.Duration, error) { + if err := validateDuration(dur); err != nil { + return 0, err + } + d := time.Duration(dur.Seconds) * time.Second + if int64(d/time.Second) != dur.Seconds { + return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur) + } + if dur.Nanos != 0 { + d += time.Duration(dur.Nanos) * time.Nanosecond + if (d < 0) != (dur.Nanos < 0) { + return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur) + } + } + return d, nil +} + +// DurationProto converts a time.Duration to a durationpb.Duration. +// +// Deprecated: Call the durationpb.New function instead. +func DurationProto(d time.Duration) *durationpb.Duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &durationpb.Duration{ + Seconds: int64(secs), + Nanos: int32(nanos), + } +} + +// validateDuration determines whether the durationpb.Duration is valid +// according to the definition in google/protobuf/duration.proto. +// A valid durpb.Duration may still be too large to fit into a time.Duration +// Note that the range of durationpb.Duration is about 10,000 years, +// while the range of time.Duration is about 290 years. +func validateDuration(dur *durationpb.Duration) error { + if dur == nil { + return errors.New("duration: nil Duration") + } + if dur.Seconds < minSeconds || dur.Seconds > maxSeconds { + return fmt.Errorf("duration: %v: seconds out of range", dur) + } + if dur.Nanos <= -1e9 || dur.Nanos >= 1e9 { + return fmt.Errorf("duration: %v: nanos out of range", dur) + } + // Seconds and Nanos must have the same sign, unless d.Nanos is zero. + if (dur.Seconds < 0 && dur.Nanos > 0) || (dur.Seconds > 0 && dur.Nanos < 0) { + return fmt.Errorf("duration: %v: seconds and nanos have different signs", dur) + } + return nil +} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go new file mode 100644 index 00000000..d0079ee3 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go @@ -0,0 +1,63 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: github.com/golang/protobuf/ptypes/duration/duration.proto + +package duration + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + reflect "reflect" +) + +// Symbols defined in public import of google/protobuf/duration.proto. 
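A brief sketch of the deprecated duration helpers above, showing the round trip between time.Duration and google.protobuf.Duration. Illustrative only; the 1.5s value is arbitrary.

```go
package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	// DurationProto splits a time.Duration into seconds and nanos.
	dp := ptypes.DurationProto(1500 * time.Millisecond)
	fmt.Println(dp.Seconds, dp.Nanos) // 1 500000000

	// Duration validates the range and sign agreement, then converts back.
	d, err := ptypes.Duration(dp)
	if err != nil {
		panic(err)
	}
	fmt.Println(d) // 1.5s
}
```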
+ +type Duration = durationpb.Duration + +var File_github_com_golang_protobuf_ptypes_duration_duration_proto protoreflect.FileDescriptor + +var file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = []byte{ + 0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x64, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, + 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = []interface{}{} +var file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() } +func file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() { + if File_github_com_golang_protobuf_ptypes_duration_duration_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes, + DependencyIndexes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs, + }.Build() + File_github_com_golang_protobuf_ptypes_duration_duration_proto = out.File + file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = nil + file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = nil + file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = nil +} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go new file mode 100644 index 00000000..8368a3f7 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go @@ -0,0 +1,112 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ptypes + +import ( + "errors" + "fmt" + "time" + + timestamppb "github.com/golang/protobuf/ptypes/timestamp" +) + +// Range of google.protobuf.Duration as specified in timestamp.proto. +const ( + // Seconds field of the earliest valid Timestamp. + // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + minValidSeconds = -62135596800 + // Seconds field just after the latest valid Timestamp. 
+	// This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
+	maxValidSeconds = 253402300800
+)
+
+// Timestamp converts a timestamppb.Timestamp to a time.Time.
+// It returns an error if the argument is invalid.
+//
+// Unlike most Go functions, if Timestamp returns an error, the first return
+// value is not the zero time.Time. Instead, it is the value obtained from the
+// time.Unix function when passed the contents of the Timestamp, in the UTC
+// locale. This may or may not be a meaningful time; many invalid Timestamps
+// do map to valid time.Times.
+//
+// A nil Timestamp returns an error. The first return value in that case is
+// undefined.
+//
+// Deprecated: Call the ts.AsTime and ts.CheckValid methods instead.
+func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) {
+	// Don't return the zero value on error, because corresponds to a valid
+	// timestamp. Instead return whatever time.Unix gives us.
+	var t time.Time
+	if ts == nil {
+		t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
+	} else {
+		t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
+	}
+	return t, validateTimestamp(ts)
+}
+
+// TimestampNow returns a google.protobuf.Timestamp for the current time.
+//
+// Deprecated: Call the timestamppb.Now function instead.
+func TimestampNow() *timestamppb.Timestamp {
+	ts, err := TimestampProto(time.Now())
+	if err != nil {
+		panic("ptypes: time.Now() out of Timestamp range")
+	}
+	return ts
+}
+
+// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
+// It returns an error if the resulting Timestamp is invalid.
+//
+// Deprecated: Call the timestamppb.New function instead.
+func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) {
+	ts := &timestamppb.Timestamp{
+		Seconds: t.Unix(),
+		Nanos:   int32(t.Nanosecond()),
+	}
+	if err := validateTimestamp(ts); err != nil {
+		return nil, err
+	}
+	return ts, nil
+}
+
+// TimestampString returns the RFC 3339 string for valid Timestamps.
+// For invalid Timestamps, it returns an error message in parentheses.
+//
+// Deprecated: Call the ts.AsTime method instead,
+// followed by a call to the Format method on the time.Time value.
+func TimestampString(ts *timestamppb.Timestamp) string {
+	t, err := Timestamp(ts)
+	if err != nil {
+		return fmt.Sprintf("(%v)", err)
+	}
+	return t.Format(time.RFC3339Nano)
+}
+
+// validateTimestamp determines whether a Timestamp is valid.
+// A valid timestamp represents a time in the range [0001-01-01, 10000-01-01)
+// and has a Nanos field in the range [0, 1e9).
+//
+// If the Timestamp is valid, validateTimestamp returns nil.
+// Otherwise, it returns an error that describes the problem.
+//
+// Every valid Timestamp can be represented by a time.Time,
+// but the converse is not true.
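A brief sketch of the deprecated timestamp helpers above, converting a time.Time to a google.protobuf.Timestamp and back. Illustrative only; the date is arbitrary.

```go
package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	// TimestampProto fails only for times outside [0001-01-01, 10000-01-01).
	ts, err := ptypes.TimestampProto(time.Date(2023, time.January, 2, 3, 4, 5, 0, time.UTC))
	if err != nil {
		panic(err)
	}
	fmt.Println(ptypes.TimestampString(ts)) // 2023-01-02T03:04:05Z

	// Timestamp converts back to a UTC time.Time.
	t, err := ptypes.Timestamp(ts)
	if err != nil {
		panic(err)
	}
	fmt.Println(t.Equal(time.Date(2023, time.January, 2, 3, 4, 5, 0, time.UTC))) // true
}
```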
+func validateTimestamp(ts *timestamppb.Timestamp) error { + if ts == nil { + return errors.New("timestamp: nil Timestamp") + } + if ts.Seconds < minValidSeconds { + return fmt.Errorf("timestamp: %v before 0001-01-01", ts) + } + if ts.Seconds >= maxValidSeconds { + return fmt.Errorf("timestamp: %v after 10000-01-01", ts) + } + if ts.Nanos < 0 || ts.Nanos >= 1e9 { + return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts) + } + return nil +} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go new file mode 100644 index 00000000..a76f8076 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go @@ -0,0 +1,64 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto + +package timestamp + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" +) + +// Symbols defined in public import of google/protobuf/timestamp.proto. + +type Timestamp = timestamppb.Timestamp + +var File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto protoreflect.FileDescriptor + +var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = []byte{ + 0x0a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2f, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x37, + 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x3b, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = []interface{}{} +var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() } +func file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() { + if File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes, + DependencyIndexes: 
file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs, + }.Build() + File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto = out.File + file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = nil + file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = nil + file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = nil +} diff --git a/vendor/github.com/google/uuid/.travis.yml b/vendor/github.com/google/uuid/.travis.yml deleted file mode 100644 index d8156a60..00000000 --- a/vendor/github.com/google/uuid/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go - -go: - - 1.4.3 - - 1.5.3 - - tip - -script: - - go test -v ./... diff --git a/vendor/github.com/google/uuid/CHANGELOG.md b/vendor/github.com/google/uuid/CHANGELOG.md new file mode 100644 index 00000000..7ed347d3 --- /dev/null +++ b/vendor/github.com/google/uuid/CHANGELOG.md @@ -0,0 +1,21 @@ +# Changelog + +## [1.4.0](https://github.com/google/uuid/compare/v1.3.1...v1.4.0) (2023-10-26) + + +### Features + +* UUIDs slice type with Strings() convenience method ([#133](https://github.com/google/uuid/issues/133)) ([cd5fbbd](https://github.com/google/uuid/commit/cd5fbbdd02f3e3467ac18940e07e062be1f864b4)) + +### Fixes + +* Clarify that Parse's job is to parse but not necessarily validate strings. (Documents current behavior) + +## [1.3.1](https://github.com/google/uuid/compare/v1.3.0...v1.3.1) (2023-08-18) + + +### Bug Fixes + +* Use .EqualFold() to parse urn prefixed UUIDs ([#118](https://github.com/google/uuid/issues/118)) ([574e687](https://github.com/google/uuid/commit/574e6874943741fb99d41764c705173ada5293f0)) + +## Changelog diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md index 04fdf09f..a502fdc5 100644 --- a/vendor/github.com/google/uuid/CONTRIBUTING.md +++ b/vendor/github.com/google/uuid/CONTRIBUTING.md @@ -2,6 +2,22 @@ We definitely welcome patches and contribution to this project! +### Tips + +Commits must be formatted according to the [Conventional Commits Specification](https://www.conventionalcommits.org). + +Always try to include a test case! If it is not possible or not necessary, +please explain why in the pull request description. + +### Releasing + +Commits that would precipitate a SemVer change, as described in the Conventional +Commits Specification, will trigger [`release-please`](https://github.com/google-github-actions/release-please-action) +to create a release candidate pull request. Once submitted, `release-please` +will create a release. + +For tips on how to work with `release-please`, see its documentation. + ### Legal requirements In order to protect both you and ourselves, you will need to sign the diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md index f765a46f..3e9a6188 100644 --- a/vendor/github.com/google/uuid/README.md +++ b/vendor/github.com/google/uuid/README.md @@ -1,6 +1,6 @@ -# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master) +# uuid The uuid package generates and inspects UUIDs based on -[RFC 4122](http://tools.ietf.org/html/rfc4122) +[RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122) and DCE 1.1: Authentication and Security Services. This package is based on the github.com/pborman/uuid package (previously named @@ -9,10 +9,12 @@ a UUID is a 16 byte array rather than a byte slice. One loss due to this change is the ability to represent an invalid UUID (vs a NIL UUID). 
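For reference, a minimal sketch of typical use of the uuid package whose README and sources are updated below: generating, parsing, and stringifying UUIDs, including the UUIDs.Strings helper added in the release vendored here. Illustrative only; the parsed value is just an example UUID.

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	id := uuid.New() // random (version 4) UUID

	// Parse accepts the standard form as well as urn:uuid:, braced, and raw-hex encodings.
	parsed, err := uuid.Parse("urn:uuid:f47ac10b-58cc-4372-a567-0e02b2c3d479")
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}

	// UUIDs.Strings converts a slice of UUIDs to their string forms in one call.
	fmt.Println(uuid.UUIDs{id, parsed}.Strings())
}
```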
###### Install -`go get github.com/google/uuid` +```sh +go get github.com/google/uuid +``` ###### Documentation -[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid) +[![Go Reference](https://pkg.go.dev/badge/github.com/google/uuid.svg)](https://pkg.go.dev/github.com/google/uuid) Full `go doc` style documentation for the package can be viewed online without installing this package by using the GoDoc site here: diff --git a/vendor/github.com/google/uuid/node_js.go b/vendor/github.com/google/uuid/node_js.go index 24b78edc..b2a0bc87 100644 --- a/vendor/github.com/google/uuid/node_js.go +++ b/vendor/github.com/google/uuid/node_js.go @@ -7,6 +7,6 @@ package uuid // getHardwareInterface returns nil values for the JS version of the code. -// This remvoves the "net" dependency, because it is not used in the browser. +// This removes the "net" dependency, because it is not used in the browser. // Using the "net" library inflates the size of the transpiled JS code by 673k bytes. func getHardwareInterface(name string) (string, []byte) { return "", nil } diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go index a57207ae..dc75f7d9 100644 --- a/vendor/github.com/google/uuid/uuid.go +++ b/vendor/github.com/google/uuid/uuid.go @@ -56,11 +56,15 @@ func IsInvalidLengthError(err error) bool { return ok } -// Parse decodes s into a UUID or returns an error. Both the standard UUID -// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and -// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the -// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex -// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx. +// Parse decodes s into a UUID or returns an error if it cannot be parsed. Both +// the standard UUID forms defined in RFC 4122 +// (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx) are decoded. In addition, +// Parse accepts non-standard strings such as the raw hex encoding +// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx and 38 byte "Microsoft style" encodings, +// e.g. {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}. Only the middle 36 bytes are +// examined in the latter case. Parse should not be used to validate strings as +// it parses non-standard encodings as indicated above. 
func Parse(s string) (UUID, error) { var uuid UUID switch len(s) { @@ -69,7 +73,7 @@ func Parse(s string) (UUID, error) { // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx case 36 + 9: - if strings.ToLower(s[:9]) != "urn:uuid:" { + if !strings.EqualFold(s[:9], "urn:uuid:") { return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9]) } s = s[9:] @@ -101,7 +105,8 @@ func Parse(s string) (UUID, error) { 9, 11, 14, 16, 19, 21, - 24, 26, 28, 30, 32, 34} { + 24, 26, 28, 30, 32, 34, + } { v, ok := xtob(s[x], s[x+1]) if !ok { return uuid, errors.New("invalid UUID format") @@ -117,7 +122,7 @@ func ParseBytes(b []byte) (UUID, error) { switch len(b) { case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) { + if !bytes.EqualFold(b[:9], []byte("urn:uuid:")) { return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9]) } b = b[9:] @@ -145,7 +150,8 @@ func ParseBytes(b []byte) (UUID, error) { 9, 11, 14, 16, 19, 21, - 24, 26, 28, 30, 32, 34} { + 24, 26, 28, 30, 32, 34, + } { v, ok := xtob(b[x], b[x+1]) if !ok { return uuid, errors.New("invalid UUID format") @@ -292,3 +298,15 @@ func DisableRandPool() { poolMu.Lock() poolPos = randPoolSize } + +// UUIDs is a slice of UUID types. +type UUIDs []UUID + +// Strings returns a string slice containing the string form of each UUID in uuids. +func (uuids UUIDs) Strings() []string { + var uuidStrs = make([]string, len(uuids)) + for i, uuid := range uuids { + uuidStrs[i] = uuid.String() + } + return uuidStrs +} diff --git a/vendor/github.com/jhump/protoreflect/LICENSE b/vendor/github.com/jhump/protoreflect/LICENSE new file mode 100644 index 00000000..b53b91d8 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2017 Joshua Humphries + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/jhump/protoreflect/codec/codec.go b/vendor/github.com/jhump/protoreflect/codec/codec.go new file mode 100644 index 00000000..7e5c5684 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/codec/codec.go @@ -0,0 +1,218 @@ +package codec + +import ( + "io" + + "github.com/golang/protobuf/proto" + + "github.com/jhump/protoreflect/internal/codec" +) + +// ErrOverflow is returned when an integer is too large to be represented. +var ErrOverflow = codec.ErrOverflow + +// ErrBadWireType is returned when decoding a wire-type from a buffer that +// is not valid. +var ErrBadWireType = codec.ErrBadWireType + +// NB: much of the implementation is in an internal package, to avoid an import +// cycle between this codec package and the desc package. We export it from +// this package, but we can't use a type alias because we also need to add +// methods to it, to broaden the exposed API. 
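Editor's sketch (not part of the vendored file): a minimal round trip through the exported Buffer wrapper this file re-exports from the internal package, assuming only the methods declared below and the wire-type constants from the golang/protobuf proto package.

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"github.com/jhump/protoreflect/codec"
)

func main() {
	var b codec.Buffer // the zero value is an empty, usable buffer

	// Write field #1 as a varint (tag 1, wire type 0), then the value 150.
	if err := b.EncodeTagAndWireType(1, proto.WireVarint); err != nil {
		panic(err)
	}
	if err := b.EncodeVarint(150); err != nil {
		panic(err)
	}

	// Reads pop from the head of the same buffer, like bytes.Buffer.
	tag, wt, err := b.DecodeTagAndWireType()
	if err != nil {
		panic(err)
	}
	v, err := b.DecodeVarint()
	if err != nil {
		panic(err)
	}
	fmt.Println(tag, wt, v) // 1 0 150
}
```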
+ +// Buffer is a reader and a writer that wraps a slice of bytes and also +// provides API for decoding and encoding the protobuf binary format. +// +// Its operation is similar to that of a bytes.Buffer: writing pushes +// data to the end of the buffer while reading pops data from the head +// of the buffer. So the same buffer can be used to both read and write. +type Buffer codec.Buffer + +// NewBuffer creates a new buffer with the given slice of bytes as the +// buffer's initial contents. +func NewBuffer(buf []byte) *Buffer { + return (*Buffer)(codec.NewBuffer(buf)) +} + +// SetDeterministic sets this buffer to encode messages deterministically. This +// is useful for tests. But the overhead is non-zero, so it should not likely be +// used outside of tests. When true, map fields in a message must have their +// keys sorted before serialization to ensure deterministic output. Otherwise, +// values in a map field will be serialized in map iteration order. +func (cb *Buffer) SetDeterministic(deterministic bool) { + (*codec.Buffer)(cb).SetDeterministic(deterministic) +} + +// IsDeterministic returns whether or not this buffer is configured to encode +// messages deterministically. +func (cb *Buffer) IsDeterministic() bool { + return (*codec.Buffer)(cb).IsDeterministic() +} + +// Reset resets this buffer back to empty. Any subsequent writes/encodes +// to the buffer will allocate a new backing slice of bytes. +func (cb *Buffer) Reset() { + (*codec.Buffer)(cb).Reset() +} + +// Bytes returns the slice of bytes remaining in the buffer. Note that +// this does not perform a copy: if the contents of the returned slice +// are modified, the modifications will be visible to subsequent reads +// via the buffer. +func (cb *Buffer) Bytes() []byte { + return (*codec.Buffer)(cb).Bytes() +} + +// String returns the remaining bytes in the buffer as a string. +func (cb *Buffer) String() string { + return (*codec.Buffer)(cb).String() +} + +// EOF returns true if there are no more bytes remaining to read. +func (cb *Buffer) EOF() bool { + return (*codec.Buffer)(cb).EOF() +} + +// Skip attempts to skip the given number of bytes in the input. If +// the input has fewer bytes than the given count, io.ErrUnexpectedEOF +// is returned and the buffer is unchanged. Otherwise, the given number +// of bytes are skipped and nil is returned. +func (cb *Buffer) Skip(count int) error { + return (*codec.Buffer)(cb).Skip(count) + +} + +// Len returns the remaining number of bytes in the buffer. +func (cb *Buffer) Len() int { + return (*codec.Buffer)(cb).Len() +} + +// Read implements the io.Reader interface. If there are no bytes +// remaining in the buffer, it will return 0, io.EOF. Otherwise, +// it reads max(len(dest), cb.Len()) bytes from input and copies +// them into dest. It returns the number of bytes copied and a nil +// error in this case. +func (cb *Buffer) Read(dest []byte) (int, error) { + return (*codec.Buffer)(cb).Read(dest) +} + +var _ io.Reader = (*Buffer)(nil) + +// Write implements the io.Writer interface. It always returns +// len(data), nil. +func (cb *Buffer) Write(data []byte) (int, error) { + return (*codec.Buffer)(cb).Write(data) +} + +var _ io.Writer = (*Buffer)(nil) + +// DecodeVarint reads a varint-encoded integer from the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. 
+func (cb *Buffer) DecodeVarint() (uint64, error) { + return (*codec.Buffer)(cb).DecodeVarint() +} + +// DecodeTagAndWireType decodes a field tag and wire type from input. +// This reads a varint and then extracts the two fields from the varint +// value read. +func (cb *Buffer) DecodeTagAndWireType() (tag int32, wireType int8, err error) { + return (*codec.Buffer)(cb).DecodeTagAndWireType() +} + +// DecodeFixed64 reads a 64-bit integer from the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (cb *Buffer) DecodeFixed64() (x uint64, err error) { + return (*codec.Buffer)(cb).DecodeFixed64() +} + +// DecodeFixed32 reads a 32-bit integer from the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (cb *Buffer) DecodeFixed32() (x uint64, err error) { + return (*codec.Buffer)(cb).DecodeFixed32() +} + +// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (cb *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { + return (*codec.Buffer)(cb).DecodeRawBytes(alloc) +} + +// ReadGroup reads the input until a "group end" tag is found +// and returns the data up to that point. Subsequent reads from +// the buffer will read data after the group end tag. If alloc +// is true, the data is copied to a new slice before being returned. +// Otherwise, the returned slice is a view into the buffer's +// underlying byte slice. +// +// This function correctly handles nested groups: if a "group start" +// tag is found, then that group's end tag will be included in the +// returned data. +func (cb *Buffer) ReadGroup(alloc bool) ([]byte, error) { + return (*codec.Buffer)(cb).ReadGroup(alloc) +} + +// SkipGroup is like ReadGroup, except that it discards the +// data and just advances the buffer to point to the input +// right *after* the "group end" tag. +func (cb *Buffer) SkipGroup() error { + return (*codec.Buffer)(cb).SkipGroup() +} + +// SkipField attempts to skip the value of a field with the given wire +// type. When consuming a protobuf-encoded stream, it can be called immediately +// after DecodeTagAndWireType to discard the subsequent data for the field. +func (cb *Buffer) SkipField(wireType int8) error { + return (*codec.Buffer)(cb).SkipField(wireType) +} + +// EncodeVarint writes a varint-encoded integer to the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (cb *Buffer) EncodeVarint(x uint64) error { + return (*codec.Buffer)(cb).EncodeVarint(x) +} + +// EncodeTagAndWireType encodes the given field tag and wire type to the +// buffer. This combines the two values and then writes them as a varint. +func (cb *Buffer) EncodeTagAndWireType(tag int32, wireType int8) error { + return (*codec.Buffer)(cb).EncodeTagAndWireType(tag, wireType) +} + +// EncodeFixed64 writes a 64-bit integer to the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (cb *Buffer) EncodeFixed64(x uint64) error { + return (*codec.Buffer)(cb).EncodeFixed64(x) + +} + +// EncodeFixed32 writes a 32-bit integer to the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (cb *Buffer) EncodeFixed32(x uint64) error { + return (*codec.Buffer)(cb).EncodeFixed32(x) +} + +// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. 
+// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (cb *Buffer) EncodeRawBytes(b []byte) error { + return (*codec.Buffer)(cb).EncodeRawBytes(b) +} + +// EncodeMessage writes the given message to the buffer. +func (cb *Buffer) EncodeMessage(pm proto.Message) error { + return (*codec.Buffer)(cb).EncodeMessage(pm) +} + +// EncodeDelimitedMessage writes the given message to the buffer with a +// varint-encoded length prefix (the delimiter). +func (cb *Buffer) EncodeDelimitedMessage(pm proto.Message) error { + return (*codec.Buffer)(cb).EncodeDelimitedMessage(pm) +} diff --git a/vendor/github.com/jhump/protoreflect/codec/decode_fields.go b/vendor/github.com/jhump/protoreflect/codec/decode_fields.go new file mode 100644 index 00000000..0edb817c --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/codec/decode_fields.go @@ -0,0 +1,318 @@ +package codec + +import ( + "errors" + "fmt" + "io" + "math" + + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/types/descriptorpb" + + "github.com/jhump/protoreflect/desc" +) + +var varintTypes = map[descriptorpb.FieldDescriptorProto_Type]bool{} +var fixed32Types = map[descriptorpb.FieldDescriptorProto_Type]bool{} +var fixed64Types = map[descriptorpb.FieldDescriptorProto_Type]bool{} + +func init() { + varintTypes[descriptorpb.FieldDescriptorProto_TYPE_BOOL] = true + varintTypes[descriptorpb.FieldDescriptorProto_TYPE_INT32] = true + varintTypes[descriptorpb.FieldDescriptorProto_TYPE_INT64] = true + varintTypes[descriptorpb.FieldDescriptorProto_TYPE_UINT32] = true + varintTypes[descriptorpb.FieldDescriptorProto_TYPE_UINT64] = true + varintTypes[descriptorpb.FieldDescriptorProto_TYPE_SINT32] = true + varintTypes[descriptorpb.FieldDescriptorProto_TYPE_SINT64] = true + varintTypes[descriptorpb.FieldDescriptorProto_TYPE_ENUM] = true + + fixed32Types[descriptorpb.FieldDescriptorProto_TYPE_FIXED32] = true + fixed32Types[descriptorpb.FieldDescriptorProto_TYPE_SFIXED32] = true + fixed32Types[descriptorpb.FieldDescriptorProto_TYPE_FLOAT] = true + + fixed64Types[descriptorpb.FieldDescriptorProto_TYPE_FIXED64] = true + fixed64Types[descriptorpb.FieldDescriptorProto_TYPE_SFIXED64] = true + fixed64Types[descriptorpb.FieldDescriptorProto_TYPE_DOUBLE] = true +} + +// ErrWireTypeEndGroup is returned from DecodeFieldValue if the tag and wire-type +// it reads indicates an end-group marker. +var ErrWireTypeEndGroup = errors.New("unexpected wire type: end group") + +// MessageFactory is used to instantiate messages when DecodeFieldValue needs to +// decode a message value. +// +// Also see MessageFactory in "github.com/jhump/protoreflect/dynamic", which +// implements this interface. +type MessageFactory interface { + NewMessage(md *desc.MessageDescriptor) proto.Message +} + +// UnknownField represents a field that was parsed from the binary wire +// format for a message, but was not a recognized field number. Enough +// information is preserved so that re-serializing the message won't lose +// any of the unrecognized data. +type UnknownField struct { + // The tag number for the unrecognized field. + Tag int32 + + // Encoding indicates how the unknown field was encoded on the wire. If it + // is proto.WireBytes or proto.WireGroupStart then Contents will be set to + // the raw bytes. If it is proto.WireTypeFixed32 then the data is in the least + // significant 32 bits of Value. Otherwise, the data is in all 64 bits of + // Value. 
+ Encoding int8 + Contents []byte + Value uint64 +} + +// DecodeZigZag32 decodes a signed 32-bit integer from the given +// zig-zag encoded value. +func DecodeZigZag32(v uint64) int32 { + return int32((uint32(v) >> 1) ^ uint32((int32(v&1)<<31)>>31)) +} + +// DecodeZigZag64 decodes a signed 64-bit integer from the given +// zig-zag encoded value. +func DecodeZigZag64(v uint64) int64 { + return int64((v >> 1) ^ uint64((int64(v&1)<<63)>>63)) +} + +// DecodeFieldValue will read a field value from the buffer and return its +// value and the corresponding field descriptor. The given function is used +// to lookup a field descriptor by tag number. The given factory is used to +// instantiate a message if the field value is (or contains) a message value. +// +// On error, the field descriptor and value are typically nil. However, if the +// error returned is ErrWireTypeEndGroup, the returned value will indicate any +// tag number encoded in the end-group marker. +// +// If the field descriptor returned is nil, that means that the given function +// returned nil. This is expected to happen for unrecognized tag numbers. In +// that case, no error is returned, and the value will be an UnknownField. +func (cb *Buffer) DecodeFieldValue(fieldFinder func(int32) *desc.FieldDescriptor, fact MessageFactory) (*desc.FieldDescriptor, interface{}, error) { + if cb.EOF() { + return nil, nil, io.EOF + } + tagNumber, wireType, err := cb.DecodeTagAndWireType() + if err != nil { + return nil, nil, err + } + if wireType == proto.WireEndGroup { + return nil, tagNumber, ErrWireTypeEndGroup + } + fd := fieldFinder(tagNumber) + if fd == nil { + val, err := cb.decodeUnknownField(tagNumber, wireType) + return nil, val, err + } + val, err := cb.decodeKnownField(fd, wireType, fact) + return fd, val, err +} + +// DecodeScalarField extracts a properly-typed value from v. The returned value's +// type depends on the given field descriptor type. It will be the same type as +// generated structs use for the field descriptor's type. Enum types will return +// an int32. If the given field type uses length-delimited encoding (nested +// messages, bytes, and strings), an error is returned. 
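Editor's sketch (not part of the vendored file): the DecodeFieldValue contract described above, exercised without any descriptors. Because the lookup function always returns nil, every field comes back as an UnknownField and the MessageFactory argument is never consulted; the sample payload bytes are hand-encoded for illustration.

```go
package main

import (
	"fmt"

	"github.com/jhump/protoreflect/codec"
	"github.com/jhump/protoreflect/desc"
)

// dumpUnknownFields walks a raw protobuf payload field by field.
func dumpUnknownFields(raw []byte) error {
	b := codec.NewBuffer(raw)
	for !b.EOF() {
		_, val, err := b.DecodeFieldValue(
			func(tag int32) *desc.FieldDescriptor { return nil }, // recognize nothing
			nil, // factory is only consulted for recognized message fields
		)
		if err == codec.ErrWireTypeEndGroup {
			continue // val carries the tag of the end-group marker
		}
		if err != nil {
			return err
		}
		uf := val.(codec.UnknownField) // descriptor is nil, so the value is an UnknownField
		fmt.Printf("tag=%d encoding=%d value=%d contents=%x\n",
			uf.Tag, uf.Encoding, uf.Value, uf.Contents)
	}
	return nil
}

func main() {
	// field 1 = varint 150 (0x08 0x96 0x01), field 2 = bytes "hi" (0x12 0x02 'h' 'i')
	_ = dumpUnknownFields([]byte{0x08, 0x96, 0x01, 0x12, 0x02, 'h', 'i'})
}
```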
+func DecodeScalarField(fd *desc.FieldDescriptor, v uint64) (interface{}, error) { + switch fd.GetType() { + case descriptorpb.FieldDescriptorProto_TYPE_BOOL: + return v != 0, nil + case descriptorpb.FieldDescriptorProto_TYPE_UINT32, + descriptorpb.FieldDescriptorProto_TYPE_FIXED32: + if v > math.MaxUint32 { + return nil, ErrOverflow + } + return uint32(v), nil + + case descriptorpb.FieldDescriptorProto_TYPE_INT32, + descriptorpb.FieldDescriptorProto_TYPE_ENUM: + s := int64(v) + if s > math.MaxInt32 || s < math.MinInt32 { + return nil, ErrOverflow + } + return int32(s), nil + + case descriptorpb.FieldDescriptorProto_TYPE_SFIXED32: + if v > math.MaxUint32 { + return nil, ErrOverflow + } + return int32(v), nil + + case descriptorpb.FieldDescriptorProto_TYPE_SINT32: + if v > math.MaxUint32 { + return nil, ErrOverflow + } + return DecodeZigZag32(v), nil + + case descriptorpb.FieldDescriptorProto_TYPE_UINT64, + descriptorpb.FieldDescriptorProto_TYPE_FIXED64: + return v, nil + + case descriptorpb.FieldDescriptorProto_TYPE_INT64, + descriptorpb.FieldDescriptorProto_TYPE_SFIXED64: + return int64(v), nil + + case descriptorpb.FieldDescriptorProto_TYPE_SINT64: + return DecodeZigZag64(v), nil + + case descriptorpb.FieldDescriptorProto_TYPE_FLOAT: + if v > math.MaxUint32 { + return nil, ErrOverflow + } + return math.Float32frombits(uint32(v)), nil + + case descriptorpb.FieldDescriptorProto_TYPE_DOUBLE: + return math.Float64frombits(v), nil + + default: + // bytes, string, message, and group cannot be represented as a simple numeric value + return nil, fmt.Errorf("bad input; field %s requires length-delimited wire type", fd.GetFullyQualifiedName()) + } +} + +// DecodeLengthDelimitedField extracts a properly-typed value from bytes. The +// returned value's type will usually be []byte, string, or, for nested messages, +// the type returned from the given message factory. However, since repeated +// scalar fields can be length-delimited, when they used packed encoding, it can +// also return an []interface{}, where each element is a scalar value. Furthermore, +// it could return a scalar type, not in a slice, if the given field descriptor is +// not repeated. This is to support cases where a field is changed from optional +// to repeated. New code may emit a packed repeated representation, but old code +// still expects a single scalar value. In this case, if the actual data in bytes +// contains multiple values, only the last value is returned. +func DecodeLengthDelimitedField(fd *desc.FieldDescriptor, bytes []byte, mf MessageFactory) (interface{}, error) { + switch { + case fd.GetType() == descriptorpb.FieldDescriptorProto_TYPE_BYTES: + return bytes, nil + + case fd.GetType() == descriptorpb.FieldDescriptorProto_TYPE_STRING: + return string(bytes), nil + + case fd.GetType() == descriptorpb.FieldDescriptorProto_TYPE_MESSAGE || + fd.GetType() == descriptorpb.FieldDescriptorProto_TYPE_GROUP: + msg := mf.NewMessage(fd.GetMessageType()) + err := proto.Unmarshal(bytes, msg) + if err != nil { + return nil, err + } else { + return msg, nil + } + + default: + // even if the field is not repeated or not packed, we still parse it as such for + // backwards compatibility (e.g. 
message we are de-serializing could have been both + // repeated and packed at the time of serialization) + packedBuf := NewBuffer(bytes) + var slice []interface{} + var val interface{} + for !packedBuf.EOF() { + var v uint64 + var err error + if varintTypes[fd.GetType()] { + v, err = packedBuf.DecodeVarint() + } else if fixed32Types[fd.GetType()] { + v, err = packedBuf.DecodeFixed32() + } else if fixed64Types[fd.GetType()] { + v, err = packedBuf.DecodeFixed64() + } else { + return nil, fmt.Errorf("bad input; cannot parse length-delimited wire type for field %s", fd.GetFullyQualifiedName()) + } + if err != nil { + return nil, err + } + val, err = DecodeScalarField(fd, v) + if err != nil { + return nil, err + } + if fd.IsRepeated() { + slice = append(slice, val) + } + } + if fd.IsRepeated() { + return slice, nil + } else { + // if not a repeated field, last value wins + return val, nil + } + } +} + +func (b *Buffer) decodeKnownField(fd *desc.FieldDescriptor, encoding int8, fact MessageFactory) (interface{}, error) { + var val interface{} + var err error + switch encoding { + case proto.WireFixed32: + var num uint64 + num, err = b.DecodeFixed32() + if err == nil { + val, err = DecodeScalarField(fd, num) + } + case proto.WireFixed64: + var num uint64 + num, err = b.DecodeFixed64() + if err == nil { + val, err = DecodeScalarField(fd, num) + } + case proto.WireVarint: + var num uint64 + num, err = b.DecodeVarint() + if err == nil { + val, err = DecodeScalarField(fd, num) + } + + case proto.WireBytes: + alloc := fd.GetType() == descriptorpb.FieldDescriptorProto_TYPE_BYTES + var raw []byte + raw, err = b.DecodeRawBytes(alloc) + if err == nil { + val, err = DecodeLengthDelimitedField(fd, raw, fact) + } + + case proto.WireStartGroup: + if fd.GetMessageType() == nil { + return nil, fmt.Errorf("cannot parse field %s from group-encoded wire type", fd.GetFullyQualifiedName()) + } + msg := fact.NewMessage(fd.GetMessageType()) + var data []byte + data, err = b.ReadGroup(false) + if err == nil { + err = proto.Unmarshal(data, msg) + if err == nil { + val = msg + } + } + + default: + return nil, ErrBadWireType + } + if err != nil { + return nil, err + } + + return val, nil +} + +func (b *Buffer) decodeUnknownField(tagNumber int32, encoding int8) (interface{}, error) { + u := UnknownField{Tag: tagNumber, Encoding: encoding} + var err error + switch encoding { + case proto.WireFixed32: + u.Value, err = b.DecodeFixed32() + case proto.WireFixed64: + u.Value, err = b.DecodeFixed64() + case proto.WireVarint: + u.Value, err = b.DecodeVarint() + case proto.WireBytes: + u.Contents, err = b.DecodeRawBytes(true) + case proto.WireStartGroup: + u.Contents, err = b.ReadGroup(true) + default: + err = ErrBadWireType + } + if err != nil { + return nil, err + } + return u, nil +} diff --git a/vendor/github.com/jhump/protoreflect/codec/doc.go b/vendor/github.com/jhump/protoreflect/codec/doc.go new file mode 100644 index 00000000..f76499f6 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/codec/doc.go @@ -0,0 +1,7 @@ +// Package codec contains a reader/write type that assists with encoding +// and decoding protobuf's binary representation. +// +// The code in this package began as a fork of proto.Buffer but provides +// additional API to make it more useful to code that needs to dynamically +// process or produce the protobuf binary format. 
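Editor's note (not part of the vendored file): the zig-zag helpers (DecodeZigZag32/DecodeZigZag64 above, and EncodeZigZag32/EncodeZigZag64 in encode_fields.go below) interleave signed values so that small magnitudes encode to short varints. A quick illustrative round trip:

```go
package main

import (
	"fmt"

	"github.com/jhump/protoreflect/codec"
)

func main() {
	// Zig-zag interleaving: 0→0, -1→1, 1→2, -2→3, 2→4, ...
	for _, v := range []int32{0, -1, 1, -2, 2, -64} {
		enc := codec.EncodeZigZag32(v)
		fmt.Printf("%d -> %d -> %d\n", v, enc, codec.DecodeZigZag32(enc))
	}
}
```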
+package codec diff --git a/vendor/github.com/jhump/protoreflect/codec/encode_fields.go b/vendor/github.com/jhump/protoreflect/codec/encode_fields.go new file mode 100644 index 00000000..280f730f --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/codec/encode_fields.go @@ -0,0 +1,288 @@ +package codec + +import ( + "fmt" + "math" + "reflect" + "sort" + + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/types/descriptorpb" + + "github.com/jhump/protoreflect/desc" +) + +// EncodeZigZag64 does zig-zag encoding to convert the given +// signed 64-bit integer into a form that can be expressed +// efficiently as a varint, even for negative values. +func EncodeZigZag64(v int64) uint64 { + return (uint64(v) << 1) ^ uint64(v>>63) +} + +// EncodeZigZag32 does zig-zag encoding to convert the given +// signed 32-bit integer into a form that can be expressed +// efficiently as a varint, even for negative values. +func EncodeZigZag32(v int32) uint64 { + return uint64((uint32(v) << 1) ^ uint32((v >> 31))) +} + +func (cb *Buffer) EncodeFieldValue(fd *desc.FieldDescriptor, val interface{}) error { + if fd.IsMap() { + mp := val.(map[interface{}]interface{}) + entryType := fd.GetMessageType() + keyType := entryType.FindFieldByNumber(1) + valType := entryType.FindFieldByNumber(2) + var entryBuffer Buffer + if cb.IsDeterministic() { + entryBuffer.SetDeterministic(true) + keys := make([]interface{}, 0, len(mp)) + for k := range mp { + keys = append(keys, k) + } + sort.Sort(sortable(keys)) + for _, k := range keys { + v := mp[k] + entryBuffer.Reset() + if err := entryBuffer.encodeFieldElement(keyType, k); err != nil { + return err + } + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr || !rv.IsNil() { + if err := entryBuffer.encodeFieldElement(valType, v); err != nil { + return err + } + } + if err := cb.EncodeTagAndWireType(fd.GetNumber(), proto.WireBytes); err != nil { + return err + } + if err := cb.EncodeRawBytes(entryBuffer.Bytes()); err != nil { + return err + } + } + } else { + for k, v := range mp { + entryBuffer.Reset() + if err := entryBuffer.encodeFieldElement(keyType, k); err != nil { + return err + } + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr || !rv.IsNil() { + if err := entryBuffer.encodeFieldElement(valType, v); err != nil { + return err + } + } + if err := cb.EncodeTagAndWireType(fd.GetNumber(), proto.WireBytes); err != nil { + return err + } + if err := cb.EncodeRawBytes(entryBuffer.Bytes()); err != nil { + return err + } + } + } + return nil + } else if fd.IsRepeated() { + sl := val.([]interface{}) + wt, err := getWireType(fd.GetType()) + if err != nil { + return err + } + if isPacked(fd) && len(sl) > 0 && + (wt == proto.WireVarint || wt == proto.WireFixed32 || wt == proto.WireFixed64) { + // packed repeated field + var packedBuffer Buffer + for _, v := range sl { + if err := packedBuffer.encodeFieldValue(fd, v); err != nil { + return err + } + } + if err := cb.EncodeTagAndWireType(fd.GetNumber(), proto.WireBytes); err != nil { + return err + } + return cb.EncodeRawBytes(packedBuffer.Bytes()) + } else { + // non-packed repeated field + for _, v := range sl { + if err := cb.encodeFieldElement(fd, v); err != nil { + return err + } + } + return nil + } + } else { + return cb.encodeFieldElement(fd, val) + } +} + +func isPacked(fd *desc.FieldDescriptor) bool { + opts := fd.AsFieldDescriptorProto().GetOptions() + // if set, use that value + if opts != nil && opts.Packed != nil { + return opts.GetPacked() + } + // if unset: proto2 defaults to false, proto3 to 
true + return fd.GetFile().IsProto3() +} + +// sortable is used to sort map keys. Values will be integers (int32, int64, uint32, and uint64), +// bools, or strings. +type sortable []interface{} + +func (s sortable) Len() int { + return len(s) +} + +func (s sortable) Less(i, j int) bool { + vi := s[i] + vj := s[j] + switch reflect.TypeOf(vi).Kind() { + case reflect.Int32: + return vi.(int32) < vj.(int32) + case reflect.Int64: + return vi.(int64) < vj.(int64) + case reflect.Uint32: + return vi.(uint32) < vj.(uint32) + case reflect.Uint64: + return vi.(uint64) < vj.(uint64) + case reflect.String: + return vi.(string) < vj.(string) + case reflect.Bool: + return !vi.(bool) && vj.(bool) + default: + panic(fmt.Sprintf("cannot compare keys of type %v", reflect.TypeOf(vi))) + } +} + +func (s sortable) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (b *Buffer) encodeFieldElement(fd *desc.FieldDescriptor, val interface{}) error { + wt, err := getWireType(fd.GetType()) + if err != nil { + return err + } + if err := b.EncodeTagAndWireType(fd.GetNumber(), wt); err != nil { + return err + } + if err := b.encodeFieldValue(fd, val); err != nil { + return err + } + if wt == proto.WireStartGroup { + return b.EncodeTagAndWireType(fd.GetNumber(), proto.WireEndGroup) + } + return nil +} + +func (b *Buffer) encodeFieldValue(fd *desc.FieldDescriptor, val interface{}) error { + switch fd.GetType() { + case descriptorpb.FieldDescriptorProto_TYPE_BOOL: + v := val.(bool) + if v { + return b.EncodeVarint(1) + } + return b.EncodeVarint(0) + + case descriptorpb.FieldDescriptorProto_TYPE_ENUM, + descriptorpb.FieldDescriptorProto_TYPE_INT32: + v := val.(int32) + return b.EncodeVarint(uint64(v)) + + case descriptorpb.FieldDescriptorProto_TYPE_SFIXED32: + v := val.(int32) + return b.EncodeFixed32(uint64(v)) + + case descriptorpb.FieldDescriptorProto_TYPE_SINT32: + v := val.(int32) + return b.EncodeVarint(EncodeZigZag32(v)) + + case descriptorpb.FieldDescriptorProto_TYPE_UINT32: + v := val.(uint32) + return b.EncodeVarint(uint64(v)) + + case descriptorpb.FieldDescriptorProto_TYPE_FIXED32: + v := val.(uint32) + return b.EncodeFixed32(uint64(v)) + + case descriptorpb.FieldDescriptorProto_TYPE_INT64: + v := val.(int64) + return b.EncodeVarint(uint64(v)) + + case descriptorpb.FieldDescriptorProto_TYPE_SFIXED64: + v := val.(int64) + return b.EncodeFixed64(uint64(v)) + + case descriptorpb.FieldDescriptorProto_TYPE_SINT64: + v := val.(int64) + return b.EncodeVarint(EncodeZigZag64(v)) + + case descriptorpb.FieldDescriptorProto_TYPE_UINT64: + v := val.(uint64) + return b.EncodeVarint(v) + + case descriptorpb.FieldDescriptorProto_TYPE_FIXED64: + v := val.(uint64) + return b.EncodeFixed64(v) + + case descriptorpb.FieldDescriptorProto_TYPE_DOUBLE: + v := val.(float64) + return b.EncodeFixed64(math.Float64bits(v)) + + case descriptorpb.FieldDescriptorProto_TYPE_FLOAT: + v := val.(float32) + return b.EncodeFixed32(uint64(math.Float32bits(v))) + + case descriptorpb.FieldDescriptorProto_TYPE_BYTES: + v := val.([]byte) + return b.EncodeRawBytes(v) + + case descriptorpb.FieldDescriptorProto_TYPE_STRING: + v := val.(string) + return b.EncodeRawBytes(([]byte)(v)) + + case descriptorpb.FieldDescriptorProto_TYPE_MESSAGE: + return b.EncodeDelimitedMessage(val.(proto.Message)) + + case descriptorpb.FieldDescriptorProto_TYPE_GROUP: + // just append the nested message to this buffer + return b.EncodeMessage(val.(proto.Message)) + // whosoever writeth start-group tag (e.g. 
caller) is responsible for writing end-group tag + + default: + return fmt.Errorf("unrecognized field type: %v", fd.GetType()) + } +} + +func getWireType(t descriptorpb.FieldDescriptorProto_Type) (int8, error) { + switch t { + case descriptorpb.FieldDescriptorProto_TYPE_ENUM, + descriptorpb.FieldDescriptorProto_TYPE_BOOL, + descriptorpb.FieldDescriptorProto_TYPE_INT32, + descriptorpb.FieldDescriptorProto_TYPE_SINT32, + descriptorpb.FieldDescriptorProto_TYPE_UINT32, + descriptorpb.FieldDescriptorProto_TYPE_INT64, + descriptorpb.FieldDescriptorProto_TYPE_SINT64, + descriptorpb.FieldDescriptorProto_TYPE_UINT64: + return proto.WireVarint, nil + + case descriptorpb.FieldDescriptorProto_TYPE_FIXED32, + descriptorpb.FieldDescriptorProto_TYPE_SFIXED32, + descriptorpb.FieldDescriptorProto_TYPE_FLOAT: + return proto.WireFixed32, nil + + case descriptorpb.FieldDescriptorProto_TYPE_FIXED64, + descriptorpb.FieldDescriptorProto_TYPE_SFIXED64, + descriptorpb.FieldDescriptorProto_TYPE_DOUBLE: + return proto.WireFixed64, nil + + case descriptorpb.FieldDescriptorProto_TYPE_BYTES, + descriptorpb.FieldDescriptorProto_TYPE_STRING, + descriptorpb.FieldDescriptorProto_TYPE_MESSAGE: + return proto.WireBytes, nil + + case descriptorpb.FieldDescriptorProto_TYPE_GROUP: + return proto.WireStartGroup, nil + + default: + return 0, ErrBadWireType + } +} diff --git a/vendor/github.com/jhump/protoreflect/desc/cache.go b/vendor/github.com/jhump/protoreflect/desc/cache.go new file mode 100644 index 00000000..418632b7 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/cache.go @@ -0,0 +1,48 @@ +package desc + +import ( + "sync" + + "google.golang.org/protobuf/reflect/protoreflect" +) + +type descriptorCache interface { + get(protoreflect.Descriptor) Descriptor + put(protoreflect.Descriptor, Descriptor) +} + +type lockingCache struct { + cacheMu sync.RWMutex + cache mapCache +} + +func (c *lockingCache) get(d protoreflect.Descriptor) Descriptor { + c.cacheMu.RLock() + defer c.cacheMu.RUnlock() + return c.cache.get(d) +} + +func (c *lockingCache) put(key protoreflect.Descriptor, val Descriptor) { + c.cacheMu.Lock() + defer c.cacheMu.Unlock() + c.cache.put(key, val) +} + +func (c *lockingCache) withLock(fn func(descriptorCache)) { + c.cacheMu.Lock() + defer c.cacheMu.Unlock() + // Pass the underlying mapCache. We don't want fn to use + // c.get or c.put sine we already have the lock. So those + // methods would try to re-acquire and then deadlock! + fn(c.cache) +} + +type mapCache map[protoreflect.Descriptor]Descriptor + +func (c mapCache) get(d protoreflect.Descriptor) Descriptor { + return c[d] +} + +func (c mapCache) put(key protoreflect.Descriptor, val Descriptor) { + c[key] = val +} diff --git a/vendor/github.com/jhump/protoreflect/desc/convert.go b/vendor/github.com/jhump/protoreflect/desc/convert.go new file mode 100644 index 00000000..01a6e9ea --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/convert.go @@ -0,0 +1,294 @@ +package desc + +import ( + "errors" + "fmt" + "strings" + + "google.golang.org/protobuf/reflect/protodesc" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/types/descriptorpb" + + "github.com/jhump/protoreflect/desc/internal" + intn "github.com/jhump/protoreflect/internal" +) + +// CreateFileDescriptor instantiates a new file descriptor for the given descriptor proto. +// The file's direct dependencies must be provided. 
If the given dependencies do not include +// all of the file's dependencies or if the contents of the descriptors are internally +// inconsistent (e.g. contain unresolvable symbols) then an error is returned. +func CreateFileDescriptor(fd *descriptorpb.FileDescriptorProto, deps ...*FileDescriptor) (*FileDescriptor, error) { + return createFileDescriptor(fd, deps, nil) +} + +type descResolver struct { + files []*FileDescriptor + importResolver *ImportResolver + fromPath string +} + +func (r *descResolver) FindFileByPath(path string) (protoreflect.FileDescriptor, error) { + resolvedPath := r.importResolver.ResolveImport(r.fromPath, path) + d := r.findFileByPath(resolvedPath) + if d != nil { + return d, nil + } + if resolvedPath != path { + d := r.findFileByPath(path) + if d != nil { + return d, nil + } + } + return nil, protoregistry.NotFound +} + +func (r *descResolver) findFileByPath(path string) protoreflect.FileDescriptor { + for _, fd := range r.files { + if fd.GetName() == path { + return fd.UnwrapFile() + } + } + return nil +} + +func (r *descResolver) FindDescriptorByName(n protoreflect.FullName) (protoreflect.Descriptor, error) { + for _, fd := range r.files { + d := fd.FindSymbol(string(n)) + if d != nil { + return d.(DescriptorWrapper).Unwrap(), nil + } + } + return nil, protoregistry.NotFound +} + +func createFileDescriptor(fd *descriptorpb.FileDescriptorProto, deps []*FileDescriptor, r *ImportResolver) (*FileDescriptor, error) { + dr := &descResolver{files: deps, importResolver: r, fromPath: fd.GetName()} + d, err := protodesc.NewFile(fd, dr) + if err != nil { + return nil, err + } + + // make sure cache has dependencies populated + cache := mapCache{} + for _, dep := range deps { + fd, err := dr.FindFileByPath(dep.GetName()) + if err != nil { + return nil, err + } + cache.put(fd, dep) + } + + return convertFile(d, fd, cache) +} + +func convertFile(d protoreflect.FileDescriptor, fd *descriptorpb.FileDescriptorProto, cache descriptorCache) (*FileDescriptor, error) { + ret := &FileDescriptor{ + wrapped: d, + proto: fd, + symbols: map[string]Descriptor{}, + fieldIndex: map[string]map[int32]*FieldDescriptor{}, + } + cache.put(d, ret) + + // populate references to file descriptor dependencies + ret.deps = make([]*FileDescriptor, len(fd.GetDependency())) + for i := 0; i < d.Imports().Len(); i++ { + f := d.Imports().Get(i).FileDescriptor + if c, err := wrapFile(f, cache); err != nil { + return nil, err + } else { + ret.deps[i] = c + } + } + ret.publicDeps = make([]*FileDescriptor, len(fd.GetPublicDependency())) + for i, pd := range fd.GetPublicDependency() { + ret.publicDeps[i] = ret.deps[pd] + } + ret.weakDeps = make([]*FileDescriptor, len(fd.GetWeakDependency())) + for i, wd := range fd.GetWeakDependency() { + ret.weakDeps[i] = ret.deps[wd] + } + + // populate all tables of child descriptors + path := make([]int32, 1, 8) + path[0] = internal.File_messagesTag + for i := 0; i < d.Messages().Len(); i++ { + src := d.Messages().Get(i) + srcProto := fd.GetMessageType()[src.Index()] + md := createMessageDescriptor(ret, ret, src, srcProto, ret.symbols, cache, append(path, int32(i))) + ret.symbols[string(src.FullName())] = md + ret.messages = append(ret.messages, md) + } + path[0] = internal.File_enumsTag + for i := 0; i < d.Enums().Len(); i++ { + src := d.Enums().Get(i) + srcProto := fd.GetEnumType()[src.Index()] + ed := createEnumDescriptor(ret, ret, src, srcProto, ret.symbols, cache, append(path, int32(i))) + ret.symbols[string(src.FullName())] = ed + ret.enums = append(ret.enums, ed) + } 
+ path[0] = internal.File_extensionsTag + for i := 0; i < d.Extensions().Len(); i++ { + src := d.Extensions().Get(i) + srcProto := fd.GetExtension()[src.Index()] + exd := createFieldDescriptor(ret, ret, src, srcProto, cache, append(path, int32(i))) + ret.symbols[string(src.FullName())] = exd + ret.extensions = append(ret.extensions, exd) + } + path[0] = internal.File_servicesTag + for i := 0; i < d.Services().Len(); i++ { + src := d.Services().Get(i) + srcProto := fd.GetService()[src.Index()] + sd := createServiceDescriptor(ret, src, srcProto, ret.symbols, append(path, int32(i))) + ret.symbols[string(src.FullName())] = sd + ret.services = append(ret.services, sd) + } + + ret.sourceInfo = internal.CreateSourceInfoMap(fd) + ret.sourceInfoRecomputeFunc = ret.recomputeSourceInfo + + // now we can resolve all type references and source code info + for _, md := range ret.messages { + if err := md.resolve(cache); err != nil { + return nil, err + } + } + path[0] = internal.File_extensionsTag + for _, exd := range ret.extensions { + if err := exd.resolve(cache); err != nil { + return nil, err + } + } + path[0] = internal.File_servicesTag + for _, sd := range ret.services { + if err := sd.resolve(cache); err != nil { + return nil, err + } + } + + return ret, nil +} + +// CreateFileDescriptors constructs a set of descriptors, one for each of the +// given descriptor protos. The given set of descriptor protos must include all +// transitive dependencies for every file. +func CreateFileDescriptors(fds []*descriptorpb.FileDescriptorProto) (map[string]*FileDescriptor, error) { + return createFileDescriptors(fds, nil) +} + +func createFileDescriptors(fds []*descriptorpb.FileDescriptorProto, r *ImportResolver) (map[string]*FileDescriptor, error) { + if len(fds) == 0 { + return nil, nil + } + files := map[string]*descriptorpb.FileDescriptorProto{} + resolved := map[string]*FileDescriptor{} + var name string + for _, fd := range fds { + name = fd.GetName() + files[name] = fd + } + for _, fd := range fds { + _, err := createFromSet(fd.GetName(), r, nil, files, resolved) + if err != nil { + return nil, err + } + } + return resolved, nil +} + +// ToFileDescriptorSet creates a FileDescriptorSet proto that contains all of the given +// file descriptors and their transitive dependencies. The files are topologically sorted +// so that a file will always appear after its dependencies. +func ToFileDescriptorSet(fds ...*FileDescriptor) *descriptorpb.FileDescriptorSet { + var fdps []*descriptorpb.FileDescriptorProto + addAllFiles(fds, &fdps, map[string]struct{}{}) + return &descriptorpb.FileDescriptorSet{File: fdps} +} + +func addAllFiles(src []*FileDescriptor, results *[]*descriptorpb.FileDescriptorProto, seen map[string]struct{}) { + for _, fd := range src { + if _, ok := seen[fd.GetName()]; ok { + continue + } + seen[fd.GetName()] = struct{}{} + addAllFiles(fd.GetDependencies(), results, seen) + *results = append(*results, fd.AsFileDescriptorProto()) + } +} + +// CreateFileDescriptorFromSet creates a descriptor from the given file descriptor set. The +// set's *last* file will be the returned descriptor. The set's remaining files must comprise +// the full set of transitive dependencies of that last file. This is the same format and +// order used by protoc when emitting a FileDescriptorSet file with an invocation like so: +// +// protoc --descriptor_set_out=./test.protoset --include_imports -I. 
test.proto +func CreateFileDescriptorFromSet(fds *descriptorpb.FileDescriptorSet) (*FileDescriptor, error) { + return createFileDescriptorFromSet(fds, nil) +} + +func createFileDescriptorFromSet(fds *descriptorpb.FileDescriptorSet, r *ImportResolver) (*FileDescriptor, error) { + result, err := createFileDescriptorsFromSet(fds, r) + if err != nil { + return nil, err + } + files := fds.GetFile() + lastFilename := files[len(files)-1].GetName() + return result[lastFilename], nil +} + +// CreateFileDescriptorsFromSet creates file descriptors from the given file descriptor set. +// The returned map includes all files in the set, keyed b name. The set must include the +// full set of transitive dependencies for all files therein or else a link error will occur +// and be returned instead of the slice of descriptors. This is the same format used by +// protoc when a FileDescriptorSet file with an invocation like so: +// +// protoc --descriptor_set_out=./test.protoset --include_imports -I. test.proto +func CreateFileDescriptorsFromSet(fds *descriptorpb.FileDescriptorSet) (map[string]*FileDescriptor, error) { + return createFileDescriptorsFromSet(fds, nil) +} + +func createFileDescriptorsFromSet(fds *descriptorpb.FileDescriptorSet, r *ImportResolver) (map[string]*FileDescriptor, error) { + files := fds.GetFile() + if len(files) == 0 { + return nil, errors.New("file descriptor set is empty") + } + return createFileDescriptors(files, r) +} + +// createFromSet creates a descriptor for the given filename. It recursively +// creates descriptors for the given file's dependencies. +func createFromSet(filename string, r *ImportResolver, seen []string, files map[string]*descriptorpb.FileDescriptorProto, resolved map[string]*FileDescriptor) (*FileDescriptor, error) { + for _, s := range seen { + if filename == s { + return nil, fmt.Errorf("cycle in imports: %s", strings.Join(append(seen, filename), " -> ")) + } + } + seen = append(seen, filename) + + if d, ok := resolved[filename]; ok { + return d, nil + } + fdp := files[filename] + if fdp == nil { + return nil, intn.ErrNoSuchFile(filename) + } + deps := make([]*FileDescriptor, len(fdp.GetDependency())) + for i, depName := range fdp.GetDependency() { + resolvedDep := r.ResolveImport(filename, depName) + dep, err := createFromSet(resolvedDep, r, seen, files, resolved) + if _, ok := err.(intn.ErrNoSuchFile); ok && resolvedDep != depName { + dep, err = createFromSet(depName, r, seen, files, resolved) + } + if err != nil { + return nil, err + } + deps[i] = dep + } + d, err := createFileDescriptor(fdp, deps, r) + if err != nil { + return nil, err + } + resolved[filename] = d + return d, nil +} diff --git a/vendor/github.com/jhump/protoreflect/desc/descriptor.go b/vendor/github.com/jhump/protoreflect/desc/descriptor.go new file mode 100644 index 00000000..38b8f51b --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/descriptor.go @@ -0,0 +1,1847 @@ +package desc + +import ( + "bytes" + "fmt" + "sort" + "strconv" + "unicode" + "unicode/utf8" + + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/types/descriptorpb" + + "github.com/jhump/protoreflect/desc/internal" +) + +// Descriptor is the common interface implemented by all descriptor objects. +type Descriptor interface { + // GetName returns the name of the object described by the descriptor. This will + // be a base name that does not include enclosing message names or the package name. 
+ // For file descriptors, this indicates the path and name to the described file. + GetName() string + // GetFullyQualifiedName returns the fully-qualified name of the object described by + // the descriptor. This will include the package name and any enclosing message names. + // For file descriptors, this returns the path and name to the described file (same as + // GetName). + GetFullyQualifiedName() string + // GetParent returns the enclosing element in a proto source file. If the described + // object is a top-level object, this returns the file descriptor. Otherwise, it returns + // the element in which the described object was declared. File descriptors have no + // parent and return nil. + GetParent() Descriptor + // GetFile returns the file descriptor in which this element was declared. File + // descriptors return themselves. + GetFile() *FileDescriptor + // GetOptions returns the options proto containing options for the described element. + GetOptions() proto.Message + // GetSourceInfo returns any source code information that was present in the file + // descriptor. Source code info is optional. If no source code info is available for + // the element (including if there is none at all in the file descriptor) then this + // returns nil + GetSourceInfo() *descriptorpb.SourceCodeInfo_Location + // AsProto returns the underlying descriptor proto for this descriptor. + AsProto() proto.Message +} + +type sourceInfoRecomputeFunc = internal.SourceInfoComputeFunc + +// FileDescriptor describes a proto source file. +type FileDescriptor struct { + wrapped protoreflect.FileDescriptor + proto *descriptorpb.FileDescriptorProto + symbols map[string]Descriptor + deps []*FileDescriptor + publicDeps []*FileDescriptor + weakDeps []*FileDescriptor + messages []*MessageDescriptor + enums []*EnumDescriptor + extensions []*FieldDescriptor + services []*ServiceDescriptor + fieldIndex map[string]map[int32]*FieldDescriptor + sourceInfo internal.SourceInfoMap + sourceInfoRecomputeFunc +} + +// Unwrap returns the underlying protoreflect.Descriptor. Most usages will be more +// interested in UnwrapFile, which has a more specific return type. This generic +// version is present to satisfy the DescriptorWrapper interface. +func (fd *FileDescriptor) Unwrap() protoreflect.Descriptor { + return fd.wrapped +} + +// UnwrapFile returns the underlying protoreflect.FileDescriptor. +func (fd *FileDescriptor) UnwrapFile() protoreflect.FileDescriptor { + return fd.wrapped +} + +func (fd *FileDescriptor) recomputeSourceInfo() { + internal.PopulateSourceInfoMap(fd.proto, fd.sourceInfo) +} + +func (fd *FileDescriptor) registerField(field *FieldDescriptor) { + fields := fd.fieldIndex[field.owner.GetFullyQualifiedName()] + if fields == nil { + fields = map[int32]*FieldDescriptor{} + fd.fieldIndex[field.owner.GetFullyQualifiedName()] = fields + } + fields[field.GetNumber()] = field +} + +// GetName returns the name of the file, as it was given to the protoc invocation +// to compile it, possibly including path (relative to a directory in the proto +// import path). +func (fd *FileDescriptor) GetName() string { + return fd.wrapped.Path() +} + +// GetFullyQualifiedName returns the name of the file, same as GetName. It is +// present to satisfy the Descriptor interface. +func (fd *FileDescriptor) GetFullyQualifiedName() string { + return fd.wrapped.Path() +} + +// GetPackage returns the name of the package declared in the file. 
+func (fd *FileDescriptor) GetPackage() string { + return string(fd.wrapped.Package()) +} + +// GetParent always returns nil: files are the root of descriptor hierarchies. +// Is it present to satisfy the Descriptor interface. +func (fd *FileDescriptor) GetParent() Descriptor { + return nil +} + +// GetFile returns the receiver, which is a file descriptor. This is present +// to satisfy the Descriptor interface. +func (fd *FileDescriptor) GetFile() *FileDescriptor { + return fd +} + +// GetOptions returns the file's options. Most usages will be more interested +// in GetFileOptions, which has a concrete return type. This generic version +// is present to satisfy the Descriptor interface. +func (fd *FileDescriptor) GetOptions() proto.Message { + return fd.proto.GetOptions() +} + +// GetFileOptions returns the file's options. +func (fd *FileDescriptor) GetFileOptions() *descriptorpb.FileOptions { + return fd.proto.GetOptions() +} + +// GetSourceInfo returns nil for files. It is present to satisfy the Descriptor +// interface. +func (fd *FileDescriptor) GetSourceInfo() *descriptorpb.SourceCodeInfo_Location { + return nil +} + +// AsProto returns the underlying descriptor proto. Most usages will be more +// interested in AsFileDescriptorProto, which has a concrete return type. This +// generic version is present to satisfy the Descriptor interface. +func (fd *FileDescriptor) AsProto() proto.Message { + return fd.proto +} + +// AsFileDescriptorProto returns the underlying descriptor proto. +func (fd *FileDescriptor) AsFileDescriptorProto() *descriptorpb.FileDescriptorProto { + return fd.proto +} + +// String returns the underlying descriptor proto, in compact text format. +func (fd *FileDescriptor) String() string { + return fd.proto.String() +} + +// IsProto3 returns true if the file declares a syntax of "proto3". +// +// When this returns false, the file is either syntax "proto2" (if +// Edition() returns zero) or the file uses editions. +func (fd *FileDescriptor) IsProto3() bool { + return fd.wrapped.Syntax() == protoreflect.Proto3 +} + +// Edition returns the edition of the file. If the file does not +// use editions syntax, zero is returned. +func (fd *FileDescriptor) Edition() descriptorpb.Edition { + if fd.wrapped.Syntax() == protoreflect.Editions { + return fd.proto.GetEdition() + } + return 0 +} + +// GetDependencies returns all of this file's dependencies. These correspond to +// import statements in the file. +func (fd *FileDescriptor) GetDependencies() []*FileDescriptor { + return fd.deps +} + +// GetPublicDependencies returns all of this file's public dependencies. These +// correspond to public import statements in the file. +func (fd *FileDescriptor) GetPublicDependencies() []*FileDescriptor { + return fd.publicDeps +} + +// GetWeakDependencies returns all of this file's weak dependencies. These +// correspond to weak import statements in the file. +func (fd *FileDescriptor) GetWeakDependencies() []*FileDescriptor { + return fd.weakDeps +} + +// GetMessageTypes returns all top-level messages declared in this file. +func (fd *FileDescriptor) GetMessageTypes() []*MessageDescriptor { + return fd.messages +} + +// GetEnumTypes returns all top-level enums declared in this file. +func (fd *FileDescriptor) GetEnumTypes() []*EnumDescriptor { + return fd.enums +} + +// GetExtensions returns all top-level extensions declared in this file. +func (fd *FileDescriptor) GetExtensions() []*FieldDescriptor { + return fd.extensions +} + +// GetServices returns all services declared in this file. 
+func (fd *FileDescriptor) GetServices() []*ServiceDescriptor { + return fd.services +} + +// FindSymbol returns the descriptor contained within this file for the +// element with the given fully-qualified symbol name. If no such element +// exists then this method returns nil. +func (fd *FileDescriptor) FindSymbol(symbol string) Descriptor { + if len(symbol) == 0 { + return nil + } + if symbol[0] == '.' { + symbol = symbol[1:] + } + if ret := fd.symbols[symbol]; ret != nil { + return ret + } + + // allow accessing symbols through public imports, too + for _, dep := range fd.GetPublicDependencies() { + if ret := dep.FindSymbol(symbol); ret != nil { + return ret + } + } + + // not found + return nil +} + +// FindMessage finds the message with the given fully-qualified name. If no +// such element exists in this file then nil is returned. +func (fd *FileDescriptor) FindMessage(msgName string) *MessageDescriptor { + if md, ok := fd.symbols[msgName].(*MessageDescriptor); ok { + return md + } else { + return nil + } +} + +// FindEnum finds the enum with the given fully-qualified name. If no such +// element exists in this file then nil is returned. +func (fd *FileDescriptor) FindEnum(enumName string) *EnumDescriptor { + if ed, ok := fd.symbols[enumName].(*EnumDescriptor); ok { + return ed + } else { + return nil + } +} + +// FindService finds the service with the given fully-qualified name. If no +// such element exists in this file then nil is returned. +func (fd *FileDescriptor) FindService(serviceName string) *ServiceDescriptor { + if sd, ok := fd.symbols[serviceName].(*ServiceDescriptor); ok { + return sd + } else { + return nil + } +} + +// FindExtension finds the extension field for the given extended type name and +// tag number. If no such element exists in this file then nil is returned. +func (fd *FileDescriptor) FindExtension(extendeeName string, tagNumber int32) *FieldDescriptor { + if exd, ok := fd.fieldIndex[extendeeName][tagNumber]; ok && exd.IsExtension() { + return exd + } else { + return nil + } +} + +// FindExtensionByName finds the extension field with the given fully-qualified +// name. If no such element exists in this file then nil is returned. +func (fd *FileDescriptor) FindExtensionByName(extName string) *FieldDescriptor { + if exd, ok := fd.symbols[extName].(*FieldDescriptor); ok && exd.IsExtension() { + return exd + } else { + return nil + } +} + +// MessageDescriptor describes a protocol buffer message. +type MessageDescriptor struct { + wrapped protoreflect.MessageDescriptor + proto *descriptorpb.DescriptorProto + parent Descriptor + file *FileDescriptor + fields []*FieldDescriptor + nested []*MessageDescriptor + enums []*EnumDescriptor + extensions []*FieldDescriptor + oneOfs []*OneOfDescriptor + extRanges extRanges + sourceInfoPath []int32 + jsonNames jsonNameMap +} + +// Unwrap returns the underlying protoreflect.Descriptor. Most usages will be more +// interested in UnwrapMessage, which has a more specific return type. This generic +// version is present to satisfy the DescriptorWrapper interface. +func (md *MessageDescriptor) Unwrap() protoreflect.Descriptor { + return md.wrapped +} + +// UnwrapMessage returns the underlying protoreflect.MessageDescriptor. 
+func (md *MessageDescriptor) UnwrapMessage() protoreflect.MessageDescriptor { + return md.wrapped +} + +func createMessageDescriptor(fd *FileDescriptor, parent Descriptor, md protoreflect.MessageDescriptor, mdp *descriptorpb.DescriptorProto, symbols map[string]Descriptor, cache descriptorCache, path []int32) *MessageDescriptor { + ret := &MessageDescriptor{ + wrapped: md, + proto: mdp, + parent: parent, + file: fd, + sourceInfoPath: append([]int32(nil), path...), // defensive copy + } + cache.put(md, ret) + path = append(path, internal.Message_nestedMessagesTag) + for i := 0; i < md.Messages().Len(); i++ { + src := md.Messages().Get(i) + srcProto := mdp.GetNestedType()[src.Index()] + nmd := createMessageDescriptor(fd, ret, src, srcProto, symbols, cache, append(path, int32(i))) + symbols[string(src.FullName())] = nmd + ret.nested = append(ret.nested, nmd) + } + path[len(path)-1] = internal.Message_enumsTag + for i := 0; i < md.Enums().Len(); i++ { + src := md.Enums().Get(i) + srcProto := mdp.GetEnumType()[src.Index()] + ed := createEnumDescriptor(fd, ret, src, srcProto, symbols, cache, append(path, int32(i))) + symbols[string(src.FullName())] = ed + ret.enums = append(ret.enums, ed) + } + path[len(path)-1] = internal.Message_fieldsTag + for i := 0; i < md.Fields().Len(); i++ { + src := md.Fields().Get(i) + srcProto := mdp.GetField()[src.Index()] + fld := createFieldDescriptor(fd, ret, src, srcProto, cache, append(path, int32(i))) + symbols[string(src.FullName())] = fld + ret.fields = append(ret.fields, fld) + } + path[len(path)-1] = internal.Message_extensionsTag + for i := 0; i < md.Extensions().Len(); i++ { + src := md.Extensions().Get(i) + srcProto := mdp.GetExtension()[src.Index()] + exd := createFieldDescriptor(fd, ret, src, srcProto, cache, append(path, int32(i))) + symbols[string(src.FullName())] = exd + ret.extensions = append(ret.extensions, exd) + } + path[len(path)-1] = internal.Message_oneOfsTag + for i := 0; i < md.Oneofs().Len(); i++ { + src := md.Oneofs().Get(i) + srcProto := mdp.GetOneofDecl()[src.Index()] + od := createOneOfDescriptor(fd, ret, i, src, srcProto, append(path, int32(i))) + symbols[string(src.FullName())] = od + ret.oneOfs = append(ret.oneOfs, od) + } + for _, r := range mdp.GetExtensionRange() { + // proto.ExtensionRange is inclusive (and that's how extension ranges are defined in code). + // but protoc converts range to exclusive end in descriptor, so we must convert back + end := r.GetEnd() - 1 + ret.extRanges = append(ret.extRanges, proto.ExtensionRange{ + Start: r.GetStart(), + End: end}) + } + sort.Sort(ret.extRanges) + + return ret +} + +func (md *MessageDescriptor) resolve(cache descriptorCache) error { + for _, nmd := range md.nested { + if err := nmd.resolve(cache); err != nil { + return err + } + } + for _, fld := range md.fields { + if err := fld.resolve(cache); err != nil { + return err + } + } + for _, exd := range md.extensions { + if err := exd.resolve(cache); err != nil { + return err + } + } + return nil +} + +// GetName returns the simple (unqualified) name of the message. +func (md *MessageDescriptor) GetName() string { + return string(md.wrapped.Name()) +} + +// GetFullyQualifiedName returns the fully qualified name of the message. This +// includes the package name (if there is one) as well as the names of any +// enclosing messages. +func (md *MessageDescriptor) GetFullyQualifiedName() string { + return string(md.wrapped.FullName()) +} + +// GetParent returns the message's enclosing descriptor. 
For top-level messages, +// this will be a file descriptor. Otherwise it will be the descriptor for the +// enclosing message. +func (md *MessageDescriptor) GetParent() Descriptor { + return md.parent +} + +// GetFile returns the descriptor for the file in which this message is defined. +func (md *MessageDescriptor) GetFile() *FileDescriptor { + return md.file +} + +// GetOptions returns the message's options. Most usages will be more interested +// in GetMessageOptions, which has a concrete return type. This generic version +// is present to satisfy the Descriptor interface. +func (md *MessageDescriptor) GetOptions() proto.Message { + return md.proto.GetOptions() +} + +// GetMessageOptions returns the message's options. +func (md *MessageDescriptor) GetMessageOptions() *descriptorpb.MessageOptions { + return md.proto.GetOptions() +} + +// GetSourceInfo returns source info for the message, if present in the +// descriptor. Not all descriptors will contain source info. If non-nil, the +// returned info contains information about the location in the file where the +// message was defined and also contains comments associated with the message +// definition. +func (md *MessageDescriptor) GetSourceInfo() *descriptorpb.SourceCodeInfo_Location { + return md.file.sourceInfo.Get(md.sourceInfoPath) +} + +// AsProto returns the underlying descriptor proto. Most usages will be more +// interested in AsDescriptorProto, which has a concrete return type. This +// generic version is present to satisfy the Descriptor interface. +func (md *MessageDescriptor) AsProto() proto.Message { + return md.proto +} + +// AsDescriptorProto returns the underlying descriptor proto. +func (md *MessageDescriptor) AsDescriptorProto() *descriptorpb.DescriptorProto { + return md.proto +} + +// String returns the underlying descriptor proto, in compact text format. +func (md *MessageDescriptor) String() string { + return md.proto.String() +} + +// IsMapEntry returns true if this is a synthetic message type that represents an entry +// in a map field. +func (md *MessageDescriptor) IsMapEntry() bool { + return md.wrapped.IsMapEntry() +} + +// GetFields returns all of the fields for this message. +func (md *MessageDescriptor) GetFields() []*FieldDescriptor { + return md.fields +} + +// GetNestedMessageTypes returns all of the message types declared inside this message. +func (md *MessageDescriptor) GetNestedMessageTypes() []*MessageDescriptor { + return md.nested +} + +// GetNestedEnumTypes returns all of the enums declared inside this message. +func (md *MessageDescriptor) GetNestedEnumTypes() []*EnumDescriptor { + return md.enums +} + +// GetNestedExtensions returns all of the extensions declared inside this message. +func (md *MessageDescriptor) GetNestedExtensions() []*FieldDescriptor { + return md.extensions +} + +// GetOneOfs returns all of the one-of field sets declared inside this message. +func (md *MessageDescriptor) GetOneOfs() []*OneOfDescriptor { + return md.oneOfs +} + +// IsProto3 returns true if the file in which this message is defined declares a syntax of "proto3". +func (md *MessageDescriptor) IsProto3() bool { + return md.file.IsProto3() +} + +// GetExtensionRanges returns the ranges of extension field numbers for this message. +func (md *MessageDescriptor) GetExtensionRanges() []proto.ExtensionRange { + return md.extRanges +} + +// IsExtendable returns true if this message has any extension ranges. 
+func (md *MessageDescriptor) IsExtendable() bool { + return len(md.extRanges) > 0 +} + +// IsExtension returns true if the given tag number is within any of this message's +// extension ranges. +func (md *MessageDescriptor) IsExtension(tagNumber int32) bool { + return md.extRanges.IsExtension(tagNumber) +} + +type extRanges []proto.ExtensionRange + +func (er extRanges) String() string { + var buf bytes.Buffer + first := true + for _, r := range er { + if first { + first = false + } else { + buf.WriteString(",") + } + fmt.Fprintf(&buf, "%d..%d", r.Start, r.End) + } + return buf.String() +} + +func (er extRanges) IsExtension(tagNumber int32) bool { + i := sort.Search(len(er), func(i int) bool { return er[i].End >= tagNumber }) + return i < len(er) && tagNumber >= er[i].Start +} + +func (er extRanges) Len() int { + return len(er) +} + +func (er extRanges) Less(i, j int) bool { + return er[i].Start < er[j].Start +} + +func (er extRanges) Swap(i, j int) { + er[i], er[j] = er[j], er[i] +} + +// FindFieldByName finds the field with the given name. If no such field exists +// then nil is returned. Only regular fields are returned, not extensions. +func (md *MessageDescriptor) FindFieldByName(fieldName string) *FieldDescriptor { + fqn := fmt.Sprintf("%s.%s", md.GetFullyQualifiedName(), fieldName) + if fd, ok := md.file.symbols[fqn].(*FieldDescriptor); ok && !fd.IsExtension() { + return fd + } else { + return nil + } +} + +// FindFieldByNumber finds the field with the given tag number. If no such field +// exists then nil is returned. Only regular fields are returned, not extensions. +func (md *MessageDescriptor) FindFieldByNumber(tagNumber int32) *FieldDescriptor { + if fd, ok := md.file.fieldIndex[md.GetFullyQualifiedName()][tagNumber]; ok && !fd.IsExtension() { + return fd + } else { + return nil + } +} + +// FieldDescriptor describes a field of a protocol buffer message. +type FieldDescriptor struct { + wrapped protoreflect.FieldDescriptor + proto *descriptorpb.FieldDescriptorProto + parent Descriptor + owner *MessageDescriptor + file *FileDescriptor + oneOf *OneOfDescriptor + msgType *MessageDescriptor + enumType *EnumDescriptor + sourceInfoPath []int32 + def memoizedDefault +} + +// Unwrap returns the underlying protoreflect.Descriptor. Most usages will be more +// interested in UnwrapField, which has a more specific return type. This generic +// version is present to satisfy the DescriptorWrapper interface. +func (fd *FieldDescriptor) Unwrap() protoreflect.Descriptor { + return fd.wrapped +} + +// UnwrapField returns the underlying protoreflect.FieldDescriptor. 
+func (fd *FieldDescriptor) UnwrapField() protoreflect.FieldDescriptor { + return fd.wrapped +} + +func createFieldDescriptor(fd *FileDescriptor, parent Descriptor, fld protoreflect.FieldDescriptor, fldp *descriptorpb.FieldDescriptorProto, cache descriptorCache, path []int32) *FieldDescriptor { + ret := &FieldDescriptor{ + wrapped: fld, + proto: fldp, + parent: parent, + file: fd, + sourceInfoPath: append([]int32(nil), path...), // defensive copy + } + cache.put(fld, ret) + if !fld.IsExtension() { + ret.owner = parent.(*MessageDescriptor) + } + // owner for extensions, field type (be it message or enum), and one-ofs get resolved later + return ret +} + +func descriptorType(d Descriptor) string { + switch d := d.(type) { + case *FileDescriptor: + return "a file" + case *MessageDescriptor: + return "a message" + case *FieldDescriptor: + if d.IsExtension() { + return "an extension" + } + return "a field" + case *OneOfDescriptor: + return "a oneof" + case *EnumDescriptor: + return "an enum" + case *EnumValueDescriptor: + return "an enum value" + case *ServiceDescriptor: + return "a service" + case *MethodDescriptor: + return "a method" + default: + return fmt.Sprintf("a %T", d) + } +} + +func (fd *FieldDescriptor) resolve(cache descriptorCache) error { + if fd.proto.OneofIndex != nil && fd.oneOf == nil { + return fmt.Errorf("could not link field %s to one-of index %d", fd.GetFullyQualifiedName(), *fd.proto.OneofIndex) + } + if fd.proto.GetType() == descriptorpb.FieldDescriptorProto_TYPE_ENUM { + desc, err := resolve(fd.file, fd.wrapped.Enum(), cache) + if err != nil { + return err + } + enumType, ok := desc.(*EnumDescriptor) + if !ok { + return fmt.Errorf("field %v indicates a type of enum, but references %q which is %s", fd.GetFullyQualifiedName(), fd.proto.GetTypeName(), descriptorType(desc)) + } + fd.enumType = enumType + } + if fd.proto.GetType() == descriptorpb.FieldDescriptorProto_TYPE_MESSAGE || fd.proto.GetType() == descriptorpb.FieldDescriptorProto_TYPE_GROUP { + desc, err := resolve(fd.file, fd.wrapped.Message(), cache) + if err != nil { + return err + } + msgType, ok := desc.(*MessageDescriptor) + if !ok { + return fmt.Errorf("field %v indicates a type of message, but references %q which is %s", fd.GetFullyQualifiedName(), fd.proto.GetTypeName(), descriptorType(desc)) + } + fd.msgType = msgType + } + if fd.IsExtension() { + desc, err := resolve(fd.file, fd.wrapped.ContainingMessage(), cache) + if err != nil { + return err + } + msgType, ok := desc.(*MessageDescriptor) + if !ok { + return fmt.Errorf("field %v extends %q which should be a message but is %s", fd.GetFullyQualifiedName(), fd.proto.GetExtendee(), descriptorType(desc)) + } + fd.owner = msgType + } + fd.file.registerField(fd) + return nil +} + +func (fd *FieldDescriptor) determineDefault() interface{} { + if fd.IsMap() { + return map[interface{}]interface{}(nil) + } else if fd.IsRepeated() { + return []interface{}(nil) + } else if fd.msgType != nil { + return nil + } + + proto3 := fd.file.IsProto3() + if !proto3 { + def := fd.AsFieldDescriptorProto().GetDefaultValue() + if def != "" { + ret := parseDefaultValue(fd, def) + if ret != nil { + return ret + } + // if we can't parse default value, fall-through to return normal default... 
+ } + } + + switch fd.GetType() { + case descriptorpb.FieldDescriptorProto_TYPE_FIXED32, + descriptorpb.FieldDescriptorProto_TYPE_UINT32: + return uint32(0) + case descriptorpb.FieldDescriptorProto_TYPE_SFIXED32, + descriptorpb.FieldDescriptorProto_TYPE_INT32, + descriptorpb.FieldDescriptorProto_TYPE_SINT32: + return int32(0) + case descriptorpb.FieldDescriptorProto_TYPE_FIXED64, + descriptorpb.FieldDescriptorProto_TYPE_UINT64: + return uint64(0) + case descriptorpb.FieldDescriptorProto_TYPE_SFIXED64, + descriptorpb.FieldDescriptorProto_TYPE_INT64, + descriptorpb.FieldDescriptorProto_TYPE_SINT64: + return int64(0) + case descriptorpb.FieldDescriptorProto_TYPE_FLOAT: + return float32(0.0) + case descriptorpb.FieldDescriptorProto_TYPE_DOUBLE: + return float64(0.0) + case descriptorpb.FieldDescriptorProto_TYPE_BOOL: + return false + case descriptorpb.FieldDescriptorProto_TYPE_BYTES: + return []byte(nil) + case descriptorpb.FieldDescriptorProto_TYPE_STRING: + return "" + case descriptorpb.FieldDescriptorProto_TYPE_ENUM: + if proto3 { + return int32(0) + } + enumVals := fd.GetEnumType().GetValues() + if len(enumVals) > 0 { + return enumVals[0].GetNumber() + } else { + return int32(0) // WTF? + } + default: + panic(fmt.Sprintf("Unknown field type: %v", fd.GetType())) + } +} + +func parseDefaultValue(fd *FieldDescriptor, val string) interface{} { + switch fd.GetType() { + case descriptorpb.FieldDescriptorProto_TYPE_ENUM: + vd := fd.GetEnumType().FindValueByName(val) + if vd != nil { + return vd.GetNumber() + } + return nil + case descriptorpb.FieldDescriptorProto_TYPE_BOOL: + if val == "true" { + return true + } else if val == "false" { + return false + } + return nil + case descriptorpb.FieldDescriptorProto_TYPE_BYTES: + return []byte(unescape(val)) + case descriptorpb.FieldDescriptorProto_TYPE_STRING: + return val + case descriptorpb.FieldDescriptorProto_TYPE_FLOAT: + if f, err := strconv.ParseFloat(val, 32); err == nil { + return float32(f) + } else { + return float32(0) + } + case descriptorpb.FieldDescriptorProto_TYPE_DOUBLE: + if f, err := strconv.ParseFloat(val, 64); err == nil { + return f + } else { + return float64(0) + } + case descriptorpb.FieldDescriptorProto_TYPE_INT32, + descriptorpb.FieldDescriptorProto_TYPE_SINT32, + descriptorpb.FieldDescriptorProto_TYPE_SFIXED32: + if i, err := strconv.ParseInt(val, 10, 32); err == nil { + return int32(i) + } else { + return int32(0) + } + case descriptorpb.FieldDescriptorProto_TYPE_UINT32, + descriptorpb.FieldDescriptorProto_TYPE_FIXED32: + if i, err := strconv.ParseUint(val, 10, 32); err == nil { + return uint32(i) + } else { + return uint32(0) + } + case descriptorpb.FieldDescriptorProto_TYPE_INT64, + descriptorpb.FieldDescriptorProto_TYPE_SINT64, + descriptorpb.FieldDescriptorProto_TYPE_SFIXED64: + if i, err := strconv.ParseInt(val, 10, 64); err == nil { + return i + } else { + return int64(0) + } + case descriptorpb.FieldDescriptorProto_TYPE_UINT64, + descriptorpb.FieldDescriptorProto_TYPE_FIXED64: + if i, err := strconv.ParseUint(val, 10, 64); err == nil { + return i + } else { + return uint64(0) + } + default: + return nil + } +} + +func unescape(s string) string { + // protoc encodes default values for 'bytes' fields using C escaping, + // so this function reverses that escaping + out := make([]byte, 0, len(s)) + var buf [4]byte + for len(s) > 0 { + if s[0] != '\\' || len(s) < 2 { + // not escape sequence, or too short to be well-formed escape + out = append(out, s[0]) + s = s[1:] + } else if s[1] == 'x' || s[1] == 'X' { + n := 
matchPrefix(s[2:], 2, isHex) + if n == 0 { + // bad escape + out = append(out, s[:2]...) + s = s[2:] + } else { + c, err := strconv.ParseUint(s[2:2+n], 16, 8) + if err != nil { + // shouldn't really happen... + out = append(out, s[:2+n]...) + } else { + out = append(out, byte(c)) + } + s = s[2+n:] + } + } else if s[1] >= '0' && s[1] <= '7' { + n := 1 + matchPrefix(s[2:], 2, isOctal) + c, err := strconv.ParseUint(s[1:1+n], 8, 8) + if err != nil || c > 0xff { + out = append(out, s[:1+n]...) + } else { + out = append(out, byte(c)) + } + s = s[1+n:] + } else if s[1] == 'u' { + if len(s) < 6 { + // bad escape + out = append(out, s...) + s = s[len(s):] + } else { + c, err := strconv.ParseUint(s[2:6], 16, 16) + if err != nil { + // bad escape + out = append(out, s[:6]...) + } else { + w := utf8.EncodeRune(buf[:], rune(c)) + out = append(out, buf[:w]...) + } + s = s[6:] + } + } else if s[1] == 'U' { + if len(s) < 10 { + // bad escape + out = append(out, s...) + s = s[len(s):] + } else { + c, err := strconv.ParseUint(s[2:10], 16, 32) + if err != nil || c > 0x10ffff { + // bad escape + out = append(out, s[:10]...) + } else { + w := utf8.EncodeRune(buf[:], rune(c)) + out = append(out, buf[:w]...) + } + s = s[10:] + } + } else { + switch s[1] { + case 'a': + out = append(out, '\a') + case 'b': + out = append(out, '\b') + case 'f': + out = append(out, '\f') + case 'n': + out = append(out, '\n') + case 'r': + out = append(out, '\r') + case 't': + out = append(out, '\t') + case 'v': + out = append(out, '\v') + case '\\': + out = append(out, '\\') + case '\'': + out = append(out, '\'') + case '"': + out = append(out, '"') + case '?': + out = append(out, '?') + default: + // invalid escape, just copy it as-is + out = append(out, s[:2]...) + } + s = s[2:] + } + } + return string(out) +} + +func isOctal(b byte) bool { return b >= '0' && b <= '7' } +func isHex(b byte) bool { + return (b >= '0' && b <= '9') || (b >= 'a' && b <= 'f') || (b >= 'A' && b <= 'F') +} +func matchPrefix(s string, limit int, fn func(byte) bool) int { + l := len(s) + if l > limit { + l = limit + } + i := 0 + for ; i < l; i++ { + if !fn(s[i]) { + return i + } + } + return i +} + +// GetName returns the name of the field. +func (fd *FieldDescriptor) GetName() string { + return string(fd.wrapped.Name()) +} + +// GetNumber returns the tag number of this field. +func (fd *FieldDescriptor) GetNumber() int32 { + return int32(fd.wrapped.Number()) +} + +// GetFullyQualifiedName returns the fully qualified name of the field. Unlike +// GetName, this includes fully qualified name of the enclosing message for +// regular fields. +// +// For extension fields, this includes the package (if there is one) as well as +// any enclosing messages. The package and/or enclosing messages are for where +// the extension is defined, not the message it extends. +// +// If this field is part of a one-of, the fully qualified name does *not* +// include the name of the one-of, only of the enclosing message. +func (fd *FieldDescriptor) GetFullyQualifiedName() string { + return string(fd.wrapped.FullName()) +} + +// GetParent returns the fields's enclosing descriptor. For normal +// (non-extension) fields, this is the enclosing message. For extensions, this +// is the descriptor in which the extension is defined, not the message that is +// extended. The parent for an extension may be a file descriptor or a message, +// depending on where the extension is defined. 
+func (fd *FieldDescriptor) GetParent() Descriptor { + return fd.parent +} + +// GetFile returns the descriptor for the file in which this field is defined. +func (fd *FieldDescriptor) GetFile() *FileDescriptor { + return fd.file +} + +// GetOptions returns the field's options. Most usages will be more interested +// in GetFieldOptions, which has a concrete return type. This generic version +// is present to satisfy the Descriptor interface. +func (fd *FieldDescriptor) GetOptions() proto.Message { + return fd.proto.GetOptions() +} + +// GetFieldOptions returns the field's options. +func (fd *FieldDescriptor) GetFieldOptions() *descriptorpb.FieldOptions { + return fd.proto.GetOptions() +} + +// GetSourceInfo returns source info for the field, if present in the +// descriptor. Not all descriptors will contain source info. If non-nil, the +// returned info contains information about the location in the file where the +// field was defined and also contains comments associated with the field +// definition. +func (fd *FieldDescriptor) GetSourceInfo() *descriptorpb.SourceCodeInfo_Location { + return fd.file.sourceInfo.Get(fd.sourceInfoPath) +} + +// AsProto returns the underlying descriptor proto. Most usages will be more +// interested in AsFieldDescriptorProto, which has a concrete return type. This +// generic version is present to satisfy the Descriptor interface. +func (fd *FieldDescriptor) AsProto() proto.Message { + return fd.proto +} + +// AsFieldDescriptorProto returns the underlying descriptor proto. +func (fd *FieldDescriptor) AsFieldDescriptorProto() *descriptorpb.FieldDescriptorProto { + return fd.proto +} + +// String returns the underlying descriptor proto, in compact text format. +func (fd *FieldDescriptor) String() string { + return fd.proto.String() +} + +// GetJSONName returns the name of the field as referenced in the message's JSON +// format. +func (fd *FieldDescriptor) GetJSONName() string { + if jsonName := fd.proto.JsonName; jsonName != nil { + // if json name is present, use its value + return *jsonName + } + // otherwise, compute the proper JSON name from the field name + return jsonCamelCase(fd.proto.GetName()) +} + +func jsonCamelCase(s string) string { + // This mirrors the implementation in protoc/C++ runtime and in the Java runtime: + // https://github.com/protocolbuffers/protobuf/blob/a104dffcb6b1958a424f5fa6f9e6bdc0ab9b6f9e/src/google/protobuf/descriptor.cc#L276 + // https://github.com/protocolbuffers/protobuf/blob/a1c886834425abb64a966231dd2c9dd84fb289b3/java/core/src/main/java/com/google/protobuf/Descriptors.java#L1286 + var buf bytes.Buffer + prevWasUnderscore := false + for _, r := range s { + if r == '_' { + prevWasUnderscore = true + continue + } + if prevWasUnderscore { + r = unicode.ToUpper(r) + prevWasUnderscore = false + } + buf.WriteRune(r) + } + return buf.String() +} + +// GetFullyQualifiedJSONName returns the JSON format name (same as GetJSONName), +// but includes the fully qualified name of the enclosing message. +// +// If the field is an extension, it will return the package name (if there is +// one) as well as the names of any enclosing messages. The package and/or +// enclosing messages are for where the extension is defined, not the message it +// extends. 
+func (fd *FieldDescriptor) GetFullyQualifiedJSONName() string {
+	parent := fd.GetParent()
+	switch parent := parent.(type) {
+	case *FileDescriptor:
+		pkg := parent.GetPackage()
+		if pkg == "" {
+			return fd.GetJSONName()
+		}
+		return fmt.Sprintf("%s.%s", pkg, fd.GetJSONName())
+	default:
+		return fmt.Sprintf("%s.%s", parent.GetFullyQualifiedName(), fd.GetJSONName())
+	}
+}
+
+// GetOwner returns the message type that this field belongs to. If this is a normal
+// field then this is the same as GetParent. But for extensions, this will be the
+// extendee message whereas GetParent refers to where the extension was declared.
+func (fd *FieldDescriptor) GetOwner() *MessageDescriptor {
+	return fd.owner
+}
+
+// IsExtension returns true if this is an extension field.
+func (fd *FieldDescriptor) IsExtension() bool {
+	return fd.wrapped.IsExtension()
+}
+
+// GetOneOf returns the one-of field set to which this field belongs. If this field
+// is not part of a one-of then this method returns nil.
+func (fd *FieldDescriptor) GetOneOf() *OneOfDescriptor {
+	return fd.oneOf
+}
+
+// GetType returns the type of this field. If the type indicates an enum, the
+// enum type can be queried via GetEnumType. If the type indicates a message, the
+// message type can be queried via GetMessageType.
+func (fd *FieldDescriptor) GetType() descriptorpb.FieldDescriptorProto_Type {
+	return fd.proto.GetType()
+}
+
+// GetLabel returns the label for this field. The label can be required (proto2-only),
+// optional (default for proto3), or repeated.
+func (fd *FieldDescriptor) GetLabel() descriptorpb.FieldDescriptorProto_Label {
+	return fd.proto.GetLabel()
+}
+
+// IsRequired returns true if this field has the "required" label.
+func (fd *FieldDescriptor) IsRequired() bool {
+	return fd.wrapped.Cardinality() == protoreflect.Required
+}
+
+// IsRepeated returns true if this field has the "repeated" label.
+func (fd *FieldDescriptor) IsRepeated() bool {
+	return fd.wrapped.Cardinality() == protoreflect.Repeated
+}
+
+// IsProto3Optional returns true if this field has an explicit "optional" label
+// and is in a "proto3" syntax file. Such fields, if they are normal fields (not
+// extensions), will be nested in synthetic oneofs that contain only the single
+// field.
+func (fd *FieldDescriptor) IsProto3Optional() bool {
+	return fd.proto.GetProto3Optional()
+}
+
+// HasPresence returns true if this field can distinguish when a value is
+// present or not. Scalar fields in "proto3" syntax files, for example, return
+// false since absent values are indistinguishable from zero values.
+func (fd *FieldDescriptor) HasPresence() bool {
+	return fd.wrapped.HasPresence()
+}
+
+// IsMap returns true if this is a map field. If so, it will have the "repeated"
+// label and its type will be a message that represents a map entry. The map entry
+// message will have exactly two fields: tag #1 is the key and tag #2 is the value.
+func (fd *FieldDescriptor) IsMap() bool {
+	return fd.wrapped.IsMap()
+}
+
+// GetMapKeyType returns the type of the key field if this is a map field. If it is
+// not a map field, nil is returned.
+func (fd *FieldDescriptor) GetMapKeyType() *FieldDescriptor {
+	if fd.IsMap() {
+		return fd.msgType.FindFieldByNumber(int32(1))
+	}
+	return nil
+}
+
+// GetMapValueType returns the type of the value field if this is a map field. If it
+// is not a map field, nil is returned.
+func (fd *FieldDescriptor) GetMapValueType() *FieldDescriptor {
+	if fd.IsMap() {
+		return fd.msgType.FindFieldByNumber(int32(2))
+	}
+	return nil
+}
+
+// GetMessageType returns the type of this field if it is a message type. If
+// this field is not a message type, it returns nil.
+func (fd *FieldDescriptor) GetMessageType() *MessageDescriptor {
+	return fd.msgType
+}
+
+// GetEnumType returns the type of this field if it is an enum type. If this
+// field is not an enum type, it returns nil.
+func (fd *FieldDescriptor) GetEnumType() *EnumDescriptor {
+	return fd.enumType
+}
+
+// GetDefaultValue returns the default value for this field.
+//
+// If this field represents a message type, this method always returns nil (even though
+// for proto2 files, the default value should be a default instance of the message type).
+// If the field represents an enum type, this method returns an int32 corresponding to the
+// enum value. If this field is a map, it returns a nil map[interface{}]interface{}. If
+// this field is repeated (and not a map), it returns a nil []interface{}.
+//
+// Otherwise, it returns the declared default value for the field or a zero value, if no
+// default is declared or if the file is proto3. The type of said return value corresponds
+// to the type of the field:
+//
+//	+-------------------------+-----------+
+//	| Declared Type           | Go Type   |
+//	+-------------------------+-----------+
+//	| int32, sint32, sfixed32 | int32     |
+//	| int64, sint64, sfixed64 | int64     |
+//	| uint32, fixed32         | uint32    |
+//	| uint64, fixed64         | uint64    |
+//	| float                   | float32   |
+//	| double                  | float64   |
+//	| bool                    | bool      |
+//	| string                  | string    |
+//	| bytes                   | []byte    |
+//	+-------------------------+-----------+
+func (fd *FieldDescriptor) GetDefaultValue() interface{} {
+	return fd.getDefaultValue()
+}
+
+// EnumDescriptor describes an enum declared in a proto file.
+type EnumDescriptor struct {
+	wrapped        protoreflect.EnumDescriptor
+	proto          *descriptorpb.EnumDescriptorProto
+	parent         Descriptor
+	file           *FileDescriptor
+	values         []*EnumValueDescriptor
+	valuesByNum    sortedValues
+	sourceInfoPath []int32
+}
+
+// Unwrap returns the underlying protoreflect.Descriptor. Most usages will be more
+// interested in UnwrapEnum, which has a more specific return type. This generic
+// version is present to satisfy the DescriptorWrapper interface.
+func (ed *EnumDescriptor) Unwrap() protoreflect.Descriptor {
+	return ed.wrapped
+}
+
+// UnwrapEnum returns the underlying protoreflect.EnumDescriptor.
+func (ed *EnumDescriptor) UnwrapEnum() protoreflect.EnumDescriptor {
+	return ed.wrapped
+}
+
+func createEnumDescriptor(fd *FileDescriptor, parent Descriptor, ed protoreflect.EnumDescriptor, edp *descriptorpb.EnumDescriptorProto, symbols map[string]Descriptor, cache descriptorCache, path []int32) *EnumDescriptor {
+	ret := &EnumDescriptor{
+		wrapped:        ed,
+		proto:          edp,
+		parent:         parent,
+		file:           fd,
+		sourceInfoPath: append([]int32(nil), path...), // defensive copy
+	}
+	path = append(path, internal.Enum_valuesTag)
+	for i := 0; i < ed.Values().Len(); i++ {
+		src := ed.Values().Get(i)
+		srcProto := edp.GetValue()[src.Index()]
+		evd := createEnumValueDescriptor(fd, ret, src, srcProto, append(path, int32(i)))
+		symbols[string(src.FullName())] = evd
+		// NB: for backwards compatibility, also register the enum value as if
+		// scoped within the enum (counter-intuitively, enum value full names are
+		// scoped in the enum's parent element). EnumValueDescriptor.GetFullyQualifiedName
+		// returns that alternate full name.
+ symbols[evd.GetFullyQualifiedName()] = evd + ret.values = append(ret.values, evd) + } + if len(ret.values) > 0 { + ret.valuesByNum = make(sortedValues, len(ret.values)) + copy(ret.valuesByNum, ret.values) + sort.Stable(ret.valuesByNum) + } + return ret +} + +type sortedValues []*EnumValueDescriptor + +func (sv sortedValues) Len() int { + return len(sv) +} + +func (sv sortedValues) Less(i, j int) bool { + return sv[i].GetNumber() < sv[j].GetNumber() +} + +func (sv sortedValues) Swap(i, j int) { + sv[i], sv[j] = sv[j], sv[i] + +} + +// GetName returns the simple (unqualified) name of the enum type. +func (ed *EnumDescriptor) GetName() string { + return string(ed.wrapped.Name()) +} + +// GetFullyQualifiedName returns the fully qualified name of the enum type. +// This includes the package name (if there is one) as well as the names of any +// enclosing messages. +func (ed *EnumDescriptor) GetFullyQualifiedName() string { + return string(ed.wrapped.FullName()) +} + +// GetParent returns the enum type's enclosing descriptor. For top-level enums, +// this will be a file descriptor. Otherwise it will be the descriptor for the +// enclosing message. +func (ed *EnumDescriptor) GetParent() Descriptor { + return ed.parent +} + +// GetFile returns the descriptor for the file in which this enum is defined. +func (ed *EnumDescriptor) GetFile() *FileDescriptor { + return ed.file +} + +// GetOptions returns the enum type's options. Most usages will be more +// interested in GetEnumOptions, which has a concrete return type. This generic +// version is present to satisfy the Descriptor interface. +func (ed *EnumDescriptor) GetOptions() proto.Message { + return ed.proto.GetOptions() +} + +// GetEnumOptions returns the enum type's options. +func (ed *EnumDescriptor) GetEnumOptions() *descriptorpb.EnumOptions { + return ed.proto.GetOptions() +} + +// GetSourceInfo returns source info for the enum type, if present in the +// descriptor. Not all descriptors will contain source info. If non-nil, the +// returned info contains information about the location in the file where the +// enum type was defined and also contains comments associated with the enum +// definition. +func (ed *EnumDescriptor) GetSourceInfo() *descriptorpb.SourceCodeInfo_Location { + return ed.file.sourceInfo.Get(ed.sourceInfoPath) +} + +// AsProto returns the underlying descriptor proto. Most usages will be more +// interested in AsEnumDescriptorProto, which has a concrete return type. This +// generic version is present to satisfy the Descriptor interface. +func (ed *EnumDescriptor) AsProto() proto.Message { + return ed.proto +} + +// AsEnumDescriptorProto returns the underlying descriptor proto. +func (ed *EnumDescriptor) AsEnumDescriptorProto() *descriptorpb.EnumDescriptorProto { + return ed.proto +} + +// String returns the underlying descriptor proto, in compact text format. +func (ed *EnumDescriptor) String() string { + return ed.proto.String() +} + +// GetValues returns all of the allowed values defined for this enum. +func (ed *EnumDescriptor) GetValues() []*EnumValueDescriptor { + return ed.values +} + +// FindValueByName finds the enum value with the given name. If no such value exists +// then nil is returned. +func (ed *EnumDescriptor) FindValueByName(name string) *EnumValueDescriptor { + fqn := fmt.Sprintf("%s.%s", ed.GetFullyQualifiedName(), name) + if vd, ok := ed.file.symbols[fqn].(*EnumValueDescriptor); ok { + return vd + } else { + return nil + } +} + +// FindValueByNumber finds the value with the given numeric value. 
If no such value +// exists then nil is returned. If aliases are allowed and multiple values have the +// given number, the first declared value is returned. +func (ed *EnumDescriptor) FindValueByNumber(num int32) *EnumValueDescriptor { + index := sort.Search(len(ed.valuesByNum), func(i int) bool { return ed.valuesByNum[i].GetNumber() >= num }) + if index < len(ed.valuesByNum) { + vd := ed.valuesByNum[index] + if vd.GetNumber() == num { + return vd + } + } + return nil +} + +// EnumValueDescriptor describes an allowed value of an enum declared in a proto file. +type EnumValueDescriptor struct { + wrapped protoreflect.EnumValueDescriptor + proto *descriptorpb.EnumValueDescriptorProto + parent *EnumDescriptor + file *FileDescriptor + sourceInfoPath []int32 +} + +// Unwrap returns the underlying protoreflect.Descriptor. Most usages will be more +// interested in UnwrapEnumValue, which has a more specific return type. This generic +// version is present to satisfy the DescriptorWrapper interface. +func (vd *EnumValueDescriptor) Unwrap() protoreflect.Descriptor { + return vd.wrapped +} + +// UnwrapEnumValue returns the underlying protoreflect.EnumValueDescriptor. +func (vd *EnumValueDescriptor) UnwrapEnumValue() protoreflect.EnumValueDescriptor { + return vd.wrapped +} + +func createEnumValueDescriptor(fd *FileDescriptor, parent *EnumDescriptor, evd protoreflect.EnumValueDescriptor, evdp *descriptorpb.EnumValueDescriptorProto, path []int32) *EnumValueDescriptor { + return &EnumValueDescriptor{ + wrapped: evd, + proto: evdp, + parent: parent, + file: fd, + sourceInfoPath: append([]int32(nil), path...), // defensive copy + } +} + +func (vd *EnumValueDescriptor) resolve(path []int32) { + vd.sourceInfoPath = append([]int32(nil), path...) // defensive copy +} + +// GetName returns the name of the enum value. +func (vd *EnumValueDescriptor) GetName() string { + return string(vd.wrapped.Name()) +} + +// GetNumber returns the numeric value associated with this enum value. +func (vd *EnumValueDescriptor) GetNumber() int32 { + return int32(vd.wrapped.Number()) +} + +// GetFullyQualifiedName returns the fully qualified name of the enum value. +// Unlike GetName, this includes fully qualified name of the enclosing enum. +func (vd *EnumValueDescriptor) GetFullyQualifiedName() string { + // NB: Technically, we do not return the correct value. Enum values are + // scoped within the enclosing element, not within the enum itself (which + // is very non-intuitive, but it follows C++ scoping rules). The value + // returned from vd.wrapped.FullName() is correct. However, we return + // something different, just for backwards compatibility, as this package + // has always instead returned the name scoped inside the enum. + return fmt.Sprintf("%s.%s", vd.parent.GetFullyQualifiedName(), vd.wrapped.Name()) +} + +// GetParent returns the descriptor for the enum in which this enum value is +// defined. Most usages will prefer to use GetEnum, which has a concrete return +// type. This more generic method is present to satisfy the Descriptor interface. +func (vd *EnumValueDescriptor) GetParent() Descriptor { + return vd.parent +} + +// GetEnum returns the enum in which this enum value is defined. +func (vd *EnumValueDescriptor) GetEnum() *EnumDescriptor { + return vd.parent +} + +// GetFile returns the descriptor for the file in which this enum value is +// defined. +func (vd *EnumValueDescriptor) GetFile() *FileDescriptor { + return vd.file +} + +// GetOptions returns the enum value's options. 
Most usages will be more +// interested in GetEnumValueOptions, which has a concrete return type. This +// generic version is present to satisfy the Descriptor interface. +func (vd *EnumValueDescriptor) GetOptions() proto.Message { + return vd.proto.GetOptions() +} + +// GetEnumValueOptions returns the enum value's options. +func (vd *EnumValueDescriptor) GetEnumValueOptions() *descriptorpb.EnumValueOptions { + return vd.proto.GetOptions() +} + +// GetSourceInfo returns source info for the enum value, if present in the +// descriptor. Not all descriptors will contain source info. If non-nil, the +// returned info contains information about the location in the file where the +// enum value was defined and also contains comments associated with the enum +// value definition. +func (vd *EnumValueDescriptor) GetSourceInfo() *descriptorpb.SourceCodeInfo_Location { + return vd.file.sourceInfo.Get(vd.sourceInfoPath) +} + +// AsProto returns the underlying descriptor proto. Most usages will be more +// interested in AsEnumValueDescriptorProto, which has a concrete return type. +// This generic version is present to satisfy the Descriptor interface. +func (vd *EnumValueDescriptor) AsProto() proto.Message { + return vd.proto +} + +// AsEnumValueDescriptorProto returns the underlying descriptor proto. +func (vd *EnumValueDescriptor) AsEnumValueDescriptorProto() *descriptorpb.EnumValueDescriptorProto { + return vd.proto +} + +// String returns the underlying descriptor proto, in compact text format. +func (vd *EnumValueDescriptor) String() string { + return vd.proto.String() +} + +// ServiceDescriptor describes an RPC service declared in a proto file. +type ServiceDescriptor struct { + wrapped protoreflect.ServiceDescriptor + proto *descriptorpb.ServiceDescriptorProto + file *FileDescriptor + methods []*MethodDescriptor + sourceInfoPath []int32 +} + +// Unwrap returns the underlying protoreflect.Descriptor. Most usages will be more +// interested in UnwrapService, which has a more specific return type. This generic +// version is present to satisfy the DescriptorWrapper interface. +func (sd *ServiceDescriptor) Unwrap() protoreflect.Descriptor { + return sd.wrapped +} + +// UnwrapService returns the underlying protoreflect.ServiceDescriptor. +func (sd *ServiceDescriptor) UnwrapService() protoreflect.ServiceDescriptor { + return sd.wrapped +} + +func createServiceDescriptor(fd *FileDescriptor, sd protoreflect.ServiceDescriptor, sdp *descriptorpb.ServiceDescriptorProto, symbols map[string]Descriptor, path []int32) *ServiceDescriptor { + ret := &ServiceDescriptor{ + wrapped: sd, + proto: sdp, + file: fd, + sourceInfoPath: append([]int32(nil), path...), // defensive copy + } + path = append(path, internal.Service_methodsTag) + for i := 0; i < sd.Methods().Len(); i++ { + src := sd.Methods().Get(i) + srcProto := sdp.GetMethod()[src.Index()] + md := createMethodDescriptor(fd, ret, src, srcProto, append(path, int32(i))) + symbols[string(src.FullName())] = md + ret.methods = append(ret.methods, md) + } + return ret +} + +func (sd *ServiceDescriptor) resolve(cache descriptorCache) error { + for _, md := range sd.methods { + if err := md.resolve(cache); err != nil { + return err + } + } + return nil +} + +// GetName returns the simple (unqualified) name of the service. +func (sd *ServiceDescriptor) GetName() string { + return string(sd.wrapped.Name()) +} + +// GetFullyQualifiedName returns the fully qualified name of the service. This +// includes the package name (if there is one). 
+func (sd *ServiceDescriptor) GetFullyQualifiedName() string { + return string(sd.wrapped.FullName()) +} + +// GetParent returns the descriptor for the file in which this service is +// defined. Most usages will prefer to use GetFile, which has a concrete return +// type. This more generic method is present to satisfy the Descriptor interface. +func (sd *ServiceDescriptor) GetParent() Descriptor { + return sd.file +} + +// GetFile returns the descriptor for the file in which this service is defined. +func (sd *ServiceDescriptor) GetFile() *FileDescriptor { + return sd.file +} + +// GetOptions returns the service's options. Most usages will be more interested +// in GetServiceOptions, which has a concrete return type. This generic version +// is present to satisfy the Descriptor interface. +func (sd *ServiceDescriptor) GetOptions() proto.Message { + return sd.proto.GetOptions() +} + +// GetServiceOptions returns the service's options. +func (sd *ServiceDescriptor) GetServiceOptions() *descriptorpb.ServiceOptions { + return sd.proto.GetOptions() +} + +// GetSourceInfo returns source info for the service, if present in the +// descriptor. Not all descriptors will contain source info. If non-nil, the +// returned info contains information about the location in the file where the +// service was defined and also contains comments associated with the service +// definition. +func (sd *ServiceDescriptor) GetSourceInfo() *descriptorpb.SourceCodeInfo_Location { + return sd.file.sourceInfo.Get(sd.sourceInfoPath) +} + +// AsProto returns the underlying descriptor proto. Most usages will be more +// interested in AsServiceDescriptorProto, which has a concrete return type. +// This generic version is present to satisfy the Descriptor interface. +func (sd *ServiceDescriptor) AsProto() proto.Message { + return sd.proto +} + +// AsServiceDescriptorProto returns the underlying descriptor proto. +func (sd *ServiceDescriptor) AsServiceDescriptorProto() *descriptorpb.ServiceDescriptorProto { + return sd.proto +} + +// String returns the underlying descriptor proto, in compact text format. +func (sd *ServiceDescriptor) String() string { + return sd.proto.String() +} + +// GetMethods returns all of the RPC methods for this service. +func (sd *ServiceDescriptor) GetMethods() []*MethodDescriptor { + return sd.methods +} + +// FindMethodByName finds the method with the given name. If no such method exists +// then nil is returned. +func (sd *ServiceDescriptor) FindMethodByName(name string) *MethodDescriptor { + fqn := fmt.Sprintf("%s.%s", sd.GetFullyQualifiedName(), name) + if md, ok := sd.file.symbols[fqn].(*MethodDescriptor); ok { + return md + } else { + return nil + } +} + +// MethodDescriptor describes an RPC method declared in a proto file. +type MethodDescriptor struct { + wrapped protoreflect.MethodDescriptor + proto *descriptorpb.MethodDescriptorProto + parent *ServiceDescriptor + file *FileDescriptor + inType *MessageDescriptor + outType *MessageDescriptor + sourceInfoPath []int32 +} + +// Unwrap returns the underlying protoreflect.Descriptor. Most usages will be more +// interested in UnwrapMethod, which has a more specific return type. This generic +// version is present to satisfy the DescriptorWrapper interface. +func (md *MethodDescriptor) Unwrap() protoreflect.Descriptor { + return md.wrapped +} + +// UnwrapMethod returns the underlying protoreflect.MethodDescriptor. 
+func (md *MethodDescriptor) UnwrapMethod() protoreflect.MethodDescriptor { + return md.wrapped +} + +func createMethodDescriptor(fd *FileDescriptor, parent *ServiceDescriptor, md protoreflect.MethodDescriptor, mdp *descriptorpb.MethodDescriptorProto, path []int32) *MethodDescriptor { + // request and response types get resolved later + return &MethodDescriptor{ + wrapped: md, + proto: mdp, + parent: parent, + file: fd, + sourceInfoPath: append([]int32(nil), path...), // defensive copy + } +} + +func (md *MethodDescriptor) resolve(cache descriptorCache) error { + if desc, err := resolve(md.file, md.wrapped.Input(), cache); err != nil { + return err + } else { + msgType, ok := desc.(*MessageDescriptor) + if !ok { + return fmt.Errorf("method %v has request type %q which should be a message but is %s", md.GetFullyQualifiedName(), md.proto.GetInputType(), descriptorType(desc)) + } + md.inType = msgType + } + if desc, err := resolve(md.file, md.wrapped.Output(), cache); err != nil { + return err + } else { + msgType, ok := desc.(*MessageDescriptor) + if !ok { + return fmt.Errorf("method %v has response type %q which should be a message but is %s", md.GetFullyQualifiedName(), md.proto.GetOutputType(), descriptorType(desc)) + } + md.outType = msgType + } + return nil +} + +// GetName returns the name of the method. +func (md *MethodDescriptor) GetName() string { + return string(md.wrapped.Name()) +} + +// GetFullyQualifiedName returns the fully qualified name of the method. Unlike +// GetName, this includes fully qualified name of the enclosing service. +func (md *MethodDescriptor) GetFullyQualifiedName() string { + return string(md.wrapped.FullName()) +} + +// GetParent returns the descriptor for the service in which this method is +// defined. Most usages will prefer to use GetService, which has a concrete +// return type. This more generic method is present to satisfy the Descriptor +// interface. +func (md *MethodDescriptor) GetParent() Descriptor { + return md.parent +} + +// GetService returns the RPC service in which this method is declared. +func (md *MethodDescriptor) GetService() *ServiceDescriptor { + return md.parent +} + +// GetFile returns the descriptor for the file in which this method is defined. +func (md *MethodDescriptor) GetFile() *FileDescriptor { + return md.file +} + +// GetOptions returns the method's options. Most usages will be more interested +// in GetMethodOptions, which has a concrete return type. This generic version +// is present to satisfy the Descriptor interface. +func (md *MethodDescriptor) GetOptions() proto.Message { + return md.proto.GetOptions() +} + +// GetMethodOptions returns the method's options. +func (md *MethodDescriptor) GetMethodOptions() *descriptorpb.MethodOptions { + return md.proto.GetOptions() +} + +// GetSourceInfo returns source info for the method, if present in the +// descriptor. Not all descriptors will contain source info. If non-nil, the +// returned info contains information about the location in the file where the +// method was defined and also contains comments associated with the method +// definition. +func (md *MethodDescriptor) GetSourceInfo() *descriptorpb.SourceCodeInfo_Location { + return md.file.sourceInfo.Get(md.sourceInfoPath) +} + +// AsProto returns the underlying descriptor proto. Most usages will be more +// interested in AsMethodDescriptorProto, which has a concrete return type. This +// generic version is present to satisfy the Descriptor interface. 
+func (md *MethodDescriptor) AsProto() proto.Message {
+	return md.proto
+}
+
+// AsMethodDescriptorProto returns the underlying descriptor proto.
+func (md *MethodDescriptor) AsMethodDescriptorProto() *descriptorpb.MethodDescriptorProto {
+	return md.proto
+}
+
+// String returns the underlying descriptor proto, in compact text format.
+func (md *MethodDescriptor) String() string {
+	return md.proto.String()
+}
+
+// IsServerStreaming returns true if this is a server-streaming method.
+func (md *MethodDescriptor) IsServerStreaming() bool {
+	return md.wrapped.IsStreamingServer()
+}
+
+// IsClientStreaming returns true if this is a client-streaming method.
+func (md *MethodDescriptor) IsClientStreaming() bool {
+	return md.wrapped.IsStreamingClient()
+}
+
+// GetInputType returns the input type, or request type, of the RPC method.
+func (md *MethodDescriptor) GetInputType() *MessageDescriptor {
+	return md.inType
+}
+
+// GetOutputType returns the output type, or response type, of the RPC method.
+func (md *MethodDescriptor) GetOutputType() *MessageDescriptor {
+	return md.outType
+}
+
+// OneOfDescriptor describes a one-of field set declared in a protocol buffer message.
+type OneOfDescriptor struct {
+	wrapped        protoreflect.OneofDescriptor
+	proto          *descriptorpb.OneofDescriptorProto
+	parent         *MessageDescriptor
+	file           *FileDescriptor
+	choices        []*FieldDescriptor
+	sourceInfoPath []int32
+}
+
+// Unwrap returns the underlying protoreflect.Descriptor. Most usages will be more
+// interested in UnwrapOneOf, which has a more specific return type. This generic
+// version is present to satisfy the DescriptorWrapper interface.
+func (od *OneOfDescriptor) Unwrap() protoreflect.Descriptor {
+	return od.wrapped
+}
+
+// UnwrapOneOf returns the underlying protoreflect.OneofDescriptor.
+func (od *OneOfDescriptor) UnwrapOneOf() protoreflect.OneofDescriptor {
+	return od.wrapped
+}
+
+func createOneOfDescriptor(fd *FileDescriptor, parent *MessageDescriptor, index int, od protoreflect.OneofDescriptor, odp *descriptorpb.OneofDescriptorProto, path []int32) *OneOfDescriptor {
+	ret := &OneOfDescriptor{
+		wrapped:        od,
+		proto:          odp,
+		parent:         parent,
+		file:           fd,
+		sourceInfoPath: append([]int32(nil), path...), // defensive copy
+	}
+	for _, f := range parent.fields {
+		oi := f.proto.OneofIndex
+		if oi != nil && *oi == int32(index) {
+			f.oneOf = ret
+			ret.choices = append(ret.choices, f)
+		}
+	}
+	return ret
+}
+
+// GetName returns the name of the one-of.
+func (od *OneOfDescriptor) GetName() string {
+	return string(od.wrapped.Name())
+}
+
+// GetFullyQualifiedName returns the fully qualified name of the one-of. Unlike
+// GetName, this includes fully qualified name of the enclosing message.
+func (od *OneOfDescriptor) GetFullyQualifiedName() string {
+	return string(od.wrapped.FullName())
+}
+
+// GetParent returns the descriptor for the message in which this one-of is
+// defined. Most usages will prefer to use GetOwner, which has a concrete
+// return type. This more generic method is present to satisfy the Descriptor
+// interface.
+func (od *OneOfDescriptor) GetParent() Descriptor {
+	return od.parent
+}
+
+// GetOwner returns the message to which this one-of field set belongs.
+func (od *OneOfDescriptor) GetOwner() *MessageDescriptor {
+	return od.parent
+}
+
+// GetFile returns the descriptor for the file in which this one-of is defined.
+func (od *OneOfDescriptor) GetFile() *FileDescriptor {
+	return od.file
+}
+
+// GetOptions returns the one-of's options.
Most usages will be more interested +// in GetOneOfOptions, which has a concrete return type. This generic version +// is present to satisfy the Descriptor interface. +func (od *OneOfDescriptor) GetOptions() proto.Message { + return od.proto.GetOptions() +} + +// GetOneOfOptions returns the one-of's options. +func (od *OneOfDescriptor) GetOneOfOptions() *descriptorpb.OneofOptions { + return od.proto.GetOptions() +} + +// GetSourceInfo returns source info for the one-of, if present in the +// descriptor. Not all descriptors will contain source info. If non-nil, the +// returned info contains information about the location in the file where the +// one-of was defined and also contains comments associated with the one-of +// definition. +func (od *OneOfDescriptor) GetSourceInfo() *descriptorpb.SourceCodeInfo_Location { + return od.file.sourceInfo.Get(od.sourceInfoPath) +} + +// AsProto returns the underlying descriptor proto. Most usages will be more +// interested in AsOneofDescriptorProto, which has a concrete return type. This +// generic version is present to satisfy the Descriptor interface. +func (od *OneOfDescriptor) AsProto() proto.Message { + return od.proto +} + +// AsOneofDescriptorProto returns the underlying descriptor proto. +func (od *OneOfDescriptor) AsOneofDescriptorProto() *descriptorpb.OneofDescriptorProto { + return od.proto +} + +// String returns the underlying descriptor proto, in compact text format. +func (od *OneOfDescriptor) String() string { + return od.proto.String() +} + +// GetChoices returns the fields that are part of the one-of field set. At most one of +// these fields may be set for a given message. +func (od *OneOfDescriptor) GetChoices() []*FieldDescriptor { + return od.choices +} + +func (od *OneOfDescriptor) IsSynthetic() bool { + return od.wrapped.IsSynthetic() +} + +func resolve(fd *FileDescriptor, src protoreflect.Descriptor, cache descriptorCache) (Descriptor, error) { + d := cache.get(src) + if d != nil { + return d, nil + } + + fqn := string(src.FullName()) + + d = fd.FindSymbol(fqn) + if d != nil { + return d, nil + } + + for _, dep := range fd.deps { + d := dep.FindSymbol(fqn) + if d != nil { + return d, nil + } + } + + return nil, fmt.Errorf("file %q included an unresolvable reference to %q", fd.proto.GetName(), fqn) +} diff --git a/vendor/github.com/jhump/protoreflect/desc/descriptor_no_unsafe.go b/vendor/github.com/jhump/protoreflect/desc/descriptor_no_unsafe.go new file mode 100644 index 00000000..25d619a2 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/descriptor_no_unsafe.go @@ -0,0 +1,30 @@ +//go:build appengine || gopherjs || purego +// +build appengine gopherjs purego + +// NB: other environments where unsafe is unappropriate should use "purego" build tag +// https://github.com/golang/go/issues/23172 + +package desc + +type jsonNameMap struct{} +type memoizedDefault struct{} + +// FindFieldByJSONName finds the field with the given JSON field name. If no such +// field exists then nil is returned. Only regular fields are returned, not +// extensions. +func (md *MessageDescriptor) FindFieldByJSONName(jsonName string) *FieldDescriptor { + // NB: With allowed use of unsafe, we use it to atomically define an index + // via atomic.LoadPointer/atomic.StorePointer. Without it, we skip the index + // and must do a linear scan of fields each time. 
+ for _, f := range md.fields { + jn := f.GetJSONName() + if jn == jsonName { + return f + } + } + return nil +} + +func (fd *FieldDescriptor) getDefaultValue() interface{} { + return fd.determineDefault() +} diff --git a/vendor/github.com/jhump/protoreflect/desc/descriptor_unsafe.go b/vendor/github.com/jhump/protoreflect/desc/descriptor_unsafe.go new file mode 100644 index 00000000..691f0d88 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/descriptor_unsafe.go @@ -0,0 +1,59 @@ +//go:build !appengine && !gopherjs && !purego +// +build !appengine,!gopherjs,!purego + +// NB: other environments where unsafe is unappropriate should use "purego" build tag +// https://github.com/golang/go/issues/23172 + +package desc + +import ( + "sync/atomic" + "unsafe" +) + +type jsonNameMap map[string]*FieldDescriptor // loaded/stored atomically via atomic+unsafe +type memoizedDefault *interface{} // loaded/stored atomically via atomic+unsafe + +// FindFieldByJSONName finds the field with the given JSON field name. If no such +// field exists then nil is returned. Only regular fields are returned, not +// extensions. +func (md *MessageDescriptor) FindFieldByJSONName(jsonName string) *FieldDescriptor { + // NB: We don't want to eagerly index JSON names because many programs won't use it. + // So we want to do it lazily, but also make sure the result is thread-safe. So we + // atomically load/store the map as if it were a normal pointer. We don't use other + // mechanisms -- like sync.Mutex, sync.RWMutex, sync.Once, or atomic.Value -- to + // do this lazily because those types cannot be copied, and we'd rather not induce + // 'go vet' errors in programs that use descriptors and try to copy them. + // If multiple goroutines try to access the index at the same time, before it is + // built, they will all end up computing the index redundantly. Future reads of + // the index will use whatever was the "last one stored" by those racing goroutines. + // Since building the index is deterministic, this is fine: all indices computed + // will be the same. + addrOfJsonNames := (*unsafe.Pointer)(unsafe.Pointer(&md.jsonNames)) + jsonNames := atomic.LoadPointer(addrOfJsonNames) + var index map[string]*FieldDescriptor + if jsonNames == nil { + // slow path: compute the index + index = map[string]*FieldDescriptor{} + for _, f := range md.fields { + jn := f.GetJSONName() + index[jn] = f + } + atomic.StorePointer(addrOfJsonNames, *(*unsafe.Pointer)(unsafe.Pointer(&index))) + } else { + *(*unsafe.Pointer)(unsafe.Pointer(&index)) = jsonNames + } + return index[jsonName] +} + +func (fd *FieldDescriptor) getDefaultValue() interface{} { + addrOfDef := (*unsafe.Pointer)(unsafe.Pointer(&fd.def)) + def := atomic.LoadPointer(addrOfDef) + if def != nil { + return *(*interface{})(def) + } + // slow path: compute the default, potentially involves decoding value + d := fd.determineDefault() + atomic.StorePointer(addrOfDef, (unsafe.Pointer(&d))) + return d +} diff --git a/vendor/github.com/jhump/protoreflect/desc/doc.go b/vendor/github.com/jhump/protoreflect/desc/doc.go new file mode 100644 index 00000000..dfac5c72 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/doc.go @@ -0,0 +1,62 @@ +// Package desc contains "rich descriptors" for protocol buffers. The built-in +// descriptor types are simple protobuf messages, each one representing a +// different kind of element in the AST of a .proto source file. 
+//
+// Because of this inherent "tree" quality, these built-in descriptors cannot
+// refer to their enclosing file descriptor. Nor can a field descriptor refer to
+// a message or enum descriptor that represents the field's type (for enum and
+// nested message fields). All such links must instead be stringly typed. This
+// limitation makes them much harder to use for doing interesting things with
+// reflection.
+//
+// Without this package, resolving references to types is particularly complex.
+// For example, resolving a field's type, the message type an extension extends,
+// or the request and response types of an RPC method all require searching
+// through symbols defined not only in the file in which these elements are
+// declared but also in its transitive closure of dependencies.
+//
+// "Rich descriptors" avoid the need to deal with the complexities described
+// above. A rich descriptor has all type references resolved and provides
+// methods to access other rich descriptors for all referenced elements. Each
+// rich descriptor has a usefully broad API, but does not try to mimic the full
+// interface of the underlying descriptor proto. Instead, every rich descriptor
+// provides access to that underlying proto, for extracting descriptor
+// properties that are not immediately accessible through the rich descriptor's
+// methods.
+//
+// Also see the grpcreflect, dynamic, and grpcdynamic packages in this same
+// repo to see just how useful rich descriptors really are.
+//
+// # Loading Descriptors
+//
+// Rich descriptors can be accessed in similar ways as their "poor" cousins
+// (descriptor protos). Instead of using proto.FileDescriptor, use
+// desc.LoadFileDescriptor. Message descriptors and extension field descriptors
+// can also be easily accessed using desc.LoadMessageDescriptor and
+// desc.LoadFieldDescriptorForExtension, respectively.
+//
+// If you are using the protoc-gen-gosrcinfo plugin (also in this repo), then
+// the descriptors returned from these Load* functions will include source code
+// information, and thus include comments for elements.
+//
+// # Creating Descriptors
+//
+// It is also possible to create rich descriptors for proto messages that a given
+// Go program doesn't even know about. For example, they could be loaded from a
+// FileDescriptorSet file (which can be generated by protoc) or loaded from a
+// server. This enables interesting things like dynamic clients: where a Go
+// program can be an RPC client of a service it wasn't compiled to know about.
+//
+// You cannot create a message descriptor without also creating its enclosing
+// file, because the enclosing file is what contains other relevant information
+// like other symbols and dependencies/imports, which is how type references
+// are resolved (such as when a field in a message has a type that is another
+// message or enum).
+//
+// So the functions in this package for creating descriptors are all for
+// creating *file* descriptors. See the various Create* functions for more
+// information.
+//
+// Also see the desc/builder sub-package, for another API that makes it easier
+// to synthesize descriptors programmatically.
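A minimal usage sketch of the loading API described in this package comment, assuming the generated well-known type durationpb.Duration is linked into the program (the output values shown in comments are illustrative):

    package main

    import (
        "fmt"

        "github.com/jhump/protoreflect/desc"
        "google.golang.org/protobuf/types/known/durationpb"
    )

    func main() {
        // Wrap the generated descriptor for google.protobuf.Duration.
        md, err := desc.LoadMessageDescriptorForMessage(&durationpb.Duration{})
        if err != nil {
            panic(err)
        }
        fmt.Println(md.GetFullyQualifiedName()) // google.protobuf.Duration
        fmt.Println(md.GetFile().GetName())     // google/protobuf/duration.proto

        // Type references are already resolved, so fields link back to rich descriptors.
        f := md.FindFieldByJSONName("nanos")
        fmt.Println(f.GetNumber(), f.GetType()) // 2 TYPE_INT32
    }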
+package desc diff --git a/vendor/github.com/jhump/protoreflect/desc/imports.go b/vendor/github.com/jhump/protoreflect/desc/imports.go new file mode 100644 index 00000000..8e6a0d6e --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/imports.go @@ -0,0 +1,360 @@ +package desc + +import ( + "fmt" + "path/filepath" + "reflect" + "strings" + "sync" + + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/types/descriptorpb" +) + +var ( + globalImportPathConf map[string]string + globalImportPathMu sync.RWMutex +) + +// RegisterImportPath registers an alternate import path for a given registered +// proto file path. For more details on why alternate import paths may need to +// be configured, see ImportResolver. +// +// This method panics if provided invalid input. An empty importPath is invalid. +// An un-registered registerPath is also invalid. For example, if an attempt is +// made to register the import path "foo/bar.proto" as "bar.proto", but there is +// no "bar.proto" registered in the Go protobuf runtime, this method will panic. +// This method also panics if an attempt is made to register the same import +// path more than once. +// +// This function works globally, applying to all descriptors loaded by this +// package. If you instead want more granular support for handling alternate +// import paths -- such as for a single invocation of a function in this +// package or when the alternate path is only used from one file (so you don't +// want the alternate path used when loading every other file), use an +// ImportResolver instead. +// +// Deprecated: the new protobuf runtime (v1.4+) verifies that import paths are +// correct and that descriptors can be linked during package initialization. So +// registering alternate paths is no longer useful or necessary. +func RegisterImportPath(registerPath, importPath string) { + if len(importPath) == 0 { + panic("import path cannot be empty") + } + _, err := protoregistry.GlobalFiles.FindFileByPath(registerPath) + if err != nil { + panic(fmt.Sprintf("path %q is not a registered proto file", registerPath)) + } + globalImportPathMu.Lock() + defer globalImportPathMu.Unlock() + if reg := globalImportPathConf[importPath]; reg != "" { + panic(fmt.Sprintf("import path %q already registered for %s", importPath, reg)) + } + if globalImportPathConf == nil { + globalImportPathConf = map[string]string{} + } + globalImportPathConf[importPath] = registerPath +} + +// ResolveImport resolves the given import path. If it has been registered as an +// alternate via RegisterImportPath, the registered path is returned. Otherwise, +// the given import path is returned unchanged. +// +// Deprecated: the new protobuf runtime (v1.4+) verifies that import paths are +// correct and that descriptors can be linked during package initialization. So +// registering alternate paths is no longer useful or necessary. +func ResolveImport(importPath string) string { + importPath = clean(importPath) + globalImportPathMu.RLock() + defer globalImportPathMu.RUnlock() + reg := globalImportPathConf[importPath] + if reg == "" { + return importPath + } + return reg +} + +// ImportResolver lets you work-around linking issues that are caused by +// mismatches between how a particular proto source file is registered in the Go +// protobuf runtime and how that same file is imported by other files. The file +// is registered using the same relative path given to protoc when the file is +// compiled (i.e. 
when Go code is generated). So if any file tries to import +// that source file, but using a different relative path, then a link error will +// occur when this package tries to load a descriptor for the importing file. +// +// For example, let's say we have two proto source files: "foo/bar.proto" and +// "fubar/baz.proto". The latter imports the former using a line like so: +// +// import "foo/bar.proto"; +// +// However, when protoc is invoked, the command-line args looks like so: +// +// protoc -Ifoo/ --go_out=foo/ bar.proto +// protoc -I./ -Ifubar/ --go_out=fubar/ baz.proto +// +// Because the path given to protoc is just "bar.proto" and "baz.proto", this is +// how they are registered in the Go protobuf runtime. So, when loading the +// descriptor for "fubar/baz.proto", we'll see an import path of "foo/bar.proto" +// but will find no file registered with that path: +// +// fd, err := desc.LoadFileDescriptor("baz.proto") +// // err will be non-nil, complaining that there is no such file +// // found named "foo/bar.proto" +// +// This can be remedied by registering alternate import paths using an +// ImportResolver. Continuing with the example above, the code below would fix +// any link issue: +// +// var r desc.ImportResolver +// r.RegisterImportPath("bar.proto", "foo/bar.proto") +// fd, err := r.LoadFileDescriptor("baz.proto") +// // err will be nil; descriptor successfully loaded! +// +// If there are files that are *always* imported using a different relative +// path then how they are registered, consider using the global +// RegisterImportPath function, so you don't have to use an ImportResolver for +// every file that imports it. +// +// Note that the new protobuf runtime (v1.4+) verifies that import paths are +// correct and that descriptors can be linked during package initialization. So +// customizing import paths for descriptor resolution is no longer necessary. +type ImportResolver struct { + children map[string]*ImportResolver + importPaths map[string]string + + // By default, an ImportResolver will fallback to consulting any paths + // registered via the top-level RegisterImportPath function. Setting this + // field to true will cause the ImportResolver to skip that fallback and + // only examine its own locally registered paths. + SkipFallbackRules bool +} + +// ResolveImport resolves the given import path in the context of the given +// source file. If a matching alternate has been registered with this resolver +// via a call to RegisterImportPath or RegisterImportPathFrom, then the +// registered path is returned. Otherwise, the given import path is returned +// unchanged. +func (r *ImportResolver) ResolveImport(source, importPath string) string { + if r != nil { + res := r.resolveImport(clean(source), clean(importPath)) + if res != "" { + return res + } + if r.SkipFallbackRules { + return importPath + } + } + return ResolveImport(importPath) +} + +func (r *ImportResolver) resolveImport(source, importPath string) string { + if source == "" { + return r.importPaths[importPath] + } + var car, cdr string + idx := strings.IndexRune(source, '/') + if idx < 0 { + car, cdr = source, "" + } else { + car, cdr = source[:idx], source[idx+1:] + } + ch := r.children[car] + if ch != nil { + if reg := ch.resolveImport(cdr, importPath); reg != "" { + return reg + } + } + return r.importPaths[importPath] +} + +// RegisterImportPath registers an alternate import path for a given registered +// proto file path with this resolver. 
Any appearance of the given import path +// when linking files will instead try to link the given registered path. If the +// registered path cannot be located, then linking will fallback to the actual +// imported path. +// +// This method will panic if given an empty path or if the same import path is +// registered more than once. +// +// To constrain the contexts where the given import path is to be re-written, +// use RegisterImportPathFrom instead. +func (r *ImportResolver) RegisterImportPath(registerPath, importPath string) { + r.RegisterImportPathFrom(registerPath, importPath, "") +} + +// RegisterImportPathFrom registers an alternate import path for a given +// registered proto file path with this resolver, but only for imports in the +// specified source context. +// +// The source context can be the name of a folder or a proto source file. Any +// appearance of the given import path in that context will instead try to link +// the given registered path. To be in context, the file that is being linked +// (i.e. the one whose import statement is being resolved) must be the same +// relative path of the source context or be a sub-path (i.e. a descendant of +// the source folder). +// +// If the registered path cannot be located, then linking will fallback to the +// actual imported path. +// +// This method will panic if given an empty path. The source context, on the +// other hand, is allowed to be blank. A blank source matches all files. This +// method also panics if the same import path is registered in the same source +// context more than once. +func (r *ImportResolver) RegisterImportPathFrom(registerPath, importPath, source string) { + importPath = clean(importPath) + if len(importPath) == 0 { + panic("import path cannot be empty") + } + registerPath = clean(registerPath) + if len(registerPath) == 0 { + panic("registered path cannot be empty") + } + r.registerImportPathFrom(registerPath, importPath, clean(source)) +} + +func (r *ImportResolver) registerImportPathFrom(registerPath, importPath, source string) { + if source == "" { + if r.importPaths == nil { + r.importPaths = map[string]string{} + } else if reg := r.importPaths[importPath]; reg != "" { + panic(fmt.Sprintf("already registered import path %q as %q", importPath, registerPath)) + } + r.importPaths[importPath] = registerPath + return + } + var car, cdr string + idx := strings.IndexRune(source, '/') + if idx < 0 { + car, cdr = source, "" + } else { + car, cdr = source[:idx], source[idx+1:] + } + ch := r.children[car] + if ch == nil { + if r.children == nil { + r.children = map[string]*ImportResolver{} + } + ch = &ImportResolver{} + r.children[car] = ch + } + ch.registerImportPathFrom(registerPath, importPath, cdr) +} + +// LoadFileDescriptor is the same as the package function of the same name, but +// any alternate paths configured in this resolver are used when linking the +// given descriptor proto. +// +// Deprecated: the new protobuf runtime (v1.4+) verifies that import paths are +// correct and that descriptors can be linked during package initialization. So +// registering alternate paths is no longer useful or necessary. +func (r *ImportResolver) LoadFileDescriptor(filePath string) (*FileDescriptor, error) { + return LoadFileDescriptor(filePath) +} + +// LoadMessageDescriptor is the same as the package function of the same name, +// but any alternate paths configured in this resolver are used when linking +// files for the returned descriptor. 
+// +// Deprecated: the new protobuf runtime (v1.4+) verifies that import paths are +// correct and that descriptors can be linked during package initialization. So +// registering alternate paths is no longer useful or necessary. +func (r *ImportResolver) LoadMessageDescriptor(msgName string) (*MessageDescriptor, error) { + return LoadMessageDescriptor(msgName) +} + +// LoadMessageDescriptorForMessage is the same as the package function of the +// same name, but any alternate paths configured in this resolver are used when +// linking files for the returned descriptor. +// +// Deprecated: the new protobuf runtime (v1.4+) verifies that import paths are +// correct and that descriptors can be linked during package initialization. So +// registering alternate paths is no longer useful or necessary. +func (r *ImportResolver) LoadMessageDescriptorForMessage(msg proto.Message) (*MessageDescriptor, error) { + return LoadMessageDescriptorForMessage(msg) +} + +// LoadMessageDescriptorForType is the same as the package function of the same +// name, but any alternate paths configured in this resolver are used when +// linking files for the returned descriptor. +// +// Deprecated: the new protobuf runtime (v1.4+) verifies that import paths are +// correct and that descriptors can be linked during package initialization. So +// registering alternate paths is no longer useful or necessary. +func (r *ImportResolver) LoadMessageDescriptorForType(msgType reflect.Type) (*MessageDescriptor, error) { + return LoadMessageDescriptorForType(msgType) +} + +// LoadEnumDescriptorForEnum is the same as the package function of the same +// name, but any alternate paths configured in this resolver are used when +// linking files for the returned descriptor. +// +// Deprecated: the new protobuf runtime (v1.4+) verifies that import paths are +// correct and that descriptors can be linked during package initialization. So +// registering alternate paths is no longer useful or necessary. +func (r *ImportResolver) LoadEnumDescriptorForEnum(enum protoEnum) (*EnumDescriptor, error) { + return LoadEnumDescriptorForEnum(enum) +} + +// LoadEnumDescriptorForType is the same as the package function of the same +// name, but any alternate paths configured in this resolver are used when +// linking files for the returned descriptor. +// +// Deprecated: the new protobuf runtime (v1.4+) verifies that import paths are +// correct and that descriptors can be linked during package initialization. So +// registering alternate paths is no longer useful or necessary. +func (r *ImportResolver) LoadEnumDescriptorForType(enumType reflect.Type) (*EnumDescriptor, error) { + return LoadEnumDescriptorForType(enumType) +} + +// LoadFieldDescriptorForExtension is the same as the package function of the +// same name, but any alternate paths configured in this resolver are used when +// linking files for the returned descriptor. +// +// Deprecated: the new protobuf runtime (v1.4+) verifies that import paths are +// correct and that descriptors can be linked during package initialization. So +// registering alternate paths is no longer useful or necessary. +func (r *ImportResolver) LoadFieldDescriptorForExtension(ext *proto.ExtensionDesc) (*FieldDescriptor, error) { + return LoadFieldDescriptorForExtension(ext) +} + +// CreateFileDescriptor is the same as the package function of the same name, +// but any alternate paths configured in this resolver are used when linking the +// given descriptor proto. 
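A short sketch of the context-scoped rewriting performed by RegisterImportPathFrom and ResolveImport; the proto paths are hypothetical and no files are actually loaded:

    package main

    import (
        "fmt"

        "github.com/jhump/protoreflect/desc"
    )

    func main() {
        var r desc.ImportResolver
        // Hypothetical paths: only files under "fubar/" should see the rewrite.
        r.RegisterImportPathFrom("bar.proto", "foo/bar.proto", "fubar")

        fmt.Println(r.ResolveImport("fubar/baz.proto", "foo/bar.proto")) // bar.proto
        fmt.Println(r.ResolveImport("other/baz.proto", "foo/bar.proto")) // foo/bar.proto (out of context)
    }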
+func (r *ImportResolver) CreateFileDescriptor(fdp *descriptorpb.FileDescriptorProto, deps ...*FileDescriptor) (*FileDescriptor, error) { + return createFileDescriptor(fdp, deps, r) +} + +// CreateFileDescriptors is the same as the package function of the same name, +// but any alternate paths configured in this resolver are used when linking the +// given descriptor protos. +func (r *ImportResolver) CreateFileDescriptors(fds []*descriptorpb.FileDescriptorProto) (map[string]*FileDescriptor, error) { + return createFileDescriptors(fds, r) +} + +// CreateFileDescriptorFromSet is the same as the package function of the same +// name, but any alternate paths configured in this resolver are used when +// linking the descriptor protos in the given set. +func (r *ImportResolver) CreateFileDescriptorFromSet(fds *descriptorpb.FileDescriptorSet) (*FileDescriptor, error) { + return createFileDescriptorFromSet(fds, r) +} + +// CreateFileDescriptorsFromSet is the same as the package function of the same +// name, but any alternate paths configured in this resolver are used when +// linking the descriptor protos in the given set. +func (r *ImportResolver) CreateFileDescriptorsFromSet(fds *descriptorpb.FileDescriptorSet) (map[string]*FileDescriptor, error) { + return createFileDescriptorsFromSet(fds, r) +} + +const dotPrefix = "./" + +func clean(path string) string { + if path == "" { + return "" + } + path = filepath.ToSlash(filepath.Clean(path)) + if path == "." { + return "" + } + return strings.TrimPrefix(path, dotPrefix) +} diff --git a/vendor/github.com/jhump/protoreflect/desc/internal/proto3_optional.go b/vendor/github.com/jhump/protoreflect/desc/internal/proto3_optional.go new file mode 100644 index 00000000..aa8c3e99 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/internal/proto3_optional.go @@ -0,0 +1,75 @@ +package internal + +import ( + "strings" + + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/types/descriptorpb" +) + +// ProcessProto3OptionalFields adds synthetic oneofs to the given message descriptor +// for each proto3 optional field. It also updates the fields to have the correct +// oneof index reference. The given callback, if not nil, is called for each synthetic +// oneof created. +func ProcessProto3OptionalFields(msgd *descriptorpb.DescriptorProto, callback func(*descriptorpb.FieldDescriptorProto, *descriptorpb.OneofDescriptorProto)) { + var allNames map[string]struct{} + for _, fd := range msgd.Field { + if fd.GetProto3Optional() { + // lazy init the set of all names + if allNames == nil { + allNames = map[string]struct{}{} + for _, fd := range msgd.Field { + allNames[fd.GetName()] = struct{}{} + } + for _, od := range msgd.OneofDecl { + allNames[od.GetName()] = struct{}{} + } + // NB: protoc only considers names of other fields and oneofs + // when computing the synthetic oneof name. But that feels like + // a bug, since it means it could generate a name that conflicts + // with some other symbol defined in the message. If it's decided + // that's NOT a bug and is desirable, then we should remove the + // following four loops to mimic protoc's behavior. 
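+        // (The four loops below add extension, nested enum and enum value,
+        // nested message, and reserved names to that set. The synthetic oneof
+        // name is then derived from the field name further down: "foo" becomes
+        // "_foo", and "X" is prepended until the name is unique.)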
+ for _, xd := range msgd.Extension { + allNames[xd.GetName()] = struct{}{} + } + for _, ed := range msgd.EnumType { + allNames[ed.GetName()] = struct{}{} + for _, evd := range ed.Value { + allNames[evd.GetName()] = struct{}{} + } + } + for _, fd := range msgd.NestedType { + allNames[fd.GetName()] = struct{}{} + } + for _, n := range msgd.ReservedName { + allNames[n] = struct{}{} + } + } + + // Compute a name for the synthetic oneof. This uses the same + // algorithm as used in protoc: + // https://github.com/protocolbuffers/protobuf/blob/74ad62759e0a9b5a21094f3fb9bb4ebfaa0d1ab8/src/google/protobuf/compiler/parser.cc#L785-L803 + ooName := fd.GetName() + if !strings.HasPrefix(ooName, "_") { + ooName = "_" + ooName + } + for { + _, ok := allNames[ooName] + if !ok { + // found a unique name + allNames[ooName] = struct{}{} + break + } + ooName = "X" + ooName + } + + fd.OneofIndex = proto.Int32(int32(len(msgd.OneofDecl))) + ood := &descriptorpb.OneofDescriptorProto{Name: proto.String(ooName)} + msgd.OneofDecl = append(msgd.OneofDecl, ood) + if callback != nil { + callback(fd, ood) + } + } + } +} diff --git a/vendor/github.com/jhump/protoreflect/desc/internal/registry.go b/vendor/github.com/jhump/protoreflect/desc/internal/registry.go new file mode 100644 index 00000000..d7259e4a --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/internal/registry.go @@ -0,0 +1,67 @@ +package internal + +import ( + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/types/dynamicpb" +) + +// RegisterExtensionsFromImportedFile registers extensions in the given file as well +// as those in its public imports. So if another file imports the given fd, this adds +// all extensions made visible to that importing file. +// +// All extensions in the given file are made visible to the importing file, and so are +// extensions in any public imports in the given file. +func RegisterExtensionsFromImportedFile(reg *protoregistry.Types, fd protoreflect.FileDescriptor) { + registerTypesForFile(reg, fd, true, true) +} + +// RegisterExtensionsVisibleToFile registers all extensions visible to the given file. +// This includes all extensions defined in fd and as well as extensions defined in the +// files that it imports (and any public imports thereof, etc). +// +// This is effectively the same as registering the extensions in fd and then calling +// RegisterExtensionsFromImportedFile for each file imported by fd. +func RegisterExtensionsVisibleToFile(reg *protoregistry.Types, fd protoreflect.FileDescriptor) { + registerTypesForFile(reg, fd, true, false) +} + +// RegisterTypesVisibleToFile registers all types visible to the given file. +// This is the same as RegisterExtensionsVisibleToFile but it also registers +// message and enum types, not just extensions. 
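These registration helpers live in an internal package and cannot be called from user code directly; a rough sketch of their effect, registering dynamic types for a single file so that a protoregistry.Types can serve as a resolver, looks like the following (top-level messages only, for brevity):

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/reflect/protoregistry"
        "google.golang.org/protobuf/types/descriptorpb"
        "google.golang.org/protobuf/types/dynamicpb"
    )

    func main() {
        fd := (&descriptorpb.FileDescriptorProto{}).ProtoReflect().Descriptor().ParentFile()

        var reg protoregistry.Types
        msgs := fd.Messages()
        for i := 0; i < msgs.Len(); i++ {
            // Roughly what registerTypes does for each message descriptor.
            _ = reg.RegisterMessage(dynamicpb.NewMessageType(msgs.Get(i)))
        }

        mt, err := reg.FindMessageByName("google.protobuf.FileDescriptorProto")
        if err != nil {
            panic(err)
        }
        fmt.Println(mt.Descriptor().FullName())
    }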
+func RegisterTypesVisibleToFile(reg *protoregistry.Types, fd protoreflect.FileDescriptor) { + registerTypesForFile(reg, fd, false, false) +} + +func registerTypesForFile(reg *protoregistry.Types, fd protoreflect.FileDescriptor, extensionsOnly, publicImportsOnly bool) { + registerTypes(reg, fd, extensionsOnly) + for i := 0; i < fd.Imports().Len(); i++ { + imp := fd.Imports().Get(i) + if imp.IsPublic || !publicImportsOnly { + registerTypesForFile(reg, imp, extensionsOnly, true) + } + } +} + +func registerTypes(reg *protoregistry.Types, elem fileOrMessage, extensionsOnly bool) { + for i := 0; i < elem.Extensions().Len(); i++ { + _ = reg.RegisterExtension(dynamicpb.NewExtensionType(elem.Extensions().Get(i))) + } + if !extensionsOnly { + for i := 0; i < elem.Messages().Len(); i++ { + _ = reg.RegisterMessage(dynamicpb.NewMessageType(elem.Messages().Get(i))) + } + for i := 0; i < elem.Enums().Len(); i++ { + _ = reg.RegisterEnum(dynamicpb.NewEnumType(elem.Enums().Get(i))) + } + } + for i := 0; i < elem.Messages().Len(); i++ { + registerTypes(reg, elem.Messages().Get(i), extensionsOnly) + } +} + +type fileOrMessage interface { + Extensions() protoreflect.ExtensionDescriptors + Messages() protoreflect.MessageDescriptors + Enums() protoreflect.EnumDescriptors +} diff --git a/vendor/github.com/jhump/protoreflect/desc/internal/source_info.go b/vendor/github.com/jhump/protoreflect/desc/internal/source_info.go new file mode 100644 index 00000000..60371288 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/internal/source_info.go @@ -0,0 +1,107 @@ +package internal + +import ( + "google.golang.org/protobuf/types/descriptorpb" +) + +// SourceInfoMap is a map of paths in a descriptor to the corresponding source +// code info. +type SourceInfoMap map[string][]*descriptorpb.SourceCodeInfo_Location + +// Get returns the source code info for the given path. If there are +// multiple locations for the same path, the first one is returned. +func (m SourceInfoMap) Get(path []int32) *descriptorpb.SourceCodeInfo_Location { + v := m[asMapKey(path)] + if len(v) > 0 { + return v[0] + } + return nil +} + +// GetAll returns all source code info for the given path. +func (m SourceInfoMap) GetAll(path []int32) []*descriptorpb.SourceCodeInfo_Location { + return m[asMapKey(path)] +} + +// Add stores the given source code info for the given path. +func (m SourceInfoMap) Add(path []int32, loc *descriptorpb.SourceCodeInfo_Location) { + m[asMapKey(path)] = append(m[asMapKey(path)], loc) +} + +// PutIfAbsent stores the given source code info for the given path only if the +// given path does not exist in the map. This method returns true when the value +// is stored, false if the path already exists. 
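The path keys follow the field numbers of descriptor.proto (see the tag constants in util.go further below). An example-test style sketch, which would need to live inside this internal package, of storing and retrieving the location of the second field of the first message in a file:

    package internal

    import (
        "fmt"

        "github.com/golang/protobuf/proto"
        "google.golang.org/protobuf/types/descriptorpb"
    )

    func ExampleSourceInfoMap() {
        m := SourceInfoMap{}
        path := []int32{4, 0, 2, 1} // file.message_type[0].field[1]
        m.Add(path, &descriptorpb.SourceCodeInfo_Location{
            Path:            path,
            LeadingComments: proto.String("a comment on the field\n"),
        })
        fmt.Print(m.Get(path).GetLeadingComments())
        // Output:
        // a comment on the field
    }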
+func (m SourceInfoMap) PutIfAbsent(path []int32, loc *descriptorpb.SourceCodeInfo_Location) bool { + k := asMapKey(path) + if _, ok := m[k]; ok { + return false + } + m[k] = []*descriptorpb.SourceCodeInfo_Location{loc} + return true +} + +func asMapKey(slice []int32) string { + // NB: arrays should be usable as map keys, but this does not + // work due to a bug: https://github.com/golang/go/issues/22605 + //rv := reflect.ValueOf(slice) + //arrayType := reflect.ArrayOf(rv.Len(), rv.Type().Elem()) + //array := reflect.New(arrayType).Elem() + //reflect.Copy(array, rv) + //return array.Interface() + + b := make([]byte, len(slice)*4) + j := 0 + for _, s := range slice { + b[j] = byte(s) + b[j+1] = byte(s >> 8) + b[j+2] = byte(s >> 16) + b[j+3] = byte(s >> 24) + j += 4 + } + return string(b) +} + +// CreateSourceInfoMap constructs a new SourceInfoMap and populates it with the +// source code info in the given file descriptor proto. +func CreateSourceInfoMap(fd *descriptorpb.FileDescriptorProto) SourceInfoMap { + res := SourceInfoMap{} + PopulateSourceInfoMap(fd, res) + return res +} + +// PopulateSourceInfoMap populates the given SourceInfoMap with information from +// the given file descriptor. +func PopulateSourceInfoMap(fd *descriptorpb.FileDescriptorProto, m SourceInfoMap) { + for _, l := range fd.GetSourceCodeInfo().GetLocation() { + m.Add(l.Path, l) + } +} + +// NB: This wonkiness allows desc.Descriptor impl to implement an interface that +// is only usable from this package, by embedding a SourceInfoComputeFunc that +// implements the actual logic (which must live in desc package to avoid a +// dependency cycle). + +// SourceInfoComputer is a single method which will be invoked to recompute +// source info. This is needed for the protoparse package, which needs to link +// descriptors without source info in order to interpret options, but then needs +// to re-compute source info after that interpretation so that final linked +// descriptors expose the right info. +type SourceInfoComputer interface { + recomputeSourceInfo() +} + +// SourceInfoComputeFunc is the type that a desc.Descriptor will embed. It will +// be aliased in the desc package to an unexported name so it is not marked as +// an exported field in reflection and not present in Go docs. +type SourceInfoComputeFunc func() + +func (f SourceInfoComputeFunc) recomputeSourceInfo() { + f() +} + +// RecomputeSourceInfo is used to initiate recomputation of source info. This is +// is used by the protoparse package, after it interprets options. +func RecomputeSourceInfo(c SourceInfoComputer) { + c.recomputeSourceInfo() +} diff --git a/vendor/github.com/jhump/protoreflect/desc/internal/util.go b/vendor/github.com/jhump/protoreflect/desc/internal/util.go new file mode 100644 index 00000000..595c8720 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/internal/util.go @@ -0,0 +1,296 @@ +package internal + +import ( + "math" + "unicode" + "unicode/utf8" +) + +const ( + // MaxNormalTag is the maximum allowed tag number for a field in a normal message. + MaxNormalTag = 536870911 // 2^29 - 1 + + // MaxMessageSetTag is the maximum allowed tag number of a field in a message that + // uses the message set wire format. + MaxMessageSetTag = math.MaxInt32 - 1 + + // MaxTag is the maximum allowed tag number. (It is the same as MaxMessageSetTag + // since that is the absolute highest allowed.) 
+ MaxTag = MaxMessageSetTag + + // SpecialReservedStart is the first tag in a range that is reserved and not + // allowed for use in message definitions. + SpecialReservedStart = 19000 + // SpecialReservedEnd is the last tag in a range that is reserved and not + // allowed for use in message definitions. + SpecialReservedEnd = 19999 + + // NB: It would be nice to use constants from generated code instead of + // hard-coding these here. But code-gen does not emit these as constants + // anywhere. The only places they appear in generated code are struct tags + // on fields of the generated descriptor protos. + + // File_packageTag is the tag number of the package element in a file + // descriptor proto. + File_packageTag = 2 + // File_dependencyTag is the tag number of the dependencies element in a + // file descriptor proto. + File_dependencyTag = 3 + // File_messagesTag is the tag number of the messages element in a file + // descriptor proto. + File_messagesTag = 4 + // File_enumsTag is the tag number of the enums element in a file descriptor + // proto. + File_enumsTag = 5 + // File_servicesTag is the tag number of the services element in a file + // descriptor proto. + File_servicesTag = 6 + // File_extensionsTag is the tag number of the extensions element in a file + // descriptor proto. + File_extensionsTag = 7 + // File_optionsTag is the tag number of the options element in a file + // descriptor proto. + File_optionsTag = 8 + // File_syntaxTag is the tag number of the syntax element in a file + // descriptor proto. + File_syntaxTag = 12 + // File_editionTag is the tag number of the edition element in a file + // descriptor proto. + File_editionTag = 14 + // Message_nameTag is the tag number of the name element in a message + // descriptor proto. + Message_nameTag = 1 + // Message_fieldsTag is the tag number of the fields element in a message + // descriptor proto. + Message_fieldsTag = 2 + // Message_nestedMessagesTag is the tag number of the nested messages + // element in a message descriptor proto. + Message_nestedMessagesTag = 3 + // Message_enumsTag is the tag number of the enums element in a message + // descriptor proto. + Message_enumsTag = 4 + // Message_extensionRangeTag is the tag number of the extension ranges + // element in a message descriptor proto. + Message_extensionRangeTag = 5 + // Message_extensionsTag is the tag number of the extensions element in a + // message descriptor proto. + Message_extensionsTag = 6 + // Message_optionsTag is the tag number of the options element in a message + // descriptor proto. + Message_optionsTag = 7 + // Message_oneOfsTag is the tag number of the one-ofs element in a message + // descriptor proto. + Message_oneOfsTag = 8 + // Message_reservedRangeTag is the tag number of the reserved ranges element + // in a message descriptor proto. + Message_reservedRangeTag = 9 + // Message_reservedNameTag is the tag number of the reserved names element + // in a message descriptor proto. + Message_reservedNameTag = 10 + // ExtensionRange_startTag is the tag number of the start index in an + // extension range proto. + ExtensionRange_startTag = 1 + // ExtensionRange_endTag is the tag number of the end index in an + // extension range proto. + ExtensionRange_endTag = 2 + // ExtensionRange_optionsTag is the tag number of the options element in an + // extension range proto. + ExtensionRange_optionsTag = 3 + // ReservedRange_startTag is the tag number of the start index in a reserved + // range proto. 
+ ReservedRange_startTag = 1 + // ReservedRange_endTag is the tag number of the end index in a reserved + // range proto. + ReservedRange_endTag = 2 + // Field_nameTag is the tag number of the name element in a field descriptor + // proto. + Field_nameTag = 1 + // Field_extendeeTag is the tag number of the extendee element in a field + // descriptor proto. + Field_extendeeTag = 2 + // Field_numberTag is the tag number of the number element in a field + // descriptor proto. + Field_numberTag = 3 + // Field_labelTag is the tag number of the label element in a field + // descriptor proto. + Field_labelTag = 4 + // Field_typeTag is the tag number of the type element in a field descriptor + // proto. + Field_typeTag = 5 + // Field_typeNameTag is the tag number of the type name element in a field + // descriptor proto. + Field_typeNameTag = 6 + // Field_defaultTag is the tag number of the default value element in a + // field descriptor proto. + Field_defaultTag = 7 + // Field_optionsTag is the tag number of the options element in a field + // descriptor proto. + Field_optionsTag = 8 + // Field_jsonNameTag is the tag number of the JSON name element in a field + // descriptor proto. + Field_jsonNameTag = 10 + // Field_proto3OptionalTag is the tag number of the proto3_optional element + // in a descriptor proto. + Field_proto3OptionalTag = 17 + // OneOf_nameTag is the tag number of the name element in a one-of + // descriptor proto. + OneOf_nameTag = 1 + // OneOf_optionsTag is the tag number of the options element in a one-of + // descriptor proto. + OneOf_optionsTag = 2 + // Enum_nameTag is the tag number of the name element in an enum descriptor + // proto. + Enum_nameTag = 1 + // Enum_valuesTag is the tag number of the values element in an enum + // descriptor proto. + Enum_valuesTag = 2 + // Enum_optionsTag is the tag number of the options element in an enum + // descriptor proto. + Enum_optionsTag = 3 + // Enum_reservedRangeTag is the tag number of the reserved ranges element in + // an enum descriptor proto. + Enum_reservedRangeTag = 4 + // Enum_reservedNameTag is the tag number of the reserved names element in + // an enum descriptor proto. + Enum_reservedNameTag = 5 + // EnumVal_nameTag is the tag number of the name element in an enum value + // descriptor proto. + EnumVal_nameTag = 1 + // EnumVal_numberTag is the tag number of the number element in an enum + // value descriptor proto. + EnumVal_numberTag = 2 + // EnumVal_optionsTag is the tag number of the options element in an enum + // value descriptor proto. + EnumVal_optionsTag = 3 + // Service_nameTag is the tag number of the name element in a service + // descriptor proto. + Service_nameTag = 1 + // Service_methodsTag is the tag number of the methods element in a service + // descriptor proto. + Service_methodsTag = 2 + // Service_optionsTag is the tag number of the options element in a service + // descriptor proto. + Service_optionsTag = 3 + // Method_nameTag is the tag number of the name element in a method + // descriptor proto. + Method_nameTag = 1 + // Method_inputTag is the tag number of the input type element in a method + // descriptor proto. + Method_inputTag = 2 + // Method_outputTag is the tag number of the output type element in a method + // descriptor proto. + Method_outputTag = 3 + // Method_optionsTag is the tag number of the options element in a method + // descriptor proto. + Method_optionsTag = 4 + // Method_inputStreamTag is the tag number of the input stream flag in a + // method descriptor proto. 
+ Method_inputStreamTag = 5 + // Method_outputStreamTag is the tag number of the output stream flag in a + // method descriptor proto. + Method_outputStreamTag = 6 + + // UninterpretedOptionsTag is the tag number of the uninterpreted options + // element. All *Options messages use the same tag for the field that stores + // uninterpreted options. + UninterpretedOptionsTag = 999 + + // Uninterpreted_nameTag is the tag number of the name element in an + // uninterpreted options proto. + Uninterpreted_nameTag = 2 + // Uninterpreted_identTag is the tag number of the identifier value in an + // uninterpreted options proto. + Uninterpreted_identTag = 3 + // Uninterpreted_posIntTag is the tag number of the positive int value in an + // uninterpreted options proto. + Uninterpreted_posIntTag = 4 + // Uninterpreted_negIntTag is the tag number of the negative int value in an + // uninterpreted options proto. + Uninterpreted_negIntTag = 5 + // Uninterpreted_doubleTag is the tag number of the double value in an + // uninterpreted options proto. + Uninterpreted_doubleTag = 6 + // Uninterpreted_stringTag is the tag number of the string value in an + // uninterpreted options proto. + Uninterpreted_stringTag = 7 + // Uninterpreted_aggregateTag is the tag number of the aggregate value in an + // uninterpreted options proto. + Uninterpreted_aggregateTag = 8 + // UninterpretedName_nameTag is the tag number of the name element in an + // uninterpreted option name proto. + UninterpretedName_nameTag = 1 +) + +// JsonName returns the default JSON name for a field with the given name. +// This mirrors the algorithm in protoc: +// +// https://github.com/protocolbuffers/protobuf/blob/v21.3/src/google/protobuf/descriptor.cc#L95 +func JsonName(name string) string { + var js []rune + nextUpper := false + for _, r := range name { + if r == '_' { + nextUpper = true + continue + } + if nextUpper { + nextUpper = false + js = append(js, unicode.ToUpper(r)) + } else { + js = append(js, r) + } + } + return string(js) +} + +// InitCap returns the given field name, but with the first letter capitalized. +func InitCap(name string) string { + r, sz := utf8.DecodeRuneInString(name) + return string(unicode.ToUpper(r)) + name[sz:] +} + +// CreatePrefixList returns a list of package prefixes to search when resolving +// a symbol name. If the given package is blank, it returns only the empty +// string. If the given package contains only one token, e.g. "foo", it returns +// that token and the empty string, e.g. ["foo", ""]. Otherwise, it returns +// successively shorter prefixes of the package and then the empty string. For +// example, for a package named "foo.bar.baz" it will return the following list: +// +// ["foo.bar.baz", "foo.bar", "foo", ""] +func CreatePrefixList(pkg string) []string { + if pkg == "" { + return []string{""} + } + + numDots := 0 + // one pass to pre-allocate the returned slice + for i := 0; i < len(pkg); i++ { + if pkg[i] == '.' { + numDots++ + } + } + if numDots == 0 { + return []string{pkg, ""} + } + + prefixes := make([]string, numDots+2) + // second pass to fill in returned slice + for i := 0; i < len(pkg); i++ { + if pkg[i] == '.' { + prefixes[numDots] = pkg[:i] + numDots-- + } + } + prefixes[0] = pkg + + return prefixes +} + +// GetMaxTag returns the max tag number allowed, based on whether a message uses +// message set wire format or not. 
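An example-test style sketch of the name helpers above (it would need to be compiled inside this internal package):

    package internal

    import "fmt"

    func ExampleJsonName() {
        fmt.Println(JsonName("foo_bar_baz"))
        fmt.Println(InitCap("fooBar"))
        fmt.Println(CreatePrefixList("foo.bar.baz"))
        // Output:
        // fooBarBaz
        // FooBar
        // [foo.bar.baz foo.bar foo ]
    }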
+func GetMaxTag(isMessageSet bool) int32 { + if isMessageSet { + return MaxMessageSetTag + } + return MaxNormalTag +} diff --git a/vendor/github.com/jhump/protoreflect/desc/load.go b/vendor/github.com/jhump/protoreflect/desc/load.go new file mode 100644 index 00000000..8776ab0b --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/load.go @@ -0,0 +1,257 @@ +package desc + +import ( + "fmt" + "reflect" + "sync" + + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/types/descriptorpb" + + "github.com/jhump/protoreflect/desc/sourceinfo" + "github.com/jhump/protoreflect/internal" +) + +// The global cache is used to store descriptors that wrap items in +// protoregistry.GlobalTypes and protoregistry.GlobalFiles. This prevents +// repeating work to re-wrap underlying global descriptors. +var ( + // We put all wrapped file and message descriptors in this cache. + loadedDescriptors = lockingCache{cache: mapCache{}} + + // Unfortunately, we need a different mechanism for enums for + // compatibility with old APIs, which required that they were + // registered in a different way :( + loadedEnumsMu sync.RWMutex + loadedEnums = map[reflect.Type]*EnumDescriptor{} +) + +// LoadFileDescriptor creates a file descriptor using the bytes returned by +// proto.FileDescriptor. Descriptors are cached so that they do not need to be +// re-processed if the same file is fetched again later. +func LoadFileDescriptor(file string) (*FileDescriptor, error) { + d, err := sourceinfo.GlobalFiles.FindFileByPath(file) + if err == protoregistry.NotFound { + // for backwards compatibility, see if this matches a known old + // alias for the file (older versions of libraries that registered + // the files using incorrect/non-canonical paths) + if alt := internal.StdFileAliases[file]; alt != "" { + d, err = sourceinfo.GlobalFiles.FindFileByPath(alt) + } + } + if err != nil { + if err != protoregistry.NotFound { + return nil, internal.ErrNoSuchFile(file) + } + return nil, err + } + if fd := loadedDescriptors.get(d); fd != nil { + return fd.(*FileDescriptor), nil + } + + var fd *FileDescriptor + loadedDescriptors.withLock(func(cache descriptorCache) { + fd, err = wrapFile(d, cache) + }) + return fd, err +} + +// LoadMessageDescriptor loads descriptor using the encoded descriptor proto returned by +// Message.Descriptor() for the given message type. If the given type is not recognized, +// then a nil descriptor is returned. +func LoadMessageDescriptor(message string) (*MessageDescriptor, error) { + mt, err := sourceinfo.GlobalTypes.FindMessageByName(protoreflect.FullName(message)) + if err != nil { + if err == protoregistry.NotFound { + return nil, nil + } + return nil, err + } + return loadMessageDescriptor(mt.Descriptor()) +} + +func loadMessageDescriptor(md protoreflect.MessageDescriptor) (*MessageDescriptor, error) { + d := loadedDescriptors.get(md) + if d != nil { + return d.(*MessageDescriptor), nil + } + + var err error + loadedDescriptors.withLock(func(cache descriptorCache) { + d, err = wrapMessage(md, cache) + }) + if err != nil { + return nil, err + } + return d.(*MessageDescriptor), err +} + +// LoadMessageDescriptorForType loads descriptor using the encoded descriptor proto returned +// by message.Descriptor() for the given message type. If the given type is not recognized, +// then a nil descriptor is returned. 
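A sketch of the cached loading path above, using a file that is registered as soon as the generated descriptorpb package is linked in:

    package main

    import (
        "fmt"

        "github.com/jhump/protoreflect/desc"
        _ "google.golang.org/protobuf/types/descriptorpb" // registers google/protobuf/descriptor.proto
    )

    func main() {
        fd, err := desc.LoadFileDescriptor("google/protobuf/descriptor.proto")
        if err != nil {
            panic(err)
        }
        fmt.Println(fd.GetPackage())               // google.protobuf
        fmt.Println(len(fd.GetMessageTypes()) > 0) // true

        // A second load hits the cache and returns the same wrapped descriptor.
        fd2, _ := desc.LoadFileDescriptor("google/protobuf/descriptor.proto")
        fmt.Println(fd == fd2) // true
    }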
+func LoadMessageDescriptorForType(messageType reflect.Type) (*MessageDescriptor, error) { + m, err := messageFromType(messageType) + if err != nil { + return nil, err + } + return LoadMessageDescriptorForMessage(m) +} + +// LoadMessageDescriptorForMessage loads descriptor using the encoded descriptor proto +// returned by message.Descriptor(). If the given type is not recognized, then a nil +// descriptor is returned. +func LoadMessageDescriptorForMessage(message proto.Message) (*MessageDescriptor, error) { + // efficiently handle dynamic messages + type descriptorable interface { + GetMessageDescriptor() *MessageDescriptor + } + if d, ok := message.(descriptorable); ok { + return d.GetMessageDescriptor(), nil + } + + var md protoreflect.MessageDescriptor + if m, ok := message.(protoreflect.ProtoMessage); ok { + md = m.ProtoReflect().Descriptor() + } else { + md = proto.MessageReflect(message).Descriptor() + } + return loadMessageDescriptor(sourceinfo.WrapMessage(md)) +} + +func messageFromType(mt reflect.Type) (proto.Message, error) { + if mt.Kind() != reflect.Ptr { + mt = reflect.PtrTo(mt) + } + m, ok := reflect.Zero(mt).Interface().(proto.Message) + if !ok { + return nil, fmt.Errorf("failed to create message from type: %v", mt) + } + return m, nil +} + +// interface implemented by all generated enums +type protoEnum interface { + EnumDescriptor() ([]byte, []int) +} + +// NB: There is no LoadEnumDescriptor that takes a fully-qualified enum name because +// it is not useful since protoc-gen-go does not expose the name anywhere in generated +// code or register it in a way that is it accessible for reflection code. This also +// means we have to cache enum descriptors differently -- we can only cache them as +// they are requested, as opposed to caching all enum types whenever a file descriptor +// is cached. This is because we need to know the generated type of the enums, and we +// don't know that at the time of caching file descriptors. + +// LoadEnumDescriptorForType loads descriptor using the encoded descriptor proto returned +// by enum.EnumDescriptor() for the given enum type. +func LoadEnumDescriptorForType(enumType reflect.Type) (*EnumDescriptor, error) { + // we cache descriptors using non-pointer type + if enumType.Kind() == reflect.Ptr { + enumType = enumType.Elem() + } + e := getEnumFromCache(enumType) + if e != nil { + return e, nil + } + enum, err := enumFromType(enumType) + if err != nil { + return nil, err + } + + return loadEnumDescriptor(enumType, enum) +} + +func getEnumFromCache(t reflect.Type) *EnumDescriptor { + loadedEnumsMu.RLock() + defer loadedEnumsMu.RUnlock() + return loadedEnums[t] +} + +func putEnumInCache(t reflect.Type, d *EnumDescriptor) { + loadedEnumsMu.Lock() + defer loadedEnumsMu.Unlock() + loadedEnums[t] = d +} + +// LoadEnumDescriptorForEnum loads descriptor using the encoded descriptor proto +// returned by enum.EnumDescriptor(). 
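A sketch of loading an enum descriptor from a generated enum value; descriptorpb's FieldDescriptorProto_Type is used here only as a convenient example of a generated enum that still carries the legacy EnumDescriptor method:

    package main

    import (
        "fmt"

        "github.com/jhump/protoreflect/desc"
        "google.golang.org/protobuf/types/descriptorpb"
    )

    func main() {
        ed, err := desc.LoadEnumDescriptorForEnum(descriptorpb.FieldDescriptorProto_TYPE_BOOL)
        if err != nil {
            panic(err)
        }
        fmt.Println(ed.GetFullyQualifiedName()) // google.protobuf.FieldDescriptorProto.Type
        fmt.Println(ed.GetFile().GetName())     // google/protobuf/descriptor.proto
    }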
+func LoadEnumDescriptorForEnum(enum protoEnum) (*EnumDescriptor, error) { + et := reflect.TypeOf(enum) + // we cache descriptors using non-pointer type + if et.Kind() == reflect.Ptr { + et = et.Elem() + enum = reflect.Zero(et).Interface().(protoEnum) + } + e := getEnumFromCache(et) + if e != nil { + return e, nil + } + + return loadEnumDescriptor(et, enum) +} + +func enumFromType(et reflect.Type) (protoEnum, error) { + e, ok := reflect.Zero(et).Interface().(protoEnum) + if !ok { + if et.Kind() != reflect.Ptr { + et = et.Elem() + } + e, ok = reflect.Zero(et).Interface().(protoEnum) + } + if !ok { + return nil, fmt.Errorf("failed to create enum from type: %v", et) + } + return e, nil +} + +func getDescriptorForEnum(enum protoEnum) (*descriptorpb.FileDescriptorProto, []int, error) { + fdb, path := enum.EnumDescriptor() + name := fmt.Sprintf("%T", enum) + fd, err := internal.DecodeFileDescriptor(name, fdb) + return fd, path, err +} + +func loadEnumDescriptor(et reflect.Type, enum protoEnum) (*EnumDescriptor, error) { + fdp, path, err := getDescriptorForEnum(enum) + if err != nil { + return nil, err + } + + fd, err := LoadFileDescriptor(fdp.GetName()) + if err != nil { + return nil, err + } + + ed := findEnum(fd, path) + putEnumInCache(et, ed) + return ed, nil +} + +func findEnum(fd *FileDescriptor, path []int) *EnumDescriptor { + if len(path) == 1 { + return fd.GetEnumTypes()[path[0]] + } + md := fd.GetMessageTypes()[path[0]] + for _, i := range path[1 : len(path)-1] { + md = md.GetNestedMessageTypes()[i] + } + return md.GetNestedEnumTypes()[path[len(path)-1]] +} + +// LoadFieldDescriptorForExtension loads the field descriptor that corresponds to the given +// extension description. +func LoadFieldDescriptorForExtension(ext *proto.ExtensionDesc) (*FieldDescriptor, error) { + file, err := LoadFileDescriptor(ext.Filename) + if err != nil { + return nil, err + } + field, ok := file.FindSymbol(ext.Name).(*FieldDescriptor) + // make sure descriptor agrees with attributes of the ExtensionDesc + if !ok || !field.IsExtension() || field.GetOwner().GetFullyQualifiedName() != proto.MessageName(ext.ExtendedType) || + field.GetNumber() != ext.Field { + return nil, fmt.Errorf("file descriptor contained unexpected object with name %s", ext.Name) + } + return field, nil +} diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast.go new file mode 100644 index 00000000..2b6b1244 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast.go @@ -0,0 +1,716 @@ +package protoparse + +import ( + "fmt" + + "github.com/bufbuild/protocompile/ast" + + ast2 "github.com/jhump/protoreflect/desc/protoparse/ast" +) + +func convertAST(file *ast.FileNode) *ast2.FileNode { + elements := make([]ast2.FileElement, len(file.Decls)) + for i := range file.Decls { + elements[i] = convertASTFileElement(file, file.Decls[i]) + } + root := ast2.NewFileNode(convertASTSyntax(file, file.Syntax), elements) + eofInfo := file.NodeInfo(file.EOF) + root.FinalComments = convertASTComments(eofInfo.LeadingComments()) + root.FinalWhitespace = eofInfo.LeadingWhitespace() + return root +} + +func convertASTSyntax(f *ast.FileNode, s *ast.SyntaxNode) *ast2.SyntaxNode { + return ast2.NewSyntaxNode( + convertASTKeyword(f, s.Keyword), + convertASTRune(f, s.Equals), + convertASTString(f, s.Syntax), + convertASTRune(f, s.Semicolon), + ) +} + +func convertASTFileElement(f *ast.FileNode, el ast.FileElement) ast2.FileElement { + switch el := el.(type) { + case 
*ast.ImportNode: + return convertASTImport(f, el) + case *ast.PackageNode: + return convertASTPackage(f, el) + case *ast.OptionNode: + return convertASTOption(f, el) + case *ast.MessageNode: + return convertASTMessage(f, el) + case *ast.EnumNode: + return convertASTEnum(f, el) + case *ast.ExtendNode: + return convertASTExtend(f, el) + case *ast.ServiceNode: + return convertASTService(f, el) + case *ast.EmptyDeclNode: + return convertASTEmpty(f, el) + default: + panic(fmt.Sprintf("unrecognized type of ast.FileElement: %T", el)) + } +} + +func convertASTImport(f *ast.FileNode, imp *ast.ImportNode) *ast2.ImportNode { + var public, weak *ast2.KeywordNode + if imp.Public != nil { + public = convertASTKeyword(f, imp.Public) + } + if imp.Weak != nil { + weak = convertASTKeyword(f, imp.Weak) + } + return ast2.NewImportNode( + convertASTKeyword(f, imp.Keyword), + public, weak, + convertASTString(f, imp.Name), + convertASTRune(f, imp.Semicolon), + ) +} + +func convertASTPackage(f *ast.FileNode, p *ast.PackageNode) *ast2.PackageNode { + return ast2.NewPackageNode( + convertASTKeyword(f, p.Keyword), + convertASTIdent(f, p.Name), + convertASTRune(f, p.Semicolon), + ) +} + +func convertASTOption(f *ast.FileNode, o *ast.OptionNode) *ast2.OptionNode { + if o.Keyword == nil { + return ast2.NewCompactOptionNode( + convertASTOptionName(f, o.Name), + convertASTRune(f, o.Equals), + convertASTValue(f, o.Val), + ) + } + return ast2.NewOptionNode( + convertASTKeyword(f, o.Keyword), + convertASTOptionName(f, o.Name), + convertASTRune(f, o.Equals), + convertASTValue(f, o.Val), + convertASTRune(f, o.Semicolon), + ) +} + +func convertASTOptionName(f *ast.FileNode, n *ast.OptionNameNode) *ast2.OptionNameNode { + parts := make([]*ast2.FieldReferenceNode, len(n.Parts)) + for i := range n.Parts { + parts[i] = convertASTFieldReference(f, n.Parts[i]) + } + dots := make([]*ast2.RuneNode, len(n.Dots)) + for i := range n.Dots { + dots[i] = convertASTRune(f, n.Dots[i]) + } + return ast2.NewOptionNameNode(parts, dots) +} + +func convertASTFieldReference(f *ast.FileNode, n *ast.FieldReferenceNode) *ast2.FieldReferenceNode { + switch { + case n.IsExtension(): + return ast2.NewExtensionFieldReferenceNode( + convertASTRune(f, n.Open), + convertASTIdent(f, n.Name), + convertASTRune(f, n.Close), + ) + case n.IsAnyTypeReference(): + return ast2.NewAnyTypeReferenceNode( + convertASTRune(f, n.Open), + convertASTIdent(f, n.URLPrefix), + convertASTRune(f, n.Slash), + convertASTIdent(f, n.Name), + convertASTRune(f, n.Close), + ) + default: + return ast2.NewFieldReferenceNode(convertASTIdent(f, n.Name).(*ast2.IdentNode)) + } +} + +func convertASTMessage(f *ast.FileNode, m *ast.MessageNode) *ast2.MessageNode { + decls := make([]ast2.MessageElement, len(m.Decls)) + for i := range m.Decls { + decls[i] = convertASTMessageElement(f, m.Decls[i]) + } + return ast2.NewMessageNode( + convertASTKeyword(f, m.Keyword), + convertASTIdentToken(f, m.Name), + convertASTRune(f, m.OpenBrace), + decls, + convertASTRune(f, m.CloseBrace), + ) +} + +func convertASTMessageElement(f *ast.FileNode, el ast.MessageElement) ast2.MessageElement { + switch el := el.(type) { + case *ast.OptionNode: + return convertASTOption(f, el) + case *ast.FieldNode: + return convertASTField(f, el) + case *ast.MapFieldNode: + return convertASTMapField(f, el) + case *ast.OneofNode: + return convertASTOneOf(f, el) + case *ast.GroupNode: + return convertASTGroup(f, el) + case *ast.MessageNode: + return convertASTMessage(f, el) + case *ast.EnumNode: + return convertASTEnum(f, el) + case 
*ast.ExtendNode: + return convertASTExtend(f, el) + case *ast.ExtensionRangeNode: + return convertASTExtensions(f, el) + case *ast.ReservedNode: + return convertASTReserved(f, el) + case *ast.EmptyDeclNode: + return convertASTEmpty(f, el) + default: + panic(fmt.Sprintf("unrecognized type of ast.MessageElement: %T", el)) + } +} + +func convertASTField(f *ast.FileNode, fld *ast.FieldNode) *ast2.FieldNode { + var lbl *ast2.KeywordNode + if fld.Label.KeywordNode != nil { + lbl = convertASTKeyword(f, fld.Label.KeywordNode) + } + var opts *ast2.CompactOptionsNode + if fld.Options != nil { + opts = convertASTCompactOptions(f, fld.Options) + } + return ast2.NewFieldNode( + lbl, + convertASTIdent(f, fld.FldType), + convertASTIdentToken(f, fld.Name), + convertASTRune(f, fld.Equals), + convertASTUintLiteral(f, fld.Tag), + opts, + convertASTRune(f, fld.Semicolon), + ) +} + +func convertASTMapField(f *ast.FileNode, fld *ast.MapFieldNode) *ast2.MapFieldNode { + var opts *ast2.CompactOptionsNode + if fld.Options != nil { + opts = convertASTCompactOptions(f, fld.Options) + } + return ast2.NewMapFieldNode( + convertASTMapFieldType(f, fld.MapType), + convertASTIdentToken(f, fld.Name), + convertASTRune(f, fld.Equals), + convertASTUintLiteral(f, fld.Tag), + opts, + convertASTRune(f, fld.Semicolon), + ) +} + +func convertASTMapFieldType(f *ast.FileNode, t *ast.MapTypeNode) *ast2.MapTypeNode { + return ast2.NewMapTypeNode( + convertASTKeyword(f, t.Keyword), + convertASTRune(f, t.OpenAngle), + convertASTIdentToken(f, t.KeyType), + convertASTRune(f, t.Comma), + convertASTIdent(f, t.ValueType), + convertASTRune(f, t.CloseAngle), + ) +} + +func convertASTGroup(f *ast.FileNode, g *ast.GroupNode) *ast2.GroupNode { + var lbl *ast2.KeywordNode + if g.Label.KeywordNode != nil { + lbl = convertASTKeyword(f, g.Label.KeywordNode) + } + var opts *ast2.CompactOptionsNode + if g.Options != nil { + opts = convertASTCompactOptions(f, g.Options) + } + decls := make([]ast2.MessageElement, len(g.Decls)) + for i := range g.Decls { + decls[i] = convertASTMessageElement(f, g.Decls[i]) + } + return ast2.NewGroupNode( + lbl, + convertASTKeyword(f, g.Keyword), + convertASTIdentToken(f, g.Name), + convertASTRune(f, g.Equals), + convertASTUintLiteral(f, g.Tag), + opts, + convertASTRune(f, g.OpenBrace), + decls, + convertASTRune(f, g.CloseBrace), + ) +} + +func convertASTOneOf(f *ast.FileNode, oo *ast.OneofNode) *ast2.OneOfNode { + decls := make([]ast2.OneOfElement, len(oo.Decls)) + for i := range oo.Decls { + decls[i] = convertASTOneOfElement(f, oo.Decls[i]) + } + return ast2.NewOneOfNode( + convertASTKeyword(f, oo.Keyword), + convertASTIdentToken(f, oo.Name), + convertASTRune(f, oo.OpenBrace), + decls, + convertASTRune(f, oo.CloseBrace), + ) +} + +func convertASTOneOfElement(f *ast.FileNode, el ast.OneofElement) ast2.OneOfElement { + switch el := el.(type) { + case *ast.OptionNode: + return convertASTOption(f, el) + case *ast.FieldNode: + return convertASTField(f, el) + case *ast.GroupNode: + return convertASTGroup(f, el) + case *ast.EmptyDeclNode: + return convertASTEmpty(f, el) + default: + panic(fmt.Sprintf("unrecognized type of ast.OneOfElement: %T", el)) + } +} + +func convertASTExtensions(f *ast.FileNode, e *ast.ExtensionRangeNode) *ast2.ExtensionRangeNode { + var opts *ast2.CompactOptionsNode + if e.Options != nil { + opts = convertASTCompactOptions(f, e.Options) + } + ranges := make([]*ast2.RangeNode, len(e.Ranges)) + for i := range e.Ranges { + ranges[i] = convertASTRange(f, e.Ranges[i]) + } + commas := make([]*ast2.RuneNode, 
len(e.Commas)) + for i := range e.Commas { + commas[i] = convertASTRune(f, e.Commas[i]) + } + return ast2.NewExtensionRangeNode( + convertASTKeyword(f, e.Keyword), + ranges, commas, opts, + convertASTRune(f, e.Semicolon), + ) +} + +func convertASTReserved(f *ast.FileNode, r *ast.ReservedNode) *ast2.ReservedNode { + ranges := make([]*ast2.RangeNode, len(r.Ranges)) + for i := range r.Ranges { + ranges[i] = convertASTRange(f, r.Ranges[i]) + } + commas := make([]*ast2.RuneNode, len(r.Commas)) + for i := range r.Commas { + commas[i] = convertASTRune(f, r.Commas[i]) + } + names := make([]ast2.StringValueNode, len(r.Names)) + for i := range r.Names { + names[i] = convertASTString(f, r.Names[i]) + } + if len(r.Ranges) > 0 { + return ast2.NewReservedRangesNode( + convertASTKeyword(f, r.Keyword), + ranges, commas, + convertASTRune(f, r.Semicolon), + ) + } + return ast2.NewReservedNamesNode( + convertASTKeyword(f, r.Keyword), + names, commas, + convertASTRune(f, r.Semicolon), + ) +} + +func convertASTRange(f *ast.FileNode, r *ast.RangeNode) *ast2.RangeNode { + var to, max *ast2.KeywordNode + var end ast2.IntValueNode + if r.To != nil { + to = convertASTKeyword(f, r.To) + } + if r.Max != nil { + max = convertASTKeyword(f, r.Max) + } + if r.EndVal != nil { + end = convertASTInt(f, r.EndVal) + } + return ast2.NewRangeNode( + convertASTInt(f, r.StartVal), + to, end, max, + ) +} + +func convertASTEnum(f *ast.FileNode, e *ast.EnumNode) *ast2.EnumNode { + decls := make([]ast2.EnumElement, len(e.Decls)) + for i := range e.Decls { + decls[i] = convertASTEnumElement(f, e.Decls[i]) + } + return ast2.NewEnumNode( + convertASTKeyword(f, e.Keyword), + convertASTIdentToken(f, e.Name), + convertASTRune(f, e.OpenBrace), + decls, + convertASTRune(f, e.CloseBrace), + ) +} + +func convertASTEnumElement(f *ast.FileNode, el ast.EnumElement) ast2.EnumElement { + switch el := el.(type) { + case *ast.OptionNode: + return convertASTOption(f, el) + case *ast.EnumValueNode: + return convertASTEnumValue(f, el) + case *ast.ReservedNode: + return convertASTReserved(f, el) + case *ast.EmptyDeclNode: + return convertASTEmpty(f, el) + default: + panic(fmt.Sprintf("unrecognized type of ast.EnumElement: %T", el)) + } +} + +func convertASTEnumValue(f *ast.FileNode, e *ast.EnumValueNode) *ast2.EnumValueNode { + var opts *ast2.CompactOptionsNode + if e.Options != nil { + opts = convertASTCompactOptions(f, e.Options) + } + return ast2.NewEnumValueNode( + convertASTIdentToken(f, e.Name), + convertASTRune(f, e.Equals), + convertASTInt(f, e.Number), + opts, + convertASTRune(f, e.Semicolon), + ) +} + +func convertASTExtend(f *ast.FileNode, e *ast.ExtendNode) *ast2.ExtendNode { + decls := make([]ast2.ExtendElement, len(e.Decls)) + for i := range e.Decls { + decls[i] = convertASTExtendElement(f, e.Decls[i]) + } + return ast2.NewExtendNode( + convertASTKeyword(f, e.Keyword), + convertASTIdent(f, e.Extendee), + convertASTRune(f, e.OpenBrace), + decls, + convertASTRune(f, e.CloseBrace), + ) +} + +func convertASTExtendElement(f *ast.FileNode, el ast.ExtendElement) ast2.ExtendElement { + switch el := el.(type) { + case *ast.FieldNode: + return convertASTField(f, el) + case *ast.GroupNode: + return convertASTGroup(f, el) + case *ast.EmptyDeclNode: + return convertASTEmpty(f, el) + default: + panic(fmt.Sprintf("unrecognized type of ast.ExtendElement: %T", el)) + } +} + +func convertASTService(f *ast.FileNode, s *ast.ServiceNode) *ast2.ServiceNode { + decls := make([]ast2.ServiceElement, len(s.Decls)) + for i := range s.Decls { + decls[i] = 
convertASTServiceElement(f, s.Decls[i]) + } + return ast2.NewServiceNode( + convertASTKeyword(f, s.Keyword), + convertASTIdentToken(f, s.Name), + convertASTRune(f, s.OpenBrace), + decls, + convertASTRune(f, s.CloseBrace), + ) +} + +func convertASTServiceElement(f *ast.FileNode, el ast.ServiceElement) ast2.ServiceElement { + switch el := el.(type) { + case *ast.OptionNode: + return convertASTOption(f, el) + case *ast.RPCNode: + return convertASTMethod(f, el) + case *ast.EmptyDeclNode: + return convertASTEmpty(f, el) + default: + panic(fmt.Sprintf("unrecognized type of ast.ServiceElement: %T", el)) + } +} + +func convertASTMethod(f *ast.FileNode, m *ast.RPCNode) *ast2.RPCNode { + if m.OpenBrace == nil { + return ast2.NewRPCNode( + convertASTKeyword(f, m.Keyword), + convertASTIdentToken(f, m.Name), + convertASTMethodType(f, m.Input), + convertASTKeyword(f, m.Returns), + convertASTMethodType(f, m.Output), + convertASTRune(f, m.Semicolon), + ) + } + decls := make([]ast2.RPCElement, len(m.Decls)) + for i := range m.Decls { + decls[i] = convertASTMethodElement(f, m.Decls[i]) + } + return ast2.NewRPCNodeWithBody( + convertASTKeyword(f, m.Keyword), + convertASTIdentToken(f, m.Name), + convertASTMethodType(f, m.Input), + convertASTKeyword(f, m.Returns), + convertASTMethodType(f, m.Output), + convertASTRune(f, m.OpenBrace), + decls, + convertASTRune(f, m.CloseBrace), + ) +} + +func convertASTMethodElement(f *ast.FileNode, el ast.RPCElement) ast2.RPCElement { + switch el := el.(type) { + case *ast.OptionNode: + return convertASTOption(f, el) + case *ast.EmptyDeclNode: + return convertASTEmpty(f, el) + default: + panic(fmt.Sprintf("unrecognized type of ast.RPCElement: %T", el)) + } +} + +func convertASTMethodType(f *ast.FileNode, t *ast.RPCTypeNode) *ast2.RPCTypeNode { + var stream *ast2.KeywordNode + if t.Stream != nil { + stream = convertASTKeyword(f, t.Stream) + } + return ast2.NewRPCTypeNode( + convertASTRune(f, t.OpenParen), + stream, + convertASTIdent(f, t.MessageType), + convertASTRune(f, t.CloseParen), + ) +} + +func convertASTCompactOptions(f *ast.FileNode, opts *ast.CompactOptionsNode) *ast2.CompactOptionsNode { + elems := make([]*ast2.OptionNode, len(opts.Options)) + for i := range opts.Options { + elems[i] = convertASTOption(f, opts.Options[i]) + } + commas := make([]*ast2.RuneNode, len(opts.Commas)) + for i := range opts.Commas { + commas[i] = convertASTRune(f, opts.Commas[i]) + } + return ast2.NewCompactOptionsNode( + convertASTRune(f, opts.OpenBracket), + elems, commas, + convertASTRune(f, opts.CloseBracket), + ) +} + +func convertASTEmpty(f *ast.FileNode, e *ast.EmptyDeclNode) *ast2.EmptyDeclNode { + return ast2.NewEmptyDeclNode(convertASTRune(f, e.Semicolon)) +} + +func convertASTValue(f *ast.FileNode, v ast.ValueNode) ast2.ValueNode { + switch v := v.(type) { + case *ast.IdentNode: + return convertASTIdentToken(f, v) + case *ast.CompoundIdentNode: + return convertASTCompoundIdent(f, v) + case *ast.StringLiteralNode: + return convertASTStringLiteral(f, v) + case *ast.CompoundStringLiteralNode: + return convertASTCompoundStringLiteral(f, v) + case *ast.UintLiteralNode: + return convertASTUintLiteral(f, v) + case *ast.NegativeIntLiteralNode: + return convertASTNegativeIntLiteral(f, v) + case *ast.FloatLiteralNode: + return convertASTFloatLiteral(f, v) + case *ast.SpecialFloatLiteralNode: + return convertASTSpecialFloatLiteral(f, v) + case *ast.SignedFloatLiteralNode: + return convertASTSignedFloatLiteral(f, v) + case *ast.ArrayLiteralNode: + return convertASTArrayLiteral(f, v) + case 
*ast.MessageLiteralNode: + return convertASTMessageLiteral(f, v) + default: + panic(fmt.Sprintf("unrecognized type of ast.ValueNode: %T", v)) + } +} + +func convertASTIdent(f *ast.FileNode, ident ast.IdentValueNode) ast2.IdentValueNode { + switch ident := ident.(type) { + case *ast.IdentNode: + return convertASTIdentToken(f, ident) + case *ast.CompoundIdentNode: + return convertASTCompoundIdent(f, ident) + default: + panic(fmt.Sprintf("unrecognized type of ast.IdentValueNode: %T", ident)) + } +} + +func convertASTIdentToken(f *ast.FileNode, ident *ast.IdentNode) *ast2.IdentNode { + return ast2.NewIdentNode(ident.Val, convertASTTokenInfo(f, ident.Token())) +} + +func convertASTCompoundIdent(f *ast.FileNode, ident *ast.CompoundIdentNode) *ast2.CompoundIdentNode { + var leadingDot *ast2.RuneNode + if ident.LeadingDot != nil { + leadingDot = convertASTRune(f, ident.LeadingDot) + } + components := make([]*ast2.IdentNode, len(ident.Components)) + for i := range ident.Components { + components[i] = convertASTIdentToken(f, ident.Components[i]) + } + dots := make([]*ast2.RuneNode, len(ident.Dots)) + for i := range ident.Dots { + dots[i] = convertASTRune(f, ident.Dots[i]) + } + return ast2.NewCompoundIdentNode(leadingDot, components, dots) +} + +func convertASTString(f *ast.FileNode, str ast.StringValueNode) ast2.StringValueNode { + switch str := str.(type) { + case *ast.StringLiteralNode: + return convertASTStringLiteral(f, str) + case *ast.CompoundStringLiteralNode: + return convertASTCompoundStringLiteral(f, str) + default: + panic(fmt.Sprintf("unrecognized type of ast.StringValueNode: %T", str)) + } +} + +func convertASTStringLiteral(f *ast.FileNode, str *ast.StringLiteralNode) *ast2.StringLiteralNode { + return ast2.NewStringLiteralNode(str.Val, convertASTTokenInfo(f, str.Token())) +} + +func convertASTCompoundStringLiteral(f *ast.FileNode, str *ast.CompoundStringLiteralNode) *ast2.CompoundStringLiteralNode { + children := str.Children() + components := make([]*ast2.StringLiteralNode, len(children)) + for i := range children { + components[i] = convertASTStringLiteral(f, children[i].(*ast.StringLiteralNode)) + } + return ast2.NewCompoundLiteralStringNode(components...) 
+} + +func convertASTInt(f *ast.FileNode, n ast.IntValueNode) ast2.IntValueNode { + switch n := n.(type) { + case *ast.UintLiteralNode: + return convertASTUintLiteral(f, n) + case *ast.NegativeIntLiteralNode: + return convertASTNegativeIntLiteral(f, n) + default: + panic(fmt.Sprintf("unrecognized type of ast.IntValueNode: %T", n)) + } +} + +func convertASTUintLiteral(f *ast.FileNode, n *ast.UintLiteralNode) *ast2.UintLiteralNode { + return ast2.NewUintLiteralNode(n.Val, convertASTTokenInfo(f, n.Token())) +} + +func convertASTNegativeIntLiteral(f *ast.FileNode, n *ast.NegativeIntLiteralNode) *ast2.NegativeIntLiteralNode { + return ast2.NewNegativeIntLiteralNode(convertASTRune(f, n.Minus), convertASTUintLiteral(f, n.Uint)) +} + +func convertASTFloat(f *ast.FileNode, n ast.FloatValueNode) ast2.FloatValueNode { + switch n := n.(type) { + case *ast.FloatLiteralNode: + return convertASTFloatLiteral(f, n) + case *ast.SpecialFloatLiteralNode: + return convertASTSpecialFloatLiteral(f, n) + case *ast.UintLiteralNode: + return convertASTUintLiteral(f, n) + default: + panic(fmt.Sprintf("unrecognized type of ast.FloatValueNode: %T", n)) + } +} + +func convertASTFloatLiteral(f *ast.FileNode, n *ast.FloatLiteralNode) *ast2.FloatLiteralNode { + return ast2.NewFloatLiteralNode(n.Val, convertASTTokenInfo(f, n.Token())) +} + +func convertASTSpecialFloatLiteral(f *ast.FileNode, n *ast.SpecialFloatLiteralNode) *ast2.SpecialFloatLiteralNode { + return ast2.NewSpecialFloatLiteralNode(convertASTKeyword(f, n.KeywordNode)) +} + +func convertASTSignedFloatLiteral(f *ast.FileNode, n *ast.SignedFloatLiteralNode) *ast2.SignedFloatLiteralNode { + return ast2.NewSignedFloatLiteralNode(convertASTRune(f, n.Sign), convertASTFloat(f, n.Float)) +} + +func convertASTArrayLiteral(f *ast.FileNode, ar *ast.ArrayLiteralNode) *ast2.ArrayLiteralNode { + vals := make([]ast2.ValueNode, len(ar.Elements)) + for i := range ar.Elements { + vals[i] = convertASTValue(f, ar.Elements[i]) + } + commas := make([]*ast2.RuneNode, len(ar.Commas)) + for i := range ar.Commas { + commas[i] = convertASTRune(f, ar.Commas[i]) + } + return ast2.NewArrayLiteralNode( + convertASTRune(f, ar.OpenBracket), + vals, commas, + convertASTRune(f, ar.CloseBracket), + ) +} + +func convertASTMessageLiteral(f *ast.FileNode, m *ast.MessageLiteralNode) *ast2.MessageLiteralNode { + fields := make([]*ast2.MessageFieldNode, len(m.Elements)) + for i := range m.Elements { + fields[i] = convertASTMessageLiteralField(f, m.Elements[i]) + } + seps := make([]*ast2.RuneNode, len(m.Seps)) + for i := range m.Seps { + if m.Seps[i] != nil { + seps[i] = convertASTRune(f, m.Seps[i]) + } + } + return ast2.NewMessageLiteralNode( + convertASTRune(f, m.Open), + fields, seps, + convertASTRune(f, m.Close), + ) +} + +func convertASTMessageLiteralField(f *ast.FileNode, fld *ast.MessageFieldNode) *ast2.MessageFieldNode { + var sep *ast2.RuneNode + if fld.Sep != nil { + sep = convertASTRune(f, fld.Sep) + } + return ast2.NewMessageFieldNode( + convertASTFieldReference(f, fld.Name), + sep, + convertASTValue(f, fld.Val), + ) +} + +func convertASTKeyword(f *ast.FileNode, k *ast.KeywordNode) *ast2.KeywordNode { + return ast2.NewKeywordNode(k.Val, convertASTTokenInfo(f, k.Token())) +} + +func convertASTRune(f *ast.FileNode, r *ast.RuneNode) *ast2.RuneNode { + return ast2.NewRuneNode(r.Rune, convertASTTokenInfo(f, r.Token())) +} + +func convertASTTokenInfo(f *ast.FileNode, tok ast.Token) ast2.TokenInfo { + info := f.TokenInfo(tok) + return ast2.TokenInfo{ + PosRange: ast2.PosRange{ + Start: 
info.Start(), + End: info.End(), + }, + RawText: info.RawText(), + LeadingWhitespace: info.LeadingWhitespace(), + LeadingComments: convertASTComments(info.LeadingComments()), + TrailingComments: convertASTComments(info.TrailingComments()), + } +} + +func convertASTComments(comments ast.Comments) []ast2.Comment { + results := make([]ast2.Comment, comments.Len()) + for i := 0; i < comments.Len(); i++ { + cmt := comments.Index(i) + results[i] = ast2.Comment{ + PosRange: ast2.PosRange{ + Start: cmt.Start(), + End: cmt.End(), + }, + LeadingWhitespace: cmt.LeadingWhitespace(), + Text: cmt.RawText(), + } + } + return results +} diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/doc.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/doc.go new file mode 100644 index 00000000..e8902000 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/doc.go @@ -0,0 +1,27 @@ +// Package ast defines types for modeling the AST (Abstract Syntax +// Tree) for the protocol buffers source language. +// +// All nodes of the tree implement the Node interface. Leaf nodes in the +// tree implement TerminalNode and all others implement CompositeNode. +// The root of the tree for a proto source file is a *FileNode. +// +// Comments are not represented as nodes in the tree. Instead, they are +// attached to all terminal nodes in the tree. So, when lexing, comments +// are accumulated until the next non-comment token is found. The AST +// model in this package thus provides access to all comments in the +// file, regardless of location (unlike the SourceCodeInfo present in +// descriptor protos, which are lossy). The comments associated with a +// a non-leaf/non-token node (i.e. a CompositeNode) come from the first +// and last nodes in its sub-tree. +// +// Creation of AST nodes should use the factory functions in this +// package instead of struct literals. Some factory functions accept +// optional arguments, which means the arguments can be nil. If nil +// values are provided for other (non-optional) arguments, the resulting +// node may be invalid and cause panics later in the program. +// +// This package defines numerous interfaces. However, user code should +// not attempt to implement any of them. Most consumers of an AST will +// not work correctly if they encounter concrete implementations other +// than the ones defined in this package. +package ast diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/enum.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/enum.go new file mode 100644 index 00000000..446a6a01 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/enum.go @@ -0,0 +1,154 @@ +package ast + +import "fmt" + +// EnumNode represents an enum declaration. Example: +// +// enum Foo { BAR = 0; BAZ = 1 } +type EnumNode struct { + compositeNode + Keyword *KeywordNode + Name *IdentNode + OpenBrace *RuneNode + Decls []EnumElement + CloseBrace *RuneNode +} + +func (*EnumNode) fileElement() {} +func (*EnumNode) msgElement() {} + +// NewEnumNode creates a new *EnumNode. All arguments must be non-nil. While +// it is technically allowed for decls to be nil or empty, the resulting node +// will not be a valid enum, which must have at least one value. +// - keyword: The token corresponding to the "enum" keyword. +// - name: The token corresponding to the enum's name. +// - openBrace: The token corresponding to the "{" rune that starts the body. +// - decls: All declarations inside the enum body. 
+// - closeBrace: The token corresponding to the "}" rune that ends the body. +func NewEnumNode(keyword *KeywordNode, name *IdentNode, openBrace *RuneNode, decls []EnumElement, closeBrace *RuneNode) *EnumNode { + if keyword == nil { + panic("keyword is nil") + } + if name == nil { + panic("name is nil") + } + if openBrace == nil { + panic("openBrace is nil") + } + if closeBrace == nil { + panic("closeBrace is nil") + } + children := make([]Node, 0, 4+len(decls)) + children = append(children, keyword, name, openBrace) + for _, decl := range decls { + children = append(children, decl) + } + children = append(children, closeBrace) + + for _, decl := range decls { + switch decl.(type) { + case *OptionNode, *EnumValueNode, *ReservedNode, *EmptyDeclNode: + default: + panic(fmt.Sprintf("invalid EnumElement type: %T", decl)) + } + } + + return &EnumNode{ + compositeNode: compositeNode{ + children: children, + }, + Keyword: keyword, + Name: name, + OpenBrace: openBrace, + CloseBrace: closeBrace, + Decls: decls, + } +} + +// EnumElement is an interface implemented by all AST nodes that can +// appear in the body of an enum declaration. +type EnumElement interface { + Node + enumElement() +} + +var _ EnumElement = (*OptionNode)(nil) +var _ EnumElement = (*EnumValueNode)(nil) +var _ EnumElement = (*ReservedNode)(nil) +var _ EnumElement = (*EmptyDeclNode)(nil) + +// EnumValueDeclNode is a placeholder interface for AST nodes that represent +// enum values. This allows NoSourceNode to be used in place of *EnumValueNode +// for some usages. +type EnumValueDeclNode interface { + Node + GetName() Node + GetNumber() Node +} + +var _ EnumValueDeclNode = (*EnumValueNode)(nil) +var _ EnumValueDeclNode = NoSourceNode{} + +// EnumValueNode represents an enum value declaration. Example: +// +// UNSET = 0 [deprecated = true]; +type EnumValueNode struct { + compositeNode + Name *IdentNode + Equals *RuneNode + Number IntValueNode + Options *CompactOptionsNode + Semicolon *RuneNode +} + +func (*EnumValueNode) enumElement() {} + +// NewEnumValueNode creates a new *EnumValueNode. All arguments must be non-nil +// except opts which is only non-nil if the declaration included options. +// - name: The token corresponding to the enum value's name. +// - equals: The token corresponding to the '=' rune after the name. +// - number: The token corresponding to the enum value's number. +// - opts: Optional set of enum value options. +// - semicolon: The token corresponding to the ";" rune that ends the declaration.
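+//
+// For illustration only (a sketch; the name, equals, number, and semicolon
+// nodes are assumed to have been produced by the lexer, and passing nil opts
+// means the value declares no options):
+//
+//	val := NewEnumValueNode(name, equals, number, nil, semicolon)
+//	_ = val.GetName()   // the name node
+//	_ = val.GetNumber() // the number node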
+func NewEnumValueNode(name *IdentNode, equals *RuneNode, number IntValueNode, opts *CompactOptionsNode, semicolon *RuneNode) *EnumValueNode { + if name == nil { + panic("name is nil") + } + if equals == nil { + panic("equals is nil") + } + if number == nil { + panic("number is nil") + } + if semicolon == nil { + panic("semicolon is nil") + } + numChildren := 4 + if opts != nil { + numChildren++ + } + children := make([]Node, 0, numChildren) + children = append(children, name, equals, number) + if opts != nil { + children = append(children, opts) + } + children = append(children, semicolon) + return &EnumValueNode{ + compositeNode: compositeNode{ + children: children, + }, + Name: name, + Equals: equals, + Number: number, + Options: opts, + Semicolon: semicolon, + } +} + +func (e *EnumValueNode) GetName() Node { + return e.Name +} + +func (e *EnumValueNode) GetNumber() Node { + return e.Number +} diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/field.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/field.go new file mode 100644 index 00000000..7ec9391b --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/field.go @@ -0,0 +1,659 @@ +package ast + +import "fmt" + +// FieldDeclNode is a node in the AST that defines a field. This includes +// normal message fields as well as extensions. There are multiple types +// of AST nodes that declare fields: +// - *FieldNode +// - *GroupNode +// - *MapFieldNode +// - *SyntheticMapField +// +// This also allows NoSourceNode to be used in place of one of the above +// for some usages. +type FieldDeclNode interface { + Node + FieldLabel() Node + FieldName() Node + FieldType() Node + FieldTag() Node + FieldExtendee() Node + GetGroupKeyword() Node + GetOptions() *CompactOptionsNode +} + +var _ FieldDeclNode = (*FieldNode)(nil) +var _ FieldDeclNode = (*GroupNode)(nil) +var _ FieldDeclNode = (*MapFieldNode)(nil) +var _ FieldDeclNode = (*SyntheticMapField)(nil) +var _ FieldDeclNode = NoSourceNode{} + +// FieldNode represents a normal field declaration (not groups or maps). It +// can represent extension fields as well as non-extension fields (both inside +// of messages and inside of one-ofs). Example: +// +// optional string foo = 1; +type FieldNode struct { + compositeNode + Label FieldLabel + FldType IdentValueNode + Name *IdentNode + Equals *RuneNode + Tag *UintLiteralNode + Options *CompactOptionsNode + Semicolon *RuneNode + + // This is an up-link to the containing *ExtendNode for fields + // that are defined inside of "extend" blocks. + Extendee *ExtendNode +} + +func (*FieldNode) msgElement() {} +func (*FieldNode) oneOfElement() {} +func (*FieldNode) extendElement() {} + +// NewFieldNode creates a new *FieldNode. The label and options arguments may be +// nil but the others must be non-nil. +// - label: The token corresponding to the label keyword if present ("optional", +// "required", or "repeated"). +// - fieldType: The token corresponding to the field's type. +// - name: The token corresponding to the field's name. +// - equals: The token corresponding to the '=' rune after the name. +// - tag: The token corresponding to the field's tag number. +// - opts: Optional set of field options. +// - semicolon: The token corresponding to the ";" rune that ends the declaration. 
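+//
+// For illustration only (a sketch; fieldType, name, equals, tag, and
+// semicolon are assumed to come from the parser; a proto3 field passes nil
+// for both label and opts):
+//
+//	fld := NewFieldNode(nil, fieldType, name, equals, tag, nil, semicolon)
+//	_ = fld.FieldName() // the name node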
+func NewFieldNode(label *KeywordNode, fieldType IdentValueNode, name *IdentNode, equals *RuneNode, tag *UintLiteralNode, opts *CompactOptionsNode, semicolon *RuneNode) *FieldNode { + if fieldType == nil { + panic("fieldType is nil") + } + if name == nil { + panic("name is nil") + } + if equals == nil { + panic("equals is nil") + } + if tag == nil { + panic("tag is nil") + } + if semicolon == nil { + panic("semicolon is nil") + } + numChildren := 5 + if label != nil { + numChildren++ + } + if opts != nil { + numChildren++ + } + children := make([]Node, 0, numChildren) + if label != nil { + children = append(children, label) + } + children = append(children, fieldType, name, equals, tag) + if opts != nil { + children = append(children, opts) + } + children = append(children, semicolon) + + return &FieldNode{ + compositeNode: compositeNode{ + children: children, + }, + Label: newFieldLabel(label), + FldType: fieldType, + Name: name, + Equals: equals, + Tag: tag, + Options: opts, + Semicolon: semicolon, + } +} + +func (n *FieldNode) FieldLabel() Node { + // proto3 fields and fields inside one-ofs will not have a label and we need + // this check in order to return a nil node -- otherwise we'd return a + // non-nil node that has a nil pointer value in it :/ + if n.Label.KeywordNode == nil { + return nil + } + return n.Label.KeywordNode +} + +func (n *FieldNode) FieldName() Node { + return n.Name +} + +func (n *FieldNode) FieldType() Node { + return n.FldType +} + +func (n *FieldNode) FieldTag() Node { + return n.Tag +} + +func (n *FieldNode) FieldExtendee() Node { + if n.Extendee != nil { + return n.Extendee.Extendee + } + return nil +} + +func (n *FieldNode) GetGroupKeyword() Node { + return nil +} + +func (n *FieldNode) GetOptions() *CompactOptionsNode { + return n.Options +} + +// FieldLabel represents the label of a field, which indicates its cardinality +// (i.e. whether it is optional, required, or repeated). +type FieldLabel struct { + *KeywordNode + Repeated bool + Required bool +} + +func newFieldLabel(lbl *KeywordNode) FieldLabel { + repeated, required := false, false + if lbl != nil { + repeated = lbl.Val == "repeated" + required = lbl.Val == "required" + } + return FieldLabel{ + KeywordNode: lbl, + Repeated: repeated, + Required: required, + } +} + +// IsPresent returns true if a label keyword was present in the declaration +// and false if it was absent. +func (f *FieldLabel) IsPresent() bool { + return f.KeywordNode != nil +} + +// GroupNode represents a group declaration, which doubles as a field and inline +// message declaration. It can represent extension fields as well as +// non-extension fields (both inside of messages and inside of one-ofs). +// Example: +// +// optional group Key = 4 { +// optional uint64 id = 1; +// optional string name = 2; +// } +type GroupNode struct { + compositeNode + Label FieldLabel + Keyword *KeywordNode + Name *IdentNode + Equals *RuneNode + Tag *UintLiteralNode + Options *CompactOptionsNode + MessageBody + + // This is an up-link to the containing *ExtendNode for groups + // that are defined inside of "extend" blocks. + Extendee *ExtendNode +} + +func (*GroupNode) msgElement() {} +func (*GroupNode) oneOfElement() {} +func (*GroupNode) extendElement() {} + +// NewGroupNode creates a new *GroupNode. The label and options arguments may be +// nil but the others must be non-nil. +// - label: The token corresponding to the label keyword if present ("optional", +// "required", or "repeated"). 
+// - keyword: The token corresponding to the "group" keyword. +// - name: The token corresponding to the field's name. +// - equals: The token corresponding to the '=' rune after the name. +// - tag: The token corresponding to the field's tag number. +// - opts: Optional set of field options. +// - openBrace: The token corresponding to the "{" rune that starts the body. +// - decls: All declarations inside the group body. +// - closeBrace: The token corresponding to the "}" rune that ends the body. +func NewGroupNode(label *KeywordNode, keyword *KeywordNode, name *IdentNode, equals *RuneNode, tag *UintLiteralNode, opts *CompactOptionsNode, openBrace *RuneNode, decls []MessageElement, closeBrace *RuneNode) *GroupNode { + if keyword == nil { + panic("fieldType is nil") + } + if name == nil { + panic("name is nil") + } + if equals == nil { + panic("equals is nil") + } + if tag == nil { + panic("tag is nil") + } + if openBrace == nil { + panic("openBrace is nil") + } + if closeBrace == nil { + panic("closeBrace is nil") + } + numChildren := 6 + len(decls) + if label != nil { + numChildren++ + } + if opts != nil { + numChildren++ + } + children := make([]Node, 0, numChildren) + if label != nil { + children = append(children, label) + } + children = append(children, keyword, name, equals, tag) + if opts != nil { + children = append(children, opts) + } + children = append(children, openBrace) + for _, decl := range decls { + children = append(children, decl) + } + children = append(children, closeBrace) + + ret := &GroupNode{ + compositeNode: compositeNode{ + children: children, + }, + Label: newFieldLabel(label), + Keyword: keyword, + Name: name, + Equals: equals, + Tag: tag, + Options: opts, + } + populateMessageBody(&ret.MessageBody, openBrace, decls, closeBrace) + return ret +} + +func (n *GroupNode) FieldLabel() Node { + if n.Label.KeywordNode == nil { + // return nil interface to indicate absence, not a typed nil + return nil + } + return n.Label.KeywordNode +} + +func (n *GroupNode) FieldName() Node { + return n.Name +} + +func (n *GroupNode) FieldType() Node { + return n.Keyword +} + +func (n *GroupNode) FieldTag() Node { + return n.Tag +} + +func (n *GroupNode) FieldExtendee() Node { + if n.Extendee != nil { + return n.Extendee.Extendee + } + return nil +} + +func (n *GroupNode) GetGroupKeyword() Node { + return n.Keyword +} + +func (n *GroupNode) GetOptions() *CompactOptionsNode { + return n.Options +} + +func (n *GroupNode) MessageName() Node { + return n.Name +} + +// OneOfDeclNode is a node in the AST that defines a oneof. There are +// multiple types of AST nodes that declare oneofs: +// - *OneOfNode +// - *SyntheticOneOf +// +// This also allows NoSourceNode to be used in place of one of the above +// for some usages. +type OneOfDeclNode interface { + Node + OneOfName() Node +} + +// OneOfNode represents a one-of declaration. Example: +// +// oneof query { +// string by_name = 2; +// Type by_type = 3; +// Address by_address = 4; +// Labels by_label = 5; +// } +type OneOfNode struct { + compositeNode + Keyword *KeywordNode + Name *IdentNode + OpenBrace *RuneNode + Decls []OneOfElement + CloseBrace *RuneNode +} + +func (*OneOfNode) msgElement() {} + +// NewOneOfNode creates a new *OneOfNode. All arguments must be non-nil. While +// it is technically allowed for decls to be nil or empty, the resulting node +// will not be a valid oneof, which must have at least one field. +// - keyword: The token corresponding to the "oneof" keyword. 
+// - name: The token corresponding to the oneof's name. +// - openBrace: The token corresponding to the "{" rune that starts the body. +// - decls: All declarations inside the oneof body. +// - closeBrace: The token corresponding to the "}" rune that ends the body. +func NewOneOfNode(keyword *KeywordNode, name *IdentNode, openBrace *RuneNode, decls []OneOfElement, closeBrace *RuneNode) *OneOfNode { + if keyword == nil { + panic("keyword is nil") + } + if name == nil { + panic("name is nil") + } + if openBrace == nil { + panic("openBrace is nil") + } + if closeBrace == nil { + panic("closeBrace is nil") + } + children := make([]Node, 0, 4+len(decls)) + children = append(children, keyword, name, openBrace) + for _, decl := range decls { + children = append(children, decl) + } + children = append(children, closeBrace) + + for _, decl := range decls { + switch decl := decl.(type) { + case *OptionNode, *FieldNode, *GroupNode, *EmptyDeclNode: + default: + panic(fmt.Sprintf("invalid OneOfElement type: %T", decl)) + } + } + + return &OneOfNode{ + compositeNode: compositeNode{ + children: children, + }, + Keyword: keyword, + Name: name, + OpenBrace: openBrace, + Decls: decls, + CloseBrace: closeBrace, + } +} + +func (n *OneOfNode) OneOfName() Node { + return n.Name +} + +// SyntheticOneOf is not an actual node in the AST but a synthetic node +// that implements OneOfDeclNode. These are used to represent the implicit +// oneof declarations that enclose "proto3 optional" fields. +type SyntheticOneOf struct { + Field *FieldNode +} + +// NewSyntheticOneOf creates a new *SyntheticOneOf that corresponds to the +// given proto3 optional field. +func NewSyntheticOneOf(field *FieldNode) *SyntheticOneOf { + return &SyntheticOneOf{Field: field} +} + +func (n *SyntheticOneOf) Start() *SourcePos { + return n.Field.Start() +} + +func (n *SyntheticOneOf) End() *SourcePos { + return n.Field.End() +} + +func (n *SyntheticOneOf) LeadingComments() []Comment { + return nil +} + +func (n *SyntheticOneOf) TrailingComments() []Comment { + return nil +} + +func (n *SyntheticOneOf) OneOfName() Node { + return n.Field.FieldName() +} + +// OneOfElement is an interface implemented by all AST nodes that can +// appear in the body of a oneof declaration. +type OneOfElement interface { + Node + oneOfElement() +} + +var _ OneOfElement = (*OptionNode)(nil) +var _ OneOfElement = (*FieldNode)(nil) +var _ OneOfElement = (*GroupNode)(nil) +var _ OneOfElement = (*EmptyDeclNode)(nil) + +// MapTypeNode represents the type declaration for a map field. It defines +// both the key and value types for the map. Example: +// +// map<string, string> +type MapTypeNode struct { + compositeNode + Keyword *KeywordNode + OpenAngle *RuneNode + KeyType *IdentNode + Comma *RuneNode + ValueType IdentValueNode + CloseAngle *RuneNode +} + +// NewMapTypeNode creates a new *MapTypeNode. All arguments must be non-nil. +// - keyword: The token corresponding to the "map" keyword. +// - openAngle: The token corresponding to the "<" rune after the keyword. +// - keyType: The token corresponding to the key type for the map. +// - comma: The token corresponding to the "," rune between key and value types. +// - valType: The token corresponding to the value type for the map. +// - closeAngle: The token corresponding to the ">" rune that ends the declaration.
+func NewMapTypeNode(keyword *KeywordNode, openAngle *RuneNode, keyType *IdentNode, comma *RuneNode, valType IdentValueNode, closeAngle *RuneNode) *MapTypeNode { + if keyword == nil { + panic("keyword is nil") + } + if openAngle == nil { + panic("openAngle is nil") + } + if keyType == nil { + panic("keyType is nil") + } + if comma == nil { + panic("comma is nil") + } + if valType == nil { + panic("valType is nil") + } + if closeAngle == nil { + panic("closeAngle is nil") + } + children := []Node{keyword, openAngle, keyType, comma, valType, closeAngle} + return &MapTypeNode{ + compositeNode: compositeNode{ + children: children, + }, + Keyword: keyword, + OpenAngle: openAngle, + KeyType: keyType, + Comma: comma, + ValueType: valType, + CloseAngle: closeAngle, + } +} + +// MapFieldNode represents a map field declaration. Example: +// +// map<string, string> replacements = 3 [deprecated = true]; +type MapFieldNode struct { + compositeNode + MapType *MapTypeNode + Name *IdentNode + Equals *RuneNode + Tag *UintLiteralNode + Options *CompactOptionsNode + Semicolon *RuneNode +} + +func (*MapFieldNode) msgElement() {} + +// NewMapFieldNode creates a new *MapFieldNode. All arguments must be non-nil +// except opts, which may be nil. +// - mapType: The token corresponding to the map type. +// - name: The token corresponding to the field's name. +// - equals: The token corresponding to the '=' rune after the name. +// - tag: The token corresponding to the field's tag number. +// - opts: Optional set of field options. +// - semicolon: The token corresponding to the ";" rune that ends the declaration. +func NewMapFieldNode(mapType *MapTypeNode, name *IdentNode, equals *RuneNode, tag *UintLiteralNode, opts *CompactOptionsNode, semicolon *RuneNode) *MapFieldNode { + if mapType == nil { + panic("mapType is nil") + } + if name == nil { + panic("name is nil") + } + if equals == nil { + panic("equals is nil") + } + if tag == nil { + panic("tag is nil") + } + if semicolon == nil { + panic("semicolon is nil") + } + numChildren := 5 + if opts != nil { + numChildren++ + } + children := make([]Node, 0, numChildren) + children = append(children, mapType, name, equals, tag) + if opts != nil { + children = append(children, opts) + } + children = append(children, semicolon) + + return &MapFieldNode{ + compositeNode: compositeNode{ + children: children, + }, + MapType: mapType, + Name: name, + Equals: equals, + Tag: tag, + Options: opts, + Semicolon: semicolon, + } +} + +func (n *MapFieldNode) FieldLabel() Node { + return nil +} + +func (n *MapFieldNode) FieldName() Node { + return n.Name +} + +func (n *MapFieldNode) FieldType() Node { + return n.MapType +} + +func (n *MapFieldNode) FieldTag() Node { + return n.Tag +} + +func (n *MapFieldNode) FieldExtendee() Node { + return nil +} + +func (n *MapFieldNode) GetGroupKeyword() Node { + return nil +} + +func (n *MapFieldNode) GetOptions() *CompactOptionsNode { + return n.Options +} + +func (n *MapFieldNode) MessageName() Node { + return n.Name +} + +func (n *MapFieldNode) KeyField() *SyntheticMapField { + return NewSyntheticMapField(n.MapType.KeyType, 1) +} + +func (n *MapFieldNode) ValueField() *SyntheticMapField { + return NewSyntheticMapField(n.MapType.ValueType, 2) +} + +// SyntheticMapField is not an actual node in the AST but a synthetic node +// that implements FieldDeclNode. These are used to represent the implicit +// field declarations of the "key" and "value" fields in a map entry.
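+//
+// For example (a sketch; mapFld is assumed to be a *MapFieldNode produced by
+// the parser), the implicit key and value fields are obtained via:
+//
+//	keyFld := mapFld.KeyField()   // synthetic field with tag 1 (the key type)
+//	valFld := mapFld.ValueField() // synthetic field with tag 2 (the value type)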
+type SyntheticMapField struct { + Ident IdentValueNode + Tag *UintLiteralNode +} + +// NewSyntheticMapField creates a new *SyntheticMapField for the given +// identifier (either a key or value type in a map declaration) and tag +// number (1 for key, 2 for value). +func NewSyntheticMapField(ident IdentValueNode, tagNum uint64) *SyntheticMapField { + tag := &UintLiteralNode{ + terminalNode: terminalNode{ + posRange: PosRange{Start: *ident.Start(), End: *ident.End()}, + }, + Val: tagNum, + } + return &SyntheticMapField{Ident: ident, Tag: tag} +} + +func (n *SyntheticMapField) Start() *SourcePos { + return n.Ident.Start() +} + +func (n *SyntheticMapField) End() *SourcePos { + return n.Ident.End() +} + +func (n *SyntheticMapField) LeadingComments() []Comment { + return nil +} + +func (n *SyntheticMapField) TrailingComments() []Comment { + return nil +} + +func (n *SyntheticMapField) FieldLabel() Node { + return n.Ident +} + +func (n *SyntheticMapField) FieldName() Node { + return n.Ident +} + +func (n *SyntheticMapField) FieldType() Node { + return n.Ident +} + +func (n *SyntheticMapField) FieldTag() Node { + return n.Tag +} + +func (n *SyntheticMapField) FieldExtendee() Node { + return nil +} + +func (n *SyntheticMapField) GetGroupKeyword() Node { + return nil +} + +func (n *SyntheticMapField) GetOptions() *CompactOptionsNode { + return nil +} diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/file.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/file.go new file mode 100644 index 00000000..332cb0c3 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/file.go @@ -0,0 +1,236 @@ +package ast + +import "fmt" + +// FileDeclNode is a placeholder interface for AST nodes that represent files. +// This allows NoSourceNode to be used in place of *FileNode for some usages. +type FileDeclNode interface { + Node + GetSyntax() Node +} + +var _ FileDeclNode = (*FileNode)(nil) +var _ FileDeclNode = NoSourceNode{} + +// FileNode is the root of the AST hierarchy. It represents an entire +// protobuf source file. +type FileNode struct { + compositeNode + Syntax *SyntaxNode // nil if file has no syntax declaration + Decls []FileElement + + // TODO: add Edition *EditionNode + + // Any comments that follow the last token in the file. + FinalComments []Comment + // Any whitespace at the end of the file (after the last token or + // last comment in the file). + FinalWhitespace string +} + +// NewFileNode creates a new *FileNode. The syntax parameter is optional. If it +// is absent, it means the file had no syntax declaration. +// +// This function panics if the concrete type of any element of decls is not +// from this package. 
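+//
+// For illustration only (a sketch; syntaxNode and decls are assumed to have
+// been assembled from parsed tokens, and syntax may be nil when the file has
+// no syntax declaration):
+//
+//	file := NewFileNode(syntaxNode, decls)
+//	// file.Syntax is nil when no syntax declaration was present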
+func NewFileNode(syntax *SyntaxNode, decls []FileElement) *FileNode { + numChildren := len(decls) + if syntax != nil { + numChildren++ + } + children := make([]Node, 0, numChildren) + if syntax != nil { + children = append(children, syntax) + } + for _, decl := range decls { + children = append(children, decl) + } + + for _, decl := range decls { + switch decl := decl.(type) { + case *PackageNode, *ImportNode, *OptionNode, *MessageNode, + *EnumNode, *ExtendNode, *ServiceNode, *EmptyDeclNode: + default: + panic(fmt.Sprintf("invalid FileElement type: %T", decl)) + } + } + + return &FileNode{ + compositeNode: compositeNode{ + children: children, + }, + Syntax: syntax, + Decls: decls, + } +} + +func NewEmptyFileNode(filename string) *FileNode { + return &FileNode{ + compositeNode: compositeNode{ + children: []Node{NewNoSourceNode(filename)}, + }, + } +} + +func (f *FileNode) GetSyntax() Node { + return f.Syntax +} + +// FileElement is an interface implemented by all AST nodes that are +// allowed as top-level declarations in the file. +type FileElement interface { + Node + fileElement() +} + +var _ FileElement = (*ImportNode)(nil) +var _ FileElement = (*PackageNode)(nil) +var _ FileElement = (*OptionNode)(nil) +var _ FileElement = (*MessageNode)(nil) +var _ FileElement = (*EnumNode)(nil) +var _ FileElement = (*ExtendNode)(nil) +var _ FileElement = (*ServiceNode)(nil) +var _ FileElement = (*EmptyDeclNode)(nil) + +// SyntaxNode represents a syntax declaration, which if present must be +// the first non-comment content. Example: +// +// syntax = "proto2"; +// +// Files that don't have a syntax node are assumed to use proto2 syntax. +type SyntaxNode struct { + compositeNode + Keyword *KeywordNode + Equals *RuneNode + Syntax StringValueNode + Semicolon *RuneNode +} + +// NewSyntaxNode creates a new *SyntaxNode. All four arguments must be non-nil: +// - keyword: The token corresponding to the "syntax" keyword. +// - equals: The token corresponding to the "=" rune. +// - syntax: The actual syntax value, e.g. "proto2" or "proto3". +// - semicolon: The token corresponding to the ";" rune that ends the declaration. +func NewSyntaxNode(keyword *KeywordNode, equals *RuneNode, syntax StringValueNode, semicolon *RuneNode) *SyntaxNode { + if keyword == nil { + panic("keyword is nil") + } + if equals == nil { + panic("equals is nil") + } + if syntax == nil { + panic("syntax is nil") + } + if semicolon == nil { + panic("semicolon is nil") + } + children := []Node{keyword, equals, syntax, semicolon} + return &SyntaxNode{ + compositeNode: compositeNode{ + children: children, + }, + Keyword: keyword, + Equals: equals, + Syntax: syntax, + Semicolon: semicolon, + } +} + +// ImportNode represents an import statement. Example: +// +// import "google/protobuf/empty.proto"; +type ImportNode struct { + compositeNode + Keyword *KeywordNode + // Optional; if present indicates this is a public import + Public *KeywordNode + // Optional; if present indicates this is a weak import + Weak *KeywordNode + Name StringValueNode + Semicolon *RuneNode +} + +// NewImportNode creates a new *ImportNode. The public and weak arguments are optional +// and only one or the other (or neither) may be specified, not both. When public is +// non-nil, it indicates the "public" keyword in the import statement and means this is +// a public import. When weak is non-nil, it indicates the "weak" keyword in the import +// statement and means this is a weak import. When both are nil, this is a normal import. 
+// The other arguments must be non-nil: +// - keyword: The token corresponding to the "import" keyword. +// - public: The token corresponding to the optional "public" keyword. +// - weak: The token corresponding to the optional "weak" keyword. +// - name: The actual imported file name. +// - semicolon: The token corresponding to the ";" rune that ends the declaration. +func NewImportNode(keyword *KeywordNode, public *KeywordNode, weak *KeywordNode, name StringValueNode, semicolon *RuneNode) *ImportNode { + if keyword == nil { + panic("keyword is nil") + } + if name == nil { + panic("name is nil") + } + if semicolon == nil { + panic("semicolon is nil") + } + numChildren := 3 + if public != nil || weak != nil { + numChildren++ + } + children := make([]Node, 0, numChildren) + children = append(children, keyword) + if public != nil { + children = append(children, public) + } else if weak != nil { + children = append(children, weak) + } + children = append(children, name, semicolon) + + return &ImportNode{ + compositeNode: compositeNode{ + children: children, + }, + Keyword: keyword, + Public: public, + Weak: weak, + Name: name, + Semicolon: semicolon, + } +} + +func (*ImportNode) fileElement() {} + +// PackageNode represents a package declaration. Example: +// +// package foobar.com; +type PackageNode struct { + compositeNode + Keyword *KeywordNode + Name IdentValueNode + Semicolon *RuneNode +} + +func (*PackageNode) fileElement() {} + +// NewPackageNode creates a new *PackageNode. All three arguments must be non-nil: +// - keyword: The token corresponding to the "package" keyword. +// - name: The package name declared for the file. +// - semicolon: The token corresponding to the ";" rune that ends the declaration. +func NewPackageNode(keyword *KeywordNode, name IdentValueNode, semicolon *RuneNode) *PackageNode { + if keyword == nil { + panic("keyword is nil") + } + if name == nil { + panic("name is nil") + } + if semicolon == nil { + panic("semicolon is nil") + } + children := []Node{keyword, name, semicolon} + return &PackageNode{ + compositeNode: compositeNode{ + children: children, + }, + Keyword: keyword, + Name: name, + Semicolon: semicolon, + } +} diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/identifiers.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/identifiers.go new file mode 100644 index 00000000..ed97e973 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/identifiers.go @@ -0,0 +1,134 @@ +package ast + +import ( + "fmt" + "strings" +) + +// Identifier is a possibly-qualified name. This is used to distinguish +// ValueNode values that are references/identifiers vs. those that are +// string literals. +type Identifier string + +// IdentValueNode is an AST node that represents an identifier. +type IdentValueNode interface { + ValueNode + AsIdentifier() Identifier +} + +var _ IdentValueNode = (*IdentNode)(nil) +var _ IdentValueNode = (*CompoundIdentNode)(nil) + +// IdentNode represents a simple, unqualified identifier. These are used to name +// elements declared in a protobuf file or to refer to elements. Example: +// +// foobar +type IdentNode struct { + terminalNode + Val string +} + +// NewIdentNode creates a new *IdentNode. The given val is the identifier text. 
+func NewIdentNode(val string, info TokenInfo) *IdentNode { + return &IdentNode{ + terminalNode: info.asTerminalNode(), + Val: val, + } +} + +func (n *IdentNode) Value() interface{} { + return n.AsIdentifier() +} + +func (n *IdentNode) AsIdentifier() Identifier { + return Identifier(n.Val) +} + +// ToKeyword is used to convert identifiers to keywords. Since keywords are not +// reserved in the protobuf language, they are initially lexed as identifiers +// and then converted to keywords based on context. +func (n *IdentNode) ToKeyword() *KeywordNode { + return (*KeywordNode)(n) +} + +// CompoundIdentNode represents a qualified identifier. A qualified identifier +// has at least one dot and possibly multiple identifier names (all separated by +// dots). If the identifier has a leading dot, then it is a *fully* qualified +// identifier. Example: +// +// .com.foobar.Baz +type CompoundIdentNode struct { + compositeNode + // Optional leading dot, indicating that the identifier is fully qualified. + LeadingDot *RuneNode + Components []*IdentNode + // Dots[0] is the dot after Components[0]. The length of Dots is always + // one less than the length of Components. + Dots []*RuneNode + // The text value of the identifier, with all components and dots + // concatenated. + Val string +} + +// NewCompoundIdentNode creates a *CompoundIdentNode. The leadingDot may be nil. +// The dots arg must have a length that is one less than the length of +// components. The components arg must not be empty. +func NewCompoundIdentNode(leadingDot *RuneNode, components []*IdentNode, dots []*RuneNode) *CompoundIdentNode { + if len(components) == 0 { + panic("must have at least one component") + } + if len(dots) != len(components)-1 { + panic(fmt.Sprintf("%d components requires %d dots, not %d", len(components), len(components)-1, len(dots))) + } + numChildren := len(components)*2 - 1 + if leadingDot != nil { + numChildren++ + } + children := make([]Node, 0, numChildren) + var b strings.Builder + if leadingDot != nil { + children = append(children, leadingDot) + b.WriteRune(leadingDot.Rune) + } + for i, comp := range components { + if i > 0 { + dot := dots[i-1] + children = append(children, dot) + b.WriteRune(dot.Rune) + } + children = append(children, comp) + b.WriteString(comp.Val) + } + return &CompoundIdentNode{ + compositeNode: compositeNode{ + children: children, + }, + LeadingDot: leadingDot, + Components: components, + Dots: dots, + Val: b.String(), + } +} + +func (n *CompoundIdentNode) Value() interface{} { + return n.AsIdentifier() +} + +func (n *CompoundIdentNode) AsIdentifier() Identifier { + return Identifier(n.Val) +} + +// KeywordNode is an AST node that represents a keyword. Keywords are +// like identifiers, but they have special meaning in particular contexts. +// Example: +// +// message +type KeywordNode IdentNode + +// NewKeywordNode creates a new *KeywordNode. The given val is the keyword. +func NewKeywordNode(val string, info TokenInfo) *KeywordNode { + return &KeywordNode{ + terminalNode: info.asTerminalNode(), + Val: val, + } +} diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/message.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/message.go new file mode 100644 index 00000000..c98b0f81 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/message.go @@ -0,0 +1,199 @@ +package ast + +import "fmt" + +// MessageDeclNode is a node in the AST that defines a message type. 
This +// includes normal message fields as well as implicit messages: +// - *MessageNode +// - *GroupNode (the group is a field and inline message type) +// - *MapFieldNode (map fields implicitly define a MapEntry message type) +// +// This also allows NoSourceNode to be used in place of one of the above +// for some usages. +type MessageDeclNode interface { + Node + MessageName() Node +} + +var _ MessageDeclNode = (*MessageNode)(nil) +var _ MessageDeclNode = (*GroupNode)(nil) +var _ MessageDeclNode = (*MapFieldNode)(nil) +var _ MessageDeclNode = NoSourceNode{} + +// MessageNode represents a message declaration. Example: +// +// message Foo { +// string name = 1; +// repeated string labels = 2; +// bytes extra = 3; +// } +type MessageNode struct { + compositeNode + Keyword *KeywordNode + Name *IdentNode + MessageBody +} + +func (*MessageNode) fileElement() {} +func (*MessageNode) msgElement() {} + +// NewMessageNode creates a new *MessageNode. All arguments must be non-nil. +// - keyword: The token corresponding to the "message" keyword. +// - name: The token corresponding to the field's name. +// - openBrace: The token corresponding to the "{" rune that starts the body. +// - decls: All declarations inside the message body. +// - closeBrace: The token corresponding to the "}" rune that ends the body. +func NewMessageNode(keyword *KeywordNode, name *IdentNode, openBrace *RuneNode, decls []MessageElement, closeBrace *RuneNode) *MessageNode { + if keyword == nil { + panic("keyword is nil") + } + if name == nil { + panic("name is nil") + } + if openBrace == nil { + panic("openBrace is nil") + } + if closeBrace == nil { + panic("closeBrace is nil") + } + children := make([]Node, 0, 4+len(decls)) + children = append(children, keyword, name, openBrace) + for _, decl := range decls { + children = append(children, decl) + } + children = append(children, closeBrace) + + ret := &MessageNode{ + compositeNode: compositeNode{ + children: children, + }, + Keyword: keyword, + Name: name, + } + populateMessageBody(&ret.MessageBody, openBrace, decls, closeBrace) + return ret +} + +func (n *MessageNode) MessageName() Node { + return n.Name +} + +// MessageBody represents the body of a message. It is used by both +// MessageNodes and GroupNodes. +type MessageBody struct { + OpenBrace *RuneNode + Decls []MessageElement + CloseBrace *RuneNode +} + +func populateMessageBody(m *MessageBody, openBrace *RuneNode, decls []MessageElement, closeBrace *RuneNode) { + m.OpenBrace = openBrace + m.Decls = decls + for _, decl := range decls { + switch decl.(type) { + case *OptionNode, *FieldNode, *MapFieldNode, *GroupNode, *OneOfNode, + *MessageNode, *EnumNode, *ExtendNode, *ExtensionRangeNode, + *ReservedNode, *EmptyDeclNode: + default: + panic(fmt.Sprintf("invalid MessageElement type: %T", decl)) + } + } + m.CloseBrace = closeBrace +} + +// MessageElement is an interface implemented by all AST nodes that can +// appear in a message body. 
+type MessageElement interface { + Node + msgElement() +} + +var _ MessageElement = (*OptionNode)(nil) +var _ MessageElement = (*FieldNode)(nil) +var _ MessageElement = (*MapFieldNode)(nil) +var _ MessageElement = (*OneOfNode)(nil) +var _ MessageElement = (*GroupNode)(nil) +var _ MessageElement = (*MessageNode)(nil) +var _ MessageElement = (*EnumNode)(nil) +var _ MessageElement = (*ExtendNode)(nil) +var _ MessageElement = (*ExtensionRangeNode)(nil) +var _ MessageElement = (*ReservedNode)(nil) +var _ MessageElement = (*EmptyDeclNode)(nil) + +// ExtendNode represents a declaration of extension fields. Example: +// +// extend google.protobuf.FieldOptions { +// bool redacted = 33333; +// } +type ExtendNode struct { + compositeNode + Keyword *KeywordNode + Extendee IdentValueNode + OpenBrace *RuneNode + Decls []ExtendElement + CloseBrace *RuneNode +} + +func (*ExtendNode) fileElement() {} +func (*ExtendNode) msgElement() {} + +// NewExtendNode creates a new *ExtendNode. All arguments must be non-nil. +// - keyword: The token corresponding to the "extend" keyword. +// - extendee: The token corresponding to the name of the extended message. +// - openBrace: The token corresponding to the "{" rune that starts the body. +// - decls: All declarations inside the message body. +// - closeBrace: The token corresponding to the "}" rune that ends the body. +func NewExtendNode(keyword *KeywordNode, extendee IdentValueNode, openBrace *RuneNode, decls []ExtendElement, closeBrace *RuneNode) *ExtendNode { + if keyword == nil { + panic("keyword is nil") + } + if extendee == nil { + panic("extendee is nil") + } + if openBrace == nil { + panic("openBrace is nil") + } + if closeBrace == nil { + panic("closeBrace is nil") + } + children := make([]Node, 0, 4+len(decls)) + children = append(children, keyword, extendee, openBrace) + for _, decl := range decls { + children = append(children, decl) + } + children = append(children, closeBrace) + + ret := &ExtendNode{ + compositeNode: compositeNode{ + children: children, + }, + Keyword: keyword, + Extendee: extendee, + OpenBrace: openBrace, + Decls: decls, + CloseBrace: closeBrace, + } + for _, decl := range decls { + switch decl := decl.(type) { + case *FieldNode: + decl.Extendee = ret + case *GroupNode: + decl.Extendee = ret + case *EmptyDeclNode: + default: + panic(fmt.Sprintf("invalid ExtendElement type: %T", decl)) + } + } + return ret +} + +// ExtendElement is an interface implemented by all AST nodes that can +// appear in the body of an extends declaration. +type ExtendElement interface { + Node + extendElement() +} + +var _ ExtendElement = (*FieldNode)(nil) +var _ ExtendElement = (*GroupNode)(nil) +var _ ExtendElement = (*EmptyDeclNode)(nil) diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/no_source.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/no_source.go new file mode 100644 index 00000000..44e02b10 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/no_source.go @@ -0,0 +1,103 @@ +package ast + +// UnknownPos is a placeholder position when only the source file +// name is known. +func UnknownPos(filename string) *SourcePos { + return &SourcePos{Filename: filename} +} + +// NoSourceNode is a placeholder AST node that implements numerous +// interfaces in this package. It can be used to represent an AST +// element for a file whose source is not available. +type NoSourceNode struct { + pos *SourcePos +} + +// NewNoSourceNode creates a new NoSourceNode for the given filename. 
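+//
+// For example (a sketch), a placeholder for a file whose source is unavailable:
+//
+//	node := NewNoSourceNode("foo.proto")
+//	pos := node.Start() // the position carries only the file name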
+func NewNoSourceNode(filename string) NoSourceNode { + return NoSourceNode{pos: UnknownPos(filename)} +} + +func (n NoSourceNode) Start() *SourcePos { + return n.pos +} + +func (n NoSourceNode) End() *SourcePos { + return n.pos +} + +func (n NoSourceNode) LeadingComments() []Comment { + return nil +} + +func (n NoSourceNode) TrailingComments() []Comment { + return nil +} + +func (n NoSourceNode) GetSyntax() Node { + return n +} + +func (n NoSourceNode) GetName() Node { + return n +} + +func (n NoSourceNode) GetValue() ValueNode { + return n +} + +func (n NoSourceNode) FieldLabel() Node { + return n +} + +func (n NoSourceNode) FieldName() Node { + return n +} + +func (n NoSourceNode) FieldType() Node { + return n +} + +func (n NoSourceNode) FieldTag() Node { + return n +} + +func (n NoSourceNode) FieldExtendee() Node { + return n +} + +func (n NoSourceNode) GetGroupKeyword() Node { + return n +} + +func (n NoSourceNode) GetOptions() *CompactOptionsNode { + return nil +} + +func (n NoSourceNode) RangeStart() Node { + return n +} + +func (n NoSourceNode) RangeEnd() Node { + return n +} + +func (n NoSourceNode) GetNumber() Node { + return n +} + +func (n NoSourceNode) MessageName() Node { + return n +} + +func (n NoSourceNode) GetInputType() Node { + return n +} + +func (n NoSourceNode) GetOutputType() Node { + return n +} + +func (n NoSourceNode) Value() interface{} { + return nil +} diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/node.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/node.go new file mode 100644 index 00000000..a2a8a3b2 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/node.go @@ -0,0 +1,200 @@ +package ast + +// Node is the interface implemented by all nodes in the AST. It +// provides information about the span of this AST node in terms +// of location in the source file. It also provides information +// about all prior comments (attached as leading comments) and +// optional subsequent comments (attached as trailing comments). +type Node interface { + Start() *SourcePos + End() *SourcePos + LeadingComments() []Comment + TrailingComments() []Comment +} + +// TerminalNode represents a leaf in the AST. These represent +// the tokens/lexemes in the protobuf language. Comments and +// whitespace are accumulated by the lexer and associated with +// the following lexed token. +type TerminalNode interface { + Node + // PopLeadingComment removes the first leading comment from this + // token and returns it. If the node has no leading comments then + // this method will panic. + PopLeadingComment() Comment + // PushTrailingComment appends the given comment to the token's + // trailing comments. + PushTrailingComment(Comment) + // LeadingWhitespace returns any whitespace between the prior comment + // (last leading comment), if any, or prior lexed token and this token. + LeadingWhitespace() string + // RawText returns the raw text of the token as read from the source. + RawText() string +} + +var _ TerminalNode = (*StringLiteralNode)(nil) +var _ TerminalNode = (*UintLiteralNode)(nil) +var _ TerminalNode = (*FloatLiteralNode)(nil) +var _ TerminalNode = (*IdentNode)(nil) +var _ TerminalNode = (*BoolLiteralNode)(nil) +var _ TerminalNode = (*SpecialFloatLiteralNode)(nil) +var _ TerminalNode = (*KeywordNode)(nil) +var _ TerminalNode = (*RuneNode)(nil) + +// TokenInfo represents state accumulated by the lexer to associated with a +// token (aka terminal node). 
+type TokenInfo struct { + // The location of the token in the source file. + PosRange + // The raw text of the token. + RawText string + // Any comments encountered preceding this token. + LeadingComments []Comment + // Any leading whitespace immediately preceding this token. + LeadingWhitespace string + // Any trailing comments following this token. This is usually + // empty as tokens are created by the lexer immediately and + // trailing comments are accounted for afterwards, added using + // the node's PushTrailingComment method. + TrailingComments []Comment +} + +func (t *TokenInfo) asTerminalNode() terminalNode { + return terminalNode{ + posRange: t.PosRange, + leadingComments: t.LeadingComments, + leadingWhitespace: t.LeadingWhitespace, + trailingComments: t.TrailingComments, + raw: t.RawText, + } +} + +// CompositeNode represents any non-terminal node in the tree. These +// are interior or root nodes and have child nodes. +type CompositeNode interface { + Node + // All AST nodes that are immediate children of this one. + Children() []Node +} + +// terminalNode contains book-keeping shared by all TerminalNode +// implementations. It is embedded in all such node types in this +// package. It provides the implementation of the TerminalNode +// interface. +type terminalNode struct { + posRange PosRange + leadingComments []Comment + leadingWhitespace string + trailingComments []Comment + raw string +} + +func (n *terminalNode) Start() *SourcePos { + return &n.posRange.Start +} + +func (n *terminalNode) End() *SourcePos { + return &n.posRange.End +} + +func (n *terminalNode) LeadingComments() []Comment { + return n.leadingComments +} + +func (n *terminalNode) TrailingComments() []Comment { + return n.trailingComments +} + +func (n *terminalNode) PopLeadingComment() Comment { + c := n.leadingComments[0] + n.leadingComments = n.leadingComments[1:] + return c +} + +func (n *terminalNode) PushTrailingComment(c Comment) { + n.trailingComments = append(n.trailingComments, c) +} + +func (n *terminalNode) LeadingWhitespace() string { + return n.leadingWhitespace +} + +func (n *terminalNode) RawText() string { + return n.raw +} + +// compositeNode contains book-keeping shared by all CompositeNode +// implementations. It is embedded in all such node types in this +// package. It provides the implementation of the CompositeNode +// interface. +type compositeNode struct { + children []Node +} + +func (n *compositeNode) Children() []Node { + return n.children +} + +func (n *compositeNode) Start() *SourcePos { + return n.children[0].Start() +} + +func (n *compositeNode) End() *SourcePos { + return n.children[len(n.children)-1].End() +} + +func (n *compositeNode) LeadingComments() []Comment { + return n.children[0].LeadingComments() +} + +func (n *compositeNode) TrailingComments() []Comment { + return n.children[len(n.children)-1].TrailingComments() +} + +// RuneNode represents a single rune in protobuf source. Runes +// are typically collected into tokens, but some runes stand on +// their own, such as punctuation/symbols like commas, semicolons, +// equals signs, open and close symbols (braces, brackets, angles, +// and parentheses), and periods/dots. +type RuneNode struct { + terminalNode + Rune rune +} + +// NewRuneNode creates a new *RuneNode with the given properties. +func NewRuneNode(r rune, info TokenInfo) *RuneNode { + return &RuneNode{ + terminalNode: info.asTerminalNode(), + Rune: r, + } +} + +// EmptyDeclNode represents an empty declaration in protobuf source. 
+// These amount to extra semicolons, with no actual content preceding +// the semicolon. +type EmptyDeclNode struct { + compositeNode + Semicolon *RuneNode +} + +// NewEmptyDeclNode creates a new *EmptyDeclNode. The one argument must +// be non-nil. +func NewEmptyDeclNode(semicolon *RuneNode) *EmptyDeclNode { + if semicolon == nil { + panic("semicolon is nil") + } + return &EmptyDeclNode{ + compositeNode: compositeNode{ + children: []Node{semicolon}, + }, + Semicolon: semicolon, + } +} + +func (e *EmptyDeclNode) fileElement() {} +func (e *EmptyDeclNode) msgElement() {} +func (e *EmptyDeclNode) extendElement() {} +func (e *EmptyDeclNode) oneOfElement() {} +func (e *EmptyDeclNode) enumElement() {} +func (e *EmptyDeclNode) serviceElement() {} +func (e *EmptyDeclNode) methodElement() {} diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/options.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/options.go new file mode 100644 index 00000000..c4ed169c --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/options.go @@ -0,0 +1,361 @@ +package ast + +import "fmt" + +// OptionDeclNode is a placeholder interface for AST nodes that represent +// options. This allows NoSourceNode to be used in place of *OptionNode +// for some usages. +type OptionDeclNode interface { + Node + GetName() Node + GetValue() ValueNode +} + +var _ OptionDeclNode = (*OptionNode)(nil) +var _ OptionDeclNode = NoSourceNode{} + +// OptionNode represents the declaration of a single option for an element. +// It is used both for normal option declarations (start with "option" keyword +// and end with semicolon) and for compact options found in fields, enum values, +// and extension ranges. Example: +// +// option (custom.option) = "foo"; +type OptionNode struct { + compositeNode + Keyword *KeywordNode // absent for compact options + Name *OptionNameNode + Equals *RuneNode + Val ValueNode + Semicolon *RuneNode // absent for compact options +} + +func (e *OptionNode) fileElement() {} +func (e *OptionNode) msgElement() {} +func (e *OptionNode) oneOfElement() {} +func (e *OptionNode) enumElement() {} +func (e *OptionNode) serviceElement() {} +func (e *OptionNode) methodElement() {} + +// NewOptionNode creates a new *OptionNode for a full option declaration (as +// used in files, messages, oneofs, enums, services, and methods). All arguments +// must be non-nil. (Also see NewCompactOptionNode.) +// - keyword: The token corresponding to the "option" keyword. +// - name: The token corresponding to the name of the option. +// - equals: The token corresponding to the "=" rune after the name. +// - val: The token corresponding to the option value. +// - semicolon: The token corresponding to the ";" rune that ends the declaration. +func NewOptionNode(keyword *KeywordNode, name *OptionNameNode, equals *RuneNode, val ValueNode, semicolon *RuneNode) *OptionNode { + if keyword == nil { + panic("keyword is nil") + } + if name == nil { + panic("name is nil") + } + if equals == nil { + panic("equals is nil") + } + if val == nil { + panic("val is nil") + } + if semicolon == nil { + panic("semicolon is nil") + } + children := []Node{keyword, name, equals, val, semicolon} + return &OptionNode{ + compositeNode: compositeNode{ + children: children, + }, + Keyword: keyword, + Name: name, + Equals: equals, + Val: val, + Semicolon: semicolon, + } +} + +// NewCompactOptionNode creates a new *OptionNode for a full compact declaration +// (as used in fields, enum values, and extension ranges). 
All arguments must be +// non-nil. +// - name: The token corresponding to the name of the option. +// - equals: The token corresponding to the "=" rune after the name. +// - val: The token corresponding to the option value. +func NewCompactOptionNode(name *OptionNameNode, equals *RuneNode, val ValueNode) *OptionNode { + if name == nil { + panic("name is nil") + } + if equals == nil { + panic("equals is nil") + } + if val == nil { + panic("val is nil") + } + children := []Node{name, equals, val} + return &OptionNode{ + compositeNode: compositeNode{ + children: children, + }, + Name: name, + Equals: equals, + Val: val, + } +} + +func (n *OptionNode) GetName() Node { + return n.Name +} + +func (n *OptionNode) GetValue() ValueNode { + return n.Val +} + +// OptionNameNode represents an option name or even a traversal through message +// types to name a nested option field. Example: +// +// (foo.bar).baz.(bob) +type OptionNameNode struct { + compositeNode + Parts []*FieldReferenceNode + // Dots represent the separating '.' characters between name parts. The + // length of this slice must be exactly len(Parts)-1, each item in Parts + // having a corresponding item in this slice *except the last* (since a + // trailing dot is not allowed). + // + // These do *not* include dots that are inside of an extension name. For + // example: (foo.bar).baz.(bob) has three parts: + // 1. (foo.bar) - an extension name + // 2. baz - a regular field in foo.bar + // 3. (bob) - an extension field in baz + // Note that the dot in foo.bar will thus not be present in Dots but is + // instead in Parts[0]. + Dots []*RuneNode +} + +// NewOptionNameNode creates a new *OptionNameNode. The dots arg must have a +// length that is one less than the length of parts. The parts arg must not be +// empty. +func NewOptionNameNode(parts []*FieldReferenceNode, dots []*RuneNode) *OptionNameNode { + if len(parts) == 0 { + panic("must have at least one part") + } + if len(dots) != len(parts)-1 { + panic(fmt.Sprintf("%d parts requires %d dots, not %d", len(parts), len(parts)-1, len(dots))) + } + children := make([]Node, 0, len(parts)*2-1) + for i, part := range parts { + if part == nil { + panic(fmt.Sprintf("parts[%d] is nil", i)) + } + if i > 0 { + if dots[i-1] == nil { + panic(fmt.Sprintf("dots[%d] is nil", i-1)) + } + children = append(children, dots[i-1]) + } + children = append(children, part) + } + return &OptionNameNode{ + compositeNode: compositeNode{ + children: children, + }, + Parts: parts, + Dots: dots, + } +} + +// FieldReferenceNode is a reference to a field name. It can indicate a regular +// field (simple unqualified name), an extension field (possibly-qualified name +// that is enclosed either in brackets or parentheses), or an "any" type +// reference (a type URL in the form "server.host/fully.qualified.Name" that is +// enclosed in brackets). +// +// Extension names are used in options to refer to custom options (which are +// actually extensions), in which case the name is enclosed in parentheses "(" +// and ")". They can also be used to refer to extension fields of options. +// +// Extension names are also used in message literals to set extension fields, +// in which case the name is enclosed in square brackets "[" and "]". +// +// "Any" type references can only be used in message literals, and are not +// allowed in option names. They are always enclosed in square brackets. 
An +// "any" type reference is distinguished from an extension name by the presence +// of a slash, which must be present in an "any" type reference and must be +// absent in an extension name. +// +// Examples: +// +// foobar +// (foo.bar) +// [foo.bar] +// [type.googleapis.com/foo.bar] +type FieldReferenceNode struct { + compositeNode + Open *RuneNode // only present for extension names and "any" type references + + // only present for "any" type references + UrlPrefix IdentValueNode + Slash *RuneNode + + Name IdentValueNode + + Close *RuneNode // only present for extension names and "any" type references +} + +// NewFieldReferenceNode creates a new *FieldReferenceNode for a regular field. +// The name arg must not be nil. +func NewFieldReferenceNode(name *IdentNode) *FieldReferenceNode { + if name == nil { + panic("name is nil") + } + children := []Node{name} + return &FieldReferenceNode{ + compositeNode: compositeNode{ + children: children, + }, + Name: name, + } +} + +// NewExtensionFieldReferenceNode creates a new *FieldReferenceNode for an +// extension field. All args must be non-nil. The openSym and closeSym runes +// should be "(" and ")" or "[" and "]". +func NewExtensionFieldReferenceNode(openSym *RuneNode, name IdentValueNode, closeSym *RuneNode) *FieldReferenceNode { + if name == nil { + panic("name is nil") + } + if openSym == nil { + panic("openSym is nil") + } + if closeSym == nil { + panic("closeSym is nil") + } + children := []Node{openSym, name, closeSym} + return &FieldReferenceNode{ + compositeNode: compositeNode{ + children: children, + }, + Open: openSym, + Name: name, + Close: closeSym, + } +} + +// NewAnyTypeReferenceNode creates a new *FieldReferenceNode for an "any" +// type reference. All args must be non-nil. The openSym and closeSym runes +// should be "[" and "]". The slashSym run should be "/". +func NewAnyTypeReferenceNode(openSym *RuneNode, urlPrefix IdentValueNode, slashSym *RuneNode, name IdentValueNode, closeSym *RuneNode) *FieldReferenceNode { + if name == nil { + panic("name is nil") + } + if openSym == nil { + panic("openSym is nil") + } + if closeSym == nil { + panic("closeSym is nil") + } + if urlPrefix == nil { + panic("urlPrefix is nil") + } + if slashSym == nil { + panic("slashSym is nil") + } + children := []Node{openSym, urlPrefix, slashSym, name, closeSym} + return &FieldReferenceNode{ + compositeNode: compositeNode{ + children: children, + }, + Open: openSym, + UrlPrefix: urlPrefix, + Slash: slashSym, + Name: name, + Close: closeSym, + } +} + +// IsExtension reports if this is an extension name or not (e.g. enclosed in +// punctuation, such as parentheses or brackets). +func (a *FieldReferenceNode) IsExtension() bool { + return a.Open != nil && a.Slash == nil +} + +// IsExtension reports if this is an extension name or not (e.g. enclosed in +// punctuation, such as parentheses or brackets). +func (a *FieldReferenceNode) IsAnyTypeReference() bool { + return a.Slash != nil +} + +func (a *FieldReferenceNode) Value() string { + if a.Open != nil { + if a.Slash != nil { + return string(a.Open.Rune) + string(a.UrlPrefix.AsIdentifier()) + string(a.Slash.Rune) + string(a.Name.AsIdentifier()) + string(a.Close.Rune) + } + return string(a.Open.Rune) + string(a.Name.AsIdentifier()) + string(a.Close.Rune) + } else { + return string(a.Name.AsIdentifier()) + } +} + +// CompactOptionsNode represents a compact options declaration, as used with +// fields, enum values, and extension ranges. 
Example: +// +// [deprecated = true, json_name = "foo_bar"] +type CompactOptionsNode struct { + compositeNode + OpenBracket *RuneNode + Options []*OptionNode + // Commas represent the separating ',' characters between options. The + // length of this slice must be exactly len(Options)-1, with each item + // in Options having a corresponding item in this slice *except the last* + // (since a trailing comma is not allowed). + Commas []*RuneNode + CloseBracket *RuneNode +} + +// NewCompactOptionsNode creates a *CompactOptionsNode. All args must be +// non-nil. The commas arg must have a length that is one less than the +// length of opts. The opts arg must not be empty. +func NewCompactOptionsNode(openBracket *RuneNode, opts []*OptionNode, commas []*RuneNode, closeBracket *RuneNode) *CompactOptionsNode { + if openBracket == nil { + panic("openBracket is nil") + } + if closeBracket == nil { + panic("closeBracket is nil") + } + if len(opts) == 0 { + panic("must have at least one part") + } + if len(commas) != len(opts)-1 { + panic(fmt.Sprintf("%d opts requires %d commas, not %d", len(opts), len(opts)-1, len(commas))) + } + children := make([]Node, 0, len(opts)*2+1) + children = append(children, openBracket) + for i, opt := range opts { + if i > 0 { + if commas[i-1] == nil { + panic(fmt.Sprintf("commas[%d] is nil", i-1)) + } + children = append(children, commas[i-1]) + } + if opt == nil { + panic(fmt.Sprintf("opts[%d] is nil", i)) + } + children = append(children, opt) + } + children = append(children, closeBracket) + + return &CompactOptionsNode{ + compositeNode: compositeNode{ + children: children, + }, + OpenBracket: openBracket, + Options: opts, + Commas: commas, + CloseBracket: closeBracket, + } +} + +func (e *CompactOptionsNode) GetElements() []*OptionNode { + if e == nil { + return nil + } + return e.Options +} diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/print.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/print.go new file mode 100644 index 00000000..271200c7 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/print.go @@ -0,0 +1,86 @@ +package ast + +import "io" + +// Print prints the given AST node to the given output. This operation +// basically walks the AST and, for each TerminalNode, prints the node's +// leading comments, leading whitespace, the node's raw text, and then +// any trailing comments. If the given node is a *FileNode, it will then +// also print the file's FinalComments and FinalWhitespace. 
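// Illustrative usage sketch (not part of the vendored file): the Print
// function defined below walks the AST and writes out each token's leading
// comments, whitespace, and raw text, so it can round-trip a parsed file back
// to its original source text. The *FileNode is assumed to come from an
// earlier parse step that is not shown here.
package example

import (
	"os"

	"github.com/jhump/protoreflect/desc/protoparse/ast"
)

func dumpFile(file *ast.FileNode) error {
	// Emits the file's tokens plus its final comments and whitespace.
	return ast.Print(os.Stdout, file)
}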
+func Print(w io.Writer, node Node) error { + sw, ok := w.(stringWriter) + if !ok { + sw = &strWriter{w} + } + var err error + Walk(node, func(n Node) (bool, VisitFunc) { + if err != nil { + return false, nil + } + token, ok := n.(TerminalNode) + if !ok { + return true, nil + } + + err = printComments(sw, token.LeadingComments()) + if err != nil { + return false, nil + } + + _, err = sw.WriteString(token.LeadingWhitespace()) + if err != nil { + return false, nil + } + + _, err = sw.WriteString(token.RawText()) + if err != nil { + return false, nil + } + + err = printComments(sw, token.TrailingComments()) + return false, nil + }) + if err != nil { + return err + } + + if file, ok := node.(*FileNode); ok { + err = printComments(sw, file.FinalComments) + if err != nil { + return err + } + _, err = sw.WriteString(file.FinalWhitespace) + return err + } + + return nil +} + +func printComments(sw stringWriter, comments []Comment) error { + for _, comment := range comments { + if _, err := sw.WriteString(comment.LeadingWhitespace); err != nil { + return err + } + if _, err := sw.WriteString(comment.Text); err != nil { + return err + } + } + return nil +} + +// many io.Writer impls also provide a string-based method +type stringWriter interface { + WriteString(s string) (n int, err error) +} + +// adapter, in case the given writer does NOT provide a string-based method +type strWriter struct { + io.Writer +} + +func (s *strWriter) WriteString(str string) (int, error) { + if str == "" { + return 0, nil + } + return s.Write([]byte(str)) +} diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/ranges.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/ranges.go new file mode 100644 index 00000000..cdd78baf --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/ranges.go @@ -0,0 +1,305 @@ +package ast + +import "fmt" + +// ExtensionRangeNode represents an extension range declaration in an extendable +// message. Example: +// +// extensions 100 to max; +type ExtensionRangeNode struct { + compositeNode + Keyword *KeywordNode + Ranges []*RangeNode + // Commas represent the separating ',' characters between ranges. The + // length of this slice must be exactly len(Ranges)-1, each item in Ranges + // having a corresponding item in this slice *except the last* (since a + // trailing comma is not allowed). + Commas []*RuneNode + Options *CompactOptionsNode + Semicolon *RuneNode +} + +func (e *ExtensionRangeNode) msgElement() {} + +// NewExtensionRangeNode creates a new *ExtensionRangeNode. All args must be +// non-nil except opts, which may be nil. +// - keyword: The token corresponding to the "extends" keyword. +// - ranges: One or more range expressions. +// - commas: Tokens that represent the "," runes that delimit the range expressions. +// The length of commas must be one less than the length of ranges. +// - opts: The node corresponding to options that apply to each of the ranges. +// - semicolon The token corresponding to the ";" rune that ends the declaration. 
+func NewExtensionRangeNode(keyword *KeywordNode, ranges []*RangeNode, commas []*RuneNode, opts *CompactOptionsNode, semicolon *RuneNode) *ExtensionRangeNode { + if keyword == nil { + panic("keyword is nil") + } + if semicolon == nil { + panic("semicolon is nil") + } + if len(ranges) == 0 { + panic("must have at least one range") + } + if len(commas) != len(ranges)-1 { + panic(fmt.Sprintf("%d ranges requires %d commas, not %d", len(ranges), len(ranges)-1, len(commas))) + } + numChildren := len(ranges)*2 + 1 + if opts != nil { + numChildren++ + } + children := make([]Node, 0, numChildren) + children = append(children, keyword) + for i, rng := range ranges { + if i > 0 { + if commas[i-1] == nil { + panic(fmt.Sprintf("commas[%d] is nil", i-1)) + } + children = append(children, commas[i-1]) + } + if rng == nil { + panic(fmt.Sprintf("ranges[%d] is nil", i)) + } + children = append(children, rng) + } + if opts != nil { + children = append(children, opts) + } + children = append(children, semicolon) + return &ExtensionRangeNode{ + compositeNode: compositeNode{ + children: children, + }, + Keyword: keyword, + Ranges: ranges, + Commas: commas, + Options: opts, + Semicolon: semicolon, + } +} + +// RangeDeclNode is a placeholder interface for AST nodes that represent +// numeric values. This allows NoSourceNode to be used in place of *RangeNode +// for some usages. +type RangeDeclNode interface { + Node + RangeStart() Node + RangeEnd() Node +} + +var _ RangeDeclNode = (*RangeNode)(nil) +var _ RangeDeclNode = NoSourceNode{} + +// RangeNode represents a range expression, used in both extension ranges and +// reserved ranges. Example: +// +// 1000 to max +type RangeNode struct { + compositeNode + StartVal IntValueNode + // if To is non-nil, then exactly one of EndVal or Max must also be non-nil + To *KeywordNode + // EndVal and Max are mutually exclusive + EndVal IntValueNode + Max *KeywordNode +} + +// NewRangeNode creates a new *RangeNode. The start argument must be non-nil. +// The to argument represents the "to" keyword. If present (i.e. if it is non-nil), +// then so must be exactly one of end or max. If max is non-nil, it indicates a +// "100 to max" style range. But if end is non-nil, the end of the range is a +// literal, such as "100 to 200". 
+func NewRangeNode(start IntValueNode, to *KeywordNode, end IntValueNode, max *KeywordNode) *RangeNode { + if start == nil { + panic("start is nil") + } + numChildren := 1 + if to != nil { + if end == nil && max == nil { + panic("to is not nil, but end and max both are") + } + if end != nil && max != nil { + panic("end and max cannot be both non-nil") + } + numChildren = 3 + } else { + if end != nil { + panic("to is nil, but end is not") + } + if max != nil { + panic("to is nil, but max is not") + } + } + children := make([]Node, 0, numChildren) + children = append(children, start) + if to != nil { + children = append(children, to) + if end != nil { + children = append(children, end) + } else { + children = append(children, max) + } + } + return &RangeNode{ + compositeNode: compositeNode{ + children: children, + }, + StartVal: start, + To: to, + EndVal: end, + Max: max, + } +} + +func (n *RangeNode) RangeStart() Node { + return n.StartVal +} + +func (n *RangeNode) RangeEnd() Node { + if n.Max != nil { + return n.Max + } + if n.EndVal != nil { + return n.EndVal + } + return n.StartVal +} + +func (n *RangeNode) StartValue() interface{} { + return n.StartVal.Value() +} + +func (n *RangeNode) StartValueAsInt32(min, max int32) (int32, bool) { + return AsInt32(n.StartVal, min, max) +} + +func (n *RangeNode) EndValue() interface{} { + if n.EndVal == nil { + return nil + } + return n.EndVal.Value() +} + +func (n *RangeNode) EndValueAsInt32(min, max int32) (int32, bool) { + if n.Max != nil { + return max, true + } + if n.EndVal == nil { + return n.StartValueAsInt32(min, max) + } + return AsInt32(n.EndVal, min, max) +} + +// ReservedNode represents reserved declaration, which can be used to reserve +// either names or numbers. Examples: +// +// reserved 1, 10-12, 15; +// reserved "foo", "bar", "baz"; +type ReservedNode struct { + compositeNode + Keyword *KeywordNode + // If non-empty, this node represents reserved ranges and Names will be empty. + Ranges []*RangeNode + // If non-empty, this node represents reserved names and Ranges will be empty. + Names []StringValueNode + // Commas represent the separating ',' characters between options. The + // length of this slice must be exactly len(Ranges)-1 or len(Names)-1, depending + // on whether this node represents reserved ranges or reserved names. Each item + // in Ranges or Names has a corresponding item in this slice *except the last* + // (since a trailing comma is not allowed). + Commas []*RuneNode + Semicolon *RuneNode +} + +func (*ReservedNode) msgElement() {} +func (*ReservedNode) enumElement() {} + +// NewReservedRangesNode creates a new *ReservedNode that represents reserved +// numeric ranges. All args must be non-nil. +// - keyword: The token corresponding to the "reserved" keyword. +// - ranges: One or more range expressions. +// - commas: Tokens that represent the "," runes that delimit the range expressions. +// The length of commas must be one less than the length of ranges. +// - semicolon The token corresponding to the ";" rune that ends the declaration. 
+func NewReservedRangesNode(keyword *KeywordNode, ranges []*RangeNode, commas []*RuneNode, semicolon *RuneNode) *ReservedNode { + if keyword == nil { + panic("keyword is nil") + } + if semicolon == nil { + panic("semicolon is nil") + } + if len(ranges) == 0 { + panic("must have at least one range") + } + if len(commas) != len(ranges)-1 { + panic(fmt.Sprintf("%d ranges requires %d commas, not %d", len(ranges), len(ranges)-1, len(commas))) + } + children := make([]Node, 0, len(ranges)*2+1) + children = append(children, keyword) + for i, rng := range ranges { + if i > 0 { + if commas[i-1] == nil { + panic(fmt.Sprintf("commas[%d] is nil", i-1)) + } + children = append(children, commas[i-1]) + } + if rng == nil { + panic(fmt.Sprintf("ranges[%d] is nil", i)) + } + children = append(children, rng) + } + children = append(children, semicolon) + return &ReservedNode{ + compositeNode: compositeNode{ + children: children, + }, + Keyword: keyword, + Ranges: ranges, + Commas: commas, + Semicolon: semicolon, + } +} + +// NewReservedNamesNode creates a new *ReservedNode that represents reserved +// names. All args must be non-nil. +// - keyword: The token corresponding to the "reserved" keyword. +// - names: One or more names. +// - commas: Tokens that represent the "," runes that delimit the names. +// The length of commas must be one less than the length of names. +// - semicolon The token corresponding to the ";" rune that ends the declaration. +func NewReservedNamesNode(keyword *KeywordNode, names []StringValueNode, commas []*RuneNode, semicolon *RuneNode) *ReservedNode { + if keyword == nil { + panic("keyword is nil") + } + if semicolon == nil { + panic("semicolon is nil") + } + if len(names) == 0 { + panic("must have at least one name") + } + if len(commas) != len(names)-1 { + panic(fmt.Sprintf("%d names requires %d commas, not %d", len(names), len(names)-1, len(commas))) + } + children := make([]Node, 0, len(names)*2+1) + children = append(children, keyword) + for i, name := range names { + if i > 0 { + if commas[i-1] == nil { + panic(fmt.Sprintf("commas[%d] is nil", i-1)) + } + children = append(children, commas[i-1]) + } + if name == nil { + panic(fmt.Sprintf("names[%d] is nil", i)) + } + children = append(children, name) + } + children = append(children, semicolon) + return &ReservedNode{ + compositeNode: compositeNode{ + children: children, + }, + Keyword: keyword, + Names: names, + Commas: commas, + Semicolon: semicolon, + } +} diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/service.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/service.go new file mode 100644 index 00000000..739b29cc --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/service.go @@ -0,0 +1,273 @@ +package ast + +import "fmt" + +// ServiceNode represents a service declaration. Example: +// +// service Foo { +// rpc Bar (Baz) returns (Bob); +// rpc Frobnitz (stream Parts) returns (Gyzmeaux); +// } +type ServiceNode struct { + compositeNode + Keyword *KeywordNode + Name *IdentNode + OpenBrace *RuneNode + Decls []ServiceElement + CloseBrace *RuneNode +} + +func (*ServiceNode) fileElement() {} + +// NewServiceNode creates a new *ServiceNode. All arguments must be non-nil. +// - keyword: The token corresponding to the "service" keyword. +// - name: The token corresponding to the service's name. +// - openBrace: The token corresponding to the "{" rune that starts the body. +// - decls: All declarations inside the service body. 
+// - closeBrace: The token corresponding to the "}" rune that ends the body. +func NewServiceNode(keyword *KeywordNode, name *IdentNode, openBrace *RuneNode, decls []ServiceElement, closeBrace *RuneNode) *ServiceNode { + if keyword == nil { + panic("keyword is nil") + } + if name == nil { + panic("name is nil") + } + if openBrace == nil { + panic("openBrace is nil") + } + if closeBrace == nil { + panic("closeBrace is nil") + } + children := make([]Node, 0, 4+len(decls)) + children = append(children, keyword, name, openBrace) + for _, decl := range decls { + children = append(children, decl) + } + children = append(children, closeBrace) + + for _, decl := range decls { + switch decl := decl.(type) { + case *OptionNode, *RPCNode, *EmptyDeclNode: + default: + panic(fmt.Sprintf("invalid ServiceElement type: %T", decl)) + } + } + + return &ServiceNode{ + compositeNode: compositeNode{ + children: children, + }, + Keyword: keyword, + Name: name, + OpenBrace: openBrace, + Decls: decls, + CloseBrace: closeBrace, + } +} + +// ServiceElement is an interface implemented by all AST nodes that can +// appear in the body of a service declaration. +type ServiceElement interface { + Node + serviceElement() +} + +var _ ServiceElement = (*OptionNode)(nil) +var _ ServiceElement = (*RPCNode)(nil) +var _ ServiceElement = (*EmptyDeclNode)(nil) + +// RPCDeclNode is a placeholder interface for AST nodes that represent RPC +// declarations. This allows NoSourceNode to be used in place of *RPCNode +// for some usages. +type RPCDeclNode interface { + Node + GetInputType() Node + GetOutputType() Node +} + +var _ RPCDeclNode = (*RPCNode)(nil) +var _ RPCDeclNode = NoSourceNode{} + +// RPCNode represents an RPC declaration. Example: +// +// rpc Foo (Bar) returns (Baz); +type RPCNode struct { + compositeNode + Keyword *KeywordNode + Name *IdentNode + Input *RPCTypeNode + Returns *KeywordNode + Output *RPCTypeNode + Semicolon *RuneNode + OpenBrace *RuneNode + Decls []RPCElement + CloseBrace *RuneNode +} + +func (n *RPCNode) serviceElement() {} + +// NewRPCNode creates a new *RPCNode with no body. All arguments must be non-nil. +// - keyword: The token corresponding to the "rpc" keyword. +// - name: The token corresponding to the RPC's name. +// - input: The token corresponding to the RPC input message type. +// - returns: The token corresponding to the "returns" keyword that precedes the output type. +// - output: The token corresponding to the RPC output message type. +// - semicolon: The token corresponding to the ";" rune that ends the declaration. +func NewRPCNode(keyword *KeywordNode, name *IdentNode, input *RPCTypeNode, returns *KeywordNode, output *RPCTypeNode, semicolon *RuneNode) *RPCNode { + if keyword == nil { + panic("keyword is nil") + } + if name == nil { + panic("name is nil") + } + if input == nil { + panic("input is nil") + } + if returns == nil { + panic("returns is nil") + } + if output == nil { + panic("output is nil") + } + if semicolon == nil { + panic("semicolon is nil") + } + children := []Node{keyword, name, input, returns, output, semicolon} + return &RPCNode{ + compositeNode: compositeNode{ + children: children, + }, + Keyword: keyword, + Name: name, + Input: input, + Returns: returns, + Output: output, + Semicolon: semicolon, + } +} + +// NewRPCNodeWithBody creates a new *RPCNode that includes a body (and possibly +// options). All arguments must be non-nil. +// - keyword: The token corresponding to the "rpc" keyword. +// - name: The token corresponding to the RPC's name. 
+// - input: The token corresponding to the RPC input message type. +// - returns: The token corresponding to the "returns" keyword that precedes the output type. +// - output: The token corresponding to the RPC output message type. +// - openBrace: The token corresponding to the "{" rune that starts the body. +// - decls: All declarations inside the RPC body. +// - closeBrace: The token corresponding to the "}" rune that ends the body. +func NewRPCNodeWithBody(keyword *KeywordNode, name *IdentNode, input *RPCTypeNode, returns *KeywordNode, output *RPCTypeNode, openBrace *RuneNode, decls []RPCElement, closeBrace *RuneNode) *RPCNode { + if keyword == nil { + panic("keyword is nil") + } + if name == nil { + panic("name is nil") + } + if input == nil { + panic("input is nil") + } + if returns == nil { + panic("returns is nil") + } + if output == nil { + panic("output is nil") + } + if openBrace == nil { + panic("openBrace is nil") + } + if closeBrace == nil { + panic("closeBrace is nil") + } + children := make([]Node, 0, 7+len(decls)) + children = append(children, keyword, name, input, returns, output, openBrace) + for _, decl := range decls { + children = append(children, decl) + } + children = append(children, closeBrace) + + for _, decl := range decls { + switch decl := decl.(type) { + case *OptionNode, *EmptyDeclNode: + default: + panic(fmt.Sprintf("invalid RPCElement type: %T", decl)) + } + } + + return &RPCNode{ + compositeNode: compositeNode{ + children: children, + }, + Keyword: keyword, + Name: name, + Input: input, + Returns: returns, + Output: output, + OpenBrace: openBrace, + Decls: decls, + CloseBrace: closeBrace, + } +} + +func (n *RPCNode) GetInputType() Node { + return n.Input.MessageType +} + +func (n *RPCNode) GetOutputType() Node { + return n.Output.MessageType +} + +// RPCElement is an interface implemented by all AST nodes that can +// appear in the body of an rpc declaration (aka method). +type RPCElement interface { + Node + methodElement() +} + +var _ RPCElement = (*OptionNode)(nil) +var _ RPCElement = (*EmptyDeclNode)(nil) + +// RPCTypeNode represents the declaration of a request or response type for an +// RPC. Example: +// +// (stream foo.Bar) +type RPCTypeNode struct { + compositeNode + OpenParen *RuneNode + Stream *KeywordNode + MessageType IdentValueNode + CloseParen *RuneNode +} + +// NewRPCTypeNode creates a new *RPCTypeNode. All arguments must be non-nil +// except stream, which may be nil. +// - openParen: The token corresponding to the "(" rune that starts the declaration. +// - stream: The token corresponding to the "stream" keyword or nil if not present. +// - msgType: The token corresponding to the message type's name. +// - closeParen: The token corresponding to the ")" rune that ends the declaration. 
+func NewRPCTypeNode(openParen *RuneNode, stream *KeywordNode, msgType IdentValueNode, closeParen *RuneNode) *RPCTypeNode { + if openParen == nil { + panic("openParen is nil") + } + if msgType == nil { + panic("msgType is nil") + } + if closeParen == nil { + panic("closeParen is nil") + } + var children []Node + if stream != nil { + children = []Node{openParen, stream, msgType, closeParen} + } else { + children = []Node{openParen, msgType, closeParen} + } + + return &RPCTypeNode{ + compositeNode: compositeNode{ + children: children, + }, + OpenParen: openParen, + Stream: stream, + MessageType: msgType, + CloseParen: closeParen, + } +} diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/source_pos.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/source_pos.go new file mode 100644 index 00000000..8ab09c6f --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/source_pos.go @@ -0,0 +1,29 @@ +package ast + +import ( + "github.com/bufbuild/protocompile/ast" +) + +// SourcePos identifies a location in a proto source file. +type SourcePos = ast.SourcePos + +// PosRange is a range of positions in a source file that indicates +// the span of some region of source, such as a single token or +// a sub-tree of the AST. +type PosRange struct { + Start, End SourcePos +} + +// Comment represents a single comment in a source file. It indicates +// the position of the comment and its contents. +type Comment struct { + // The location of the comment in the source file. + PosRange + // Any whitespace between the prior lexical element (either a token + // or other comment) and this comment. + LeadingWhitespace string + // The text of the comment, including any "//" or "/*" and "*/" + // symbols at the start and end. Single-line comments will include + // the trailing newline rune in Text. + Text string +} diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/values.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/values.go new file mode 100644 index 00000000..91f5a354 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/values.go @@ -0,0 +1,575 @@ +package ast + +import ( + "fmt" + "math" + "strings" +) + +// ValueNode is an AST node that represents a literal value. +// +// It also includes references (e.g. IdentifierValueNode), which can be +// used as values in some contexts, such as describing the default value +// for a field, which can refer to an enum value. +// +// This also allows NoSourceNode to be used in place of a real value node +// for some usages. +type ValueNode interface { + Node + // Value returns a Go representation of the value. For scalars, this + // will be a string, int64, uint64, float64, or bool. This could also + // be an Identifier (e.g. IdentValueNodes). 
It can also be a composite + // literal: + // * For array literals, the type returned will be []ValueNode + // * For message literals, the type returned will be []*MessageFieldNode + Value() interface{} +} + +var _ ValueNode = (*IdentNode)(nil) +var _ ValueNode = (*CompoundIdentNode)(nil) +var _ ValueNode = (*StringLiteralNode)(nil) +var _ ValueNode = (*CompoundStringLiteralNode)(nil) +var _ ValueNode = (*UintLiteralNode)(nil) +var _ ValueNode = (*PositiveUintLiteralNode)(nil) +var _ ValueNode = (*NegativeIntLiteralNode)(nil) +var _ ValueNode = (*FloatLiteralNode)(nil) +var _ ValueNode = (*SpecialFloatLiteralNode)(nil) +var _ ValueNode = (*SignedFloatLiteralNode)(nil) +var _ ValueNode = (*BoolLiteralNode)(nil) +var _ ValueNode = (*ArrayLiteralNode)(nil) +var _ ValueNode = (*MessageLiteralNode)(nil) +var _ ValueNode = NoSourceNode{} + +// StringValueNode is an AST node that represents a string literal. +// Such a node can be a single literal (*StringLiteralNode) or a +// concatenation of multiple literals (*CompoundStringLiteralNode). +type StringValueNode interface { + ValueNode + AsString() string +} + +var _ StringValueNode = (*StringLiteralNode)(nil) +var _ StringValueNode = (*CompoundStringLiteralNode)(nil) + +// StringLiteralNode represents a simple string literal. Example: +// +// "proto2" +type StringLiteralNode struct { + terminalNode + // Val is the actual string value that the literal indicates. + Val string +} + +// NewStringLiteralNode creates a new *StringLiteralNode with the given val. +func NewStringLiteralNode(val string, info TokenInfo) *StringLiteralNode { + return &StringLiteralNode{ + terminalNode: info.asTerminalNode(), + Val: val, + } +} + +func (n *StringLiteralNode) Value() interface{} { + return n.AsString() +} + +func (n *StringLiteralNode) AsString() string { + return n.Val +} + +// CompoundStringLiteralNode represents a compound string literal, which is +// the concatenaton of adjacent string literals. Example: +// +// "this " "is" " all one " "string" +type CompoundStringLiteralNode struct { + compositeNode + Val string +} + +// NewCompoundLiteralStringNode creates a new *CompoundStringLiteralNode that +// consists of the given string components. The components argument may not be +// empty. +func NewCompoundLiteralStringNode(components ...*StringLiteralNode) *CompoundStringLiteralNode { + if len(components) == 0 { + panic("must have at least one component") + } + children := make([]Node, len(components)) + var b strings.Builder + for i, comp := range components { + children[i] = comp + b.WriteString(comp.Val) + } + return &CompoundStringLiteralNode{ + compositeNode: compositeNode{ + children: children, + }, + Val: b.String(), + } +} + +func (n *CompoundStringLiteralNode) Value() interface{} { + return n.AsString() +} + +func (n *CompoundStringLiteralNode) AsString() string { + return n.Val +} + +// IntValueNode is an AST node that represents an integer literal. If +// an integer literal is too large for an int64 (or uint64 for +// positive literals), it is represented instead by a FloatValueNode. +type IntValueNode interface { + ValueNode + AsInt64() (int64, bool) + AsUint64() (uint64, bool) +} + +// AsInt32 range checks the given int value and returns its value is +// in the range or 0, false if it is outside the range. 
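// Illustrative sketch (not part of the vendored file) of the AsInt32 helper
// defined below, assuming the same ast import as the earlier sketch. The
// function name and bounds are hypothetical; they range-check an integer
// literal against protobuf's field-number range, 1 through 536870911.
func checkFieldTag(n ast.IntValueNode) (int32, bool) {
	// Returns (0, false) when the literal is outside the allowed range.
	return ast.AsInt32(n, 1, 536870911)
}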
+func AsInt32(n IntValueNode, min, max int32) (int32, bool) { + i, ok := n.AsInt64() + if !ok { + return 0, false + } + if i < int64(min) || i > int64(max) { + return 0, false + } + return int32(i), true +} + +var _ IntValueNode = (*UintLiteralNode)(nil) +var _ IntValueNode = (*PositiveUintLiteralNode)(nil) +var _ IntValueNode = (*NegativeIntLiteralNode)(nil) + +// UintLiteralNode represents a simple integer literal with no sign character. +type UintLiteralNode struct { + terminalNode + // Val is the numeric value indicated by the literal + Val uint64 +} + +// NewUintLiteralNode creates a new *UintLiteralNode with the given val. +func NewUintLiteralNode(val uint64, info TokenInfo) *UintLiteralNode { + return &UintLiteralNode{ + terminalNode: info.asTerminalNode(), + Val: val, + } +} + +func (n *UintLiteralNode) Value() interface{} { + return n.Val +} + +func (n *UintLiteralNode) AsInt64() (int64, bool) { + if n.Val > math.MaxInt64 { + return 0, false + } + return int64(n.Val), true +} + +func (n *UintLiteralNode) AsUint64() (uint64, bool) { + return n.Val, true +} + +func (n *UintLiteralNode) AsFloat() float64 { + return float64(n.Val) +} + +// PositiveUintLiteralNode represents an integer literal with a positive (+) sign. +// +// Deprecated: A valid AST will not contain a node of this type. The Protobuf +// language does not actually allow a numeric literal to have a leading "+" +// positive sign. +type PositiveUintLiteralNode struct { + compositeNode + Plus *RuneNode + Uint *UintLiteralNode + Val uint64 +} + +// NewPositiveUintLiteralNode creates a new *PositiveUintLiteralNode. Both +// arguments must be non-nil. +// +// Deprecated: The ast.PositiveUintLiteralNode node type should not be used. +func NewPositiveUintLiteralNode(sign *RuneNode, i *UintLiteralNode) *PositiveUintLiteralNode { + if sign == nil { + panic("sign is nil") + } + if i == nil { + panic("i is nil") + } + children := []Node{sign, i} + return &PositiveUintLiteralNode{ + compositeNode: compositeNode{ + children: children, + }, + Plus: sign, + Uint: i, + Val: i.Val, + } +} + +func (n *PositiveUintLiteralNode) Value() interface{} { + return n.Val +} + +func (n *PositiveUintLiteralNode) AsInt64() (int64, bool) { + if n.Val > math.MaxInt64 { + return 0, false + } + return int64(n.Val), true +} + +func (n *PositiveUintLiteralNode) AsUint64() (uint64, bool) { + return n.Val, true +} + +// NegativeIntLiteralNode represents an integer literal with a negative (-) sign. +type NegativeIntLiteralNode struct { + compositeNode + Minus *RuneNode + Uint *UintLiteralNode + Val int64 +} + +// NewNegativeIntLiteralNode creates a new *NegativeIntLiteralNode. Both +// arguments must be non-nil. +func NewNegativeIntLiteralNode(sign *RuneNode, i *UintLiteralNode) *NegativeIntLiteralNode { + if sign == nil { + panic("sign is nil") + } + if i == nil { + panic("i is nil") + } + children := []Node{sign, i} + return &NegativeIntLiteralNode{ + compositeNode: compositeNode{ + children: children, + }, + Minus: sign, + Uint: i, + Val: -int64(i.Val), + } +} + +func (n *NegativeIntLiteralNode) Value() interface{} { + return n.Val +} + +func (n *NegativeIntLiteralNode) AsInt64() (int64, bool) { + return n.Val, true +} + +func (n *NegativeIntLiteralNode) AsUint64() (uint64, bool) { + if n.Val < 0 { + return 0, false + } + return uint64(n.Val), true +} + +// FloatValueNode is an AST node that represents a numeric literal with +// a floating point, in scientific notation, or too large to fit in an +// int64 or uint64. 
+type FloatValueNode interface { + ValueNode + AsFloat() float64 +} + +var _ FloatValueNode = (*FloatLiteralNode)(nil) +var _ FloatValueNode = (*SpecialFloatLiteralNode)(nil) +var _ FloatValueNode = (*UintLiteralNode)(nil) + +// FloatLiteralNode represents a floating point numeric literal. +type FloatLiteralNode struct { + terminalNode + // Val is the numeric value indicated by the literal + Val float64 +} + +// NewFloatLiteralNode creates a new *FloatLiteralNode with the given val. +func NewFloatLiteralNode(val float64, info TokenInfo) *FloatLiteralNode { + return &FloatLiteralNode{ + terminalNode: info.asTerminalNode(), + Val: val, + } +} + +func (n *FloatLiteralNode) Value() interface{} { + return n.AsFloat() +} + +func (n *FloatLiteralNode) AsFloat() float64 { + return n.Val +} + +// SpecialFloatLiteralNode represents a special floating point numeric literal +// for "inf" and "nan" values. +type SpecialFloatLiteralNode struct { + *KeywordNode + Val float64 +} + +// NewSpecialFloatLiteralNode returns a new *SpecialFloatLiteralNode for the +// given keyword, which must be "inf" or "nan". +func NewSpecialFloatLiteralNode(name *KeywordNode) *SpecialFloatLiteralNode { + var f float64 + if name.Val == "inf" { + f = math.Inf(1) + } else { + f = math.NaN() + } + return &SpecialFloatLiteralNode{ + KeywordNode: name, + Val: f, + } +} + +func (n *SpecialFloatLiteralNode) Value() interface{} { + return n.AsFloat() +} + +func (n *SpecialFloatLiteralNode) AsFloat() float64 { + return n.Val +} + +// SignedFloatLiteralNode represents a signed floating point number. +type SignedFloatLiteralNode struct { + compositeNode + Sign *RuneNode + Float FloatValueNode + Val float64 +} + +// NewSignedFloatLiteralNode creates a new *SignedFloatLiteralNode. Both +// arguments must be non-nil. +func NewSignedFloatLiteralNode(sign *RuneNode, f FloatValueNode) *SignedFloatLiteralNode { + if sign == nil { + panic("sign is nil") + } + if f == nil { + panic("f is nil") + } + children := []Node{sign, f} + val := f.AsFloat() + if sign.Rune == '-' { + val = -val + } + return &SignedFloatLiteralNode{ + compositeNode: compositeNode{ + children: children, + }, + Sign: sign, + Float: f, + Val: val, + } +} + +func (n *SignedFloatLiteralNode) Value() interface{} { + return n.Val +} + +func (n *SignedFloatLiteralNode) AsFloat() float64 { + return n.Val +} + +// BoolLiteralNode represents a boolean literal. +// +// Deprecated: The AST uses IdentNode for boolean literals, where the +// identifier value is "true" or "false". This is required because an +// identifier "true" is not necessarily a boolean value as it could also +// be an enum value named "true" (ditto for "false"). +type BoolLiteralNode struct { + *KeywordNode + Val bool +} + +// NewBoolLiteralNode returns a new *BoolLiteralNode for the given keyword, +// which must be "true" or "false". +func NewBoolLiteralNode(name *KeywordNode) *BoolLiteralNode { + return &BoolLiteralNode{ + KeywordNode: name, + Val: name.Val == "true", + } +} + +func (n *BoolLiteralNode) Value() interface{} { + return n.Val +} + +// ArrayLiteralNode represents an array literal, which is only allowed inside of +// a MessageLiteralNode, to indicate values for a repeated field. Example: +// +// ["foo", "bar", "baz"] +type ArrayLiteralNode struct { + compositeNode + OpenBracket *RuneNode + Elements []ValueNode + // Commas represent the separating ',' characters between elements. 
The + // length of this slice must be exactly len(Elements)-1, with each item + // in Elements having a corresponding item in this slice *except the last* + // (since a trailing comma is not allowed). + Commas []*RuneNode + CloseBracket *RuneNode +} + +// NewArrayLiteralNode creates a new *ArrayLiteralNode. The openBracket and +// closeBracket args must be non-nil and represent the "[" and "]" runes that +// surround the array values. The given commas arg must have a length that is +// one less than the length of the vals arg. However, vals may be empty, in +// which case commas must also be empty. +func NewArrayLiteralNode(openBracket *RuneNode, vals []ValueNode, commas []*RuneNode, closeBracket *RuneNode) *ArrayLiteralNode { + if openBracket == nil { + panic("openBracket is nil") + } + if closeBracket == nil { + panic("closeBracket is nil") + } + if len(vals) == 0 && len(commas) != 0 { + panic("vals is empty but commas is not") + } + if len(vals) > 0 && len(commas) != len(vals)-1 { + panic(fmt.Sprintf("%d vals requires %d commas, not %d", len(vals), len(vals)-1, len(commas))) + } + children := make([]Node, 0, len(vals)*2+1) + children = append(children, openBracket) + for i, val := range vals { + if i > 0 { + if commas[i-1] == nil { + panic(fmt.Sprintf("commas[%d] is nil", i-1)) + } + children = append(children, commas[i-1]) + } + if val == nil { + panic(fmt.Sprintf("vals[%d] is nil", i)) + } + children = append(children, val) + } + children = append(children, closeBracket) + + return &ArrayLiteralNode{ + compositeNode: compositeNode{ + children: children, + }, + OpenBracket: openBracket, + Elements: vals, + Commas: commas, + CloseBracket: closeBracket, + } +} + +func (n *ArrayLiteralNode) Value() interface{} { + return n.Elements +} + +// MessageLiteralNode represents a message literal, which is compatible with the +// protobuf text format and can be used for custom options with message types. +// Example: +// +// { foo:1 foo:2 foo:3 bar: } +type MessageLiteralNode struct { + compositeNode + Open *RuneNode // should be '{' or '<' + Elements []*MessageFieldNode + // Separator characters between elements, which can be either ',' + // or ';' if present. This slice must be exactly len(Elements) in + // length, with each item in Elements having one corresponding item + // in Seps. Separators in message literals are optional, so a given + // item in this slice may be nil to indicate absence of a separator. + Seps []*RuneNode + Close *RuneNode // should be '}' or '>', depending on Open +} + +// NewMessageLiteralNode creates a new *MessageLiteralNode. The openSym and +// closeSym runes must not be nil and should be "{" and "}" or "<" and ">". +// +// Unlike separators (dots and commas) used for other AST nodes that represent +// a list of elements, the seps arg must be the SAME length as vals, and it may +// contain nil values to indicate absence of a separator (in fact, it could be +// all nils). 
+func NewMessageLiteralNode(openSym *RuneNode, vals []*MessageFieldNode, seps []*RuneNode, closeSym *RuneNode) *MessageLiteralNode { + if openSym == nil { + panic("openSym is nil") + } + if closeSym == nil { + panic("closeSym is nil") + } + if len(seps) != len(vals) { + panic(fmt.Sprintf("%d vals requires %d commas, not %d", len(vals), len(vals), len(seps))) + } + numChildren := len(vals) + 2 + for _, sep := range seps { + if sep != nil { + numChildren++ + } + } + children := make([]Node, 0, numChildren) + children = append(children, openSym) + for i, val := range vals { + if val == nil { + panic(fmt.Sprintf("vals[%d] is nil", i)) + } + children = append(children, val) + if seps[i] != nil { + children = append(children, seps[i]) + } + } + children = append(children, closeSym) + + return &MessageLiteralNode{ + compositeNode: compositeNode{ + children: children, + }, + Open: openSym, + Elements: vals, + Seps: seps, + Close: closeSym, + } +} + +func (n *MessageLiteralNode) Value() interface{} { + return n.Elements +} + +// MessageFieldNode represents a single field (name and value) inside of a +// message literal. Example: +// +// foo:"bar" +type MessageFieldNode struct { + compositeNode + Name *FieldReferenceNode + // Sep represents the ':' separator between the name and value. If + // the value is a message literal (and thus starts with '<' or '{') + // or an array literal (starting with '[') then the separator is + // optional, and thus may be nil. + Sep *RuneNode + Val ValueNode +} + +// NewMessageFieldNode creates a new *MessageFieldNode. All args except sep +// must be non-nil. +func NewMessageFieldNode(name *FieldReferenceNode, sep *RuneNode, val ValueNode) *MessageFieldNode { + if name == nil { + panic("name is nil") + } + if val == nil { + panic("val is nil") + } + numChildren := 2 + if sep != nil { + numChildren++ + } + children := make([]Node, 0, numChildren) + children = append(children, name) + if sep != nil { + children = append(children, sep) + } + children = append(children, val) + + return &MessageFieldNode{ + compositeNode: compositeNode{ + children: children, + }, + Name: name, + Sep: sep, + Val: val, + } +} diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/walk.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/walk.go new file mode 100644 index 00000000..e9b85064 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/walk.go @@ -0,0 +1,497 @@ +package ast + +// VisitFunc is used to examine a node in the AST when walking the tree. +// It returns true or false as to whether or not the descendants of the +// given node should be visited. If it returns true, the node's children +// will be visisted; if false, they will not. When returning true, it +// can also return a new VisitFunc to use for the children. If it returns +// (true, nil), then the current function will be re-used when visiting +// the children. +// +// See also the Visitor type. +type VisitFunc func(Node) (bool, VisitFunc) + +// Walk conducts a walk of the AST rooted at the given root using the +// given function. It performs a "pre-order traversal", visiting a +// given AST node before it visits that node's descendants. 
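// Illustrative sketch (not part of the vendored file) of using Walk with a
// VisitFunc, assuming the same ast import as the first sketch: collect the
// raw text of every token in the tree, in pre-order.
func collectTokens(root ast.Node) []string {
	var tokens []string
	ast.Walk(root, func(n ast.Node) (bool, ast.VisitFunc) {
		if tok, ok := n.(ast.TerminalNode); ok {
			tokens = append(tokens, tok.RawText())
			// Terminal nodes are leaves, so there is nothing to descend into.
			return false, nil
		}
		// Keep visiting children with the same VisitFunc.
		return true, nil
	})
	return tokens
}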
+func Walk(root Node, v VisitFunc) { + ok, next := v(root) + if !ok { + return + } + if next != nil { + v = next + } + if comp, ok := root.(CompositeNode); ok { + for _, child := range comp.Children() { + Walk(child, v) + } + } +} + +// Visitor provides a technique for walking the AST that allows for +// dynamic dispatch, where a particular function is invoked based on +// the runtime type of the argument. +// +// It consists of a number of functions, each of which matches a +// concrete Node type. It also includes functions for sub-interfaces +// of Node and the Node interface itself, to be used as broader +// "catch all" functions. +// +// To use a visitor, provide a function for the node types of +// interest and pass visitor.Visit as the function to a Walk operation. +// When a node is traversed, the corresponding function field of +// the visitor is invoked, if not nil. If the function for a node's +// concrete type is nil/absent but the function for an interface it +// implements is present, that interface visit function will be used +// instead. If no matching function is present, the traversal will +// continue. If a matching function is present, it will be invoked +// and its response determines how the traversal proceeds. +// +// Every visit function returns (bool, *Visitor). If the bool returned +// is false, the visited node's descendants are skipped. Otherwise, +// traversal will continue into the node's children. If the returned +// visitor is nil, the current visitor will continue to be used. But +// if a non-nil visitor is returned, it will be used to visit the +// node's children. +type Visitor struct { + // VisitFileNode is invoked when visiting a *FileNode in the AST. + VisitFileNode func(*FileNode) (bool, *Visitor) + // VisitSyntaxNode is invoked when visiting a *SyntaxNode in the AST. + VisitSyntaxNode func(*SyntaxNode) (bool, *Visitor) + + // TODO: add VisitEditionNode + + // VisitPackageNode is invoked when visiting a *PackageNode in the AST. + VisitPackageNode func(*PackageNode) (bool, *Visitor) + // VisitImportNode is invoked when visiting an *ImportNode in the AST. + VisitImportNode func(*ImportNode) (bool, *Visitor) + // VisitOptionNode is invoked when visiting an *OptionNode in the AST. + VisitOptionNode func(*OptionNode) (bool, *Visitor) + // VisitOptionNameNode is invoked when visiting an *OptionNameNode in the AST. + VisitOptionNameNode func(*OptionNameNode) (bool, *Visitor) + // VisitFieldReferenceNode is invoked when visiting a *FieldReferenceNode in the AST. + VisitFieldReferenceNode func(*FieldReferenceNode) (bool, *Visitor) + // VisitCompactOptionsNode is invoked when visiting a *CompactOptionsNode in the AST. + VisitCompactOptionsNode func(*CompactOptionsNode) (bool, *Visitor) + // VisitMessageNode is invoked when visiting a *MessageNode in the AST. + VisitMessageNode func(*MessageNode) (bool, *Visitor) + // VisitExtendNode is invoked when visiting an *ExtendNode in the AST. + VisitExtendNode func(*ExtendNode) (bool, *Visitor) + // VisitExtensionRangeNode is invoked when visiting an *ExtensionRangeNode in the AST. + VisitExtensionRangeNode func(*ExtensionRangeNode) (bool, *Visitor) + // VisitReservedNode is invoked when visiting a *ReservedNode in the AST. + VisitReservedNode func(*ReservedNode) (bool, *Visitor) + // VisitRangeNode is invoked when visiting a *RangeNode in the AST. + VisitRangeNode func(*RangeNode) (bool, *Visitor) + // VisitFieldNode is invoked when visiting a *FieldNode in the AST. 
+ VisitFieldNode func(*FieldNode) (bool, *Visitor) + // VisitGroupNode is invoked when visiting a *GroupNode in the AST. + VisitGroupNode func(*GroupNode) (bool, *Visitor) + // VisitMapFieldNode is invoked when visiting a *MapFieldNode in the AST. + VisitMapFieldNode func(*MapFieldNode) (bool, *Visitor) + // VisitMapTypeNode is invoked when visiting a *MapTypeNode in the AST. + VisitMapTypeNode func(*MapTypeNode) (bool, *Visitor) + // VisitOneOfNode is invoked when visiting a *OneOfNode in the AST. + VisitOneOfNode func(*OneOfNode) (bool, *Visitor) + // VisitEnumNode is invoked when visiting an *EnumNode in the AST. + VisitEnumNode func(*EnumNode) (bool, *Visitor) + // VisitEnumValueNode is invoked when visiting an *EnumValueNode in the AST. + VisitEnumValueNode func(*EnumValueNode) (bool, *Visitor) + // VisitServiceNode is invoked when visiting a *ServiceNode in the AST. + VisitServiceNode func(*ServiceNode) (bool, *Visitor) + // VisitRPCNode is invoked when visiting an *RPCNode in the AST. + VisitRPCNode func(*RPCNode) (bool, *Visitor) + // VisitRPCTypeNode is invoked when visiting an *RPCTypeNode in the AST. + VisitRPCTypeNode func(*RPCTypeNode) (bool, *Visitor) + // VisitIdentNode is invoked when visiting an *IdentNode in the AST. + VisitIdentNode func(*IdentNode) (bool, *Visitor) + // VisitCompoundIdentNode is invoked when visiting a *CompoundIdentNode in the AST. + VisitCompoundIdentNode func(*CompoundIdentNode) (bool, *Visitor) + // VisitStringLiteralNode is invoked when visiting a *StringLiteralNode in the AST. + VisitStringLiteralNode func(*StringLiteralNode) (bool, *Visitor) + // VisitCompoundStringLiteralNode is invoked when visiting a *CompoundStringLiteralNode in the AST. + VisitCompoundStringLiteralNode func(*CompoundStringLiteralNode) (bool, *Visitor) + // VisitUintLiteralNode is invoked when visiting a *UintLiteralNode in the AST. + VisitUintLiteralNode func(*UintLiteralNode) (bool, *Visitor) + // VisitPositiveUintLiteralNode is invoked when visiting a *PositiveUintLiteralNode in the AST. + // + // Deprecated: this node type will not actually be present in an AST. + VisitPositiveUintLiteralNode func(*PositiveUintLiteralNode) (bool, *Visitor) + // VisitNegativeIntLiteralNode is invoked when visiting a *NegativeIntLiteralNode in the AST. + VisitNegativeIntLiteralNode func(*NegativeIntLiteralNode) (bool, *Visitor) + // VisitFloatLiteralNode is invoked when visiting a *FloatLiteralNode in the AST. + VisitFloatLiteralNode func(*FloatLiteralNode) (bool, *Visitor) + // VisitSpecialFloatLiteralNode is invoked when visiting a *SpecialFloatLiteralNode in the AST. + VisitSpecialFloatLiteralNode func(*SpecialFloatLiteralNode) (bool, *Visitor) + // VisitSignedFloatLiteralNode is invoked when visiting a *SignedFloatLiteralNode in the AST. + VisitSignedFloatLiteralNode func(*SignedFloatLiteralNode) (bool, *Visitor) + // VisitBoolLiteralNode is invoked when visiting a *BoolLiteralNode in the AST. + VisitBoolLiteralNode func(*BoolLiteralNode) (bool, *Visitor) + // VisitArrayLiteralNode is invoked when visiting an *ArrayLiteralNode in the AST. + VisitArrayLiteralNode func(*ArrayLiteralNode) (bool, *Visitor) + // VisitMessageLiteralNode is invoked when visiting a *MessageLiteralNode in the AST. + VisitMessageLiteralNode func(*MessageLiteralNode) (bool, *Visitor) + // VisitMessageFieldNode is invoked when visiting a *MessageFieldNode in the AST. + VisitMessageFieldNode func(*MessageFieldNode) (bool, *Visitor) + // VisitKeywordNode is invoked when visiting a *KeywordNode in the AST. 
+	VisitKeywordNode func(*KeywordNode) (bool, *Visitor)
+	// VisitRuneNode is invoked when visiting a *RuneNode in the AST.
+	VisitRuneNode func(*RuneNode) (bool, *Visitor)
+	// VisitEmptyDeclNode is invoked when visiting an *EmptyDeclNode in the AST.
+	VisitEmptyDeclNode func(*EmptyDeclNode) (bool, *Visitor)
+
+	// VisitFieldDeclNode is invoked when visiting a FieldDeclNode in the AST.
+	// This function is used when no concrete type function is provided. If
+	// both this and VisitMessageDeclNode are provided, and a node implements
+	// both (such as *GroupNode and *MapFieldNode), this function will be
+	// invoked and not the other.
+	VisitFieldDeclNode func(FieldDeclNode) (bool, *Visitor)
+	// VisitMessageDeclNode is invoked when visiting a MessageDeclNode in the AST.
+	// This function is used when no concrete type function is provided.
+	VisitMessageDeclNode func(MessageDeclNode) (bool, *Visitor)
+
+	// VisitIdentValueNode is invoked when visiting an IdentValueNode in the AST.
+	// This function is used when no concrete type function is provided.
+	VisitIdentValueNode func(IdentValueNode) (bool, *Visitor)
+	// VisitStringValueNode is invoked when visiting a StringValueNode in the AST.
+	// This function is used when no concrete type function is provided.
+	VisitStringValueNode func(StringValueNode) (bool, *Visitor)
+	// VisitIntValueNode is invoked when visiting an IntValueNode in the AST.
+	// This function is used when no concrete type function is provided. If
+	// both this and VisitFloatValueNode are provided, and a node implements
+	// both (such as *UintLiteralNode), this function will be invoked and
+	// not the other.
+	VisitIntValueNode func(IntValueNode) (bool, *Visitor)
+	// VisitFloatValueNode is invoked when visiting a FloatValueNode in the AST.
+	// This function is used when no concrete type function is provided.
+	VisitFloatValueNode func(FloatValueNode) (bool, *Visitor)
+	// VisitValueNode is invoked when visiting a ValueNode in the AST. This
+	// function is used when no concrete type function is provided and no
+	// more specific ValueNode function is provided that matches the node.
+	VisitValueNode func(ValueNode) (bool, *Visitor)
+
+	// VisitTerminalNode is invoked when visiting a TerminalNode in the AST.
+	// This function is used when no concrete type function is provided and
+	// no more specific interface type function is provided.
+	VisitTerminalNode func(TerminalNode) (bool, *Visitor)
+	// VisitCompositeNode is invoked when visiting a CompositeNode in the AST.
+	// This function is used when no concrete type function is provided and
+	// no more specific interface type function is provided.
+	VisitCompositeNode func(CompositeNode) (bool, *Visitor)
+	// VisitNode is invoked when visiting a Node in the AST. This
+	// function is only used when no other more specific function is
+	// provided.
+	VisitNode func(Node) (bool, *Visitor)
+}
+
+// Visit provides the Visitor's implementation of VisitFunc, to be
+// used with Walk operations.
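A minimal usage sketch of the Visitor and Walk APIs above; the package and helper names below are illustrative, not taken from the vendored sources.

package astwalkdemo

import "github.com/jhump/protoreflect/desc/protoparse/ast"

// fieldNames collects the name of every field declared in the file. Only
// VisitFieldNode is set, so Visitor's typed dispatch handles the rest and
// every other node type simply keeps descending.
func fieldNames(root *ast.FileNode) []string {
	var names []string
	v := &ast.Visitor{
		VisitFieldNode: func(f *ast.FieldNode) (bool, *ast.Visitor) {
			names = append(names, f.Name.Val)
			return true, nil // true: visit children; nil: keep using this visitor
		},
	}
	ast.Walk(root, v.Visit)
	return names
}

// countNodes shows the plain closure form of VisitFunc for comparison.
func countNodes(root ast.Node) int {
	var n int
	ast.Walk(root, func(ast.Node) (bool, ast.VisitFunc) {
		n++
		return true, nil
	})
	return n
}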
+func (v *Visitor) Visit(n Node) (bool, VisitFunc) { + var ok, matched bool + var next *Visitor + switch n := n.(type) { + case *FileNode: + if v.VisitFileNode != nil { + matched = true + ok, next = v.VisitFileNode(n) + } + case *SyntaxNode: + if v.VisitSyntaxNode != nil { + matched = true + ok, next = v.VisitSyntaxNode(n) + } + case *PackageNode: + if v.VisitPackageNode != nil { + matched = true + ok, next = v.VisitPackageNode(n) + } + case *ImportNode: + if v.VisitImportNode != nil { + matched = true + ok, next = v.VisitImportNode(n) + } + case *OptionNode: + if v.VisitOptionNode != nil { + matched = true + ok, next = v.VisitOptionNode(n) + } + case *OptionNameNode: + if v.VisitOptionNameNode != nil { + matched = true + ok, next = v.VisitOptionNameNode(n) + } + case *FieldReferenceNode: + if v.VisitFieldReferenceNode != nil { + matched = true + ok, next = v.VisitFieldReferenceNode(n) + } + case *CompactOptionsNode: + if v.VisitCompactOptionsNode != nil { + matched = true + ok, next = v.VisitCompactOptionsNode(n) + } + case *MessageNode: + if v.VisitMessageNode != nil { + matched = true + ok, next = v.VisitMessageNode(n) + } + case *ExtendNode: + if v.VisitExtendNode != nil { + matched = true + ok, next = v.VisitExtendNode(n) + } + case *ExtensionRangeNode: + if v.VisitExtensionRangeNode != nil { + matched = true + ok, next = v.VisitExtensionRangeNode(n) + } + case *ReservedNode: + if v.VisitReservedNode != nil { + matched = true + ok, next = v.VisitReservedNode(n) + } + case *RangeNode: + if v.VisitRangeNode != nil { + matched = true + ok, next = v.VisitRangeNode(n) + } + case *FieldNode: + if v.VisitFieldNode != nil { + matched = true + ok, next = v.VisitFieldNode(n) + } + case *GroupNode: + if v.VisitGroupNode != nil { + matched = true + ok, next = v.VisitGroupNode(n) + } + case *MapFieldNode: + if v.VisitMapFieldNode != nil { + matched = true + ok, next = v.VisitMapFieldNode(n) + } + case *MapTypeNode: + if v.VisitMapTypeNode != nil { + matched = true + ok, next = v.VisitMapTypeNode(n) + } + case *OneOfNode: + if v.VisitOneOfNode != nil { + matched = true + ok, next = v.VisitOneOfNode(n) + } + case *EnumNode: + if v.VisitEnumNode != nil { + matched = true + ok, next = v.VisitEnumNode(n) + } + case *EnumValueNode: + if v.VisitEnumValueNode != nil { + matched = true + ok, next = v.VisitEnumValueNode(n) + } + case *ServiceNode: + if v.VisitServiceNode != nil { + matched = true + ok, next = v.VisitServiceNode(n) + } + case *RPCNode: + if v.VisitRPCNode != nil { + matched = true + ok, next = v.VisitRPCNode(n) + } + case *RPCTypeNode: + if v.VisitRPCTypeNode != nil { + matched = true + ok, next = v.VisitRPCTypeNode(n) + } + case *IdentNode: + if v.VisitIdentNode != nil { + matched = true + ok, next = v.VisitIdentNode(n) + } + case *CompoundIdentNode: + if v.VisitCompoundIdentNode != nil { + matched = true + ok, next = v.VisitCompoundIdentNode(n) + } + case *StringLiteralNode: + if v.VisitStringLiteralNode != nil { + matched = true + ok, next = v.VisitStringLiteralNode(n) + } + case *CompoundStringLiteralNode: + if v.VisitCompoundStringLiteralNode != nil { + matched = true + ok, next = v.VisitCompoundStringLiteralNode(n) + } + case *UintLiteralNode: + if v.VisitUintLiteralNode != nil { + matched = true + ok, next = v.VisitUintLiteralNode(n) + } + case *PositiveUintLiteralNode: + if v.VisitPositiveUintLiteralNode != nil { + matched = true + ok, next = v.VisitPositiveUintLiteralNode(n) + } + case *NegativeIntLiteralNode: + if v.VisitNegativeIntLiteralNode != nil { + matched = true + ok, next = 
v.VisitNegativeIntLiteralNode(n) + } + case *FloatLiteralNode: + if v.VisitFloatLiteralNode != nil { + matched = true + ok, next = v.VisitFloatLiteralNode(n) + } + case *SpecialFloatLiteralNode: + if v.VisitSpecialFloatLiteralNode != nil { + matched = true + ok, next = v.VisitSpecialFloatLiteralNode(n) + } + case *SignedFloatLiteralNode: + if v.VisitSignedFloatLiteralNode != nil { + matched = true + ok, next = v.VisitSignedFloatLiteralNode(n) + } + case *BoolLiteralNode: + if v.VisitBoolLiteralNode != nil { + matched = true + ok, next = v.VisitBoolLiteralNode(n) + } + case *ArrayLiteralNode: + if v.VisitArrayLiteralNode != nil { + matched = true + ok, next = v.VisitArrayLiteralNode(n) + } + case *MessageLiteralNode: + if v.VisitMessageLiteralNode != nil { + matched = true + ok, next = v.VisitMessageLiteralNode(n) + } + case *MessageFieldNode: + if v.VisitMessageFieldNode != nil { + matched = true + ok, next = v.VisitMessageFieldNode(n) + } + case *KeywordNode: + if v.VisitKeywordNode != nil { + matched = true + ok, next = v.VisitKeywordNode(n) + } + case *RuneNode: + if v.VisitRuneNode != nil { + matched = true + ok, next = v.VisitRuneNode(n) + } + case *EmptyDeclNode: + if v.VisitEmptyDeclNode != nil { + matched = true + ok, next = v.VisitEmptyDeclNode(n) + } + } + + if !matched { + // Visitor provided no concrete type visit function, so + // check interface types. We do this in several passes + // to provide "priority" for matched interfaces for nodes + // that actually implement more than one interface. + // + // For example, StringLiteralNode implements both + // StringValueNode and ValueNode. Both cases could match + // so the first case is what would match. So if we want + // to test against either, they need to be in different + // switch statements. + switch n := n.(type) { + case FieldDeclNode: + if v.VisitFieldDeclNode != nil { + matched = true + ok, next = v.VisitFieldDeclNode(n) + } + case IdentValueNode: + if v.VisitIdentValueNode != nil { + matched = true + ok, next = v.VisitIdentValueNode(n) + } + case StringValueNode: + if v.VisitStringValueNode != nil { + matched = true + ok, next = v.VisitStringValueNode(n) + } + case IntValueNode: + if v.VisitIntValueNode != nil { + matched = true + ok, next = v.VisitIntValueNode(n) + } + } + } + + if !matched { + // These two are excluded from the above switch so that + // if visitor provides both VisitIntValueNode and + // VisitFloatValueNode, we'll prefer VisitIntValueNode + // for *UintLiteralNode (which implements both). Similarly, + // that way we prefer VisitFieldDeclNode over + // VisitMessageDeclNode when visiting a *GroupNode. 
+ switch n := n.(type) { + case FloatValueNode: + if v.VisitFloatValueNode != nil { + matched = true + ok, next = v.VisitFloatValueNode(n) + } + case MessageDeclNode: + if v.VisitMessageDeclNode != nil { + matched = true + ok, next = v.VisitMessageDeclNode(n) + } + } + } + + if !matched { + switch n := n.(type) { + case ValueNode: + if v.VisitValueNode != nil { + matched = true + ok, next = v.VisitValueNode(n) + } + } + } + + if !matched { + switch n := n.(type) { + case TerminalNode: + if v.VisitTerminalNode != nil { + matched = true + ok, next = v.VisitTerminalNode(n) + } + case CompositeNode: + if v.VisitCompositeNode != nil { + matched = true + ok, next = v.VisitCompositeNode(n) + } + } + } + + if !matched { + // finally, fallback to most generic visit function + if v.VisitNode != nil { + matched = true + ok, next = v.VisitNode(n) + } + } + + if !matched { + // keep descending with the current visitor + return true, nil + } + + if !ok { + return false, nil + } + if next != nil { + return true, next.Visit + } + return true, v.Visit +} diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/doc.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/doc.go new file mode 100644 index 00000000..c6446d34 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/doc.go @@ -0,0 +1,10 @@ +// Package protoparse provides functionality for parsing *.proto source files +// into descriptors that can be used with other protoreflect packages, like +// dynamic messages and dynamic GRPC clients. +// +// This package links in other packages that include compiled descriptors for +// the various "google/protobuf/*.proto" files that are included with protoc. +// That way, like when invoking protoc, programs need not supply copies of these +// "builtin" files. Though if copies of the files are provided, they will be +// used instead of the builtin descriptors. +package protoparse diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/errors.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/errors.go new file mode 100644 index 00000000..0ec70bd7 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/errors.go @@ -0,0 +1,122 @@ +package protoparse + +import ( + "errors" + "fmt" + + "github.com/bufbuild/protocompile/linker" + "github.com/bufbuild/protocompile/parser" + "github.com/bufbuild/protocompile/reporter" + + "github.com/jhump/protoreflect/desc/protoparse/ast" +) + +// SourcePos is the same as ast.SourcePos. This alias exists for +// backwards compatibility (SourcePos used to be defined in this package.) +type SourcePos = ast.SourcePos + +// ErrInvalidSource is a sentinel error that is returned by calls to +// Parser.ParseFiles and Parser.ParseFilesButDoNotLink in the event that syntax +// or link errors are encountered, but the parser's configured ErrorReporter +// always returns nil. +var ErrInvalidSource = reporter.ErrInvalidSource + +// ErrNoSyntax is a sentinel error that may be passed to a warning reporter. +// The error the reporter receives will be wrapped with source position that +// indicates the file that had no syntax statement. +var ErrNoSyntax = parser.ErrNoSyntax + +// ErrLookupImportAndProtoSet is the error returned if both LookupImport and LookupImportProto are set. +// +// Deprecated: This error is no longer used. It is now legal to set both LookupImport and LookupImportProto +// fields on the Parser. 
+var ErrLookupImportAndProtoSet = errors.New("both LookupImport and LookupImportProto set") + +// ErrorReporter is responsible for reporting the given error. If the reporter +// returns a non-nil error, parsing/linking will abort with that error. If the +// reporter returns nil, parsing will continue, allowing the parser to try to +// report as many syntax and/or link errors as it can find. +type ErrorReporter = reporter.ErrorReporter + +// WarningReporter is responsible for reporting the given warning. This is used +// for indicating non-error messages to the calling program for things that do +// not cause the parse to fail but are considered bad practice. Though they are +// just warnings, the details are supplied to the reporter via an error type. +type WarningReporter = reporter.WarningReporter + +// ErrorWithPos is an error about a proto source file that includes information +// about the location in the file that caused the error. +// +// The value of Error() will contain both the SourcePos and Underlying error. +// The value of Unwrap() will only be the Underlying error. +type ErrorWithPos = reporter.ErrorWithPos + +// ErrorWithSourcePos is an error about a proto source file that includes +// information about the location in the file that caused the error. +// +// Errors that include source location information *might* be of this type. +// However, calling code that is trying to examine errors with location info +// should instead look for instances of the ErrorWithPos interface, which +// will find other kinds of errors. This type is only exported for backwards +// compatibility. +// +// SourcePos should always be set and never nil. +type ErrorWithSourcePos struct { + // These fields are present and exported for backwards-compatibility + // with v1.4 and earlier. + Underlying error + Pos *SourcePos + + reporter.ErrorWithPos +} + +// Error implements the error interface +func (e ErrorWithSourcePos) Error() string { + sourcePos := e.GetPosition() + return fmt.Sprintf("%s: %v", sourcePos, e.Underlying) +} + +// GetPosition implements the ErrorWithPos interface, supplying a location in +// proto source that caused the error. +func (e ErrorWithSourcePos) GetPosition() SourcePos { + if e.Pos == nil { + return SourcePos{Filename: ""} + } + return *e.Pos +} + +// Unwrap implements the ErrorWithPos interface, supplying the underlying +// error. This error will not include location information. +func (e ErrorWithSourcePos) Unwrap() error { + return e.Underlying +} + +var _ ErrorWithPos = ErrorWithSourcePos{} + +func toErrorWithSourcePos(err ErrorWithPos) ErrorWithPos { + pos := err.GetPosition() + return ErrorWithSourcePos{ + ErrorWithPos: err, + Underlying: err.Unwrap(), + Pos: &pos, + } +} + +// ErrorUnusedImport may be passed to a warning reporter when an unused +// import is detected. The error the reporter receives will be wrapped +// with source position that indicates the file and line where the import +// statement appeared. 
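A short sketch of how ErrorReporter and ErrInvalidSource interact in practice; parseAll and its package are illustrative names, not part of this file.

package protoparsedemo

import "github.com/jhump/protoreflect/desc/protoparse"

// parseAll parses the given files but keeps going past individual syntax and
// link errors, collecting everything that was reported. If any error was
// reported and swallowed this way, ParseFiles itself fails with
// protoparse.ErrInvalidSource.
func parseAll(filenames ...string) ([]protoparse.ErrorWithPos, error) {
	var reported []protoparse.ErrorWithPos
	p := protoparse.Parser{
		ErrorReporter: func(errWithPos protoparse.ErrorWithPos) error {
			reported = append(reported, errWithPos)
			return nil // nil asks the parser to continue reporting
		},
	}
	_, err := p.ParseFiles(filenames...)
	return reported, err
}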
+type ErrorUnusedImport = linker.ErrorUnusedImport + +type errorWithFilename struct { + underlying error + filename string +} + +func (e errorWithFilename) Error() string { + return fmt.Sprintf("%s: %v", e.filename, e.underlying) +} + +func (e errorWithFilename) Unwrap() error { + return e.underlying +} diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/parser.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/parser.go new file mode 100644 index 00000000..1a6763df --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/parser.go @@ -0,0 +1,804 @@ +package protoparse + +import ( + "context" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strings" + + "github.com/bufbuild/protocompile" + ast2 "github.com/bufbuild/protocompile/ast" + "github.com/bufbuild/protocompile/linker" + "github.com/bufbuild/protocompile/options" + "github.com/bufbuild/protocompile/parser" + "github.com/bufbuild/protocompile/protoutil" + "github.com/bufbuild/protocompile/reporter" + "github.com/bufbuild/protocompile/sourceinfo" + "github.com/bufbuild/protocompile/walk" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/types/descriptorpb" + + "github.com/jhump/protoreflect/desc" + "github.com/jhump/protoreflect/desc/internal" + "github.com/jhump/protoreflect/desc/protoparse/ast" +) + +// FileAccessor is an abstraction for opening proto source files. It takes the +// name of the file to open and returns either the input reader or an error. +type FileAccessor func(filename string) (io.ReadCloser, error) + +// FileContentsFromMap returns a FileAccessor that uses the given map of file +// contents. This allows proto source files to be constructed in memory and +// easily supplied to a parser. The map keys are the paths to the proto source +// files, and the values are the actual proto source contents. +func FileContentsFromMap(files map[string]string) FileAccessor { + return func(filename string) (io.ReadCloser, error) { + contents, ok := files[filename] + if !ok { + // Try changing path separators since user-provided + // map may use different separators. + contents, ok = files[filepath.ToSlash(filename)] + if !ok { + return nil, os.ErrNotExist + } + } + return ioutil.NopCloser(strings.NewReader(contents)), nil + } +} + +// Parser parses proto source into descriptors. +type Parser struct { + // The paths used to search for dependencies that are referenced in import + // statements in proto source files. If no import paths are provided then + // "." (current directory) is assumed to be the only import path. + // + // This setting is only used during ParseFiles operations. Since calls to + // ParseFilesButDoNotLink do not link, there is no need to load and parse + // dependencies. + ImportPaths []string + + // If true, the supplied file names/paths need not necessarily match how the + // files are referenced in import statements. The parser will attempt to + // match import statements to supplied paths, "guessing" the import paths + // for the files. Note that this inference is not perfect and link errors + // could result. It works best when all proto files are organized such that + // a single import path can be inferred (e.g. all files under a single tree + // with import statements all being relative to the root of this tree). + InferImportPaths bool + + // LookupImport is a function that accepts a filename and + // returns a file descriptor, which will be consulted when resolving imports. 
+	// This allows a compiled Go proto in another Go module to be referenced
+	// in the proto(s) being parsed.
+	//
+	// In the event of a filename collision, Accessor is consulted first,
+	// then LookupImport is consulted, and finally the well-known protos
+	// are used.
+	//
+	// For example, in order to automatically look up compiled Go protos that
+	// have been imported and be able to use them as imports, set this to
+	// desc.LoadFileDescriptor.
+	LookupImport func(string) (*desc.FileDescriptor, error)
+
+	// LookupImportProto has the same functionality as LookupImport, however it returns
+	// a FileDescriptorProto instead of a FileDescriptor.
+	LookupImportProto func(string) (*descriptorpb.FileDescriptorProto, error)
+
+	// Used to create a reader for a given filename, when loading proto source
+	// file contents. If unset, os.Open is used. If ImportPaths is also empty
+	// then relative paths will be relative to the process's current working
+	// directory.
+	Accessor FileAccessor
+
+	// If true, the resulting file descriptors will retain source code info,
+	// that maps elements to their location in the source files as well as
+	// includes comments found during parsing (and attributed to elements of
+	// the source file).
+	IncludeSourceCodeInfo bool
+
+	// If true, the results from ParseFilesButDoNotLink will be passed through
+	// some additional validations. But only constraints that do not require
+	// linking can be checked. These include proto2 vs. proto3 language features,
+	// looking for incorrect usage of reserved names or tags, and ensuring that
+	// fields have unique tags and that enum values have unique numbers (unless
+	// the enum allows aliases).
+	ValidateUnlinkedFiles bool
+
+	// If true, the results from ParseFilesButDoNotLink will have options
+	// interpreted. Any uninterpretable options (including any custom options or
+	// options that refer to message and enum types, which can only be
+	// interpreted after linking) will be left in uninterpreted_options. Also,
+	// the "default" pseudo-option for fields can only be interpreted for scalar
+	// fields, excluding enums. (Interpreting default values for enum fields
+	// requires resolving enum names, which requires linking.)
+	InterpretOptionsInUnlinkedFiles bool
+
+	// A custom reporter of syntax and link errors. If not specified, the
+	// default reporter just returns the reported error, which causes parsing
+	// to abort after encountering a single error.
+	//
+	// The reporter is not invoked for system or I/O errors, only for syntax and
+	// link errors.
+	ErrorReporter ErrorReporter
+
+	// A custom reporter of warnings. If not specified, warning messages are ignored.
+	WarningReporter WarningReporter
+}
+
+// ParseFiles parses the named files into descriptors. The returned slice has
+// the same number of entries as the given filenames, in the same order. So the
+// first returned descriptor corresponds to the first given name, and so on.
+//
+// All dependencies for all specified files (including transitive dependencies)
+// must be accessible via the parser's Accessor or a link error will occur. The
+// exception to this rule is that files can import standard Google-provided
+// files -- e.g. google/protobuf/*.proto -- without needing to supply sources
+// for these files. Like protoc, this parser has a built-in version of these
+// files it can use if they aren't explicitly supplied.
+// +// If the Parser has no ErrorReporter set and a syntax or link error occurs, +// parsing will abort with the first such error encountered. If there is an +// ErrorReporter configured and it returns non-nil, parsing will abort with the +// error it returns. If syntax or link errors are encountered but the configured +// ErrorReporter always returns nil, the parse fails with ErrInvalidSource. +func (p Parser) ParseFiles(filenames ...string) ([]*desc.FileDescriptor, error) { + srcInfoMode := protocompile.SourceInfoNone + if p.IncludeSourceCodeInfo { + srcInfoMode = protocompile.SourceInfoExtraComments + } + rep := newReporter(p.ErrorReporter, p.WarningReporter) + res, srcSpanAddr := p.getResolver(filenames) + + if p.InferImportPaths { + // we must first compile everything to protos + results, err := parseToProtosRecursive(res, filenames, reporter.NewHandler(rep), srcSpanAddr) + if err != nil { + return nil, err + } + // then we can infer import paths + var rewritten map[string]string + results, rewritten = fixupFilenames(results) + if len(rewritten) > 0 { + for i := range filenames { + if replace, ok := rewritten[filenames[i]]; ok { + filenames[i] = replace + } + } + } + resolverFromResults := protocompile.ResolverFunc(func(path string) (protocompile.SearchResult, error) { + res, ok := results[path] + if !ok { + return protocompile.SearchResult{}, os.ErrNotExist + } + return protocompile.SearchResult{ParseResult: noCloneParseResult{res}}, nil + }) + res = protocompile.CompositeResolver{resolverFromResults, res} + } + + c := protocompile.Compiler{ + Resolver: res, + MaxParallelism: 1, + SourceInfoMode: srcInfoMode, + Reporter: rep, + } + results, err := c.Compile(context.Background(), filenames...) + if err != nil { + return nil, err + } + + fds := make([]protoreflect.FileDescriptor, len(results)) + alreadySeen := make(map[string]struct{}, len(results)) + for i, res := range results { + removeDynamicExtensions(res, alreadySeen) + fds[i] = res + } + return desc.WrapFiles(fds) +} + +type noCloneParseResult struct { + parser.Result +} + +func (r noCloneParseResult) Clone() parser.Result { + // protocompile will clone parser.Result to make sure it can't be shared + // with other compilation operations (which would not be thread-safe). + // However, this parse result cannot be shared with another compile + // operation. That means the clone is unnecessary; so we skip it, to avoid + // the associated performance costs. + return r.Result +} + +// ParseFilesButDoNotLink parses the named files into descriptor protos. The +// results are just protos, not fully-linked descriptors. It is possible that +// descriptors are invalid and still be returned in parsed form without error +// due to the fact that the linking step is skipped (and thus many validation +// steps omitted). +// +// There are a few side effects to not linking the descriptors: +// 1. No options will be interpreted. Options can refer to extensions or have +// message and enum types. Without linking, these extension and type +// references are not resolved, so the options may not be interpretable. +// So all options will appear in UninterpretedOption fields of the various +// descriptor options messages. +// 2. Type references will not be resolved. This means that the actual type +// names in the descriptors may be unqualified and even relative to the +// scope in which the type reference appears. This goes for fields that +// have message and enum types. 
It also applies to methods and their +// references to request and response message types. +// 3. Type references are not known. For non-scalar fields, until the type +// name is resolved (during linking), it is not known whether the type +// refers to a message or an enum. So all fields with such type references +// will not have their Type set, only the TypeName. +// +// This method will still validate the syntax of parsed files. If the parser's +// ValidateUnlinkedFiles field is true, additional checks, beyond syntax will +// also be performed. +// +// If the Parser has no ErrorReporter set and a syntax error occurs, parsing +// will abort with the first such error encountered. If there is an +// ErrorReporter configured and it returns non-nil, parsing will abort with the +// error it returns. If syntax errors are encountered but the configured +// ErrorReporter always returns nil, the parse fails with ErrInvalidSource. +func (p Parser) ParseFilesButDoNotLink(filenames ...string) ([]*descriptorpb.FileDescriptorProto, error) { + rep := newReporter(p.ErrorReporter, p.WarningReporter) + p.ImportPaths = nil // not used for this "do not link" operation. + res, _ := p.getResolver(filenames) + results, err := parseToProtos(res, filenames, reporter.NewHandler(rep), p.ValidateUnlinkedFiles) + if err != nil { + return nil, err + } + + if p.InferImportPaths { + resultsMap := make(map[string]parser.Result, len(results)) + for _, res := range results { + resultsMap[res.FileDescriptorProto().GetName()] = res + } + var rewritten map[string]string + resultsMap, rewritten = fixupFilenames(resultsMap) + if len(rewritten) > 0 { + for i := range filenames { + if replace, ok := rewritten[filenames[i]]; ok { + filenames[i] = replace + } + } + } + for i := range filenames { + results[i] = resultsMap[filenames[i]] + } + } + + protos := make([]*descriptorpb.FileDescriptorProto, len(results)) + for i, res := range results { + protos[i] = res.FileDescriptorProto() + var optsIndex sourceinfo.OptionIndex + if p.InterpretOptionsInUnlinkedFiles { + var err error + optsIndex, err = options.InterpretUnlinkedOptions(res) + if err != nil { + return nil, err + } + removeDynamicExtensionsFromProto(protos[i]) + } + if p.IncludeSourceCodeInfo { + protos[i].SourceCodeInfo = sourceinfo.GenerateSourceInfo(res.AST(), optsIndex, sourceinfo.WithExtraComments()) + } + } + + return protos, nil +} + +// ParseToAST parses the named files into ASTs, or Abstract Syntax Trees. This +// is for consumers of proto files that don't care about compiling the files to +// descriptors, but care deeply about a non-lossy structured representation of +// the source (since descriptors are lossy). This includes formatting tools and +// possibly linters, too. +// +// If the requested filenames include standard imports (such as +// "google/protobuf/empty.proto") and no source is provided, the corresponding +// AST in the returned slice will be nil. These standard imports are only +// available for use as descriptors; no source is available unless it is +// provided by the configured Accessor. +// +// If the Parser has no ErrorReporter set and a syntax error occurs, parsing +// will abort with the first such error encountered. If there is an +// ErrorReporter configured and it returns non-nil, parsing will abort with the +// error it returns. If syntax errors are encountered but the configured +// ErrorReporter always returns nil, the parse fails with ErrInvalidSource. 
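A sketch of the typical ParseFiles flow using in-memory sources via FileContentsFromMap; the file name, package, and Greeter service below are made up for illustration (for instance as input to a dynamic gRPC client).

package protoparsedemo

import (
	"github.com/jhump/protoreflect/desc"
	"github.com/jhump/protoreflect/desc/protoparse"
)

// loadGreeter compiles a single in-memory .proto file and returns the
// descriptor of its hypothetical Greeter service.
func loadGreeter() (*desc.ServiceDescriptor, error) {
	p := protoparse.Parser{
		Accessor: protoparse.FileContentsFromMap(map[string]string{
			"greeter.proto": `
syntax = "proto3";
package demo;
service Greeter {
  rpc SayHello(HelloRequest) returns (HelloReply);
}
message HelloRequest { string name = 1; }
message HelloReply { string message = 1; }
`,
		}),
	}
	fds, err := p.ParseFiles("greeter.proto")
	if err != nil {
		return nil, err
	}
	// FindService returns nil if the service is not present.
	return fds[0].FindService("demo.Greeter"), nil
}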
+func (p Parser) ParseToAST(filenames ...string) ([]*ast.FileNode, error) { + rep := newReporter(p.ErrorReporter, p.WarningReporter) + res, _ := p.getResolver(filenames) + asts, _, err := parseToASTs(res, filenames, reporter.NewHandler(rep)) + if err != nil { + return nil, err + } + results := make([]*ast.FileNode, len(asts)) + for i := range asts { + if asts[i] == nil { + // should not be possible but... + return nil, fmt.Errorf("resolver did not produce source for %v", filenames[i]) + } + results[i] = convertAST(asts[i]) + } + return results, nil +} + +func parseToAST(res protocompile.Resolver, filename string, rep *reporter.Handler) (*ast2.FileNode, parser.Result, error) { + searchResult, err := res.FindFileByPath(filename) + if err != nil { + _ = rep.HandleError(err) + return nil, nil, rep.Error() + } + switch { + case searchResult.ParseResult != nil: + return nil, searchResult.ParseResult, nil + case searchResult.Proto != nil: + return nil, parser.ResultWithoutAST(searchResult.Proto), nil + case searchResult.Desc != nil: + return nil, parser.ResultWithoutAST(protoutil.ProtoFromFileDescriptor(searchResult.Desc)), nil + case searchResult.AST != nil: + return searchResult.AST, nil, nil + case searchResult.Source != nil: + astRoot, err := parser.Parse(filename, searchResult.Source, rep) + return astRoot, nil, err + default: + _ = rep.HandleError(fmt.Errorf("resolver did not produce a result for %v", filename)) + return nil, nil, rep.Error() + } +} + +func parseToASTs(res protocompile.Resolver, filenames []string, rep *reporter.Handler) ([]*ast2.FileNode, []parser.Result, error) { + asts := make([]*ast2.FileNode, len(filenames)) + results := make([]parser.Result, len(filenames)) + for i := range filenames { + asts[i], results[i], _ = parseToAST(res, filenames[i], rep) + if rep.ReporterError() != nil { + break + } + } + return asts, results, rep.Error() +} + +func parseToProtos(res protocompile.Resolver, filenames []string, rep *reporter.Handler, validate bool) ([]parser.Result, error) { + asts, results, err := parseToASTs(res, filenames, rep) + if err != nil { + return nil, err + } + for i := range results { + if results[i] != nil { + continue + } + var err error + results[i], err = parser.ResultFromAST(asts[i], validate, rep) + if err != nil { + return nil, err + } + } + return results, nil +} + +func parseToProtosRecursive(res protocompile.Resolver, filenames []string, rep *reporter.Handler, srcSpanAddr *ast2.SourceSpan) (map[string]parser.Result, error) { + results := make(map[string]parser.Result, len(filenames)) + for _, filename := range filenames { + if err := parseToProtoRecursive(res, filename, rep, srcSpanAddr, results); err != nil { + return results, err + } + } + return results, rep.Error() +} + +func parseToProtoRecursive(res protocompile.Resolver, filename string, rep *reporter.Handler, srcSpanAddr *ast2.SourceSpan, results map[string]parser.Result) error { + if _, ok := results[filename]; ok { + // already processed this one + return nil + } + results[filename] = nil // placeholder entry + + astRoot, parseResult, err := parseToAST(res, filename, rep) + if err != nil { + return err + } + if parseResult == nil { + parseResult, err = parser.ResultFromAST(astRoot, true, rep) + if err != nil { + return err + } + } + results[filename] = parseResult + + if astRoot != nil { + // We have an AST, so we use it to recursively examine imports. 
+ for _, decl := range astRoot.Decls { + imp, ok := decl.(*ast2.ImportNode) + if !ok { + continue + } + err := func() error { + orig := *srcSpanAddr + *srcSpanAddr = astRoot.NodeInfo(imp.Name) + defer func() { + *srcSpanAddr = orig + }() + + return parseToProtoRecursive(res, imp.Name.AsString(), rep, srcSpanAddr, results) + }() + if err != nil { + return err + } + } + return nil + } + + // Without an AST, we must recursively examine the proto. This makes it harder + // (but not necessarily impossible) to get the source location of the import. + fd := parseResult.FileDescriptorProto() + for i, dep := range fd.Dependency { + path := []int32{internal.File_dependencyTag, int32(i)} + err := func() error { + orig := *srcSpanAddr + found := false + for _, loc := range fd.GetSourceCodeInfo().GetLocation() { + if pathsEqual(loc.Path, path) { + start := SourcePos{ + Filename: dep, + Line: int(loc.Span[0]), + Col: int(loc.Span[1]), + } + var end SourcePos + if len(loc.Span) > 3 { + end = SourcePos{ + Filename: dep, + Line: int(loc.Span[2]), + Col: int(loc.Span[3]), + } + } else { + end = SourcePos{ + Filename: dep, + Line: int(loc.Span[0]), + Col: int(loc.Span[2]), + } + } + *srcSpanAddr = ast2.NewSourceSpan(start, end) + found = true + break + } + } + if !found { + *srcSpanAddr = ast2.UnknownSpan(dep) + } + defer func() { + *srcSpanAddr = orig + }() + + return parseToProtoRecursive(res, dep, rep, srcSpanAddr, results) + }() + if err != nil { + return err + } + } + return nil +} + +func pathsEqual(a, b []int32) bool { + if len(a) != len(b) { + return false + } + for i := range a { + if a[i] != b[i] { + return false + } + } + return true +} + +func newReporter(errRep ErrorReporter, warnRep WarningReporter) reporter.Reporter { + if errRep != nil { + delegate := errRep + errRep = func(err ErrorWithPos) error { + if _, ok := err.(ErrorWithSourcePos); !ok { + err = toErrorWithSourcePos(err) + } + return delegate(err) + } + } + if warnRep != nil { + delegate := warnRep + warnRep = func(err ErrorWithPos) { + if _, ok := err.(ErrorWithSourcePos); !ok { + err = toErrorWithSourcePos(err) + } + delegate(err) + } + } + return reporter.NewReporter(errRep, warnRep) +} + +func (p Parser) getResolver(filenames []string) (protocompile.Resolver, *ast2.SourceSpan) { + var srcSpan ast2.SourceSpan + accessor := p.Accessor + if accessor == nil { + accessor = func(name string) (io.ReadCloser, error) { + return os.Open(name) + } + } + sourceResolver := &protocompile.SourceResolver{ + Accessor: func(filename string) (io.ReadCloser, error) { + in, err := accessor(filename) + if err != nil { + if !strings.Contains(err.Error(), filename) { + // errors that don't include the filename that failed are no bueno + err = errorWithFilename{filename: filename, underlying: err} + } + if srcSpan != nil { + err = reporter.Error(srcSpan, err) + } + } + return in, err + }, + ImportPaths: p.ImportPaths, + } + var importResolver protocompile.CompositeResolver + if p.LookupImport != nil { + importResolver = append(importResolver, protocompile.ResolverFunc(func(path string) (protocompile.SearchResult, error) { + fd, err := p.LookupImport(path) + if err != nil { + return protocompile.SearchResult{}, err + } + return protocompile.SearchResult{Desc: fd.UnwrapFile()}, nil + })) + } + if p.LookupImportProto != nil { + importResolver = append(importResolver, protocompile.ResolverFunc(func(path string) (protocompile.SearchResult, error) { + fd, err := p.LookupImportProto(path) + if err != nil { + return protocompile.SearchResult{}, err + } + return 
protocompile.SearchResult{Proto: fd}, nil + })) + } + backupResolver := protocompile.WithStandardImports(importResolver) + return protocompile.CompositeResolver{ + sourceResolver, + protocompile.ResolverFunc(func(path string) (protocompile.SearchResult, error) { + return backupResolver.FindFileByPath(path) + }), + }, &srcSpan +} + +func fixupFilenames(protos map[string]parser.Result) (revisedProtos map[string]parser.Result, rewrittenPaths map[string]string) { + // In the event that the given filenames (keys in the supplied map) do not + // match the actual paths used in 'import' statements in the files, we try + // to revise names in the protos so that they will match and be linkable. + revisedProtos = make(map[string]parser.Result, len(protos)) + rewrittenPaths = make(map[string]string, len(protos)) + + protoPaths := map[string]struct{}{} + // TODO: this is O(n^2) but could likely be O(n) with a clever data structure (prefix tree that is indexed backwards?) + importCandidates := map[string]map[string]struct{}{} + candidatesAvailable := map[string]struct{}{} + for name := range protos { + candidatesAvailable[name] = struct{}{} + for _, f := range protos { + for _, imp := range f.FileDescriptorProto().Dependency { + if strings.HasSuffix(name, imp) || strings.HasSuffix(imp, name) { + candidates := importCandidates[imp] + if candidates == nil { + candidates = map[string]struct{}{} + importCandidates[imp] = candidates + } + candidates[name] = struct{}{} + } + } + } + } + for imp, candidates := range importCandidates { + // if we found multiple possible candidates, use the one that is an exact match + // if it exists, and otherwise, guess that it's the shortest path (fewest elements) + var best string + for c := range candidates { + if _, ok := candidatesAvailable[c]; !ok { + // already used this candidate and re-written its filename accordingly + continue + } + if c == imp { + // exact match! + best = c + break + } + if best == "" { + best = c + } else { + // NB: We can't actually tell which file is supposed to match + // this import. So we prefer the longest name. On a tie, we + // choose the lexically earliest match. + minLen := strings.Count(best, string(filepath.Separator)) + cLen := strings.Count(c, string(filepath.Separator)) + if cLen > minLen || (cLen == minLen && c < best) { + best = c + } + } + } + if best != "" { + if len(best) > len(imp) { + prefix := best[:len(best)-len(imp)] + protoPaths[prefix] = struct{}{} + } + f := protos[best] + f.FileDescriptorProto().Name = proto.String(imp) + revisedProtos[imp] = f + rewrittenPaths[best] = imp + delete(candidatesAvailable, best) + + // If other candidates are actually references to the same file, remove them. + for c := range candidates { + if _, ok := candidatesAvailable[c]; !ok { + // already used this candidate and re-written its filename accordingly + continue + } + possibleDup := protos[c] + prevName := possibleDup.FileDescriptorProto().Name + possibleDup.FileDescriptorProto().Name = proto.String(imp) + if !proto.Equal(f.FileDescriptorProto(), protos[c].FileDescriptorProto()) { + // not equal: restore name and look at next one + possibleDup.FileDescriptorProto().Name = prevName + continue + } + // This file used a different name but was actually the same file. So + // we prune it from the set. 
+ rewrittenPaths[c] = imp + delete(candidatesAvailable, c) + if len(c) > len(imp) { + prefix := c[:len(c)-len(imp)] + protoPaths[prefix] = struct{}{} + } + } + } + } + + if len(candidatesAvailable) == 0 { + return revisedProtos, rewrittenPaths + } + + if len(protoPaths) == 0 { + for c := range candidatesAvailable { + revisedProtos[c] = protos[c] + } + return revisedProtos, rewrittenPaths + } + + // Any remaining candidates are entry-points (not imported by others), so + // the best bet to "fixing" their file name is to see if they're in one of + // the proto paths we found, and if so strip that prefix. + protoPathStrs := make([]string, len(protoPaths)) + i := 0 + for p := range protoPaths { + protoPathStrs[i] = p + i++ + } + sort.Strings(protoPathStrs) + // we look at paths in reverse order, so we'll use a longer proto path if + // there is more than one match + for c := range candidatesAvailable { + var imp string + for i := len(protoPathStrs) - 1; i >= 0; i-- { + p := protoPathStrs[i] + if strings.HasPrefix(c, p) { + imp = c[len(p):] + break + } + } + if imp != "" { + f := protos[c] + f.FileDescriptorProto().Name = proto.String(imp) + f.FileNode() + revisedProtos[imp] = f + rewrittenPaths[c] = imp + } else { + revisedProtos[c] = protos[c] + } + } + + return revisedProtos, rewrittenPaths +} + +func removeDynamicExtensions(fd protoreflect.FileDescriptor, alreadySeen map[string]struct{}) { + if _, ok := alreadySeen[fd.Path()]; ok { + // already processed + return + } + alreadySeen[fd.Path()] = struct{}{} + res, ok := fd.(linker.Result) + if ok { + removeDynamicExtensionsFromProto(res.FileDescriptorProto()) + } + // also remove extensions from dependencies + for i, length := 0, fd.Imports().Len(); i < length; i++ { + removeDynamicExtensions(fd.Imports().Get(i).FileDescriptor, alreadySeen) + } +} + +func removeDynamicExtensionsFromProto(fd *descriptorpb.FileDescriptorProto) { + // protocompile returns descriptors with dynamic extension fields for custom options. + // But protoparse only used known custom options and everything else defined in the + // sources would be stored as unrecognized fields. So to bridge the difference in + // behavior, we need to remove custom options from the given file and add them back + // via serializing-then-de-serializing them back into the options messages. That way, + // statically known options will be properly typed and others will be unrecognized. + // + // This is best effort. So if an error occurs, we'll still return a result, but it + // may include a dynamic extension. 
+ fd.Options = removeDynamicExtensionsFromOptions(fd.Options) + _ = walk.DescriptorProtos(fd, func(_ protoreflect.FullName, msg proto.Message) error { + switch msg := msg.(type) { + case *descriptorpb.DescriptorProto: + msg.Options = removeDynamicExtensionsFromOptions(msg.Options) + for _, extr := range msg.ExtensionRange { + extr.Options = removeDynamicExtensionsFromOptions(extr.Options) + } + case *descriptorpb.FieldDescriptorProto: + msg.Options = removeDynamicExtensionsFromOptions(msg.Options) + case *descriptorpb.OneofDescriptorProto: + msg.Options = removeDynamicExtensionsFromOptions(msg.Options) + case *descriptorpb.EnumDescriptorProto: + msg.Options = removeDynamicExtensionsFromOptions(msg.Options) + case *descriptorpb.EnumValueDescriptorProto: + msg.Options = removeDynamicExtensionsFromOptions(msg.Options) + case *descriptorpb.ServiceDescriptorProto: + msg.Options = removeDynamicExtensionsFromOptions(msg.Options) + case *descriptorpb.MethodDescriptorProto: + msg.Options = removeDynamicExtensionsFromOptions(msg.Options) + } + return nil + }) +} + +type ptrMsg[T any] interface { + *T + proto.Message +} + +type fieldValue struct { + fd protoreflect.FieldDescriptor + val protoreflect.Value +} + +func removeDynamicExtensionsFromOptions[O ptrMsg[T], T any](opts O) O { + if opts == nil { + return nil + } + var dynamicExtensions []fieldValue + opts.ProtoReflect().Range(func(fd protoreflect.FieldDescriptor, val protoreflect.Value) bool { + if fd.IsExtension() { + dynamicExtensions = append(dynamicExtensions, fieldValue{fd: fd, val: val}) + } + return true + }) + + // serialize only these custom options + optsWithOnlyDyn := opts.ProtoReflect().Type().New() + for _, fv := range dynamicExtensions { + optsWithOnlyDyn.Set(fv.fd, fv.val) + } + data, err := proto.MarshalOptions{AllowPartial: true}.Marshal(optsWithOnlyDyn.Interface()) + if err != nil { + // oh, well... can't fix this one + return opts + } + + // and then replace values by clearing these custom options and deserializing + optsClone := proto.Clone(opts).ProtoReflect() + for _, fv := range dynamicExtensions { + optsClone.Clear(fv.fd) + } + err = proto.UnmarshalOptions{AllowPartial: true, Merge: true}.Unmarshal(data, optsClone.Interface()) + if err != nil { + // bummer, can't fix this one + return opts + } + + return optsClone.Interface().(O) +} diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/resolve_files.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/resolve_files.go new file mode 100644 index 00000000..3ae1415a --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/resolve_files.go @@ -0,0 +1,175 @@ +package protoparse + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "strings" +) + +var errNoImportPathsForAbsoluteFilePath = errors.New("must specify at least one import path if any absolute file paths are given") + +// ResolveFilenames tries to resolve fileNames into paths that are relative to +// directories in the given importPaths. The returned slice has the results in +// the same order as they are supplied in fileNames. +// +// The resulting names should be suitable for passing to Parser.ParseFiles. +// +// If no import paths are given and any file name is absolute, this returns an +// error. If no import paths are given and all file names are relative, this +// returns the original file names. If a file name is already relative to one +// of the given import paths, it will be unchanged in the returned slice. 
If a +// file name given is relative to the current working directory, it will be made +// relative to one of the given import paths; but if it cannot be made relative +// (due to no matching import path), an error will be returned. +func ResolveFilenames(importPaths []string, fileNames ...string) ([]string, error) { + if len(importPaths) == 0 { + if containsAbsFilePath(fileNames) { + // We have to do this as otherwise parseProtoFiles can result in duplicate symbols. + // For example, assume we import "foo/bar/bar.proto" in a file "/home/alice/dev/foo/bar/baz.proto" + // as we call ParseFiles("/home/alice/dev/foo/bar/bar.proto","/home/alice/dev/foo/bar/baz.proto") + // with "/home/alice/dev" as our current directory. Due to the recursive nature of parseProtoFiles, + // it will discover the import "foo/bar/bar.proto" in the input file, and call parse on this, + // adding "foo/bar/bar.proto" to the parsed results, as well as "/home/alice/dev/foo/bar/bar.proto" + // from the input file list. This will result in a + // 'duplicate symbol SYMBOL: already defined as field in "/home/alice/dev/foo/bar/bar.proto' + // error being returned from ParseFiles. + return nil, errNoImportPathsForAbsoluteFilePath + } + return fileNames, nil + } + absImportPaths, err := absoluteFilePaths(importPaths) + if err != nil { + return nil, err + } + resolvedFileNames := make([]string, 0, len(fileNames)) + for _, fileName := range fileNames { + resolvedFileName, err := resolveFilename(absImportPaths, fileName) + if err != nil { + return nil, err + } + // On Windows, the resolved paths will use "\", but proto imports + // require the use of "/". So fix up here. + if filepath.Separator != '/' { + resolvedFileName = strings.Replace(resolvedFileName, string(filepath.Separator), "/", -1) + } + resolvedFileNames = append(resolvedFileNames, resolvedFileName) + } + return resolvedFileNames, nil +} + +func containsAbsFilePath(filePaths []string) bool { + for _, filePath := range filePaths { + if filepath.IsAbs(filePath) { + return true + } + } + return false +} + +func absoluteFilePaths(filePaths []string) ([]string, error) { + absFilePaths := make([]string, 0, len(filePaths)) + for _, filePath := range filePaths { + absFilePath, err := canonicalize(filePath) + if err != nil { + return nil, err + } + absFilePaths = append(absFilePaths, absFilePath) + } + return absFilePaths, nil +} + +func canonicalize(filePath string) (string, error) { + absPath, err := filepath.Abs(filePath) + if err != nil { + return "", err + } + // this is kind of gross, but it lets us construct a resolved path even if some + // path elements do not exist (a single call to filepath.EvalSymlinks would just + // return an error, ENOENT, in that case). + head := absPath + tail := "" + for { + noLinks, err := filepath.EvalSymlinks(head) + if err == nil { + if tail != "" { + return filepath.Join(noLinks, tail), nil + } + return noLinks, nil + } + + if tail == "" { + tail = filepath.Base(head) + } else { + tail = filepath.Join(filepath.Base(head), tail) + } + head = filepath.Dir(head) + if head == "." { + // ran out of path elements to try to resolve + return absPath, nil + } + } +} + +const dotPrefix = "." + string(filepath.Separator) +const dotDotPrefix = ".." 
+ string(filepath.Separator) + +func resolveFilename(absImportPaths []string, fileName string) (string, error) { + if filepath.IsAbs(fileName) { + return resolveAbsFilename(absImportPaths, fileName) + } + + if !strings.HasPrefix(fileName, dotPrefix) && !strings.HasPrefix(fileName, dotDotPrefix) { + // Use of . and .. are assumed to be relative to current working + // directory. So if those aren't present, check to see if the file is + // relative to an import path. + for _, absImportPath := range absImportPaths { + absFileName := filepath.Join(absImportPath, fileName) + _, err := os.Stat(absFileName) + if err != nil { + continue + } + // found it! it was relative to this import path + return fileName, nil + } + } + + // must be relative to current working dir + return resolveAbsFilename(absImportPaths, fileName) +} + +func resolveAbsFilename(absImportPaths []string, fileName string) (string, error) { + absFileName, err := canonicalize(fileName) + if err != nil { + return "", err + } + for _, absImportPath := range absImportPaths { + if isDescendant(absImportPath, absFileName) { + resolvedPath, err := filepath.Rel(absImportPath, absFileName) + if err != nil { + return "", err + } + return resolvedPath, nil + } + } + return "", fmt.Errorf("%s does not reside in any import path", fileName) +} + +// isDescendant returns true if file is a descendant of dir. Both dir and file must +// be cleaned, absolute paths. +func isDescendant(dir, file string) bool { + dir = filepath.Clean(dir) + cur := file + for { + d := filepath.Dir(cur) + if d == dir { + return true + } + if d == "." || d == cur { + // we've run out of path elements + return false + } + cur = d + } +} diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/test-source-info.txt b/vendor/github.com/jhump/protoreflect/desc/protoparse/test-source-info.txt new file mode 100644 index 00000000..c9bc50b1 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/test-source-info.txt @@ -0,0 +1,6401 @@ +---- desc_test_comments.proto ---- + + +: +desc_test_comments.proto:8:1 +desc_test_comments.proto:156:2 + + + > syntax: +desc_test_comments.proto:8:1 +desc_test_comments.proto:8:19 + Leading detached comment [0]: + This is the first detached comment for the syntax. + + Leading detached comment [1]: + + This is a second detached comment. + + Leading detached comment [2]: + This is a third. + + Leading comments: + Syntax comment... + + Trailing comments: + Syntax trailer. + + + + > package: +desc_test_comments.proto:12:1 +desc_test_comments.proto:12:17 + Leading comments: + And now the package declaration + + + + > options: +desc_test_comments.proto:15:1 +desc_test_comments.proto:15:75 + + + > options > go_package: +desc_test_comments.proto:15:1 +desc_test_comments.proto:15:75 + Leading comments: + option comments FTW!!! + + + + > dependency[0]: +desc_test_comments.proto:17:1 +desc_test_comments.proto:17:45 + + + > public_dependency[0]: +desc_test_comments.proto:17:8 +desc_test_comments.proto:17:14 + + + > dependency[1]: +desc_test_comments.proto:18:1 +desc_test_comments.proto:18:34 + + + > message_type[0]: +desc_test_comments.proto:25:1 +desc_test_comments.proto:113:2 + Leading detached comment [0]: + Multiple white space lines (like above) cannot + be preserved... + + Leading comments: + We need a request for our RPC service below. 
+ + + + > message_type[0] > name: +desc_test_comments.proto:25:68 +desc_test_comments.proto:25:75 + Leading comments: + request with a capital R + Trailing comments: + trailer + + + + > message_type[0] > options: +desc_test_comments.proto:26:9 +desc_test_comments.proto:26:34 + + + > message_type[0] > options > deprecated: +desc_test_comments.proto:26:9 +desc_test_comments.proto:26:34 + Trailing comments: + deprecated! + + + + > message_type[0] > field[0]: +desc_test_comments.proto:29:9 +desc_test_comments.proto:32:132 + Leading comments: + A field comment + + Trailing comments: + field trailer #1... + + + + > message_type[0] > field[0] > label: +desc_test_comments.proto:29:9 +desc_test_comments.proto:29:17 + + + > message_type[0] > field[0] > type: +desc_test_comments.proto:29:18 +desc_test_comments.proto:29:23 + + + > message_type[0] > field[0] > name: +desc_test_comments.proto:29:24 +desc_test_comments.proto:29:27 + + + > message_type[0] > field[0] > number: +desc_test_comments.proto:29:70 +desc_test_comments.proto:29:71 + Leading comments: + tag numero uno + Trailing comments: + tag trailer +that spans multiple lines... +more than two. + + + > message_type[0] > field[0] > options: +desc_test_comments.proto:32:11 +desc_test_comments.proto:32:131 + + + > message_type[0] > field[0] > options > packed: +desc_test_comments.proto:32:12 +desc_test_comments.proto:32:23 + Trailing comments: + packed! + + + > message_type[0] > field[0] > json_name: +desc_test_comments.proto:32:39 +desc_test_comments.proto:32:56 + Trailing comments: + custom JSON! + + + > message_type[0] > field[0] > options > (testprotos.ffubar)[0]: +desc_test_comments.proto:32:77 +desc_test_comments.proto:32:102 + + + > message_type[0] > field[0] > options > (testprotos.ffubarb): +desc_test_comments.proto:32:104 +desc_test_comments.proto:32:130 + + + > message_type[0] > options: +desc_test_comments.proto:35:27 +desc_test_comments.proto:35:61 + + + > message_type[0] > options > (testprotos.mfubar): +desc_test_comments.proto:35:27 +desc_test_comments.proto:35:61 + Leading comments: + lead mfubar + Trailing comments: + trailing mfubar + + + + > message_type[0] > field[1]: +desc_test_comments.proto:42:29 +desc_test_comments.proto:43:77 + Leading detached comment [0]: + some detached comments + + Leading detached comment [1]: + some detached comments with unicode 这个是值 + + Leading detached comment [2]: + Another field comment + + Leading comments: + label comment + + + > message_type[0] > field[1] > label: +desc_test_comments.proto:42:29 +desc_test_comments.proto:42:37 + + + > message_type[0] > field[1] > type: +desc_test_comments.proto:42:57 +desc_test_comments.proto:42:63 + Leading detached comment [0]: + type comment + + + > message_type[0] > field[1] > name: +desc_test_comments.proto:42:83 +desc_test_comments.proto:42:87 + Leading detached comment [0]: + name comment + + + > message_type[0] > field[1] > number: +desc_test_comments.proto:42:90 +desc_test_comments.proto:42:91 + + + > message_type[0] > field[1] > options: +desc_test_comments.proto:43:17 +desc_test_comments.proto:43:76 + + + > message_type[0] > field[1] > default_value: +desc_test_comments.proto:43:37 +desc_test_comments.proto:43:54 + Leading detached comment [0]: + default lead + Trailing comments: + default trail + + + > message_type[0] > extension_range: +desc_test_comments.proto:46:9 +desc_test_comments.proto:46:31 + Leading comments: + extension range comments are (sadly) not preserved + + + + > message_type[0] > extension_range[0]: 
+desc_test_comments.proto:46:20 +desc_test_comments.proto:46:30 + + + > message_type[0] > extension_range[0] > start: +desc_test_comments.proto:46:20 +desc_test_comments.proto:46:23 + + + > message_type[0] > extension_range[0] > end: +desc_test_comments.proto:46:27 +desc_test_comments.proto:46:30 + + + > message_type[0] > extension_range: +desc_test_comments.proto:47:9 +desc_test_comments.proto:47:109 + + + > message_type[0] > extension_range[1]: +desc_test_comments.proto:47:20 +desc_test_comments.proto:47:30 + + + > message_type[0] > extension_range[1] > start: +desc_test_comments.proto:47:20 +desc_test_comments.proto:47:23 + + + > message_type[0] > extension_range[1] > end: +desc_test_comments.proto:47:27 +desc_test_comments.proto:47:30 + + + > message_type[0] > extension_range[1] > options: +desc_test_comments.proto:47:31 +desc_test_comments.proto:47:108 + + + > message_type[0] > extension_range[1] > options > (testprotos.exfubarb): +desc_test_comments.proto:47:32 +desc_test_comments.proto:47:74 + + + > message_type[0] > extension_range[1] > options > (testprotos.exfubar)[0]: +desc_test_comments.proto:47:76 +desc_test_comments.proto:47:107 + + + > message_type[0] > reserved_range: +desc_test_comments.proto:51:48 +desc_test_comments.proto:51:77 + Leading detached comment [0]: + another detached comment + + Leading comments: + same for reserved range comments + + + > message_type[0] > reserved_range[0]: +desc_test_comments.proto:51:57 +desc_test_comments.proto:51:65 + + + > message_type[0] > reserved_range[0] > start: +desc_test_comments.proto:51:57 +desc_test_comments.proto:51:59 + + + > message_type[0] > reserved_range[0] > end: +desc_test_comments.proto:51:63 +desc_test_comments.proto:51:65 + + + > message_type[0] > reserved_range[1]: +desc_test_comments.proto:51:67 +desc_test_comments.proto:51:75 + + + > message_type[0] > reserved_range[1] > start: +desc_test_comments.proto:51:67 +desc_test_comments.proto:51:69 + + + > message_type[0] > reserved_range[1] > end: +desc_test_comments.proto:51:73 +desc_test_comments.proto:51:75 + + + > message_type[0] > reserved_name: +desc_test_comments.proto:52:9 +desc_test_comments.proto:52:38 + Trailing comments: + reserved trailers + + + > message_type[0] > reserved_name[0]: +desc_test_comments.proto:52:18 +desc_test_comments.proto:52:23 + + + > message_type[0] > reserved_name[1]: +desc_test_comments.proto:52:25 +desc_test_comments.proto:52:30 + + + > message_type[0] > reserved_name[2]: +desc_test_comments.proto:52:32 +desc_test_comments.proto:52:37 + + + > message_type[0] > field[2]: +desc_test_comments.proto:55:9 +desc_test_comments.proto:69:10 + + + > message_type[0] > field[2] > label: +desc_test_comments.proto:55:9 +desc_test_comments.proto:55:17 + + + > message_type[0] > field[2] > type: +desc_test_comments.proto:55:18 +desc_test_comments.proto:55:23 + + + > message_type[0] > field[2] > name: +desc_test_comments.proto:55:41 +desc_test_comments.proto:55:47 + + + > message_type[0] > field[2] > number: +desc_test_comments.proto:55:50 +desc_test_comments.proto:55:51 + + + > message_type[0] > nested_type[0]: +desc_test_comments.proto:55:9 +desc_test_comments.proto:69:10 + Leading comments: + Group comment with emoji 😀 😍 👻 ❤ 💯 💥 🐶 🦂 🥑 🍻 🌍 🚕 🪐 + + Trailing comments: + trailer for Extras + + + + > message_type[0] > nested_type[0] > name: +desc_test_comments.proto:55:41 +desc_test_comments.proto:55:47 + Leading detached comment [0]: + group name + + + > message_type[0] > field[2] > type_name: +desc_test_comments.proto:55:41 
+desc_test_comments.proto:55:47 + + + > message_type[0] > nested_type[0] > options: +desc_test_comments.proto:59:17 +desc_test_comments.proto:59:52 + + + > message_type[0] > nested_type[0] > options > (testprotos.mfubar): +desc_test_comments.proto:59:17 +desc_test_comments.proto:59:52 + Leading comments: + this is a custom option + + + + > message_type[0] > nested_type[0] > field[0]: +desc_test_comments.proto:61:17 +desc_test_comments.proto:61:41 + + + > message_type[0] > nested_type[0] > field[0] > label: +desc_test_comments.proto:61:17 +desc_test_comments.proto:61:25 + + + > message_type[0] > nested_type[0] > field[0] > type: +desc_test_comments.proto:61:26 +desc_test_comments.proto:61:32 + + + > message_type[0] > nested_type[0] > field[0] > name: +desc_test_comments.proto:61:33 +desc_test_comments.proto:61:36 + + + > message_type[0] > nested_type[0] > field[0] > number: +desc_test_comments.proto:61:39 +desc_test_comments.proto:61:40 + + + > message_type[0] > nested_type[0] > field[1]: +desc_test_comments.proto:62:17 +desc_test_comments.proto:62:40 + + + > message_type[0] > nested_type[0] > field[1] > label: +desc_test_comments.proto:62:17 +desc_test_comments.proto:62:25 + + + > message_type[0] > nested_type[0] > field[1] > type: +desc_test_comments.proto:62:26 +desc_test_comments.proto:62:31 + + + > message_type[0] > nested_type[0] > field[1] > name: +desc_test_comments.proto:62:32 +desc_test_comments.proto:62:35 + + + > message_type[0] > nested_type[0] > field[1] > number: +desc_test_comments.proto:62:38 +desc_test_comments.proto:62:39 + + + > message_type[0] > nested_type[0] > options: +desc_test_comments.proto:64:17 +desc_test_comments.proto:64:64 + + + > message_type[0] > nested_type[0] > options > no_standard_descriptor_accessor: +desc_test_comments.proto:64:17 +desc_test_comments.proto:64:64 + + + > message_type[0] > nested_type[0] > field[2]: +desc_test_comments.proto:67:17 +desc_test_comments.proto:67:41 + Leading comments: + Leading comment... + + Trailing comments: + Trailing comment... + + + + > message_type[0] > nested_type[0] > field[2] > label: +desc_test_comments.proto:67:17 +desc_test_comments.proto:67:25 + + + > message_type[0] > nested_type[0] > field[2] > type: +desc_test_comments.proto:67:26 +desc_test_comments.proto:67:32 + + + > message_type[0] > nested_type[0] > field[2] > name: +desc_test_comments.proto:67:33 +desc_test_comments.proto:67:36 + + + > message_type[0] > nested_type[0] > field[2] > number: +desc_test_comments.proto:67:39 +desc_test_comments.proto:67:40 + + + > message_type[0] > enum_type[0]: +desc_test_comments.proto:71:9 +desc_test_comments.proto:93:10 + Trailing comments: + trailer for enum + + + + > message_type[0] > enum_type[0] > name: +desc_test_comments.proto:71:14 +desc_test_comments.proto:71:29 + Trailing comments: + "super"! + + + + > message_type[0] > enum_type[0] > options: +desc_test_comments.proto:75:17 +desc_test_comments.proto:75:43 + + + > message_type[0] > enum_type[0] > options > allow_alias: +desc_test_comments.proto:75:17 +desc_test_comments.proto:75:43 + Leading comments: + allow_alias comments! 
+ + + + > message_type[0] > enum_type[0] > value[0]: +desc_test_comments.proto:77:17 +desc_test_comments.proto:77:86 + + + > message_type[0] > enum_type[0] > value[0] > name: +desc_test_comments.proto:77:17 +desc_test_comments.proto:77:22 + + + > message_type[0] > enum_type[0] > value[0] > number: +desc_test_comments.proto:77:25 +desc_test_comments.proto:77:26 + + + > message_type[0] > enum_type[0] > value[0] > options: +desc_test_comments.proto:77:27 +desc_test_comments.proto:77:85 + + + > message_type[0] > enum_type[0] > value[0] > options > (testprotos.evfubars): +desc_test_comments.proto:77:28 +desc_test_comments.proto:77:56 + + + > message_type[0] > enum_type[0] > value[0] > options > (testprotos.evfubar): +desc_test_comments.proto:77:58 +desc_test_comments.proto:77:84 + + + > message_type[0] > enum_type[0] > value[1]: +desc_test_comments.proto:78:17 +desc_test_comments.proto:78:100 + + + > message_type[0] > enum_type[0] > value[1] > name: +desc_test_comments.proto:78:17 +desc_test_comments.proto:78:22 + + + > message_type[0] > enum_type[0] > value[1] > number: +desc_test_comments.proto:78:25 +desc_test_comments.proto:78:26 + + + > message_type[0] > enum_type[0] > value[1] > options: +desc_test_comments.proto:78:27 +desc_test_comments.proto:78:99 + + + > message_type[0] > enum_type[0] > value[1] > options > (testprotos.evfubaruf): +desc_test_comments.proto:78:29 +desc_test_comments.proto:78:57 + + + > message_type[0] > enum_type[0] > value[1] > options > (testprotos.evfubaru): +desc_test_comments.proto:78:73 +desc_test_comments.proto:78:98 + Leading detached comment [0]: + swoosh! + + + > message_type[0] > enum_type[0] > value[2]: +desc_test_comments.proto:79:17 +desc_test_comments.proto:79:27 + + + > message_type[0] > enum_type[0] > value[2] > name: +desc_test_comments.proto:79:17 +desc_test_comments.proto:79:22 + + + > message_type[0] > enum_type[0] > value[2] > number: +desc_test_comments.proto:79:25 +desc_test_comments.proto:79:26 + + + > message_type[0] > enum_type[0] > value[3]: +desc_test_comments.proto:80:17 +desc_test_comments.proto:80:28 + + + > message_type[0] > enum_type[0] > value[3] > name: +desc_test_comments.proto:80:17 +desc_test_comments.proto:80:23 + + + > message_type[0] > enum_type[0] > value[3] > number: +desc_test_comments.proto:80:26 +desc_test_comments.proto:80:27 + + + > message_type[0] > enum_type[0] > options: +desc_test_comments.proto:82:17 +desc_test_comments.proto:82:52 + + + > message_type[0] > enum_type[0] > options > (testprotos.efubars): +desc_test_comments.proto:82:17 +desc_test_comments.proto:82:52 + + + > message_type[0] > enum_type[0] > value[4]: +desc_test_comments.proto:84:17 +desc_test_comments.proto:84:27 + + + > message_type[0] > enum_type[0] > value[4] > name: +desc_test_comments.proto:84:17 +desc_test_comments.proto:84:22 + + + > message_type[0] > enum_type[0] > value[4] > number: +desc_test_comments.proto:84:25 +desc_test_comments.proto:84:26 + + + > message_type[0] > enum_type[0] > value[5]: +desc_test_comments.proto:85:17 +desc_test_comments.proto:85:29 + + + > message_type[0] > enum_type[0] > value[5] > name: +desc_test_comments.proto:85:17 +desc_test_comments.proto:85:24 + + + > message_type[0] > enum_type[0] > value[5] > number: +desc_test_comments.proto:85:27 +desc_test_comments.proto:85:28 + + + > message_type[0] > enum_type[0] > value[6]: +desc_test_comments.proto:86:17 +desc_test_comments.proto:86:60 + + + > message_type[0] > enum_type[0] > value[6] > name: +desc_test_comments.proto:86:17 +desc_test_comments.proto:86:24 + + + > 
message_type[0] > enum_type[0] > value[6] > number: +desc_test_comments.proto:86:27 +desc_test_comments.proto:86:28 + + + > message_type[0] > enum_type[0] > value[6] > options: +desc_test_comments.proto:86:29 +desc_test_comments.proto:86:59 + + + > message_type[0] > enum_type[0] > value[6] > options > (testprotos.evfubarsf): +desc_test_comments.proto:86:30 +desc_test_comments.proto:86:58 + + + > message_type[0] > enum_type[0] > value[7]: +desc_test_comments.proto:87:17 +desc_test_comments.proto:87:28 + + + > message_type[0] > enum_type[0] > value[7] > name: +desc_test_comments.proto:87:17 +desc_test_comments.proto:87:23 + + + > message_type[0] > enum_type[0] > value[7] > number: +desc_test_comments.proto:87:26 +desc_test_comments.proto:87:27 + + + > message_type[0] > enum_type[0] > value[8]: +desc_test_comments.proto:88:17 +desc_test_comments.proto:88:31 + + + > message_type[0] > enum_type[0] > value[8] > name: +desc_test_comments.proto:88:17 +desc_test_comments.proto:88:26 + + + > message_type[0] > enum_type[0] > value[8] > number: +desc_test_comments.proto:88:29 +desc_test_comments.proto:88:30 + + + > message_type[0] > enum_type[0] > value[9]: +desc_test_comments.proto:89:17 +desc_test_comments.proto:89:27 + + + > message_type[0] > enum_type[0] > value[9] > name: +desc_test_comments.proto:89:17 +desc_test_comments.proto:89:22 + + + > message_type[0] > enum_type[0] > value[9] > number: +desc_test_comments.proto:89:25 +desc_test_comments.proto:89:26 + + + > message_type[0] > enum_type[0] > value[10]: +desc_test_comments.proto:90:17 +desc_test_comments.proto:90:31 + + + > message_type[0] > enum_type[0] > value[10] > name: +desc_test_comments.proto:90:17 +desc_test_comments.proto:90:23 + + + > message_type[0] > enum_type[0] > value[10] > number: +desc_test_comments.proto:90:26 +desc_test_comments.proto:90:30 + + + > message_type[0] > enum_type[0] > options: +desc_test_comments.proto:92:17 +desc_test_comments.proto:92:50 + + + > message_type[0] > enum_type[0] > options > (testprotos.efubar): +desc_test_comments.proto:92:17 +desc_test_comments.proto:92:50 + + + > message_type[0] > oneof_decl[0]: +desc_test_comments.proto:96:9 +desc_test_comments.proto:101:10 + Leading comments: + can be this or that + + Trailing comments: + trailer for oneof abc + + + + > message_type[0] > oneof_decl[0] > name: +desc_test_comments.proto:96:15 +desc_test_comments.proto:96:18 + + + > message_type[0] > field[3]: +desc_test_comments.proto:99:17 +desc_test_comments.proto:99:33 + + + > message_type[0] > field[3] > type: +desc_test_comments.proto:99:17 +desc_test_comments.proto:99:23 + + + > message_type[0] > field[3] > name: +desc_test_comments.proto:99:24 +desc_test_comments.proto:99:28 + + + > message_type[0] > field[3] > number: +desc_test_comments.proto:99:31 +desc_test_comments.proto:99:32 + + + > message_type[0] > field[4]: +desc_test_comments.proto:100:17 +desc_test_comments.proto:100:32 + + + > message_type[0] > field[4] > type: +desc_test_comments.proto:100:17 +desc_test_comments.proto:100:22 + + + > message_type[0] > field[4] > name: +desc_test_comments.proto:100:23 +desc_test_comments.proto:100:27 + + + > message_type[0] > field[4] > number: +desc_test_comments.proto:100:30 +desc_test_comments.proto:100:31 + + + > message_type[0] > oneof_decl[1]: +desc_test_comments.proto:103:9 +desc_test_comments.proto:109:10 + Leading comments: + can be these or those + + + + > message_type[0] > oneof_decl[1] > name: +desc_test_comments.proto:103:15 +desc_test_comments.proto:103:18 + + + > message_type[0] > 
oneof_decl[1] > options: +desc_test_comments.proto:105:17 +desc_test_comments.proto:105:89 + + + > message_type[0] > oneof_decl[1] > options > (testprotos.oofubar)[0]: +desc_test_comments.proto:105:17 +desc_test_comments.proto:105:89 + Leading comments: + whoops? + + + + > message_type[0] > field[5]: +desc_test_comments.proto:107:17 +desc_test_comments.proto:107:34 + + + > message_type[0] > field[5] > type: +desc_test_comments.proto:107:17 +desc_test_comments.proto:107:23 + + + > message_type[0] > field[5] > name: +desc_test_comments.proto:107:24 +desc_test_comments.proto:107:29 + + + > message_type[0] > field[5] > number: +desc_test_comments.proto:107:32 +desc_test_comments.proto:107:33 + + + > message_type[0] > field[6]: +desc_test_comments.proto:108:17 +desc_test_comments.proto:108:33 + + + > message_type[0] > field[6] > type: +desc_test_comments.proto:108:17 +desc_test_comments.proto:108:22 + + + > message_type[0] > field[6] > name: +desc_test_comments.proto:108:23 +desc_test_comments.proto:108:28 + + + > message_type[0] > field[6] > number: +desc_test_comments.proto:108:31 +desc_test_comments.proto:108:32 + + + > message_type[0] > field[7]: +desc_test_comments.proto:112:9 +desc_test_comments.proto:112:40 + Leading comments: + map field + + + + > message_type[0] > field[7] > type_name: +desc_test_comments.proto:112:9 +desc_test_comments.proto:112:28 + + + > message_type[0] > field[7] > name: +desc_test_comments.proto:112:29 +desc_test_comments.proto:112:35 + + + > message_type[0] > field[7] > number: +desc_test_comments.proto:112:38 +desc_test_comments.proto:112:39 + + + > extension: +desc_test_comments.proto:117:1 +desc_test_comments.proto:128:2 + Leading detached comment [0]: + And next we'll need some extensions... + + Trailing comments: + trailer for extend block + + + + > extension[0]: +desc_test_comments.proto:125:9 +desc_test_comments.proto:125:37 + Leading comments: + comment for guid1 + + + + > extension[0] > extendee: +desc_test_comments.proto:119:1 +desc_test_comments.proto:119:8 + Leading comments: + extendee comment + + + + > extension[0] > label: +desc_test_comments.proto:125:9 +desc_test_comments.proto:125:17 + + + > extension[0] > type: +desc_test_comments.proto:125:18 +desc_test_comments.proto:125:24 + + + > extension[0] > name: +desc_test_comments.proto:125:25 +desc_test_comments.proto:125:30 + + + > extension[0] > number: +desc_test_comments.proto:125:33 +desc_test_comments.proto:125:36 + + + > extension[1]: +desc_test_comments.proto:127:9 +desc_test_comments.proto:127:37 + Leading comments: + ... 
and a comment for guid2 + + + + > extension[1] > extendee: +desc_test_comments.proto:119:1 +desc_test_comments.proto:119:8 + + + > extension[1] > label: +desc_test_comments.proto:127:9 +desc_test_comments.proto:127:17 + + + > extension[1] > type: +desc_test_comments.proto:127:18 +desc_test_comments.proto:127:24 + + + > extension[1] > name: +desc_test_comments.proto:127:25 +desc_test_comments.proto:127:30 + + + > extension[1] > number: +desc_test_comments.proto:127:33 +desc_test_comments.proto:127:36 + + + > message_type[1]: +desc_test_comments.proto:131:1 +desc_test_comments.proto:131:115 + Trailing comments: + trailer for AnEmptyMessage + + + > message_type[1] > name: +desc_test_comments.proto:131:36 +desc_test_comments.proto:131:50 + Leading detached comment [0]: + name leading comment + + + > service[0]: +desc_test_comments.proto:134:1 +desc_test_comments.proto:156:2 + Leading comments: + Service comment + + Trailing comments: + service trailer + that spans multiple lines + + + + > service[0] > name: +desc_test_comments.proto:134:28 +desc_test_comments.proto:134:38 + Leading detached comment [0]: + service name + + + > service[0] > options: +desc_test_comments.proto:139:9 +desc_test_comments.proto:139:43 + + + > service[0] > options > (testprotos.sfubar) > id: +desc_test_comments.proto:139:9 +desc_test_comments.proto:139:43 + Leading comments: + option that sets field + + + + > service[0] > options: +desc_test_comments.proto:141:9 +desc_test_comments.proto:141:47 + + + > service[0] > options > (testprotos.sfubar) > name: +desc_test_comments.proto:141:9 +desc_test_comments.proto:141:47 + Leading comments: + another option that sets field + + + + > service[0] > options: +desc_test_comments.proto:142:9 +desc_test_comments.proto:142:35 + + + > service[0] > options > deprecated: +desc_test_comments.proto:142:9 +desc_test_comments.proto:142:35 + Trailing comments: + DEPRECATED! 
+ + + + > service[0] > options: +desc_test_comments.proto:144:9 +desc_test_comments.proto:144:45 + + + > service[0] > options > (testprotos.sfubare): +desc_test_comments.proto:144:9 +desc_test_comments.proto:144:45 + + + > service[0] > method[0]: +desc_test_comments.proto:147:9 +desc_test_comments.proto:148:84 + Leading comments: + Method comment + + Trailing comments: + compact method trailer + + + + > service[0] > method[0] > name: +desc_test_comments.proto:147:28 +desc_test_comments.proto:147:40 + Leading detached comment [0]: + rpc name + + + > service[0] > method[0] > client_streaming: +desc_test_comments.proto:147:73 +desc_test_comments.proto:147:79 + Leading detached comment [0]: + comment B + + + > service[0] > method[0] > input_type: +desc_test_comments.proto:147:96 +desc_test_comments.proto:147:103 + Leading detached comment [0]: + comment C + + + > service[0] > method[0] > output_type: +desc_test_comments.proto:148:57 +desc_test_comments.proto:148:64 + Leading detached comment [0]: +comment E + + + > service[0] > method[1]: +desc_test_comments.proto:150:9 +desc_test_comments.proto:155:10 + Trailing comments: + trailer for method + + + + > service[0] > method[1] > name: +desc_test_comments.proto:150:13 +desc_test_comments.proto:150:21 + + + > service[0] > method[1] > input_type: +desc_test_comments.proto:150:23 +desc_test_comments.proto:150:30 + + + > service[0] > method[1] > output_type: +desc_test_comments.proto:150:41 +desc_test_comments.proto:150:62 + + + > service[0] > method[1] > options: +desc_test_comments.proto:152:17 +desc_test_comments.proto:152:42 + + + > service[0] > method[1] > options > deprecated: +desc_test_comments.proto:152:17 +desc_test_comments.proto:152:42 + Leading comments: + this RPC is deprecated! + + + + > service[0] > method[1] > options: +desc_test_comments.proto:153:17 +desc_test_comments.proto:153:53 + + + > service[0] > method[1] > options > (testprotos.mtfubar)[0]: +desc_test_comments.proto:153:17 +desc_test_comments.proto:153:53 + + + > service[0] > method[1] > options: +desc_test_comments.proto:154:17 +desc_test_comments.proto:154:56 + + + > service[0] > method[1] > options > (testprotos.mtfubard): +desc_test_comments.proto:154:17 +desc_test_comments.proto:154:56 +---- desc_test_complex.proto ---- + + +: +desc_test_complex.proto:1:1 +desc_test_complex.proto:298:2 + + + > syntax: +desc_test_complex.proto:1:1 +desc_test_complex.proto:1:19 + + + > package: +desc_test_complex.proto:3:1 +desc_test_complex.proto:3:17 + + + > options: +desc_test_complex.proto:5:1 +desc_test_complex.proto:5:73 + + + > options > go_package: +desc_test_complex.proto:5:1 +desc_test_complex.proto:5:73 + + + > dependency[0]: +desc_test_complex.proto:7:1 +desc_test_complex.proto:7:43 + + + > message_type[0]: +desc_test_complex.proto:9:1 +desc_test_complex.proto:14:2 + + + > message_type[0] > name: +desc_test_complex.proto:9:9 +desc_test_complex.proto:9:15 + + + > message_type[0] > field[0]: +desc_test_complex.proto:10:9 +desc_test_complex.proto:10:34 + + + > message_type[0] > field[0] > label: +desc_test_complex.proto:10:9 +desc_test_complex.proto:10:17 + + + > message_type[0] > field[0] > type: +desc_test_complex.proto:10:18 +desc_test_complex.proto:10:24 + + + > message_type[0] > field[0] > name: +desc_test_complex.proto:10:25 +desc_test_complex.proto:10:29 + + + > message_type[0] > field[0] > number: +desc_test_complex.proto:10:32 +desc_test_complex.proto:10:33 + + + > message_type[0] > field[1]: +desc_test_complex.proto:11:9 +desc_test_complex.proto:11:32 + + + > 
message_type[0] > field[1] > label: +desc_test_complex.proto:11:9 +desc_test_complex.proto:11:17 + + + > message_type[0] > field[1] > type: +desc_test_complex.proto:11:18 +desc_test_complex.proto:11:24 + + + > message_type[0] > field[1] > name: +desc_test_complex.proto:11:25 +desc_test_complex.proto:11:27 + + + > message_type[0] > field[1] > number: +desc_test_complex.proto:11:30 +desc_test_complex.proto:11:31 + + + > message_type[0] > field[2]: +desc_test_complex.proto:12:9 +desc_test_complex.proto:12:35 + Trailing comments: + default JSON name will be capitalized + + + + > message_type[0] > field[2] > label: +desc_test_complex.proto:12:9 +desc_test_complex.proto:12:17 + + + > message_type[0] > field[2] > type: +desc_test_complex.proto:12:18 +desc_test_complex.proto:12:23 + + + > message_type[0] > field[2] > name: +desc_test_complex.proto:12:24 +desc_test_complex.proto:12:30 + + + > message_type[0] > field[2] > number: +desc_test_complex.proto:12:33 +desc_test_complex.proto:12:34 + + + > message_type[0] > field[3]: +desc_test_complex.proto:13:9 +desc_test_complex.proto:13:29 + Trailing comments: + default JSON name will be empty(!) + + + + > message_type[0] > field[3] > label: +desc_test_complex.proto:13:9 +desc_test_complex.proto:13:17 + + + > message_type[0] > field[3] > type: +desc_test_complex.proto:13:18 +desc_test_complex.proto:13:22 + + + > message_type[0] > field[3] > name: +desc_test_complex.proto:13:23 +desc_test_complex.proto:13:24 + + + > message_type[0] > field[3] > number: +desc_test_complex.proto:13:27 +desc_test_complex.proto:13:28 + + + > extension: +desc_test_complex.proto:16:1 +desc_test_complex.proto:20:2 + + + > extension[0]: +desc_test_complex.proto:19:9 +desc_test_complex.proto:19:39 + + + > extension[0] > extendee: +desc_test_complex.proto:16:8 +desc_test_complex.proto:18:25 + + + > extension[0] > label: +desc_test_complex.proto:19:9 +desc_test_complex.proto:19:17 + + + > extension[0] > type: +desc_test_complex.proto:19:18 +desc_test_complex.proto:19:24 + + + > extension[0] > name: +desc_test_complex.proto:19:25 +desc_test_complex.proto:19:30 + + + > extension[0] > number: +desc_test_complex.proto:19:33 +desc_test_complex.proto:19:38 + + + > message_type[1]: +desc_test_complex.proto:22:1 +desc_test_complex.proto:61:2 + + + > message_type[1] > name: +desc_test_complex.proto:22:9 +desc_test_complex.proto:22:13 + + + > message_type[1] > field[0]: +desc_test_complex.proto:23:9 +desc_test_complex.proto:23:55 + + + > message_type[1] > field[0] > label: +desc_test_complex.proto:23:9 +desc_test_complex.proto:23:17 + + + > message_type[1] > field[0] > type: +desc_test_complex.proto:23:18 +desc_test_complex.proto:23:24 + + + > message_type[1] > field[0] > name: +desc_test_complex.proto:23:25 +desc_test_complex.proto:23:28 + + + > message_type[1] > field[0] > number: +desc_test_complex.proto:23:31 +desc_test_complex.proto:23:32 + + + > message_type[1] > field[0] > options: +desc_test_complex.proto:23:33 +desc_test_complex.proto:23:54 + + + > message_type[1] > field[0] > json_name: +desc_test_complex.proto:23:34 +desc_test_complex.proto:23:53 + + + > message_type[1] > field[1]: +desc_test_complex.proto:24:9 +desc_test_complex.proto:24:34 + + + > message_type[1] > field[1] > label: +desc_test_complex.proto:24:9 +desc_test_complex.proto:24:17 + + + > message_type[1] > field[1] > type: +desc_test_complex.proto:24:18 +desc_test_complex.proto:24:23 + + + > message_type[1] > field[1] > name: +desc_test_complex.proto:24:24 +desc_test_complex.proto:24:29 + + + > message_type[1] > 
field[1] > number: +desc_test_complex.proto:24:32 +desc_test_complex.proto:24:33 + + + > message_type[1] > field[2]: +desc_test_complex.proto:25:9 +desc_test_complex.proto:25:31 + + + > message_type[1] > field[2] > label: +desc_test_complex.proto:25:9 +desc_test_complex.proto:25:17 + + + > message_type[1] > field[2] > type_name: +desc_test_complex.proto:25:18 +desc_test_complex.proto:25:24 + + + > message_type[1] > field[2] > name: +desc_test_complex.proto:25:25 +desc_test_complex.proto:25:26 + + + > message_type[1] > field[2] > number: +desc_test_complex.proto:25:29 +desc_test_complex.proto:25:30 + + + > message_type[1] > field[3]: +desc_test_complex.proto:26:9 +desc_test_complex.proto:26:31 + + + > message_type[1] > field[3] > label: +desc_test_complex.proto:26:9 +desc_test_complex.proto:26:17 + + + > message_type[1] > field[3] > type_name: +desc_test_complex.proto:26:18 +desc_test_complex.proto:26:24 + + + > message_type[1] > field[3] > name: +desc_test_complex.proto:26:25 +desc_test_complex.proto:26:26 + + + > message_type[1] > field[3] > number: +desc_test_complex.proto:26:29 +desc_test_complex.proto:26:30 + + + > message_type[1] > field[4]: +desc_test_complex.proto:27:9 +desc_test_complex.proto:27:34 + + + > message_type[1] > field[4] > type_name: +desc_test_complex.proto:27:9 +desc_test_complex.proto:27:27 + + + > message_type[1] > field[4] > name: +desc_test_complex.proto:27:28 +desc_test_complex.proto:27:29 + + + > message_type[1] > field[4] > number: +desc_test_complex.proto:27:32 +desc_test_complex.proto:27:33 + + + > message_type[1] > field[5]: +desc_test_complex.proto:29:9 +desc_test_complex.proto:29:67 + + + > message_type[1] > field[5] > label: +desc_test_complex.proto:29:9 +desc_test_complex.proto:29:17 + + + > message_type[1] > field[5] > type: +desc_test_complex.proto:29:18 +desc_test_complex.proto:29:23 + + + > message_type[1] > field[5] > name: +desc_test_complex.proto:29:24 +desc_test_complex.proto:29:25 + + + > message_type[1] > field[5] > number: +desc_test_complex.proto:29:28 +desc_test_complex.proto:29:29 + + + > message_type[1] > field[5] > options: +desc_test_complex.proto:29:30 +desc_test_complex.proto:29:66 + + + > message_type[1] > field[5] > default_value: +desc_test_complex.proto:29:31 +desc_test_complex.proto:29:65 + + + > message_type[1] > extension_range: +desc_test_complex.proto:31:9 +desc_test_complex.proto:31:31 + + + > message_type[1] > extension_range[0]: +desc_test_complex.proto:31:20 +desc_test_complex.proto:31:30 + + + > message_type[1] > extension_range[0] > start: +desc_test_complex.proto:31:20 +desc_test_complex.proto:31:23 + + + > message_type[1] > extension_range[0] > end: +desc_test_complex.proto:31:27 +desc_test_complex.proto:31:30 + + + > message_type[1] > extension_range: +desc_test_complex.proto:33:9 +desc_test_complex.proto:33:91 + + + > message_type[1] > extension_range[1]: +desc_test_complex.proto:33:20 +desc_test_complex.proto:33:23 + + + > message_type[1] > extension_range[1] > start: +desc_test_complex.proto:33:20 +desc_test_complex.proto:33:23 + + + > message_type[1] > extension_range[1] > end: +desc_test_complex.proto:33:20 +desc_test_complex.proto:33:23 + + + > message_type[1] > extension_range[2]: +desc_test_complex.proto:33:25 +desc_test_complex.proto:33:35 + + + > message_type[1] > extension_range[2] > start: +desc_test_complex.proto:33:25 +desc_test_complex.proto:33:28 + + + > message_type[1] > extension_range[2] > end: +desc_test_complex.proto:33:32 +desc_test_complex.proto:33:35 + + + > message_type[1] > 
extension_range[3]: +desc_test_complex.proto:33:37 +desc_test_complex.proto:33:47 + + + > message_type[1] > extension_range[3] > start: +desc_test_complex.proto:33:37 +desc_test_complex.proto:33:40 + + + > message_type[1] > extension_range[3] > end: +desc_test_complex.proto:33:44 +desc_test_complex.proto:33:47 + + + > message_type[1] > extension_range[4]: +desc_test_complex.proto:33:49 +desc_test_complex.proto:33:61 + + + > message_type[1] > extension_range[4] > start: +desc_test_complex.proto:33:49 +desc_test_complex.proto:33:54 + + + > message_type[1] > extension_range[4] > end: +desc_test_complex.proto:33:58 +desc_test_complex.proto:33:61 + + + > message_type[1] > extension_range[1] > options: +desc_test_complex.proto:33:62 +desc_test_complex.proto:33:90 + + + > message_type[1] > extension_range[1] > options > (foo.bar.label): +desc_test_complex.proto:33:63 +desc_test_complex.proto:33:89 + + + > message_type[1] > extension_range[2] > options: +desc_test_complex.proto:33:62 +desc_test_complex.proto:33:90 + + + > message_type[1] > extension_range[2] > options > (foo.bar.label): +desc_test_complex.proto:33:63 +desc_test_complex.proto:33:89 + + + > message_type[1] > extension_range[3] > options: +desc_test_complex.proto:33:62 +desc_test_complex.proto:33:90 + + + > message_type[1] > extension_range[3] > options > (foo.bar.label): +desc_test_complex.proto:33:63 +desc_test_complex.proto:33:89 + + + > message_type[1] > extension_range[4] > options: +desc_test_complex.proto:33:62 +desc_test_complex.proto:33:90 + + + > message_type[1] > extension_range[4] > options > (foo.bar.label): +desc_test_complex.proto:33:63 +desc_test_complex.proto:33:89 + + + > message_type[1] > nested_type[1]: +desc_test_complex.proto:35:9 +desc_test_complex.proto:60:10 + + + > message_type[1] > nested_type[1] > name: +desc_test_complex.proto:35:17 +desc_test_complex.proto:35:23 + + + > message_type[1] > nested_type[1] > extension: +desc_test_complex.proto:36:17 +desc_test_complex.proto:38:18 + + + > message_type[1] > nested_type[1] > extension[0]: +desc_test_complex.proto:37:25 +desc_test_complex.proto:37:56 + + + > message_type[1] > nested_type[1] > extension[0] > extendee: +desc_test_complex.proto:36:24 +desc_test_complex.proto:36:54 + + + > message_type[1] > nested_type[1] > extension[0] > label: +desc_test_complex.proto:37:25 +desc_test_complex.proto:37:33 + + + > message_type[1] > nested_type[1] > extension[0] > type: +desc_test_complex.proto:37:34 +desc_test_complex.proto:37:39 + + + > message_type[1] > nested_type[1] > extension[0] > name: +desc_test_complex.proto:37:40 +desc_test_complex.proto:37:47 + + + > message_type[1] > nested_type[1] > extension[0] > number: +desc_test_complex.proto:37:50 +desc_test_complex.proto:37:55 + + + > message_type[1] > nested_type[1] > nested_type[0]: +desc_test_complex.proto:39:17 +desc_test_complex.proto:59:18 + + + > message_type[1] > nested_type[1] > nested_type[0] > name: +desc_test_complex.proto:39:25 +desc_test_complex.proto:39:38 + + + > message_type[1] > nested_type[1] > nested_type[0] > enum_type[0]: +desc_test_complex.proto:40:25 +desc_test_complex.proto:48:26 + + + > message_type[1] > nested_type[1] > nested_type[0] > enum_type[0] > name: +desc_test_complex.proto:40:30 +desc_test_complex.proto:40:33 + + + > message_type[1] > nested_type[1] > nested_type[0] > enum_type[0] > value[0]: +desc_test_complex.proto:41:33 +desc_test_complex.proto:41:40 + + + > message_type[1] > nested_type[1] > nested_type[0] > enum_type[0] > value[0] > name: +desc_test_complex.proto:41:33 
+desc_test_complex.proto:41:35 + + + > message_type[1] > nested_type[1] > nested_type[0] > enum_type[0] > value[0] > number: +desc_test_complex.proto:41:38 +desc_test_complex.proto:41:39 + + + > message_type[1] > nested_type[1] > nested_type[0] > enum_type[0] > value[1]: +desc_test_complex.proto:42:33 +desc_test_complex.proto:42:40 + + + > message_type[1] > nested_type[1] > nested_type[0] > enum_type[0] > value[1] > name: +desc_test_complex.proto:42:33 +desc_test_complex.proto:42:35 + + + > message_type[1] > nested_type[1] > nested_type[0] > enum_type[0] > value[1] > number: +desc_test_complex.proto:42:38 +desc_test_complex.proto:42:39 + + + > message_type[1] > nested_type[1] > nested_type[0] > enum_type[0] > value[2]: +desc_test_complex.proto:43:33 +desc_test_complex.proto:43:40 + + + > message_type[1] > nested_type[1] > nested_type[0] > enum_type[0] > value[2] > name: +desc_test_complex.proto:43:33 +desc_test_complex.proto:43:35 + + + > message_type[1] > nested_type[1] > nested_type[0] > enum_type[0] > value[2] > number: +desc_test_complex.proto:43:38 +desc_test_complex.proto:43:39 + + + > message_type[1] > nested_type[1] > nested_type[0] > enum_type[0] > value[3]: +desc_test_complex.proto:44:33 +desc_test_complex.proto:44:40 + + + > message_type[1] > nested_type[1] > nested_type[0] > enum_type[0] > value[3] > name: +desc_test_complex.proto:44:33 +desc_test_complex.proto:44:35 + + + > message_type[1] > nested_type[1] > nested_type[0] > enum_type[0] > value[3] > number: +desc_test_complex.proto:44:38 +desc_test_complex.proto:44:39 + + + > message_type[1] > nested_type[1] > nested_type[0] > enum_type[0] > value[4]: +desc_test_complex.proto:45:33 +desc_test_complex.proto:45:40 + + + > message_type[1] > nested_type[1] > nested_type[0] > enum_type[0] > value[4] > name: +desc_test_complex.proto:45:33 +desc_test_complex.proto:45:35 + + + > message_type[1] > nested_type[1] > nested_type[0] > enum_type[0] > value[4] > number: +desc_test_complex.proto:45:38 +desc_test_complex.proto:45:39 + + + > message_type[1] > nested_type[1] > nested_type[0] > enum_type[0] > value[5]: +desc_test_complex.proto:46:33 +desc_test_complex.proto:46:40 + + + > message_type[1] > nested_type[1] > nested_type[0] > enum_type[0] > value[5] > name: +desc_test_complex.proto:46:33 +desc_test_complex.proto:46:35 + + + > message_type[1] > nested_type[1] > nested_type[0] > enum_type[0] > value[5] > number: +desc_test_complex.proto:46:38 +desc_test_complex.proto:46:39 + + + > message_type[1] > nested_type[1] > nested_type[0] > enum_type[0] > value[6]: +desc_test_complex.proto:47:33 +desc_test_complex.proto:47:40 + + + > message_type[1] > nested_type[1] > nested_type[0] > enum_type[0] > value[6] > name: +desc_test_complex.proto:47:33 +desc_test_complex.proto:47:35 + + + > message_type[1] > nested_type[1] > nested_type[0] > enum_type[0] > value[6] > number: +desc_test_complex.proto:47:38 +desc_test_complex.proto:47:39 + + + > message_type[1] > nested_type[1] > nested_type[0] > options: +desc_test_complex.proto:49:25 +desc_test_complex.proto:49:50 + + + > message_type[1] > nested_type[1] > nested_type[0] > options > (foo.bar.Test.Nested.fooblez): +desc_test_complex.proto:49:25 +desc_test_complex.proto:49:50 + + + > message_type[1] > nested_type[1] > nested_type[0] > extension: +desc_test_complex.proto:50:25 +desc_test_complex.proto:52:26 + + + > message_type[1] > nested_type[1] > nested_type[0] > extension[0]: +desc_test_complex.proto:51:33 +desc_test_complex.proto:51:64 + + + > message_type[1] > nested_type[1] > nested_type[0] > 
extension[0] > extendee: +desc_test_complex.proto:50:32 +desc_test_complex.proto:50:36 + + + > message_type[1] > nested_type[1] > nested_type[0] > extension[0] > label: +desc_test_complex.proto:51:33 +desc_test_complex.proto:51:41 + + + > message_type[1] > nested_type[1] > nested_type[0] > extension[0] > type: +desc_test_complex.proto:51:42 +desc_test_complex.proto:51:48 + + + > message_type[1] > nested_type[1] > nested_type[0] > extension[0] > name: +desc_test_complex.proto:51:49 +desc_test_complex.proto:51:57 + + + > message_type[1] > nested_type[1] > nested_type[0] > extension[0] > number: +desc_test_complex.proto:51:60 +desc_test_complex.proto:51:63 + + + > message_type[1] > nested_type[1] > nested_type[0] > options: +desc_test_complex.proto:53:25 +desc_test_complex.proto:53:108 + + + > message_type[1] > nested_type[1] > nested_type[0] > options > (foo.bar.rept)[0]: +desc_test_complex.proto:53:25 +desc_test_complex.proto:53:108 + + + > message_type[1] > nested_type[1] > nested_type[0] > nested_type[0]: +desc_test_complex.proto:54:25 +desc_test_complex.proto:58:26 + + + > message_type[1] > nested_type[1] > nested_type[0] > nested_type[0] > name: +desc_test_complex.proto:54:33 +desc_test_complex.proto:54:51 + + + > message_type[1] > nested_type[1] > nested_type[0] > nested_type[0] > options: +desc_test_complex.proto:55:33 +desc_test_complex.proto:55:109 + + + > message_type[1] > nested_type[1] > nested_type[0] > nested_type[0] > options > (foo.bar.rept)[0]: +desc_test_complex.proto:55:33 +desc_test_complex.proto:55:109 + + + > message_type[1] > nested_type[1] > nested_type[0] > nested_type[0] > field[0]: +desc_test_complex.proto:57:33 +desc_test_complex.proto:57:56 + + + > message_type[1] > nested_type[1] > nested_type[0] > nested_type[0] > field[0] > label: +desc_test_complex.proto:57:33 +desc_test_complex.proto:57:41 + + + > message_type[1] > nested_type[1] > nested_type[0] > nested_type[0] > field[0] > type_name: +desc_test_complex.proto:57:42 +desc_test_complex.proto:57:46 + + + > message_type[1] > nested_type[1] > nested_type[0] > nested_type[0] > field[0] > name: +desc_test_complex.proto:57:47 +desc_test_complex.proto:57:51 + + + > message_type[1] > nested_type[1] > nested_type[0] > nested_type[0] > field[0] > number: +desc_test_complex.proto:57:54 +desc_test_complex.proto:57:55 + + + > enum_type[0]: +desc_test_complex.proto:63:1 +desc_test_complex.proto:72:2 + + + > enum_type[0] > name: +desc_test_complex.proto:63:6 +desc_test_complex.proto:63:26 + + + > enum_type[0] > value[0]: +desc_test_complex.proto:64:9 +desc_test_complex.proto:64:15 + + + > enum_type[0] > value[0] > name: +desc_test_complex.proto:64:9 +desc_test_complex.proto:64:10 + + + > enum_type[0] > value[0] > number: +desc_test_complex.proto:64:13 +desc_test_complex.proto:64:14 + + + > enum_type[0] > value[1]: +desc_test_complex.proto:65:9 +desc_test_complex.proto:65:15 + + + > enum_type[0] > value[1] > name: +desc_test_complex.proto:65:9 +desc_test_complex.proto:65:10 + + + > enum_type[0] > value[1] > number: +desc_test_complex.proto:65:13 +desc_test_complex.proto:65:14 + + + > enum_type[0] > value[2]: +desc_test_complex.proto:66:9 +desc_test_complex.proto:66:15 + + + > enum_type[0] > value[2] > name: +desc_test_complex.proto:66:9 +desc_test_complex.proto:66:10 + + + > enum_type[0] > value[2] > number: +desc_test_complex.proto:66:13 +desc_test_complex.proto:66:14 + + + > enum_type[0] > reserved_range: +desc_test_complex.proto:67:9 +desc_test_complex.proto:67:30 + + + > enum_type[0] > reserved_range[0]: 
+desc_test_complex.proto:67:18 +desc_test_complex.proto:67:29 + + + > enum_type[0] > reserved_range[0] > start: +desc_test_complex.proto:67:18 +desc_test_complex.proto:67:22 + + + > enum_type[0] > reserved_range[0] > end: +desc_test_complex.proto:67:26 +desc_test_complex.proto:67:29 + + + > enum_type[0] > reserved_range: +desc_test_complex.proto:68:9 +desc_test_complex.proto:68:26 + + + > enum_type[0] > reserved_range[1]: +desc_test_complex.proto:68:18 +desc_test_complex.proto:68:25 + + + > enum_type[0] > reserved_range[1] > start: +desc_test_complex.proto:68:18 +desc_test_complex.proto:68:20 + + + > enum_type[0] > reserved_range[1] > end: +desc_test_complex.proto:68:24 +desc_test_complex.proto:68:25 + + + > enum_type[0] > reserved_range: +desc_test_complex.proto:69:9 +desc_test_complex.proto:69:40 + + + > enum_type[0] > reserved_range[2]: +desc_test_complex.proto:69:18 +desc_test_complex.proto:69:25 + + + > enum_type[0] > reserved_range[2] > start: +desc_test_complex.proto:69:18 +desc_test_complex.proto:69:19 + + + > enum_type[0] > reserved_range[2] > end: +desc_test_complex.proto:69:23 +desc_test_complex.proto:69:25 + + + > enum_type[0] > reserved_range[3]: +desc_test_complex.proto:69:27 +desc_test_complex.proto:69:35 + + + > enum_type[0] > reserved_range[3] > start: +desc_test_complex.proto:69:27 +desc_test_complex.proto:69:29 + + + > enum_type[0] > reserved_range[3] > end: +desc_test_complex.proto:69:33 +desc_test_complex.proto:69:35 + + + > enum_type[0] > reserved_range[4]: +desc_test_complex.proto:69:37 +desc_test_complex.proto:69:39 + + + > enum_type[0] > reserved_range[4] > start: +desc_test_complex.proto:69:37 +desc_test_complex.proto:69:39 + + + > enum_type[0] > reserved_range[4] > end: +desc_test_complex.proto:69:37 +desc_test_complex.proto:69:39 + + + > enum_type[0] > reserved_range: +desc_test_complex.proto:70:9 +desc_test_complex.proto:70:27 + + + > enum_type[0] > reserved_range[5]: +desc_test_complex.proto:70:18 +desc_test_complex.proto:70:26 + + + > enum_type[0] > reserved_range[5] > start: +desc_test_complex.proto:70:18 +desc_test_complex.proto:70:20 + + + > enum_type[0] > reserved_range[5] > end: +desc_test_complex.proto:70:24 +desc_test_complex.proto:70:26 + + + > enum_type[0] > reserved_name: +desc_test_complex.proto:71:9 +desc_test_complex.proto:71:32 + + + > enum_type[0] > reserved_name[0]: +desc_test_complex.proto:71:18 +desc_test_complex.proto:71:21 + + + > enum_type[0] > reserved_name[1]: +desc_test_complex.proto:71:23 +desc_test_complex.proto:71:26 + + + > enum_type[0] > reserved_name[2]: +desc_test_complex.proto:71:28 +desc_test_complex.proto:71:31 + + + > message_type[2]: +desc_test_complex.proto:74:1 +desc_test_complex.proto:78:2 + + + > message_type[2] > name: +desc_test_complex.proto:74:9 +desc_test_complex.proto:74:32 + + + > message_type[2] > reserved_range: +desc_test_complex.proto:75:9 +desc_test_complex.proto:75:40 + + + > message_type[2] > reserved_range[0]: +desc_test_complex.proto:75:18 +desc_test_complex.proto:75:25 + + + > message_type[2] > reserved_range[0] > start: +desc_test_complex.proto:75:18 +desc_test_complex.proto:75:19 + + + > message_type[2] > reserved_range[0] > end: +desc_test_complex.proto:75:23 +desc_test_complex.proto:75:25 + + + > message_type[2] > reserved_range[1]: +desc_test_complex.proto:75:27 +desc_test_complex.proto:75:35 + + + > message_type[2] > reserved_range[1] > start: +desc_test_complex.proto:75:27 +desc_test_complex.proto:75:29 + + + > message_type[2] > reserved_range[1] > end: +desc_test_complex.proto:75:33 
+desc_test_complex.proto:75:35 + + + > message_type[2] > reserved_range[2]: +desc_test_complex.proto:75:37 +desc_test_complex.proto:75:39 + + + > message_type[2] > reserved_range[2] > start: +desc_test_complex.proto:75:37 +desc_test_complex.proto:75:39 + + + > message_type[2] > reserved_range[2] > end: +desc_test_complex.proto:75:37 +desc_test_complex.proto:75:39 + + + > message_type[2] > reserved_range: +desc_test_complex.proto:76:9 +desc_test_complex.proto:76:30 + + + > message_type[2] > reserved_range[3]: +desc_test_complex.proto:76:18 +desc_test_complex.proto:76:29 + + + > message_type[2] > reserved_range[3] > start: +desc_test_complex.proto:76:18 +desc_test_complex.proto:76:22 + + + > message_type[2] > reserved_range[3] > end: +desc_test_complex.proto:76:26 +desc_test_complex.proto:76:29 + + + > message_type[2] > reserved_name: +desc_test_complex.proto:77:9 +desc_test_complex.proto:77:32 + + + > message_type[2] > reserved_name[0]: +desc_test_complex.proto:77:18 +desc_test_complex.proto:77:21 + + + > message_type[2] > reserved_name[1]: +desc_test_complex.proto:77:23 +desc_test_complex.proto:77:26 + + + > message_type[2] > reserved_name[2]: +desc_test_complex.proto:77:28 +desc_test_complex.proto:77:31 + + + > message_type[3]: +desc_test_complex.proto:80:1 +desc_test_complex.proto:82:2 + + + > message_type[3] > name: +desc_test_complex.proto:80:9 +desc_test_complex.proto:80:23 + + + > message_type[3] > field[0]: +desc_test_complex.proto:81:9 +desc_test_complex.proto:81:38 + + + > message_type[3] > field[0] > type_name: +desc_test_complex.proto:81:9 +desc_test_complex.proto:81:28 + + + > message_type[3] > field[0] > name: +desc_test_complex.proto:81:29 +desc_test_complex.proto:81:33 + + + > message_type[3] > field[0] > number: +desc_test_complex.proto:81:36 +desc_test_complex.proto:81:37 + + + > extension: +desc_test_complex.proto:84:1 +desc_test_complex.proto:89:2 + + + > extension[1]: +desc_test_complex.proto:85:9 +desc_test_complex.proto:85:36 + + + > extension[1] > extendee: +desc_test_complex.proto:84:8 +desc_test_complex.proto:84:38 + + + > extension[1] > label: +desc_test_complex.proto:85:9 +desc_test_complex.proto:85:17 + + + > extension[1] > type_name: +desc_test_complex.proto:85:18 +desc_test_complex.proto:85:22 + + + > extension[1] > name: +desc_test_complex.proto:85:23 +desc_test_complex.proto:85:27 + + + > extension[1] > number: +desc_test_complex.proto:85:30 +desc_test_complex.proto:85:35 + + + > extension[2]: +desc_test_complex.proto:86:9 +desc_test_complex.proto:86:60 + + + > extension[2] > extendee: +desc_test_complex.proto:84:8 +desc_test_complex.proto:84:38 + + + > extension[2] > label: +desc_test_complex.proto:86:9 +desc_test_complex.proto:86:17 + + + > extension[2] > type_name: +desc_test_complex.proto:86:18 +desc_test_complex.proto:86:47 + + + > extension[2] > name: +desc_test_complex.proto:86:48 +desc_test_complex.proto:86:51 + + + > extension[2] > number: +desc_test_complex.proto:86:54 +desc_test_complex.proto:86:59 + + + > extension[3]: +desc_test_complex.proto:87:9 +desc_test_complex.proto:87:36 + + + > extension[3] > extendee: +desc_test_complex.proto:84:8 +desc_test_complex.proto:84:38 + + + > extension[3] > label: +desc_test_complex.proto:87:9 +desc_test_complex.proto:87:17 + + + > extension[3] > type_name: +desc_test_complex.proto:87:18 +desc_test_complex.proto:87:25 + + + > extension[3] > name: +desc_test_complex.proto:87:26 +desc_test_complex.proto:87:27 + + + > extension[3] > number: +desc_test_complex.proto:87:30 +desc_test_complex.proto:87:35 + + + > 
extension[4]: +desc_test_complex.proto:88:9 +desc_test_complex.proto:88:50 + + + > extension[4] > extendee: +desc_test_complex.proto:84:8 +desc_test_complex.proto:84:38 + + + > extension[4] > label: +desc_test_complex.proto:88:9 +desc_test_complex.proto:88:17 + + + > extension[4] > type_name: +desc_test_complex.proto:88:18 +desc_test_complex.proto:88:32 + + + > extension[4] > name: +desc_test_complex.proto:88:33 +desc_test_complex.proto:88:41 + + + > extension[4] > number: +desc_test_complex.proto:88:44 +desc_test_complex.proto:88:49 + + + > message_type[4]: +desc_test_complex.proto:91:1 +desc_test_complex.proto:111:2 + + + > message_type[4] > name: +desc_test_complex.proto:91:9 +desc_test_complex.proto:91:16 + + + > message_type[4] > options: +desc_test_complex.proto:92:5 +desc_test_complex.proto:92:130 + + + > message_type[4] > options > (foo.bar.rept)[0]: +desc_test_complex.proto:92:5 +desc_test_complex.proto:92:130 + + + > message_type[4] > options: +desc_test_complex.proto:93:5 +desc_test_complex.proto:93:115 + + + > message_type[4] > options > (foo.bar.rept)[1]: +desc_test_complex.proto:93:5 +desc_test_complex.proto:93:115 + + + > message_type[4] > options: +desc_test_complex.proto:94:5 +desc_test_complex.proto:94:36 + + + > message_type[4] > options > (foo.bar.rept)[2]: +desc_test_complex.proto:94:5 +desc_test_complex.proto:94:36 + + + > message_type[4] > options: +desc_test_complex.proto:95:5 +desc_test_complex.proto:95:23 + + + > message_type[4] > options > (foo.bar.eee): +desc_test_complex.proto:95:5 +desc_test_complex.proto:95:23 + + + > message_type[4] > options: +desc_test_complex.proto:96:9 +desc_test_complex.proto:96:34 + + + > message_type[4] > options > (foo.bar.a): +desc_test_complex.proto:96:9 +desc_test_complex.proto:96:34 + + + > message_type[4] > options: +desc_test_complex.proto:97:9 +desc_test_complex.proto:97:86 + + + > message_type[4] > options > (foo.bar.a) > test: +desc_test_complex.proto:97:9 +desc_test_complex.proto:97:86 + + + > message_type[4] > options: +desc_test_complex.proto:98:9 +desc_test_complex.proto:98:37 + + + > message_type[4] > options > (foo.bar.a) > test > foo: +desc_test_complex.proto:98:9 +desc_test_complex.proto:98:37 + + + > message_type[4] > options: +desc_test_complex.proto:99:9 +desc_test_complex.proto:99:41 + + + > message_type[4] > options > (foo.bar.a) > test > s > name: +desc_test_complex.proto:99:9 +desc_test_complex.proto:99:41 + + + > message_type[4] > options: +desc_test_complex.proto:100:5 +desc_test_complex.proto:100:34 + + + > message_type[4] > options > (foo.bar.a) > test > s > id: +desc_test_complex.proto:100:5 +desc_test_complex.proto:100:34 + + + > message_type[4] > options: +desc_test_complex.proto:101:5 +desc_test_complex.proto:101:31 + + + > message_type[4] > options > (foo.bar.a) > test > array[0]: +desc_test_complex.proto:101:5 +desc_test_complex.proto:101:31 + + + > message_type[4] > options: +desc_test_complex.proto:102:5 +desc_test_complex.proto:102:31 + + + > message_type[4] > options > (foo.bar.a) > test > array[1]: +desc_test_complex.proto:102:5 +desc_test_complex.proto:102:31 + + + > message_type[4] > options: +desc_test_complex.proto:103:5 +desc_test_complex.proto:103:78 + + + > message_type[4] > options > (foo.bar.a) > test > (foo.bar.Test.Nested._NestedNested._garblez): +desc_test_complex.proto:103:5 +desc_test_complex.proto:103:78 + + + > message_type[4] > options: +desc_test_complex.proto:105:9 +desc_test_complex.proto:105:37 + + + > message_type[4] > options > (foo.bar.map_vals) > vals[0]: 
+desc_test_complex.proto:105:9 +desc_test_complex.proto:105:37 + Trailing comments: + no key, no value + + + + > message_type[4] > options: +desc_test_complex.proto:106:9 +desc_test_complex.proto:106:47 + + + > message_type[4] > options > (foo.bar.map_vals) > vals[1]: +desc_test_complex.proto:106:9 +desc_test_complex.proto:106:47 + Trailing comments: + no value + + + + > message_type[4] > options: +desc_test_complex.proto:107:9 +desc_test_complex.proto:107:69 + + + > message_type[4] > options > (foo.bar.map_vals) > vals[2]: +desc_test_complex.proto:107:9 +desc_test_complex.proto:107:69 + + + > message_type[4] > field[0]: +desc_test_complex.proto:109:5 +desc_test_complex.proto:109:28 + + + > message_type[4] > field[0] > label: +desc_test_complex.proto:109:5 +desc_test_complex.proto:109:13 + + + > message_type[4] > field[0] > type_name: +desc_test_complex.proto:109:14 +desc_test_complex.proto:109:18 + + + > message_type[4] > field[0] > name: +desc_test_complex.proto:109:19 +desc_test_complex.proto:109:23 + + + > message_type[4] > field[0] > number: +desc_test_complex.proto:109:26 +desc_test_complex.proto:109:27 + + + > message_type[4] > field[1]: +desc_test_complex.proto:110:5 +desc_test_complex.proto:110:67 + + + > message_type[4] > field[1] > label: +desc_test_complex.proto:110:5 +desc_test_complex.proto:110:13 + + + > message_type[4] > field[1] > type_name: +desc_test_complex.proto:110:14 +desc_test_complex.proto:110:43 + + + > message_type[4] > field[1] > name: +desc_test_complex.proto:110:44 +desc_test_complex.proto:110:47 + + + > message_type[4] > field[1] > number: +desc_test_complex.proto:110:50 +desc_test_complex.proto:110:51 + + + > message_type[4] > field[1] > options: +desc_test_complex.proto:110:52 +desc_test_complex.proto:110:66 + + + > message_type[4] > field[1] > default_value: +desc_test_complex.proto:110:53 +desc_test_complex.proto:110:65 + + + > message_type[5]: +desc_test_complex.proto:113:1 +desc_test_complex.proto:127:2 + + + > message_type[5] > name: +desc_test_complex.proto:113:9 +desc_test_complex.proto:113:18 + + + > message_type[5] > field[0]: +desc_test_complex.proto:114:9 +desc_test_complex.proto:114:41 + + + > message_type[5] > field[0] > label: +desc_test_complex.proto:114:9 +desc_test_complex.proto:114:17 + + + > message_type[5] > field[0] > type: +desc_test_complex.proto:114:18 +desc_test_complex.proto:114:22 + + + > message_type[5] > field[0] > name: +desc_test_complex.proto:114:23 +desc_test_complex.proto:114:36 + + + > message_type[5] > field[0] > number: +desc_test_complex.proto:114:39 +desc_test_complex.proto:114:40 + + + > message_type[5] > enum_type[0]: +desc_test_complex.proto:116:9 +desc_test_complex.proto:120:10 + + + > message_type[5] > enum_type[0] > name: +desc_test_complex.proto:116:14 +desc_test_complex.proto:116:20 + + + > message_type[5] > enum_type[0] > value[0]: +desc_test_complex.proto:117:17 +desc_test_complex.proto:117:27 + + + > message_type[5] > enum_type[0] > value[0] > name: +desc_test_complex.proto:117:17 +desc_test_complex.proto:117:22 + + + > message_type[5] > enum_type[0] > value[0] > number: +desc_test_complex.proto:117:25 +desc_test_complex.proto:117:26 + + + > message_type[5] > enum_type[0] > value[1]: +desc_test_complex.proto:118:17 +desc_test_complex.proto:118:26 + + + > message_type[5] > enum_type[0] > value[1] > name: +desc_test_complex.proto:118:17 +desc_test_complex.proto:118:21 + + + > message_type[5] > enum_type[0] > value[1] > number: +desc_test_complex.proto:118:24 +desc_test_complex.proto:118:25 + + + > 
message_type[5] > enum_type[0] > value[2]: +desc_test_complex.proto:119:17 +desc_test_complex.proto:119:27 + + + > message_type[5] > enum_type[0] > value[2] > name: +desc_test_complex.proto:119:17 +desc_test_complex.proto:119:22 + + + > message_type[5] > enum_type[0] > value[2] > number: +desc_test_complex.proto:119:25 +desc_test_complex.proto:119:26 + + + > message_type[5] > nested_type[0]: +desc_test_complex.proto:121:9 +desc_test_complex.proto:124:10 + + + > message_type[5] > nested_type[0] > name: +desc_test_complex.proto:121:17 +desc_test_complex.proto:121:27 + + + > message_type[5] > nested_type[0] > field[0]: +desc_test_complex.proto:122:17 +desc_test_complex.proto:122:44 + + + > message_type[5] > nested_type[0] > field[0] > label: +desc_test_complex.proto:122:17 +desc_test_complex.proto:122:25 + + + > message_type[5] > nested_type[0] > field[0] > type_name: +desc_test_complex.proto:122:26 +desc_test_complex.proto:122:32 + + + > message_type[5] > nested_type[0] > field[0] > name: +desc_test_complex.proto:122:33 +desc_test_complex.proto:122:39 + + + > message_type[5] > nested_type[0] > field[0] > number: +desc_test_complex.proto:122:42 +desc_test_complex.proto:122:43 + + + > message_type[5] > nested_type[0] > field[1]: +desc_test_complex.proto:123:17 +desc_test_complex.proto:123:44 + + + > message_type[5] > nested_type[0] > field[1] > label: +desc_test_complex.proto:123:17 +desc_test_complex.proto:123:25 + + + > message_type[5] > nested_type[0] > field[1] > type: +desc_test_complex.proto:123:26 +desc_test_complex.proto:123:32 + + + > message_type[5] > nested_type[0] > field[1] > name: +desc_test_complex.proto:123:33 +desc_test_complex.proto:123:39 + + + > message_type[5] > nested_type[0] > field[1] > number: +desc_test_complex.proto:123:42 +desc_test_complex.proto:123:43 + + + > message_type[5] > field[1]: +desc_test_complex.proto:126:9 +desc_test_complex.proto:126:44 + + + > message_type[5] > field[1] > label: +desc_test_complex.proto:126:9 +desc_test_complex.proto:126:17 + + + > message_type[5] > field[1] > type_name: +desc_test_complex.proto:126:18 +desc_test_complex.proto:126:28 + + + > message_type[5] > field[1] > name: +desc_test_complex.proto:126:29 +desc_test_complex.proto:126:39 + + + > message_type[5] > field[1] > number: +desc_test_complex.proto:126:42 +desc_test_complex.proto:126:43 + + + > extension: +desc_test_complex.proto:129:1 +desc_test_complex.proto:131:2 + + + > extension[5]: +desc_test_complex.proto:130:9 +desc_test_complex.proto:130:46 + + + > extension[5] > extendee: +desc_test_complex.proto:129:8 +desc_test_complex.proto:129:37 + + + > extension[5] > label: +desc_test_complex.proto:130:9 +desc_test_complex.proto:130:17 + + + > extension[5] > type_name: +desc_test_complex.proto:130:18 +desc_test_complex.proto:130:27 + + + > extension[5] > name: +desc_test_complex.proto:130:28 +desc_test_complex.proto:130:37 + + + > extension[5] > number: +desc_test_complex.proto:130:40 +desc_test_complex.proto:130:45 + + + > service[0]: +desc_test_complex.proto:133:1 +desc_test_complex.proto:152:2 + + + > service[0] > name: +desc_test_complex.proto:133:9 +desc_test_complex.proto:133:24 + + + > service[0] > method[0]: +desc_test_complex.proto:134:9 +desc_test_complex.proto:142:10 + + + > service[0] > method[0] > name: +desc_test_complex.proto:134:13 +desc_test_complex.proto:134:21 + + + > service[0] > method[0] > input_type: +desc_test_complex.proto:134:22 +desc_test_complex.proto:134:26 + + + > service[0] > method[0] > output_type: +desc_test_complex.proto:134:37 
+desc_test_complex.proto:134:41 + + + > service[0] > method[0] > options: +desc_test_complex.proto:135:17 +desc_test_complex.proto:141:19 + + + > service[0] > method[0] > options > (foo.bar.validator): +desc_test_complex.proto:135:17 +desc_test_complex.proto:141:19 + + + > service[0] > method[1]: +desc_test_complex.proto:143:9 +desc_test_complex.proto:151:10 + + + > service[0] > method[1] > name: +desc_test_complex.proto:143:13 +desc_test_complex.proto:143:16 + + + > service[0] > method[1] > input_type: +desc_test_complex.proto:143:17 +desc_test_complex.proto:143:21 + + + > service[0] > method[1] > output_type: +desc_test_complex.proto:143:32 +desc_test_complex.proto:143:36 + + + > service[0] > method[1] > options: +desc_test_complex.proto:144:17 +desc_test_complex.proto:150:19 + + + > service[0] > method[1] > options > (foo.bar.validator): +desc_test_complex.proto:144:17 +desc_test_complex.proto:150:19 + + + > message_type[6]: +desc_test_complex.proto:154:1 +desc_test_complex.proto:180:2 + + + > message_type[6] > name: +desc_test_complex.proto:154:9 +desc_test_complex.proto:154:13 + + + > message_type[6] > nested_type[0]: +desc_test_complex.proto:155:3 +desc_test_complex.proto:160:4 + + + > message_type[6] > nested_type[0] > name: +desc_test_complex.proto:155:11 +desc_test_complex.proto:155:21 + + + > message_type[6] > nested_type[0] > field[0]: +desc_test_complex.proto:156:5 +desc_test_complex.proto:156:33 + + + > message_type[6] > nested_type[0] > field[0] > label: +desc_test_complex.proto:156:5 +desc_test_complex.proto:156:13 + + + > message_type[6] > nested_type[0] > field[0] > type: +desc_test_complex.proto:156:14 +desc_test_complex.proto:156:20 + + + > message_type[6] > nested_type[0] > field[0] > name: +desc_test_complex.proto:156:21 +desc_test_complex.proto:156:28 + + + > message_type[6] > nested_type[0] > field[0] > number: +desc_test_complex.proto:156:31 +desc_test_complex.proto:156:32 + + + > message_type[6] > nested_type[0] > field[1]: +desc_test_complex.proto:157:5 +desc_test_complex.proto:157:35 + + + > message_type[6] > nested_type[0] > field[1] > label: +desc_test_complex.proto:157:5 +desc_test_complex.proto:157:13 + + + > message_type[6] > nested_type[0] > field[1] > type: +desc_test_complex.proto:157:14 +desc_test_complex.proto:157:18 + + + > message_type[6] > nested_type[0] > field[1] > name: +desc_test_complex.proto:157:19 +desc_test_complex.proto:157:30 + + + > message_type[6] > nested_type[0] > field[1] > number: +desc_test_complex.proto:157:33 +desc_test_complex.proto:157:34 + + + > message_type[6] > nested_type[0] > field[2]: +desc_test_complex.proto:158:5 +desc_test_complex.proto:158:32 + + + > message_type[6] > nested_type[0] > field[2] > label: +desc_test_complex.proto:158:5 +desc_test_complex.proto:158:13 + + + > message_type[6] > nested_type[0] > field[2] > type: +desc_test_complex.proto:158:14 +desc_test_complex.proto:158:19 + + + > message_type[6] > nested_type[0] > field[2] > name: +desc_test_complex.proto:158:20 +desc_test_complex.proto:158:27 + + + > message_type[6] > nested_type[0] > field[2] > number: +desc_test_complex.proto:158:30 +desc_test_complex.proto:158:31 + + + > message_type[6] > nested_type[0] > field[3]: +desc_test_complex.proto:159:5 +desc_test_complex.proto:159:32 + + + > message_type[6] > nested_type[0] > field[3] > label: +desc_test_complex.proto:159:5 +desc_test_complex.proto:159:13 + + + > message_type[6] > nested_type[0] > field[3] > type: +desc_test_complex.proto:159:14 +desc_test_complex.proto:159:19 + + + > message_type[6] > 
nested_type[0] > field[3] > name: +desc_test_complex.proto:159:20 +desc_test_complex.proto:159:27 + + + > message_type[6] > nested_type[0] > field[3] > number: +desc_test_complex.proto:159:30 +desc_test_complex.proto:159:31 + + + > message_type[6] > nested_type[1]: +desc_test_complex.proto:161:3 +desc_test_complex.proto:164:4 + + + > message_type[6] > nested_type[1] > name: +desc_test_complex.proto:161:11 +desc_test_complex.proto:161:18 + + + > message_type[6] > nested_type[1] > field[0]: +desc_test_complex.proto:162:5 +desc_test_complex.proto:162:32 + + + > message_type[6] > nested_type[1] > field[0] > label: +desc_test_complex.proto:162:5 +desc_test_complex.proto:162:13 + + + > message_type[6] > nested_type[1] > field[0] > type: +desc_test_complex.proto:162:14 +desc_test_complex.proto:162:19 + + + > message_type[6] > nested_type[1] > field[0] > name: +desc_test_complex.proto:162:20 +desc_test_complex.proto:162:27 + + + > message_type[6] > nested_type[1] > field[0] > number: +desc_test_complex.proto:162:30 +desc_test_complex.proto:162:31 + + + > message_type[6] > nested_type[1] > field[1]: +desc_test_complex.proto:163:5 +desc_test_complex.proto:163:33 + + + > message_type[6] > nested_type[1] > field[1] > label: +desc_test_complex.proto:163:5 +desc_test_complex.proto:163:13 + + + > message_type[6] > nested_type[1] > field[1] > type: +desc_test_complex.proto:163:14 +desc_test_complex.proto:163:20 + + + > message_type[6] > nested_type[1] > field[1] > name: +desc_test_complex.proto:163:21 +desc_test_complex.proto:163:28 + + + > message_type[6] > nested_type[1] > field[1] > number: +desc_test_complex.proto:163:31 +desc_test_complex.proto:163:32 + + + > message_type[6] > nested_type[2]: +desc_test_complex.proto:165:3 +desc_test_complex.proto:170:4 + + + > message_type[6] > nested_type[2] > name: +desc_test_complex.proto:165:11 +desc_test_complex.proto:165:23 + + + > message_type[6] > nested_type[2] > field[0]: +desc_test_complex.proto:166:5 +desc_test_complex.proto:166:35 + + + > message_type[6] > nested_type[2] > field[0] > label: +desc_test_complex.proto:166:5 +desc_test_complex.proto:166:13 + + + > message_type[6] > nested_type[2] > field[0] > type: +desc_test_complex.proto:166:14 +desc_test_complex.proto:166:18 + + + > message_type[6] > nested_type[2] > field[0] > name: +desc_test_complex.proto:166:19 +desc_test_complex.proto:166:30 + + + > message_type[6] > nested_type[2] > field[0] > number: +desc_test_complex.proto:166:33 +desc_test_complex.proto:166:34 + + + > message_type[6] > nested_type[2] > field[1]: +desc_test_complex.proto:167:5 +desc_test_complex.proto:167:34 + + + > message_type[6] > nested_type[2] > field[1] > label: +desc_test_complex.proto:167:5 +desc_test_complex.proto:167:13 + + + > message_type[6] > nested_type[2] > field[1] > type: +desc_test_complex.proto:167:14 +desc_test_complex.proto:167:19 + + + > message_type[6] > nested_type[2] > field[1] > name: +desc_test_complex.proto:167:20 +desc_test_complex.proto:167:29 + + + > message_type[6] > nested_type[2] > field[1] > number: +desc_test_complex.proto:167:32 +desc_test_complex.proto:167:33 + + + > message_type[6] > nested_type[2] > field[2]: +desc_test_complex.proto:168:5 +desc_test_complex.proto:168:34 + + + > message_type[6] > nested_type[2] > field[2] > label: +desc_test_complex.proto:168:5 +desc_test_complex.proto:168:13 + + + > message_type[6] > nested_type[2] > field[2] > type: +desc_test_complex.proto:168:14 +desc_test_complex.proto:168:19 + + + > message_type[6] > nested_type[2] > field[2] > name: 
+desc_test_complex.proto:168:20 +desc_test_complex.proto:168:29 + + + > message_type[6] > nested_type[2] > field[2] > number: +desc_test_complex.proto:168:32 +desc_test_complex.proto:168:33 + + + > message_type[6] > nested_type[2] > field[3]: +desc_test_complex.proto:169:5 +desc_test_complex.proto:169:29 + + + > message_type[6] > nested_type[2] > field[3] > label: +desc_test_complex.proto:169:5 +desc_test_complex.proto:169:13 + + + > message_type[6] > nested_type[2] > field[3] > type_name: +desc_test_complex.proto:169:14 +desc_test_complex.proto:169:18 + + + > message_type[6] > nested_type[2] > field[3] > name: +desc_test_complex.proto:169:19 +desc_test_complex.proto:169:24 + + + > message_type[6] > nested_type[2] > field[3] > number: +desc_test_complex.proto:169:27 +desc_test_complex.proto:169:28 + + + > message_type[6] > oneof_decl[0]: +desc_test_complex.proto:171:3 +desc_test_complex.proto:179:4 + + + > message_type[6] > oneof_decl[0] > name: +desc_test_complex.proto:171:9 +desc_test_complex.proto:171:13 + + + > message_type[6] > field[0]: +desc_test_complex.proto:172:5 +desc_test_complex.proto:172:27 + + + > message_type[6] > field[0] > type_name: +desc_test_complex.proto:172:5 +desc_test_complex.proto:172:15 + + + > message_type[6] > field[0] > name: +desc_test_complex.proto:172:16 +desc_test_complex.proto:172:22 + + + > message_type[6] > field[0] > number: +desc_test_complex.proto:172:25 +desc_test_complex.proto:172:26 + + + > message_type[6] > field[1]: +desc_test_complex.proto:173:5 +desc_test_complex.proto:173:31 + + + > message_type[6] > field[1] > type_name: +desc_test_complex.proto:173:5 +desc_test_complex.proto:173:17 + + + > message_type[6] > field[1] > name: +desc_test_complex.proto:173:18 +desc_test_complex.proto:173:26 + + + > message_type[6] > field[1] > number: +desc_test_complex.proto:173:29 +desc_test_complex.proto:173:30 + + + > message_type[6] > field[2]: +desc_test_complex.proto:174:5 +desc_test_complex.proto:174:21 + + + > message_type[6] > field[2] > type_name: +desc_test_complex.proto:174:5 +desc_test_complex.proto:174:12 + + + > message_type[6] > field[2] > name: +desc_test_complex.proto:174:13 +desc_test_complex.proto:174:16 + + + > message_type[6] > field[2] > number: +desc_test_complex.proto:174:19 +desc_test_complex.proto:174:20 + + + > message_type[6] > field[3]: +desc_test_complex.proto:175:9 +desc_test_complex.proto:178:10 + + + > message_type[6] > field[3] > type: +desc_test_complex.proto:175:9 +desc_test_complex.proto:175:14 + + + > message_type[6] > field[3] > name: +desc_test_complex.proto:175:15 +desc_test_complex.proto:175:24 + + + > message_type[6] > field[3] > number: +desc_test_complex.proto:175:27 +desc_test_complex.proto:175:28 + + + > message_type[6] > nested_type[3]: +desc_test_complex.proto:175:9 +desc_test_complex.proto:178:10 + + + > message_type[6] > nested_type[3] > name: +desc_test_complex.proto:175:15 +desc_test_complex.proto:175:24 + + + > message_type[6] > field[3] > type_name: +desc_test_complex.proto:175:15 +desc_test_complex.proto:175:24 + + + > message_type[6] > nested_type[3] > field[0]: +desc_test_complex.proto:176:17 +desc_test_complex.proto:176:45 + + + > message_type[6] > nested_type[3] > field[0] > label: +desc_test_complex.proto:176:17 +desc_test_complex.proto:176:25 + + + > message_type[6] > nested_type[3] > field[0] > type: +desc_test_complex.proto:176:26 +desc_test_complex.proto:176:32 + + + > message_type[6] > nested_type[3] > field[0] > name: +desc_test_complex.proto:176:33 +desc_test_complex.proto:176:40 + + + > 
message_type[6] > nested_type[3] > field[0] > number: +desc_test_complex.proto:176:43 +desc_test_complex.proto:176:44 + + + > message_type[6] > nested_type[3] > field[1]: +desc_test_complex.proto:177:17 +desc_test_complex.proto:177:45 + + + > message_type[6] > nested_type[3] > field[1] > label: +desc_test_complex.proto:177:17 +desc_test_complex.proto:177:25 + + + > message_type[6] > nested_type[3] > field[1] > type: +desc_test_complex.proto:177:26 +desc_test_complex.proto:177:32 + + + > message_type[6] > nested_type[3] > field[1] > name: +desc_test_complex.proto:177:33 +desc_test_complex.proto:177:40 + + + > message_type[6] > nested_type[3] > field[1] > number: +desc_test_complex.proto:177:43 +desc_test_complex.proto:177:44 + + + > extension: +desc_test_complex.proto:182:1 +desc_test_complex.proto:184:2 + + + > extension[6]: +desc_test_complex.proto:183:3 +desc_test_complex.proto:183:30 + + + > extension[6] > extendee: +desc_test_complex.proto:182:8 +desc_test_complex.proto:182:36 + + + > extension[6] > label: +desc_test_complex.proto:183:3 +desc_test_complex.proto:183:11 + + + > extension[6] > type_name: +desc_test_complex.proto:183:12 +desc_test_complex.proto:183:16 + + + > extension[6] > name: +desc_test_complex.proto:183:17 +desc_test_complex.proto:183:22 + + + > extension[6] > number: +desc_test_complex.proto:183:25 +desc_test_complex.proto:183:29 + + + > message_type[7]: +desc_test_complex.proto:186:1 +desc_test_complex.proto:192:2 + + + > message_type[7] > name: +desc_test_complex.proto:186:9 +desc_test_complex.proto:186:24 + + + > message_type[7] > field[0]: +desc_test_complex.proto:187:5 +desc_test_complex.proto:191:11 + + + > message_type[7] > field[0] > label: +desc_test_complex.proto:187:5 +desc_test_complex.proto:187:13 + + + > message_type[7] > field[0] > type: +desc_test_complex.proto:187:14 +desc_test_complex.proto:187:20 + + + > message_type[7] > field[0] > name: +desc_test_complex.proto:187:21 +desc_test_complex.proto:187:29 + + + > message_type[7] > field[0] > number: +desc_test_complex.proto:187:32 +desc_test_complex.proto:187:33 + + + > message_type[7] > field[0] > options: +desc_test_complex.proto:188:7 +desc_test_complex.proto:191:10 + + + > message_type[7] > field[0] > options > (foo.bar.rules) > repeated: +desc_test_complex.proto:188:8 +desc_test_complex.proto:191:9 + + + > message_type[8]: +desc_test_complex.proto:196:1 +desc_test_complex.proto:232:2 + Leading detached comment [0]: + tests cases where field names collide with keywords + + + + > message_type[8] > name: +desc_test_complex.proto:196:9 +desc_test_complex.proto:196:26 + + + > message_type[8] > field[0]: +desc_test_complex.proto:197:9 +desc_test_complex.proto:197:34 + + + > message_type[8] > field[0] > label: +desc_test_complex.proto:197:9 +desc_test_complex.proto:197:17 + + + > message_type[8] > field[0] > type: +desc_test_complex.proto:197:18 +desc_test_complex.proto:197:22 + + + > message_type[8] > field[0] > name: +desc_test_complex.proto:197:23 +desc_test_complex.proto:197:29 + + + > message_type[8] > field[0] > number: +desc_test_complex.proto:197:32 +desc_test_complex.proto:197:33 + + + > message_type[8] > field[1]: +desc_test_complex.proto:198:9 +desc_test_complex.proto:198:34 + + + > message_type[8] > field[1] > label: +desc_test_complex.proto:198:9 +desc_test_complex.proto:198:17 + + + > message_type[8] > field[1] > type: +desc_test_complex.proto:198:18 +desc_test_complex.proto:198:22 + + + > message_type[8] > field[1] > name: +desc_test_complex.proto:198:23 +desc_test_complex.proto:198:29 + + 
+ > message_type[8] > field[1] > number: +desc_test_complex.proto:198:32 +desc_test_complex.proto:198:33 + + + > message_type[8] > field[2]: +desc_test_complex.proto:199:9 +desc_test_complex.proto:199:34 + + + > message_type[8] > field[2] > label: +desc_test_complex.proto:199:9 +desc_test_complex.proto:199:17 + + + > message_type[8] > field[2] > type: +desc_test_complex.proto:199:18 +desc_test_complex.proto:199:22 + + + > message_type[8] > field[2] > name: +desc_test_complex.proto:199:23 +desc_test_complex.proto:199:29 + + + > message_type[8] > field[2] > number: +desc_test_complex.proto:199:32 +desc_test_complex.proto:199:33 + + + > message_type[8] > field[3]: +desc_test_complex.proto:200:9 +desc_test_complex.proto:200:32 + + + > message_type[8] > field[3] > label: +desc_test_complex.proto:200:9 +desc_test_complex.proto:200:17 + + + > message_type[8] > field[3] > type: +desc_test_complex.proto:200:18 +desc_test_complex.proto:200:22 + + + > message_type[8] > field[3] > name: +desc_test_complex.proto:200:23 +desc_test_complex.proto:200:27 + + + > message_type[8] > field[3] > number: +desc_test_complex.proto:200:30 +desc_test_complex.proto:200:31 + + + > message_type[8] > field[4]: +desc_test_complex.proto:201:9 +desc_test_complex.proto:201:35 + + + > message_type[8] > field[4] > label: +desc_test_complex.proto:201:9 +desc_test_complex.proto:201:17 + + + > message_type[8] > field[4] > type: +desc_test_complex.proto:201:18 +desc_test_complex.proto:201:22 + + + > message_type[8] > field[4] > name: +desc_test_complex.proto:201:23 +desc_test_complex.proto:201:30 + + + > message_type[8] > field[4] > number: +desc_test_complex.proto:201:33 +desc_test_complex.proto:201:34 + + + > message_type[8] > field[5]: +desc_test_complex.proto:202:9 +desc_test_complex.proto:202:36 + + + > message_type[8] > field[5] > label: +desc_test_complex.proto:202:9 +desc_test_complex.proto:202:17 + + + > message_type[8] > field[5] > type: +desc_test_complex.proto:202:18 +desc_test_complex.proto:202:24 + + + > message_type[8] > field[5] > name: +desc_test_complex.proto:202:25 +desc_test_complex.proto:202:31 + + + > message_type[8] > field[5] > number: +desc_test_complex.proto:202:34 +desc_test_complex.proto:202:35 + + + > message_type[8] > field[6]: +desc_test_complex.proto:203:9 +desc_test_complex.proto:203:34 + + + > message_type[8] > field[6] > label: +desc_test_complex.proto:203:9 +desc_test_complex.proto:203:17 + + + > message_type[8] > field[6] > type: +desc_test_complex.proto:203:18 +desc_test_complex.proto:203:23 + + + > message_type[8] > field[6] > name: +desc_test_complex.proto:203:24 +desc_test_complex.proto:203:29 + + + > message_type[8] > field[6] > number: +desc_test_complex.proto:203:32 +desc_test_complex.proto:203:33 + + + > message_type[8] > field[7]: +desc_test_complex.proto:204:9 +desc_test_complex.proto:204:34 + + + > message_type[8] > field[7] > label: +desc_test_complex.proto:204:9 +desc_test_complex.proto:204:17 + + + > message_type[8] > field[7] > type: +desc_test_complex.proto:204:18 +desc_test_complex.proto:204:23 + + + > message_type[8] > field[7] > name: +desc_test_complex.proto:204:24 +desc_test_complex.proto:204:29 + + + > message_type[8] > field[7] > number: +desc_test_complex.proto:204:32 +desc_test_complex.proto:204:33 + + + > message_type[8] > field[8]: +desc_test_complex.proto:205:9 +desc_test_complex.proto:205:34 + + + > message_type[8] > field[8] > label: +desc_test_complex.proto:205:9 +desc_test_complex.proto:205:17 + + + > message_type[8] > field[8] > type: 
+desc_test_complex.proto:205:18 +desc_test_complex.proto:205:23 + + + > message_type[8] > field[8] > name: +desc_test_complex.proto:205:24 +desc_test_complex.proto:205:29 + + + > message_type[8] > field[8] > number: +desc_test_complex.proto:205:32 +desc_test_complex.proto:205:33 + + + > message_type[8] > field[9]: +desc_test_complex.proto:206:9 +desc_test_complex.proto:206:37 + + + > message_type[8] > field[9] > label: +desc_test_complex.proto:206:9 +desc_test_complex.proto:206:17 + + + > message_type[8] > field[9] > type: +desc_test_complex.proto:206:18 +desc_test_complex.proto:206:24 + + + > message_type[8] > field[9] > name: +desc_test_complex.proto:206:25 +desc_test_complex.proto:206:31 + + + > message_type[8] > field[9] > number: +desc_test_complex.proto:206:34 +desc_test_complex.proto:206:36 + + + > message_type[8] > field[10]: +desc_test_complex.proto:207:9 +desc_test_complex.proto:207:37 + + + > message_type[8] > field[10] > label: +desc_test_complex.proto:207:9 +desc_test_complex.proto:207:17 + + + > message_type[8] > field[10] > type: +desc_test_complex.proto:207:18 +desc_test_complex.proto:207:24 + + + > message_type[8] > field[10] > name: +desc_test_complex.proto:207:25 +desc_test_complex.proto:207:31 + + + > message_type[8] > field[10] > number: +desc_test_complex.proto:207:34 +desc_test_complex.proto:207:36 + + + > message_type[8] > field[11]: +desc_test_complex.proto:208:9 +desc_test_complex.proto:208:37 + + + > message_type[8] > field[11] > label: +desc_test_complex.proto:208:9 +desc_test_complex.proto:208:17 + + + > message_type[8] > field[11] > type: +desc_test_complex.proto:208:18 +desc_test_complex.proto:208:24 + + + > message_type[8] > field[11] > name: +desc_test_complex.proto:208:25 +desc_test_complex.proto:208:31 + + + > message_type[8] > field[11] > number: +desc_test_complex.proto:208:34 +desc_test_complex.proto:208:36 + + + > message_type[8] > field[12]: +desc_test_complex.proto:209:9 +desc_test_complex.proto:209:37 + + + > message_type[8] > field[12] > label: +desc_test_complex.proto:209:9 +desc_test_complex.proto:209:17 + + + > message_type[8] > field[12] > type: +desc_test_complex.proto:209:18 +desc_test_complex.proto:209:24 + + + > message_type[8] > field[12] > name: +desc_test_complex.proto:209:25 +desc_test_complex.proto:209:31 + + + > message_type[8] > field[12] > number: +desc_test_complex.proto:209:34 +desc_test_complex.proto:209:36 + + + > message_type[8] > field[13]: +desc_test_complex.proto:210:9 +desc_test_complex.proto:210:39 + + + > message_type[8] > field[13] > label: +desc_test_complex.proto:210:9 +desc_test_complex.proto:210:17 + + + > message_type[8] > field[13] > type: +desc_test_complex.proto:210:18 +desc_test_complex.proto:210:25 + + + > message_type[8] > field[13] > name: +desc_test_complex.proto:210:26 +desc_test_complex.proto:210:33 + + + > message_type[8] > field[13] > number: +desc_test_complex.proto:210:36 +desc_test_complex.proto:210:38 + + + > message_type[8] > field[14]: +desc_test_complex.proto:211:9 +desc_test_complex.proto:211:39 + + + > message_type[8] > field[14] > label: +desc_test_complex.proto:211:9 +desc_test_complex.proto:211:17 + + + > message_type[8] > field[14] > type: +desc_test_complex.proto:211:18 +desc_test_complex.proto:211:25 + + + > message_type[8] > field[14] > name: +desc_test_complex.proto:211:26 +desc_test_complex.proto:211:33 + + + > message_type[8] > field[14] > number: +desc_test_complex.proto:211:36 +desc_test_complex.proto:211:38 + + + > message_type[8] > field[15]: +desc_test_complex.proto:212:9 
+desc_test_complex.proto:212:41 + + + > message_type[8] > field[15] > label: +desc_test_complex.proto:212:9 +desc_test_complex.proto:212:17 + + + > message_type[8] > field[15] > type: +desc_test_complex.proto:212:18 +desc_test_complex.proto:212:26 + + + > message_type[8] > field[15] > name: +desc_test_complex.proto:212:27 +desc_test_complex.proto:212:35 + + + > message_type[8] > field[15] > number: +desc_test_complex.proto:212:38 +desc_test_complex.proto:212:40 + + + > message_type[8] > field[16]: +desc_test_complex.proto:213:9 +desc_test_complex.proto:213:41 + + + > message_type[8] > field[16] > label: +desc_test_complex.proto:213:9 +desc_test_complex.proto:213:17 + + + > message_type[8] > field[16] > type: +desc_test_complex.proto:213:18 +desc_test_complex.proto:213:26 + + + > message_type[8] > field[16] > name: +desc_test_complex.proto:213:27 +desc_test_complex.proto:213:35 + + + > message_type[8] > field[16] > number: +desc_test_complex.proto:213:38 +desc_test_complex.proto:213:40 + + + > message_type[8] > field[17]: +desc_test_complex.proto:214:9 +desc_test_complex.proto:214:33 + + + > message_type[8] > field[17] > label: +desc_test_complex.proto:214:9 +desc_test_complex.proto:214:17 + + + > message_type[8] > field[17] > type: +desc_test_complex.proto:214:18 +desc_test_complex.proto:214:22 + + + > message_type[8] > field[17] > name: +desc_test_complex.proto:214:23 +desc_test_complex.proto:214:27 + + + > message_type[8] > field[17] > number: +desc_test_complex.proto:214:30 +desc_test_complex.proto:214:32 + + + > message_type[8] > field[18]: +desc_test_complex.proto:215:9 +desc_test_complex.proto:215:35 + + + > message_type[8] > field[18] > label: +desc_test_complex.proto:215:9 +desc_test_complex.proto:215:17 + + + > message_type[8] > field[18] > type: +desc_test_complex.proto:215:18 +desc_test_complex.proto:215:23 + + + > message_type[8] > field[18] > name: +desc_test_complex.proto:215:24 +desc_test_complex.proto:215:29 + + + > message_type[8] > field[18] > number: +desc_test_complex.proto:215:32 +desc_test_complex.proto:215:34 + + + > message_type[8] > field[19]: +desc_test_complex.proto:216:9 +desc_test_complex.proto:216:37 + + + > message_type[8] > field[19] > label: +desc_test_complex.proto:216:9 +desc_test_complex.proto:216:17 + + + > message_type[8] > field[19] > type: +desc_test_complex.proto:216:18 +desc_test_complex.proto:216:24 + + + > message_type[8] > field[19] > name: +desc_test_complex.proto:216:25 +desc_test_complex.proto:216:31 + + + > message_type[8] > field[19] > number: +desc_test_complex.proto:216:34 +desc_test_complex.proto:216:36 + + + > message_type[8] > field[20]: +desc_test_complex.proto:217:9 +desc_test_complex.proto:217:37 + + + > message_type[8] > field[20] > label: +desc_test_complex.proto:217:9 +desc_test_complex.proto:217:17 + + + > message_type[8] > field[20] > type: +desc_test_complex.proto:217:18 +desc_test_complex.proto:217:22 + + + > message_type[8] > field[20] > name: +desc_test_complex.proto:217:23 +desc_test_complex.proto:217:31 + + + > message_type[8] > field[20] > number: +desc_test_complex.proto:217:34 +desc_test_complex.proto:217:36 + + + > message_type[8] > field[21]: +desc_test_complex.proto:218:9 +desc_test_complex.proto:218:37 + + + > message_type[8] > field[21] > label: +desc_test_complex.proto:218:9 +desc_test_complex.proto:218:17 + + + > message_type[8] > field[21] > type: +desc_test_complex.proto:218:18 +desc_test_complex.proto:218:22 + + + > message_type[8] > field[21] > name: +desc_test_complex.proto:218:23 
+desc_test_complex.proto:218:31 + + + > message_type[8] > field[21] > number: +desc_test_complex.proto:218:34 +desc_test_complex.proto:218:36 + + + > message_type[8] > field[22]: +desc_test_complex.proto:219:9 +desc_test_complex.proto:219:37 + + + > message_type[8] > field[22] > label: +desc_test_complex.proto:219:9 +desc_test_complex.proto:219:17 + + + > message_type[8] > field[22] > type: +desc_test_complex.proto:219:18 +desc_test_complex.proto:219:22 + + + > message_type[8] > field[22] > name: +desc_test_complex.proto:219:23 +desc_test_complex.proto:219:31 + + + > message_type[8] > field[22] > number: +desc_test_complex.proto:219:34 +desc_test_complex.proto:219:36 + + + > message_type[8] > field[23]: +desc_test_complex.proto:220:9 +desc_test_complex.proto:220:36 + + + > message_type[8] > field[23] > label: +desc_test_complex.proto:220:9 +desc_test_complex.proto:220:17 + + + > message_type[8] > field[23] > type: +desc_test_complex.proto:220:18 +desc_test_complex.proto:220:22 + + + > message_type[8] > field[23] > name: +desc_test_complex.proto:220:23 +desc_test_complex.proto:220:30 + + + > message_type[8] > field[23] > number: +desc_test_complex.proto:220:33 +desc_test_complex.proto:220:35 + + + > message_type[8] > field[24]: +desc_test_complex.proto:221:9 +desc_test_complex.proto:221:33 + + + > message_type[8] > field[24] > label: +desc_test_complex.proto:221:9 +desc_test_complex.proto:221:17 + + + > message_type[8] > field[24] > type: +desc_test_complex.proto:221:18 +desc_test_complex.proto:221:22 + + + > message_type[8] > field[24] > name: +desc_test_complex.proto:221:23 +desc_test_complex.proto:221:27 + + + > message_type[8] > field[24] > number: +desc_test_complex.proto:221:30 +desc_test_complex.proto:221:32 + + + > message_type[8] > field[25]: +desc_test_complex.proto:222:9 +desc_test_complex.proto:222:36 + + + > message_type[8] > field[25] > label: +desc_test_complex.proto:222:9 +desc_test_complex.proto:222:17 + + + > message_type[8] > field[25] > type: +desc_test_complex.proto:222:18 +desc_test_complex.proto:222:22 + + + > message_type[8] > field[25] > name: +desc_test_complex.proto:222:23 +desc_test_complex.proto:222:30 + + + > message_type[8] > field[25] > number: +desc_test_complex.proto:222:33 +desc_test_complex.proto:222:35 + + + > message_type[8] > field[26]: +desc_test_complex.proto:223:9 +desc_test_complex.proto:223:32 + + + > message_type[8] > field[26] > label: +desc_test_complex.proto:223:9 +desc_test_complex.proto:223:17 + + + > message_type[8] > field[26] > type: +desc_test_complex.proto:223:18 +desc_test_complex.proto:223:22 + + + > message_type[8] > field[26] > name: +desc_test_complex.proto:223:23 +desc_test_complex.proto:223:26 + + + > message_type[8] > field[26] > number: +desc_test_complex.proto:223:29 +desc_test_complex.proto:223:31 + + + > message_type[8] > field[27]: +desc_test_complex.proto:224:9 +desc_test_complex.proto:224:35 + + + > message_type[8] > field[27] > label: +desc_test_complex.proto:224:9 +desc_test_complex.proto:224:17 + + + > message_type[8] > field[27] > type: +desc_test_complex.proto:224:18 +desc_test_complex.proto:224:22 + + + > message_type[8] > field[27] > name: +desc_test_complex.proto:224:23 +desc_test_complex.proto:224:29 + + + > message_type[8] > field[27] > number: +desc_test_complex.proto:224:32 +desc_test_complex.proto:224:34 + + + > message_type[8] > field[28]: +desc_test_complex.proto:225:9 +desc_test_complex.proto:225:35 + + + > message_type[8] > field[28] > label: +desc_test_complex.proto:225:9 +desc_test_complex.proto:225:17 
+ + + > message_type[8] > field[28] > type: +desc_test_complex.proto:225:18 +desc_test_complex.proto:225:22 + + + > message_type[8] > field[28] > name: +desc_test_complex.proto:225:23 +desc_test_complex.proto:225:29 + + + > message_type[8] > field[28] > number: +desc_test_complex.proto:225:32 +desc_test_complex.proto:225:34 + + + > message_type[8] > field[29]: +desc_test_complex.proto:226:9 +desc_test_complex.proto:226:39 + + + > message_type[8] > field[29] > label: +desc_test_complex.proto:226:9 +desc_test_complex.proto:226:17 + + + > message_type[8] > field[29] > type: +desc_test_complex.proto:226:18 +desc_test_complex.proto:226:22 + + + > message_type[8] > field[29] > name: +desc_test_complex.proto:226:23 +desc_test_complex.proto:226:33 + + + > message_type[8] > field[29] > number: +desc_test_complex.proto:226:36 +desc_test_complex.proto:226:38 + + + > message_type[8] > field[30]: +desc_test_complex.proto:227:9 +desc_test_complex.proto:227:37 + + + > message_type[8] > field[30] > label: +desc_test_complex.proto:227:9 +desc_test_complex.proto:227:17 + + + > message_type[8] > field[30] > type: +desc_test_complex.proto:227:18 +desc_test_complex.proto:227:22 + + + > message_type[8] > field[30] > name: +desc_test_complex.proto:227:23 +desc_test_complex.proto:227:31 + + + > message_type[8] > field[30] > number: +desc_test_complex.proto:227:34 +desc_test_complex.proto:227:36 + + + > message_type[8] > field[31]: +desc_test_complex.proto:228:9 +desc_test_complex.proto:228:31 + + + > message_type[8] > field[31] > label: +desc_test_complex.proto:228:9 +desc_test_complex.proto:228:17 + + + > message_type[8] > field[31] > type: +desc_test_complex.proto:228:18 +desc_test_complex.proto:228:22 + + + > message_type[8] > field[31] > name: +desc_test_complex.proto:228:23 +desc_test_complex.proto:228:25 + + + > message_type[8] > field[31] > number: +desc_test_complex.proto:228:28 +desc_test_complex.proto:228:30 + + + > message_type[8] > field[32]: +desc_test_complex.proto:229:9 +desc_test_complex.proto:229:34 + + + > message_type[8] > field[32] > label: +desc_test_complex.proto:229:9 +desc_test_complex.proto:229:17 + + + > message_type[8] > field[32] > type: +desc_test_complex.proto:229:18 +desc_test_complex.proto:229:23 + + + > message_type[8] > field[32] > name: +desc_test_complex.proto:229:24 +desc_test_complex.proto:229:28 + + + > message_type[8] > field[32] > number: +desc_test_complex.proto:229:31 +desc_test_complex.proto:229:33 + + + > message_type[8] > field[33]: +desc_test_complex.proto:230:9 +desc_test_complex.proto:230:35 + + + > message_type[8] > field[33] > label: +desc_test_complex.proto:230:9 +desc_test_complex.proto:230:17 + + + > message_type[8] > field[33] > type: +desc_test_complex.proto:230:18 +desc_test_complex.proto:230:23 + + + > message_type[8] > field[33] > name: +desc_test_complex.proto:230:24 +desc_test_complex.proto:230:29 + + + > message_type[8] > field[33] > number: +desc_test_complex.proto:230:32 +desc_test_complex.proto:230:34 + + + > message_type[8] > field[34]: +desc_test_complex.proto:231:9 +desc_test_complex.proto:231:37 + + + > message_type[8] > field[34] > label: +desc_test_complex.proto:231:9 +desc_test_complex.proto:231:17 + + + > message_type[8] > field[34] > type: +desc_test_complex.proto:231:18 +desc_test_complex.proto:231:23 + + + > message_type[8] > field[34] > name: +desc_test_complex.proto:231:24 +desc_test_complex.proto:231:31 + + + > message_type[8] > field[34] > number: +desc_test_complex.proto:231:34 +desc_test_complex.proto:231:36 + + + > extension: 
+desc_test_complex.proto:234:1 +desc_test_complex.proto:271:2 + + + > extension[7]: +desc_test_complex.proto:235:9 +desc_test_complex.proto:235:38 + + + > extension[7] > extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[7] > label: +desc_test_complex.proto:235:9 +desc_test_complex.proto:235:17 + + + > extension[7] > type: +desc_test_complex.proto:235:18 +desc_test_complex.proto:235:22 + + + > extension[7] > name: +desc_test_complex.proto:235:23 +desc_test_complex.proto:235:29 + + + > extension[7] > number: +desc_test_complex.proto:235:32 +desc_test_complex.proto:235:37 + + + > extension[8]: +desc_test_complex.proto:236:9 +desc_test_complex.proto:236:38 + + + > extension[8] > extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[8] > label: +desc_test_complex.proto:236:9 +desc_test_complex.proto:236:17 + + + > extension[8] > type: +desc_test_complex.proto:236:18 +desc_test_complex.proto:236:22 + + + > extension[8] > name: +desc_test_complex.proto:236:23 +desc_test_complex.proto:236:29 + + + > extension[8] > number: +desc_test_complex.proto:236:32 +desc_test_complex.proto:236:37 + + + > extension[9]: +desc_test_complex.proto:237:9 +desc_test_complex.proto:237:38 + + + > extension[9] > extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[9] > label: +desc_test_complex.proto:237:9 +desc_test_complex.proto:237:17 + + + > extension[9] > type: +desc_test_complex.proto:237:18 +desc_test_complex.proto:237:22 + + + > extension[9] > name: +desc_test_complex.proto:237:23 +desc_test_complex.proto:237:29 + + + > extension[9] > number: +desc_test_complex.proto:237:32 +desc_test_complex.proto:237:37 + + + > extension[10]: +desc_test_complex.proto:238:9 +desc_test_complex.proto:238:36 + + + > extension[10] > extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[10] > label: +desc_test_complex.proto:238:9 +desc_test_complex.proto:238:17 + + + > extension[10] > type: +desc_test_complex.proto:238:18 +desc_test_complex.proto:238:22 + + + > extension[10] > name: +desc_test_complex.proto:238:23 +desc_test_complex.proto:238:27 + + + > extension[10] > number: +desc_test_complex.proto:238:30 +desc_test_complex.proto:238:35 + + + > extension[11]: +desc_test_complex.proto:239:9 +desc_test_complex.proto:239:39 + + + > extension[11] > extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[11] > label: +desc_test_complex.proto:239:9 +desc_test_complex.proto:239:17 + + + > extension[11] > type: +desc_test_complex.proto:239:18 +desc_test_complex.proto:239:22 + + + > extension[11] > name: +desc_test_complex.proto:239:23 +desc_test_complex.proto:239:30 + + + > extension[11] > number: +desc_test_complex.proto:239:33 +desc_test_complex.proto:239:38 + + + > extension[12]: +desc_test_complex.proto:240:9 +desc_test_complex.proto:240:40 + + + > extension[12] > extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[12] > label: +desc_test_complex.proto:240:9 +desc_test_complex.proto:240:17 + + + > extension[12] > type: +desc_test_complex.proto:240:18 +desc_test_complex.proto:240:24 + + + > extension[12] > name: +desc_test_complex.proto:240:25 +desc_test_complex.proto:240:31 + + + > extension[12] > number: +desc_test_complex.proto:240:34 +desc_test_complex.proto:240:39 + + + > extension[13]: +desc_test_complex.proto:241:9 +desc_test_complex.proto:241:38 + + + > extension[13] > extendee: 
+desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[13] > label: +desc_test_complex.proto:241:9 +desc_test_complex.proto:241:17 + + + > extension[13] > type: +desc_test_complex.proto:241:18 +desc_test_complex.proto:241:23 + + + > extension[13] > name: +desc_test_complex.proto:241:24 +desc_test_complex.proto:241:29 + + + > extension[13] > number: +desc_test_complex.proto:241:32 +desc_test_complex.proto:241:37 + + + > extension[14]: +desc_test_complex.proto:242:9 +desc_test_complex.proto:242:38 + + + > extension[14] > extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[14] > label: +desc_test_complex.proto:242:9 +desc_test_complex.proto:242:17 + + + > extension[14] > type: +desc_test_complex.proto:242:18 +desc_test_complex.proto:242:23 + + + > extension[14] > name: +desc_test_complex.proto:242:24 +desc_test_complex.proto:242:29 + + + > extension[14] > number: +desc_test_complex.proto:242:32 +desc_test_complex.proto:242:37 + + + > extension[15]: +desc_test_complex.proto:243:9 +desc_test_complex.proto:243:38 + + + > extension[15] > extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[15] > label: +desc_test_complex.proto:243:9 +desc_test_complex.proto:243:17 + + + > extension[15] > type: +desc_test_complex.proto:243:18 +desc_test_complex.proto:243:23 + + + > extension[15] > name: +desc_test_complex.proto:243:24 +desc_test_complex.proto:243:29 + + + > extension[15] > number: +desc_test_complex.proto:243:32 +desc_test_complex.proto:243:37 + + + > extension[16]: +desc_test_complex.proto:244:9 +desc_test_complex.proto:244:40 + + + > extension[16] > extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[16] > label: +desc_test_complex.proto:244:9 +desc_test_complex.proto:244:17 + + + > extension[16] > type: +desc_test_complex.proto:244:18 +desc_test_complex.proto:244:24 + + + > extension[16] > name: +desc_test_complex.proto:244:25 +desc_test_complex.proto:244:31 + + + > extension[16] > number: +desc_test_complex.proto:244:34 +desc_test_complex.proto:244:39 + + + > extension[17]: +desc_test_complex.proto:245:9 +desc_test_complex.proto:245:40 + + + > extension[17] > extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[17] > label: +desc_test_complex.proto:245:9 +desc_test_complex.proto:245:17 + + + > extension[17] > type: +desc_test_complex.proto:245:18 +desc_test_complex.proto:245:24 + + + > extension[17] > name: +desc_test_complex.proto:245:25 +desc_test_complex.proto:245:31 + + + > extension[17] > number: +desc_test_complex.proto:245:34 +desc_test_complex.proto:245:39 + + + > extension[18]: +desc_test_complex.proto:246:9 +desc_test_complex.proto:246:40 + + + > extension[18] > extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[18] > label: +desc_test_complex.proto:246:9 +desc_test_complex.proto:246:17 + + + > extension[18] > type: +desc_test_complex.proto:246:18 +desc_test_complex.proto:246:24 + + + > extension[18] > name: +desc_test_complex.proto:246:25 +desc_test_complex.proto:246:31 + + + > extension[18] > number: +desc_test_complex.proto:246:34 +desc_test_complex.proto:246:39 + + + > extension[19]: +desc_test_complex.proto:247:9 +desc_test_complex.proto:247:40 + + + > extension[19] > extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[19] > label: +desc_test_complex.proto:247:9 +desc_test_complex.proto:247:17 + + + > extension[19] > type: 
+desc_test_complex.proto:247:18 +desc_test_complex.proto:247:24 + + + > extension[19] > name: +desc_test_complex.proto:247:25 +desc_test_complex.proto:247:31 + + + > extension[19] > number: +desc_test_complex.proto:247:34 +desc_test_complex.proto:247:39 + + + > extension[20]: +desc_test_complex.proto:248:9 +desc_test_complex.proto:248:42 + + + > extension[20] > extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[20] > label: +desc_test_complex.proto:248:9 +desc_test_complex.proto:248:17 + + + > extension[20] > type: +desc_test_complex.proto:248:18 +desc_test_complex.proto:248:25 + + + > extension[20] > name: +desc_test_complex.proto:248:26 +desc_test_complex.proto:248:33 + + + > extension[20] > number: +desc_test_complex.proto:248:36 +desc_test_complex.proto:248:41 + + + > extension[21]: +desc_test_complex.proto:249:9 +desc_test_complex.proto:249:42 + + + > extension[21] > extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[21] > label: +desc_test_complex.proto:249:9 +desc_test_complex.proto:249:17 + + + > extension[21] > type: +desc_test_complex.proto:249:18 +desc_test_complex.proto:249:25 + + + > extension[21] > name: +desc_test_complex.proto:249:26 +desc_test_complex.proto:249:33 + + + > extension[21] > number: +desc_test_complex.proto:249:36 +desc_test_complex.proto:249:41 + + + > extension[22]: +desc_test_complex.proto:250:9 +desc_test_complex.proto:250:44 + + + > extension[22] > extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[22] > label: +desc_test_complex.proto:250:9 +desc_test_complex.proto:250:17 + + + > extension[22] > type: +desc_test_complex.proto:250:18 +desc_test_complex.proto:250:26 + + + > extension[22] > name: +desc_test_complex.proto:250:27 +desc_test_complex.proto:250:35 + + + > extension[22] > number: +desc_test_complex.proto:250:38 +desc_test_complex.proto:250:43 + + + > extension[23]: +desc_test_complex.proto:251:9 +desc_test_complex.proto:251:44 + + + > extension[23] > extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[23] > label: +desc_test_complex.proto:251:9 +desc_test_complex.proto:251:17 + + + > extension[23] > type: +desc_test_complex.proto:251:18 +desc_test_complex.proto:251:26 + + + > extension[23] > name: +desc_test_complex.proto:251:27 +desc_test_complex.proto:251:35 + + + > extension[23] > number: +desc_test_complex.proto:251:38 +desc_test_complex.proto:251:43 + + + > extension[24]: +desc_test_complex.proto:252:9 +desc_test_complex.proto:252:36 + + + > extension[24] > extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[24] > label: +desc_test_complex.proto:252:9 +desc_test_complex.proto:252:17 + + + > extension[24] > type: +desc_test_complex.proto:252:18 +desc_test_complex.proto:252:22 + + + > extension[24] > name: +desc_test_complex.proto:252:23 +desc_test_complex.proto:252:27 + + + > extension[24] > number: +desc_test_complex.proto:252:30 +desc_test_complex.proto:252:35 + + + > extension[25]: +desc_test_complex.proto:253:9 +desc_test_complex.proto:253:38 + + + > extension[25] > extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[25] > label: +desc_test_complex.proto:253:9 +desc_test_complex.proto:253:17 + + + > extension[25] > type: +desc_test_complex.proto:253:18 +desc_test_complex.proto:253:23 + + + > extension[25] > name: +desc_test_complex.proto:253:24 +desc_test_complex.proto:253:29 + + + > extension[25] > 
number: +desc_test_complex.proto:253:32 +desc_test_complex.proto:253:37 + + + > extension[26]: +desc_test_complex.proto:254:9 +desc_test_complex.proto:254:40 + + + > extension[26] > extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[26] > label: +desc_test_complex.proto:254:9 +desc_test_complex.proto:254:17 + + + > extension[26] > type: +desc_test_complex.proto:254:18 +desc_test_complex.proto:254:24 + + + > extension[26] > name: +desc_test_complex.proto:254:25 +desc_test_complex.proto:254:31 + + + > extension[26] > number: +desc_test_complex.proto:254:34 +desc_test_complex.proto:254:39 + + + > extension[27]: +desc_test_complex.proto:255:9 +desc_test_complex.proto:255:40 + + + > extension[27] > extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[27] > label: +desc_test_complex.proto:255:9 +desc_test_complex.proto:255:17 + + + > extension[27] > type: +desc_test_complex.proto:255:18 +desc_test_complex.proto:255:22 + + + > extension[27] > name: +desc_test_complex.proto:255:23 +desc_test_complex.proto:255:31 + + + > extension[27] > number: +desc_test_complex.proto:255:34 +desc_test_complex.proto:255:39 + + + > extension[28]: +desc_test_complex.proto:256:9 +desc_test_complex.proto:256:40 + + + > extension[28] > extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[28] > label: +desc_test_complex.proto:256:9 +desc_test_complex.proto:256:17 + + + > extension[28] > type: +desc_test_complex.proto:256:18 +desc_test_complex.proto:256:22 + + + > extension[28] > name: +desc_test_complex.proto:256:23 +desc_test_complex.proto:256:31 + + + > extension[28] > number: +desc_test_complex.proto:256:34 +desc_test_complex.proto:256:39 + + + > extension[29]: +desc_test_complex.proto:257:9 +desc_test_complex.proto:257:40 + + + > extension[29] > extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[29] > label: +desc_test_complex.proto:257:9 +desc_test_complex.proto:257:17 + + + > extension[29] > type: +desc_test_complex.proto:257:18 +desc_test_complex.proto:257:22 + + + > extension[29] > name: +desc_test_complex.proto:257:23 +desc_test_complex.proto:257:31 + + + > extension[29] > number: +desc_test_complex.proto:257:34 +desc_test_complex.proto:257:39 + + + > extension[30]: +desc_test_complex.proto:258:9 +desc_test_complex.proto:258:39 + + + > extension[30] > extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[30] > label: +desc_test_complex.proto:258:9 +desc_test_complex.proto:258:17 + + + > extension[30] > type: +desc_test_complex.proto:258:18 +desc_test_complex.proto:258:22 + + + > extension[30] > name: +desc_test_complex.proto:258:23 +desc_test_complex.proto:258:30 + + + > extension[30] > number: +desc_test_complex.proto:258:33 +desc_test_complex.proto:258:38 + + + > extension[31]: +desc_test_complex.proto:259:9 +desc_test_complex.proto:259:36 + + + > extension[31] > extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[31] > label: +desc_test_complex.proto:259:9 +desc_test_complex.proto:259:17 + + + > extension[31] > type: +desc_test_complex.proto:259:18 +desc_test_complex.proto:259:22 + + + > extension[31] > name: +desc_test_complex.proto:259:23 +desc_test_complex.proto:259:27 + + + > extension[31] > number: +desc_test_complex.proto:259:30 +desc_test_complex.proto:259:35 + + + > extension[32]: +desc_test_complex.proto:260:9 +desc_test_complex.proto:260:39 + + + > extension[32] > 
extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[32] > label: +desc_test_complex.proto:260:9 +desc_test_complex.proto:260:17 + + + > extension[32] > type: +desc_test_complex.proto:260:18 +desc_test_complex.proto:260:22 + + + > extension[32] > name: +desc_test_complex.proto:260:23 +desc_test_complex.proto:260:30 + + + > extension[32] > number: +desc_test_complex.proto:260:33 +desc_test_complex.proto:260:38 + + + > extension[33]: +desc_test_complex.proto:261:9 +desc_test_complex.proto:261:35 + + + > extension[33] > extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[33] > label: +desc_test_complex.proto:261:9 +desc_test_complex.proto:261:17 + + + > extension[33] > type: +desc_test_complex.proto:261:18 +desc_test_complex.proto:261:22 + + + > extension[33] > name: +desc_test_complex.proto:261:23 +desc_test_complex.proto:261:26 + + + > extension[33] > number: +desc_test_complex.proto:261:29 +desc_test_complex.proto:261:34 + + + > extension[34]: +desc_test_complex.proto:262:9 +desc_test_complex.proto:262:38 + + + > extension[34] > extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[34] > label: +desc_test_complex.proto:262:9 +desc_test_complex.proto:262:17 + + + > extension[34] > type: +desc_test_complex.proto:262:18 +desc_test_complex.proto:262:22 + + + > extension[34] > name: +desc_test_complex.proto:262:23 +desc_test_complex.proto:262:29 + + + > extension[34] > number: +desc_test_complex.proto:262:32 +desc_test_complex.proto:262:37 + + + > extension[35]: +desc_test_complex.proto:263:9 +desc_test_complex.proto:263:38 + + + > extension[35] > extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[35] > label: +desc_test_complex.proto:263:9 +desc_test_complex.proto:263:17 + + + > extension[35] > type: +desc_test_complex.proto:263:18 +desc_test_complex.proto:263:22 + + + > extension[35] > name: +desc_test_complex.proto:263:23 +desc_test_complex.proto:263:29 + + + > extension[35] > number: +desc_test_complex.proto:263:32 +desc_test_complex.proto:263:37 + + + > extension[36]: +desc_test_complex.proto:264:9 +desc_test_complex.proto:264:42 + + + > extension[36] > extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[36] > label: +desc_test_complex.proto:264:9 +desc_test_complex.proto:264:17 + + + > extension[36] > type: +desc_test_complex.proto:264:18 +desc_test_complex.proto:264:22 + + + > extension[36] > name: +desc_test_complex.proto:264:23 +desc_test_complex.proto:264:33 + + + > extension[36] > number: +desc_test_complex.proto:264:36 +desc_test_complex.proto:264:41 + + + > extension[37]: +desc_test_complex.proto:265:9 +desc_test_complex.proto:265:40 + + + > extension[37] > extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[37] > label: +desc_test_complex.proto:265:9 +desc_test_complex.proto:265:17 + + + > extension[37] > type: +desc_test_complex.proto:265:18 +desc_test_complex.proto:265:22 + + + > extension[37] > name: +desc_test_complex.proto:265:23 +desc_test_complex.proto:265:31 + + + > extension[37] > number: +desc_test_complex.proto:265:34 +desc_test_complex.proto:265:39 + + + > extension[38]: +desc_test_complex.proto:266:9 +desc_test_complex.proto:266:34 + + + > extension[38] > extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[38] > label: +desc_test_complex.proto:266:9 +desc_test_complex.proto:266:17 + + + > extension[38] 
> type: +desc_test_complex.proto:266:18 +desc_test_complex.proto:266:22 + + + > extension[38] > name: +desc_test_complex.proto:266:23 +desc_test_complex.proto:266:25 + + + > extension[38] > number: +desc_test_complex.proto:266:28 +desc_test_complex.proto:266:33 + + + > extension[39]: +desc_test_complex.proto:267:9 +desc_test_complex.proto:267:37 + + + > extension[39] > extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[39] > label: +desc_test_complex.proto:267:9 +desc_test_complex.proto:267:17 + + + > extension[39] > type: +desc_test_complex.proto:267:18 +desc_test_complex.proto:267:23 + + + > extension[39] > name: +desc_test_complex.proto:267:24 +desc_test_complex.proto:267:28 + + + > extension[39] > number: +desc_test_complex.proto:267:31 +desc_test_complex.proto:267:36 + + + > extension[40]: +desc_test_complex.proto:268:9 +desc_test_complex.proto:268:38 + + + > extension[40] > extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[40] > label: +desc_test_complex.proto:268:9 +desc_test_complex.proto:268:17 + + + > extension[40] > type: +desc_test_complex.proto:268:18 +desc_test_complex.proto:268:23 + + + > extension[40] > name: +desc_test_complex.proto:268:24 +desc_test_complex.proto:268:29 + + + > extension[40] > number: +desc_test_complex.proto:268:32 +desc_test_complex.proto:268:37 + + + > extension[41]: +desc_test_complex.proto:269:9 +desc_test_complex.proto:269:40 + + + > extension[41] > extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[41] > label: +desc_test_complex.proto:269:9 +desc_test_complex.proto:269:17 + + + > extension[41] > type: +desc_test_complex.proto:269:18 +desc_test_complex.proto:269:23 + + + > extension[41] > name: +desc_test_complex.proto:269:24 +desc_test_complex.proto:269:31 + + + > extension[41] > number: +desc_test_complex.proto:269:34 +desc_test_complex.proto:269:39 + + + > extension[42]: +desc_test_complex.proto:270:9 +desc_test_complex.proto:270:49 + + + > extension[42] > extendee: +desc_test_complex.proto:234:8 +desc_test_complex.proto:234:36 + + + > extension[42] > label: +desc_test_complex.proto:270:9 +desc_test_complex.proto:270:17 + + + > extension[42] > type_name: +desc_test_complex.proto:270:18 +desc_test_complex.proto:270:35 + + + > extension[42] > name: +desc_test_complex.proto:270:36 +desc_test_complex.proto:270:40 + + + > extension[42] > number: +desc_test_complex.proto:270:43 +desc_test_complex.proto:270:48 + + + > message_type[9]: +desc_test_complex.proto:273:1 +desc_test_complex.proto:298:2 + + + > message_type[9] > name: +desc_test_complex.proto:273:9 +desc_test_complex.proto:273:32 + + + > message_type[9] > field[0]: +desc_test_complex.proto:274:9 +desc_test_complex.proto:284:11 + + + > message_type[9] > field[0] > label: +desc_test_complex.proto:274:9 +desc_test_complex.proto:274:17 + + + > message_type[9] > field[0] > type: +desc_test_complex.proto:274:18 +desc_test_complex.proto:274:24 + + + > message_type[9] > field[0] > name: +desc_test_complex.proto:274:25 +desc_test_complex.proto:274:27 + + + > message_type[9] > field[0] > number: +desc_test_complex.proto:274:30 +desc_test_complex.proto:274:31 + + + > message_type[9] > field[0] > options: +desc_test_complex.proto:274:32 +desc_test_complex.proto:284:10 + + + > message_type[9] > field[0] > options > (foo.bar.syntax): +desc_test_complex.proto:275:17 +desc_test_complex.proto:275:32 + + + > message_type[9] > field[0] > options > (foo.bar.import): 
+desc_test_complex.proto:275:34 +desc_test_complex.proto:275:49 + + + > message_type[9] > field[0] > options > (foo.bar.public): +desc_test_complex.proto:275:51 +desc_test_complex.proto:275:66 + + + > message_type[9] > field[0] > options > (foo.bar.weak): +desc_test_complex.proto:275:68 +desc_test_complex.proto:275:81 + + + > message_type[9] > field[0] > options > (foo.bar.package): +desc_test_complex.proto:275:83 +desc_test_complex.proto:275:99 + + + > message_type[9] > field[0] > options > (foo.bar.string): +desc_test_complex.proto:276:17 +desc_test_complex.proto:276:78 + + + > message_type[9] > field[0] > options > (foo.bar.bytes): +desc_test_complex.proto:276:80 +desc_test_complex.proto:276:139 + + + > message_type[9] > field[0] > options > (foo.bar.bool): +desc_test_complex.proto:276:141 +desc_test_complex.proto:276:154 + + + > message_type[9] > field[0] > options > (foo.bar.float): +desc_test_complex.proto:277:17 +desc_test_complex.proto:277:31 + + + > message_type[9] > field[0] > options > (foo.bar.double): +desc_test_complex.proto:277:33 +desc_test_complex.proto:277:51 + + + > message_type[9] > field[0] > options > (foo.bar.int32): +desc_test_complex.proto:278:17 +desc_test_complex.proto:278:29 + + + > message_type[9] > field[0] > options > (foo.bar.int64): +desc_test_complex.proto:278:31 +desc_test_complex.proto:278:43 + + + > message_type[9] > field[0] > options > (foo.bar.uint32): +desc_test_complex.proto:278:45 +desc_test_complex.proto:278:60 + + + > message_type[9] > field[0] > options > (foo.bar.uint64): +desc_test_complex.proto:278:62 +desc_test_complex.proto:278:77 + + + > message_type[9] > field[0] > options > (foo.bar.sint32): +desc_test_complex.proto:278:79 +desc_test_complex.proto:278:93 + + + > message_type[9] > field[0] > options > (foo.bar.sint64): +desc_test_complex.proto:278:95 +desc_test_complex.proto:278:109 + + + > message_type[9] > field[0] > options > (foo.bar.fixed32): +desc_test_complex.proto:279:17 +desc_test_complex.proto:279:33 + + + > message_type[9] > field[0] > options > (foo.bar.fixed64): +desc_test_complex.proto:279:35 +desc_test_complex.proto:279:51 + + + > message_type[9] > field[0] > options > (foo.bar.sfixed32): +desc_test_complex.proto:279:53 +desc_test_complex.proto:279:71 + + + > message_type[9] > field[0] > options > (foo.bar.sfixed64): +desc_test_complex.proto:279:73 +desc_test_complex.proto:279:91 + + + > message_type[9] > field[0] > options > (foo.bar.optional): +desc_test_complex.proto:280:17 +desc_test_complex.proto:280:34 + + + > message_type[9] > field[0] > options > (foo.bar.repeated): +desc_test_complex.proto:280:36 +desc_test_complex.proto:280:53 + + + > message_type[9] > field[0] > options > (foo.bar.required): +desc_test_complex.proto:280:55 +desc_test_complex.proto:280:72 + + + > message_type[9] > field[0] > options > (foo.bar.message): +desc_test_complex.proto:281:17 +desc_test_complex.proto:281:33 + + + > message_type[9] > field[0] > options > (foo.bar.enum): +desc_test_complex.proto:281:35 +desc_test_complex.proto:281:48 + + + > message_type[9] > field[0] > options > (foo.bar.service): +desc_test_complex.proto:281:50 +desc_test_complex.proto:281:66 + + + > message_type[9] > field[0] > options > (foo.bar.rpc): +desc_test_complex.proto:281:68 +desc_test_complex.proto:281:80 + + + > message_type[9] > field[0] > options > (foo.bar.option): +desc_test_complex.proto:282:17 +desc_test_complex.proto:282:32 + + + > message_type[9] > field[0] > options > (foo.bar.extend): +desc_test_complex.proto:282:34 +desc_test_complex.proto:282:49 + 
+ + > message_type[9] > field[0] > options > (foo.bar.extensions): +desc_test_complex.proto:282:51 +desc_test_complex.proto:282:70 + + + > message_type[9] > field[0] > options > (foo.bar.reserved): +desc_test_complex.proto:282:72 +desc_test_complex.proto:282:89 + + + > message_type[9] > field[0] > options > (foo.bar.to): +desc_test_complex.proto:283:17 +desc_test_complex.proto:283:28 + + + > message_type[9] > field[0] > options > (foo.bar.true): +desc_test_complex.proto:283:30 +desc_test_complex.proto:283:42 + + + > message_type[9] > field[0] > options > (foo.bar.false): +desc_test_complex.proto:283:44 +desc_test_complex.proto:283:58 + + + > message_type[9] > field[0] > options > (foo.bar.default): +desc_test_complex.proto:283:60 +desc_test_complex.proto:283:75 + + + > message_type[9] > field[1]: +desc_test_complex.proto:285:9 +desc_test_complex.proto:297:11 + + + > message_type[9] > field[1] > label: +desc_test_complex.proto:285:9 +desc_test_complex.proto:285:17 + + + > message_type[9] > field[1] > type: +desc_test_complex.proto:285:18 +desc_test_complex.proto:285:24 + + + > message_type[9] > field[1] > name: +desc_test_complex.proto:285:25 +desc_test_complex.proto:285:29 + + + > message_type[9] > field[1] > number: +desc_test_complex.proto:285:32 +desc_test_complex.proto:285:33 + + + > message_type[9] > field[1] > options: +desc_test_complex.proto:285:34 +desc_test_complex.proto:297:10 + + + > message_type[9] > field[1] > options > (foo.bar.boom): +desc_test_complex.proto:286:17 +desc_test_complex.proto:296:18 +---- desc_test_options.proto ---- + + +: +desc_test_options.proto:1:1 +desc_test_options.proto:63:2 + + + > syntax: +desc_test_options.proto:1:1 +desc_test_options.proto:1:19 + + + > options: +desc_test_options.proto:3:1 +desc_test_options.proto:3:73 + + + > options > go_package: +desc_test_options.proto:3:1 +desc_test_options.proto:3:73 + + + > package: +desc_test_options.proto:5:1 +desc_test_options.proto:5:20 + + + > dependency[0]: +desc_test_options.proto:7:1 +desc_test_options.proto:7:43 + + + > extension: +desc_test_options.proto:9:1 +desc_test_options.proto:11:2 + + + > extension[0]: +desc_test_options.proto:10:9 +desc_test_options.proto:10:38 + + + > extension[0] > extendee: +desc_test_options.proto:9:8 +desc_test_options.proto:9:38 + + + > extension[0] > label: +desc_test_options.proto:10:9 +desc_test_options.proto:10:17 + + + > extension[0] > type: +desc_test_options.proto:10:18 +desc_test_options.proto:10:22 + + + > extension[0] > name: +desc_test_options.proto:10:23 +desc_test_options.proto:10:29 + + + > extension[0] > number: +desc_test_options.proto:10:32 +desc_test_options.proto:10:37 + + + > extension: +desc_test_options.proto:13:1 +desc_test_options.proto:16:2 + + + > extension[1]: +desc_test_options.proto:14:9 +desc_test_options.proto:14:40 + + + > extension[1] > extendee: +desc_test_options.proto:13:8 +desc_test_options.proto:13:36 + + + > extension[1] > label: +desc_test_options.proto:14:9 +desc_test_options.proto:14:17 + + + > extension[1] > type: +desc_test_options.proto:14:18 +desc_test_options.proto:14:24 + + + > extension[1] > name: +desc_test_options.proto:14:25 +desc_test_options.proto:14:31 + + + > extension[1] > number: +desc_test_options.proto:14:34 +desc_test_options.proto:14:39 + + + > extension[2]: +desc_test_options.proto:15:9 +desc_test_options.proto:15:40 + + + > extension[2] > extendee: +desc_test_options.proto:13:8 +desc_test_options.proto:13:36 + + + > extension[2] > label: +desc_test_options.proto:15:9 +desc_test_options.proto:15:17 + + + > 
extension[2] > type: +desc_test_options.proto:15:18 +desc_test_options.proto:15:23 + + + > extension[2] > name: +desc_test_options.proto:15:24 +desc_test_options.proto:15:31 + + + > extension[2] > number: +desc_test_options.proto:15:34 +desc_test_options.proto:15:39 + + + > extension: +desc_test_options.proto:18:1 +desc_test_options.proto:24:2 + + + > extension[3]: +desc_test_options.proto:19:9 +desc_test_options.proto:19:39 + + + > extension[3] > extendee: +desc_test_options.proto:18:8 +desc_test_options.proto:18:35 + + + > extension[3] > label: +desc_test_options.proto:19:9 +desc_test_options.proto:19:17 + + + > extension[3] > type: +desc_test_options.proto:19:18 +desc_test_options.proto:19:23 + + + > extension[3] > name: +desc_test_options.proto:19:24 +desc_test_options.proto:19:30 + + + > extension[3] > number: +desc_test_options.proto:19:33 +desc_test_options.proto:19:38 + + + > extension[4]: +desc_test_options.proto:20:9 +desc_test_options.proto:20:41 + + + > extension[4] > extendee: +desc_test_options.proto:18:8 +desc_test_options.proto:18:35 + + + > extension[4] > label: +desc_test_options.proto:20:9 +desc_test_options.proto:20:17 + + + > extension[4] > type: +desc_test_options.proto:20:18 +desc_test_options.proto:20:24 + + + > extension[4] > name: +desc_test_options.proto:20:25 +desc_test_options.proto:20:32 + + + > extension[4] > number: +desc_test_options.proto:20:35 +desc_test_options.proto:20:40 + + + > extension[5]: +desc_test_options.proto:21:9 +desc_test_options.proto:21:44 + + + > extension[5] > extendee: +desc_test_options.proto:18:8 +desc_test_options.proto:18:35 + + + > extension[5] > label: +desc_test_options.proto:21:9 +desc_test_options.proto:21:17 + + + > extension[5] > type: +desc_test_options.proto:21:18 +desc_test_options.proto:21:26 + + + > extension[5] > name: +desc_test_options.proto:21:27 +desc_test_options.proto:21:35 + + + > extension[5] > number: +desc_test_options.proto:21:38 +desc_test_options.proto:21:43 + + + > extension[6]: +desc_test_options.proto:22:9 +desc_test_options.proto:22:41 + + + > extension[6] > extendee: +desc_test_options.proto:18:8 +desc_test_options.proto:18:35 + + + > extension[6] > label: +desc_test_options.proto:22:9 +desc_test_options.proto:22:17 + + + > extension[6] > type: +desc_test_options.proto:22:18 +desc_test_options.proto:22:24 + + + > extension[6] > name: +desc_test_options.proto:22:25 +desc_test_options.proto:22:32 + + + > extension[6] > number: +desc_test_options.proto:22:35 +desc_test_options.proto:22:40 + + + > extension[7]: +desc_test_options.proto:23:9 +desc_test_options.proto:23:43 + + + > extension[7] > extendee: +desc_test_options.proto:18:8 +desc_test_options.proto:18:35 + + + > extension[7] > label: +desc_test_options.proto:23:9 +desc_test_options.proto:23:17 + + + > extension[7] > type: +desc_test_options.proto:23:18 +desc_test_options.proto:23:25 + + + > extension[7] > name: +desc_test_options.proto:23:26 +desc_test_options.proto:23:34 + + + > extension[7] > number: +desc_test_options.proto:23:37 +desc_test_options.proto:23:42 + + + > extension: +desc_test_options.proto:26:1 +desc_test_options.proto:32:2 + + + > extension[8]: +desc_test_options.proto:27:9 +desc_test_options.proto:27:40 + + + > extension[8] > extendee: +desc_test_options.proto:26:8 +desc_test_options.proto:26:40 + + + > extension[8] > label: +desc_test_options.proto:27:9 +desc_test_options.proto:27:17 + + + > extension[8] > type: +desc_test_options.proto:27:18 +desc_test_options.proto:27:23 + + + > extension[8] > name: 
+desc_test_options.proto:27:24 +desc_test_options.proto:27:31 + + + > extension[8] > number: +desc_test_options.proto:27:34 +desc_test_options.proto:27:39 + + + > extension[9]: +desc_test_options.proto:28:9 +desc_test_options.proto:28:42 + + + > extension[9] > extendee: +desc_test_options.proto:26:8 +desc_test_options.proto:26:40 + + + > extension[9] > label: +desc_test_options.proto:28:9 +desc_test_options.proto:28:17 + + + > extension[9] > type: +desc_test_options.proto:28:18 +desc_test_options.proto:28:24 + + + > extension[9] > name: +desc_test_options.proto:28:25 +desc_test_options.proto:28:33 + + + > extension[9] > number: +desc_test_options.proto:28:36 +desc_test_options.proto:28:41 + + + > extension[10]: +desc_test_options.proto:29:9 +desc_test_options.proto:29:45 + + + > extension[10] > extendee: +desc_test_options.proto:26:8 +desc_test_options.proto:26:40 + + + > extension[10] > label: +desc_test_options.proto:29:9 +desc_test_options.proto:29:17 + + + > extension[10] > type: +desc_test_options.proto:29:18 +desc_test_options.proto:29:26 + + + > extension[10] > name: +desc_test_options.proto:29:27 +desc_test_options.proto:29:36 + + + > extension[10] > number: +desc_test_options.proto:29:39 +desc_test_options.proto:29:44 + + + > extension[11]: +desc_test_options.proto:30:9 +desc_test_options.proto:30:42 + + + > extension[11] > extendee: +desc_test_options.proto:26:8 +desc_test_options.proto:26:40 + + + > extension[11] > label: +desc_test_options.proto:30:9 +desc_test_options.proto:30:17 + + + > extension[11] > type: +desc_test_options.proto:30:18 +desc_test_options.proto:30:24 + + + > extension[11] > name: +desc_test_options.proto:30:25 +desc_test_options.proto:30:33 + + + > extension[11] > number: +desc_test_options.proto:30:36 +desc_test_options.proto:30:41 + + + > extension[12]: +desc_test_options.proto:31:9 +desc_test_options.proto:31:44 + + + > extension[12] > extendee: +desc_test_options.proto:26:8 +desc_test_options.proto:26:40 + + + > extension[12] > label: +desc_test_options.proto:31:9 +desc_test_options.proto:31:17 + + + > extension[12] > type: +desc_test_options.proto:31:18 +desc_test_options.proto:31:25 + + + > extension[12] > name: +desc_test_options.proto:31:26 +desc_test_options.proto:31:35 + + + > extension[12] > number: +desc_test_options.proto:31:38 +desc_test_options.proto:31:43 + + + > extension: +desc_test_options.proto:34:1 +desc_test_options.proto:37:2 + + + > extension[13]: +desc_test_options.proto:35:9 +desc_test_options.proto:35:53 + + + > extension[13] > extendee: +desc_test_options.proto:34:8 +desc_test_options.proto:34:38 + + + > extension[13] > label: +desc_test_options.proto:35:9 +desc_test_options.proto:35:17 + + + > extension[13] > type_name: +desc_test_options.proto:35:18 +desc_test_options.proto:35:37 + + + > extension[13] > name: +desc_test_options.proto:35:38 +desc_test_options.proto:35:44 + + + > extension[13] > number: +desc_test_options.proto:35:47 +desc_test_options.proto:35:52 + + + > extension[14]: +desc_test_options.proto:36:9 +desc_test_options.proto:36:51 + + + > extension[14] > extendee: +desc_test_options.proto:34:8 +desc_test_options.proto:34:38 + + + > extension[14] > label: +desc_test_options.proto:36:9 +desc_test_options.proto:36:17 + + + > extension[14] > type_name: +desc_test_options.proto:36:18 +desc_test_options.proto:36:34 + + + > extension[14] > name: +desc_test_options.proto:36:35 +desc_test_options.proto:36:42 + + + > extension[14] > number: +desc_test_options.proto:36:45 +desc_test_options.proto:36:50 + + + > extension: 
+desc_test_options.proto:39:1 +desc_test_options.proto:42:2 + + + > extension[15]: +desc_test_options.proto:40:9 +desc_test_options.proto:40:40 + + + > extension[15] > extendee: +desc_test_options.proto:39:8 +desc_test_options.proto:39:37 + + + > extension[15] > label: +desc_test_options.proto:40:9 +desc_test_options.proto:40:17 + + + > extension[15] > type: +desc_test_options.proto:40:18 +desc_test_options.proto:40:23 + + + > extension[15] > name: +desc_test_options.proto:40:24 +desc_test_options.proto:40:31 + + + > extension[15] > number: +desc_test_options.proto:40:34 +desc_test_options.proto:40:39 + + + > extension[16]: +desc_test_options.proto:41:9 +desc_test_options.proto:41:42 + + + > extension[16] > extendee: +desc_test_options.proto:39:8 +desc_test_options.proto:39:37 + + + > extension[16] > label: +desc_test_options.proto:41:9 +desc_test_options.proto:41:17 + + + > extension[16] > type: +desc_test_options.proto:41:18 +desc_test_options.proto:41:24 + + + > extension[16] > name: +desc_test_options.proto:41:25 +desc_test_options.proto:41:33 + + + > extension[16] > number: +desc_test_options.proto:41:36 +desc_test_options.proto:41:41 + + + > message_type[0]: +desc_test_options.proto:45:1 +desc_test_options.proto:48:2 + Leading comments: + Test message used by custom options + + + + > message_type[0] > name: +desc_test_options.proto:45:9 +desc_test_options.proto:45:28 + + + > message_type[0] > field[0]: +desc_test_options.proto:46:9 +desc_test_options.proto:46:32 + + + > message_type[0] > field[0] > label: +desc_test_options.proto:46:9 +desc_test_options.proto:46:17 + + + > message_type[0] > field[0] > type: +desc_test_options.proto:46:18 +desc_test_options.proto:46:24 + + + > message_type[0] > field[0] > name: +desc_test_options.proto:46:25 +desc_test_options.proto:46:27 + + + > message_type[0] > field[0] > number: +desc_test_options.proto:46:30 +desc_test_options.proto:46:31 + + + > message_type[0] > field[1]: +desc_test_options.proto:47:9 +desc_test_options.proto:47:34 + + + > message_type[0] > field[1] > label: +desc_test_options.proto:47:9 +desc_test_options.proto:47:17 + + + > message_type[0] > field[1] > type: +desc_test_options.proto:47:18 +desc_test_options.proto:47:24 + + + > message_type[0] > field[1] > name: +desc_test_options.proto:47:25 +desc_test_options.proto:47:29 + + + > message_type[0] > field[1] > number: +desc_test_options.proto:47:32 +desc_test_options.proto:47:33 + + + > enum_type[0]: +desc_test_options.proto:51:1 +desc_test_options.proto:53:2 + Leading comments: + Test enum used by custom options + + + + > enum_type[0] > name: +desc_test_options.proto:51:6 +desc_test_options.proto:51:22 + + + > enum_type[0] > value[0]: +desc_test_options.proto:52:9 +desc_test_options.proto:52:19 + + + > enum_type[0] > value[0] > name: +desc_test_options.proto:52:9 +desc_test_options.proto:52:14 + + + > enum_type[0] > value[0] > number: +desc_test_options.proto:52:17 +desc_test_options.proto:52:18 + + + > extension: +desc_test_options.proto:55:1 +desc_test_options.proto:58:2 + + + > extension[17]: +desc_test_options.proto:56:9 +desc_test_options.proto:56:41 + + + > extension[17] > extendee: +desc_test_options.proto:55:8 +desc_test_options.proto:55:45 + + + > extension[17] > label: +desc_test_options.proto:56:9 +desc_test_options.proto:56:17 + + + > extension[17] > type: +desc_test_options.proto:56:18 +desc_test_options.proto:56:24 + + + > extension[17] > name: +desc_test_options.proto:56:25 +desc_test_options.proto:56:32 + + + > extension[17] > number: 
+desc_test_options.proto:56:35 +desc_test_options.proto:56:40 + + + > extension[18]: +desc_test_options.proto:57:9 +desc_test_options.proto:57:41 + + + > extension[18] > extendee: +desc_test_options.proto:55:8 +desc_test_options.proto:55:45 + + + > extension[18] > label: +desc_test_options.proto:57:9 +desc_test_options.proto:57:17 + + + > extension[18] > type: +desc_test_options.proto:57:18 +desc_test_options.proto:57:23 + + + > extension[18] > name: +desc_test_options.proto:57:24 +desc_test_options.proto:57:32 + + + > extension[18] > number: +desc_test_options.proto:57:35 +desc_test_options.proto:57:40 + + + > extension: +desc_test_options.proto:60:1 +desc_test_options.proto:63:2 + + + > extension[19]: +desc_test_options.proto:61:9 +desc_test_options.proto:61:41 + + + > extension[19] > extendee: +desc_test_options.proto:60:8 +desc_test_options.proto:60:36 + + + > extension[19] > label: +desc_test_options.proto:61:9 +desc_test_options.proto:61:17 + + + > extension[19] > type: +desc_test_options.proto:61:18 +desc_test_options.proto:61:24 + + + > extension[19] > name: +desc_test_options.proto:61:25 +desc_test_options.proto:61:32 + + + > extension[19] > number: +desc_test_options.proto:61:35 +desc_test_options.proto:61:40 + + + > extension[20]: +desc_test_options.proto:62:9 +desc_test_options.proto:62:41 + + + > extension[20] > extendee: +desc_test_options.proto:60:8 +desc_test_options.proto:60:36 + + + > extension[20] > label: +desc_test_options.proto:62:9 +desc_test_options.proto:62:17 + + + > extension[20] > type: +desc_test_options.proto:62:18 +desc_test_options.proto:62:23 + + + > extension[20] > name: +desc_test_options.proto:62:24 +desc_test_options.proto:62:32 + + + > extension[20] > number: +desc_test_options.proto:62:35 +desc_test_options.proto:62:40 diff --git a/vendor/github.com/jhump/protoreflect/desc/sourceinfo/locations.go b/vendor/github.com/jhump/protoreflect/desc/sourceinfo/locations.go new file mode 100644 index 00000000..20d2d7a0 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/sourceinfo/locations.go @@ -0,0 +1,207 @@ +package sourceinfo + +import ( + "math" + "sync" + + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/types/descriptorpb" + + "github.com/jhump/protoreflect/desc/internal" +) + +// NB: forked from google.golang.org/protobuf/internal/filedesc +type sourceLocations struct { + protoreflect.SourceLocations + + orig []*descriptorpb.SourceCodeInfo_Location + // locs is a list of sourceLocations. + // The SourceLocation.Next field does not need to be populated + // as it will be lazily populated upon first need. + locs []protoreflect.SourceLocation + + // fd is the parent file descriptor that these locations are relative to. + // If non-nil, ByDescriptor verifies that the provided descriptor + // is a child of this file descriptor. 
+ fd protoreflect.FileDescriptor + + once sync.Once + byPath map[pathKey]int +} + +func (p *sourceLocations) Len() int { return len(p.orig) } +func (p *sourceLocations) Get(i int) protoreflect.SourceLocation { + return p.lazyInit().locs[i] +} +func (p *sourceLocations) byKey(k pathKey) protoreflect.SourceLocation { + if i, ok := p.lazyInit().byPath[k]; ok { + return p.locs[i] + } + return protoreflect.SourceLocation{} +} +func (p *sourceLocations) ByPath(path protoreflect.SourcePath) protoreflect.SourceLocation { + return p.byKey(newPathKey(path)) +} +func (p *sourceLocations) ByDescriptor(desc protoreflect.Descriptor) protoreflect.SourceLocation { + if p.fd != nil && desc != nil && p.fd != desc.ParentFile() { + return protoreflect.SourceLocation{} // mismatching parent imports + } + var pathArr [16]int32 + path := pathArr[:0] + for { + switch desc.(type) { + case protoreflect.FileDescriptor: + // Reverse the path since it was constructed in reverse. + for i, j := 0, len(path)-1; i < j; i, j = i+1, j-1 { + path[i], path[j] = path[j], path[i] + } + return p.byKey(newPathKey(path)) + case protoreflect.MessageDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case protoreflect.FileDescriptor: + path = append(path, int32(internal.File_messagesTag)) + case protoreflect.MessageDescriptor: + path = append(path, int32(internal.Message_nestedMessagesTag)) + default: + return protoreflect.SourceLocation{} + } + case protoreflect.FieldDescriptor: + isExtension := desc.(protoreflect.FieldDescriptor).IsExtension() + path = append(path, int32(desc.Index())) + desc = desc.Parent() + if isExtension { + switch desc.(type) { + case protoreflect.FileDescriptor: + path = append(path, int32(internal.File_extensionsTag)) + case protoreflect.MessageDescriptor: + path = append(path, int32(internal.Message_extensionsTag)) + default: + return protoreflect.SourceLocation{} + } + } else { + switch desc.(type) { + case protoreflect.MessageDescriptor: + path = append(path, int32(internal.Message_fieldsTag)) + default: + return protoreflect.SourceLocation{} + } + } + case protoreflect.OneofDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case protoreflect.MessageDescriptor: + path = append(path, int32(internal.Message_oneOfsTag)) + default: + return protoreflect.SourceLocation{} + } + case protoreflect.EnumDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case protoreflect.FileDescriptor: + path = append(path, int32(internal.File_enumsTag)) + case protoreflect.MessageDescriptor: + path = append(path, int32(internal.Message_enumsTag)) + default: + return protoreflect.SourceLocation{} + } + case protoreflect.EnumValueDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case protoreflect.EnumDescriptor: + path = append(path, int32(internal.Enum_valuesTag)) + default: + return protoreflect.SourceLocation{} + } + case protoreflect.ServiceDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case protoreflect.FileDescriptor: + path = append(path, int32(internal.File_servicesTag)) + default: + return protoreflect.SourceLocation{} + } + case protoreflect.MethodDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case protoreflect.ServiceDescriptor: + path = append(path, int32(internal.Service_methodsTag)) + default: + return 
protoreflect.SourceLocation{} + } + default: + return protoreflect.SourceLocation{} + } + } +} +func (p *sourceLocations) lazyInit() *sourceLocations { + p.once.Do(func() { + if len(p.orig) > 0 { + p.locs = make([]protoreflect.SourceLocation, len(p.orig)) + // Collect all the indexes for a given path. + pathIdxs := make(map[pathKey][]int, len(p.locs)) + for i := range p.orig { + l := asSourceLocation(p.orig[i]) + p.locs[i] = l + k := newPathKey(l.Path) + pathIdxs[k] = append(pathIdxs[k], i) + } + + // Update the next index for all locations. + p.byPath = make(map[pathKey]int, len(p.locs)) + for k, idxs := range pathIdxs { + for i := 0; i < len(idxs)-1; i++ { + p.locs[idxs[i]].Next = idxs[i+1] + } + p.locs[idxs[len(idxs)-1]].Next = 0 + p.byPath[k] = idxs[0] // record the first location for this path + } + } + }) + return p +} + +func asSourceLocation(l *descriptorpb.SourceCodeInfo_Location) protoreflect.SourceLocation { + endLine := l.Span[0] + endCol := l.Span[2] + if len(l.Span) > 3 { + endLine = l.Span[2] + endCol = l.Span[3] + } + return protoreflect.SourceLocation{ + Path: l.Path, + StartLine: int(l.Span[0]), + StartColumn: int(l.Span[1]), + EndLine: int(endLine), + EndColumn: int(endCol), + LeadingDetachedComments: l.LeadingDetachedComments, + LeadingComments: l.GetLeadingComments(), + TrailingComments: l.GetTrailingComments(), + } +} + +// pathKey is a comparable representation of protoreflect.SourcePath. +type pathKey struct { + arr [16]uint8 // first n-1 path segments; last element is the length + str string // used if the path does not fit in arr +} + +func newPathKey(p protoreflect.SourcePath) (k pathKey) { + if len(p) < len(k.arr) { + for i, ps := range p { + if ps < 0 || math.MaxUint8 <= ps { + return pathKey{str: p.String()} + } + k.arr[i] = uint8(ps) + } + k.arr[len(k.arr)-1] = uint8(len(p)) + return k + } + return pathKey{str: p.String()} +} diff --git a/vendor/github.com/jhump/protoreflect/desc/sourceinfo/registry.go b/vendor/github.com/jhump/protoreflect/desc/sourceinfo/registry.go new file mode 100644 index 00000000..de38e0d1 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/sourceinfo/registry.go @@ -0,0 +1,269 @@ +// Package sourceinfo provides the ability to register and query source code info +// for file descriptors that are compiled into the binary. This data is registered +// by code generated from the protoc-gen-gosrcinfo plugin. +// +// The standard descriptors bundled into the compiled binary are stripped of source +// code info, to reduce binary size and reduce runtime memory footprint. However, +// the source code info can be very handy and worth the size cost when used with +// gRPC services and the server reflection service. Without source code info, the +// descriptors that a client downloads from the reflection service have no comments. +// But the presence of comments, and the ability to show them to humans, can greatly +// improve the utility of user agents that use the reflection service. +// +// When the protoc-gen-gosrcinfo plugin is used, the desc.Load* methods, which load +// descriptors for compiled-in elements, will automatically include source code +// info, using the data registered with this package. +// +// In order to make the reflection service use this functionality, you will need to +// be using v1.45 or higher of the Go runtime for gRPC (google.golang.org/grpc). The +// following snippet demonstrates how to do this in your server. 
Do this instead of +// using the reflection.Register function: +// +// refSvr := reflection.NewServer(reflection.ServerOptions{ +// Services: grpcServer, +// DescriptorResolver: sourceinfo.GlobalFiles, +// ExtensionResolver: sourceinfo.GlobalFiles, +// }) +// grpc_reflection_v1alpha.RegisterServerReflectionServer(grpcServer, refSvr) +package sourceinfo + +import ( + "bytes" + "compress/gzip" + "fmt" + "github.com/golang/protobuf/proto" + "io/ioutil" + "sync" + + "google.golang.org/protobuf/reflect/protodesc" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/types/descriptorpb" +) + +var ( + // GlobalFiles is a registry of descriptors that include source code info, if the + // files they belong to were processed with protoc-gen-gosrcinfo. + // + // If is mean to serve as a drop-in alternative to protoregistry.GlobalFiles that + // can include source code info in the returned descriptors. + GlobalFiles Resolver = registry{} + + // GlobalTypes is a registry of descriptors that include source code info, if the + // files they belong to were processed with protoc-gen-gosrcinfo. + // + // If is mean to serve as a drop-in alternative to protoregistry.GlobalTypes that + // can include source code info in the returned descriptors. + GlobalTypes TypeResolver = registry{} + + mu sync.RWMutex + sourceInfoByFile = map[string]*descriptorpb.SourceCodeInfo{} + fileDescriptors = map[protoreflect.FileDescriptor]protoreflect.FileDescriptor{} +) + +// Resolver can resolve file names into file descriptors and also provides methods for +// resolving extensions. +type Resolver interface { + protodesc.Resolver + protoregistry.ExtensionTypeResolver + RangeExtensionsByMessage(message protoreflect.FullName, f func(protoreflect.ExtensionType) bool) +} + +// NB: These interfaces are far from ideal. Ideally, Resolver would have +// * EITHER been named FileResolver and not included the extension methods. +// * OR also included message methods (i.e. embed protoregistry.MessageTypeResolver). +// Now (since it's been released) we can't add the message methods to the interface as +// that's not a backwards-compatible change. So we have to introduce the new interface +// below, which is now a little confusing since it has some overlap with Resolver. + +// TypeResolver can resolve message names and URLs into message descriptors and also +// provides methods for resolving extensions. +type TypeResolver interface { + protoregistry.MessageTypeResolver + protoregistry.ExtensionTypeResolver + RangeExtensionsByMessage(message protoreflect.FullName, f func(protoreflect.ExtensionType) bool) +} + +// RegisterSourceInfo registers the given source code info for the file descriptor +// with the given path/name. +// +// This is automatically used from older generated code if using a previous release of +// the protoc-gen-gosrcinfo plugin. +func RegisterSourceInfo(file string, srcInfo *descriptorpb.SourceCodeInfo) { + mu.Lock() + defer mu.Unlock() + sourceInfoByFile[file] = srcInfo +} + +// RegisterEncodedSourceInfo registers the given source code info, which is a serialized +// and gzipped form of a google.protobuf.SourceCodeInfo message. +// +// This is automatically used from generated code if using the protoc-gen-gosrcinfo +// plugin. 
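For instance, a minimal sketch of using this registry (assuming a file "foo/bar.proto" that was compiled into the binary and processed with protoc-gen-gosrcinfo; the path is a placeholder):

package main

import (
	"fmt"

	"github.com/jhump/protoreflect/desc/sourceinfo"
)

func printFirstMessageComment() error {
	// Resolve the file through the source-info-aware registry instead of
	// protoregistry.GlobalFiles.
	fd, err := sourceinfo.GlobalFiles.FindFileByPath("foo/bar.proto") // placeholder path
	if err != nil {
		return err
	}
	if fd.Messages().Len() > 0 {
		// With source info registered, the leading comment is populated.
		loc := fd.SourceLocations().ByDescriptor(fd.Messages().Get(0))
		fmt.Println(loc.LeadingComments)
	}
	return nil
}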
+func RegisterEncodedSourceInfo(file string, data []byte) error { + zipReader, err := gzip.NewReader(bytes.NewReader(data)) + if err != nil { + return err + } + defer func() { + _ = zipReader.Close() + }() + unzipped, err := ioutil.ReadAll(zipReader) + if err != nil { + return err + } + var srcInfo descriptorpb.SourceCodeInfo + if err := proto.Unmarshal(unzipped, &srcInfo); err != nil { + return err + } + RegisterSourceInfo(file, &srcInfo) + return nil +} + +// SourceInfoForFile queries for any registered source code info for the file +// descriptor with the given path/name. It returns nil if no source code info +// was registered. +func SourceInfoForFile(file string) *descriptorpb.SourceCodeInfo { + mu.RLock() + defer mu.RUnlock() + return sourceInfoByFile[file] +} + +func canWrap(d protoreflect.Descriptor) bool { + srcInfo := SourceInfoForFile(d.ParentFile().Path()) + return len(srcInfo.GetLocation()) > 0 +} + +func getFile(fd protoreflect.FileDescriptor) protoreflect.FileDescriptor { + if fd == nil { + return nil + } + + mu.RLock() + result := fileDescriptors[fd] + mu.RUnlock() + + if result != nil { + return result + } + + mu.Lock() + defer mu.Unlock() + // double-check, in case it was added to map while upgrading lock + result = fileDescriptors[fd] + if result != nil { + return result + } + + srcInfo := sourceInfoByFile[fd.Path()] + if len(srcInfo.GetLocation()) > 0 { + result = &fileDescriptor{ + FileDescriptor: fd, + locs: &sourceLocations{ + orig: srcInfo.Location, + }, + } + } else { + // nothing to do; don't bother wrapping + result = fd + } + fileDescriptors[fd] = result + return result +} + +type registry struct{} + +var _ protodesc.Resolver = ®istry{} + +func (r registry) FindFileByPath(path string) (protoreflect.FileDescriptor, error) { + fd, err := protoregistry.GlobalFiles.FindFileByPath(path) + if err != nil { + return nil, err + } + return getFile(fd), nil +} + +func (r registry) FindDescriptorByName(name protoreflect.FullName) (protoreflect.Descriptor, error) { + d, err := protoregistry.GlobalFiles.FindDescriptorByName(name) + if !canWrap(d) { + return d, nil + } + if err != nil { + return nil, err + } + switch d := d.(type) { + case protoreflect.FileDescriptor: + return getFile(d), nil + case protoreflect.MessageDescriptor: + return messageDescriptor{d}, nil + case protoreflect.ExtensionTypeDescriptor: + return extensionDescriptor{d}, nil + case protoreflect.FieldDescriptor: + return fieldDescriptor{d}, nil + case protoreflect.OneofDescriptor: + return oneOfDescriptor{d}, nil + case protoreflect.EnumDescriptor: + return enumDescriptor{d}, nil + case protoreflect.EnumValueDescriptor: + return enumValueDescriptor{d}, nil + case protoreflect.ServiceDescriptor: + return serviceDescriptor{d}, nil + case protoreflect.MethodDescriptor: + return methodDescriptor{d}, nil + default: + return nil, fmt.Errorf("unrecognized descriptor type: %T", d) + } +} + +func (r registry) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) { + mt, err := protoregistry.GlobalTypes.FindMessageByName(message) + if err != nil { + return nil, err + } + if !canWrap(mt.Descriptor()) { + return mt, nil + } + return messageType{mt}, nil +} + +func (r registry) FindMessageByURL(url string) (protoreflect.MessageType, error) { + mt, err := protoregistry.GlobalTypes.FindMessageByURL(url) + if err != nil { + return nil, err + } + if !canWrap(mt.Descriptor()) { + return mt, nil + } + return messageType{mt}, nil +} + +func (r registry) FindExtensionByName(field 
protoreflect.FullName) (protoreflect.ExtensionType, error) { + xt, err := protoregistry.GlobalTypes.FindExtensionByName(field) + if err != nil { + return nil, err + } + if !canWrap(xt.TypeDescriptor()) { + return xt, nil + } + return extensionType{xt}, nil +} + +func (r registry) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { + xt, err := protoregistry.GlobalTypes.FindExtensionByNumber(message, field) + if err != nil { + return nil, err + } + if !canWrap(xt.TypeDescriptor()) { + return xt, nil + } + return extensionType{xt}, nil +} + +func (r registry) RangeExtensionsByMessage(message protoreflect.FullName, fn func(protoreflect.ExtensionType) bool) { + protoregistry.GlobalTypes.RangeExtensionsByMessage(message, func(xt protoreflect.ExtensionType) bool { + if canWrap(xt.TypeDescriptor()) { + xt = extensionType{xt} + } + return fn(xt) + }) +} diff --git a/vendor/github.com/jhump/protoreflect/desc/sourceinfo/wrappers.go b/vendor/github.com/jhump/protoreflect/desc/sourceinfo/wrappers.go new file mode 100644 index 00000000..3106eaad --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/sourceinfo/wrappers.go @@ -0,0 +1,636 @@ +package sourceinfo + +import ( + "fmt" + "google.golang.org/protobuf/reflect/protoreflect" +) + +// These are wrappers around the various interfaces in the +// google.golang.org/protobuf/reflect/protoreflect that all +// make sure to return a FileDescriptor that includes source +// code info. + +type fileDescriptor struct { + protoreflect.FileDescriptor + locs protoreflect.SourceLocations +} + +func (f fileDescriptor) Edition() int32 { + ed, ok := f.FileDescriptor.(interface{ Edition() int32 }) + if ok { + return ed.Edition() + } + return 0 +} + +func (f fileDescriptor) ParentFile() protoreflect.FileDescriptor { + return f +} + +func (f fileDescriptor) Parent() protoreflect.Descriptor { + return nil +} + +func (f fileDescriptor) Imports() protoreflect.FileImports { + return imports{f.FileDescriptor.Imports()} +} + +func (f fileDescriptor) Messages() protoreflect.MessageDescriptors { + return messages{f.FileDescriptor.Messages()} +} + +func (f fileDescriptor) Enums() protoreflect.EnumDescriptors { + return enums{f.FileDescriptor.Enums()} +} + +func (f fileDescriptor) Extensions() protoreflect.ExtensionDescriptors { + return extensions{f.FileDescriptor.Extensions()} +} + +func (f fileDescriptor) Services() protoreflect.ServiceDescriptors { + return services{f.FileDescriptor.Services()} +} + +func (f fileDescriptor) SourceLocations() protoreflect.SourceLocations { + return f.locs +} + +type imports struct { + protoreflect.FileImports +} + +func (im imports) Get(i int) protoreflect.FileImport { + fi := im.FileImports.Get(i) + return protoreflect.FileImport{ + FileDescriptor: getFile(fi.FileDescriptor), + IsPublic: fi.IsPublic, + IsWeak: fi.IsWeak, + } +} + +type messages struct { + protoreflect.MessageDescriptors +} + +func (m messages) Get(i int) protoreflect.MessageDescriptor { + return messageDescriptor{m.MessageDescriptors.Get(i)} +} + +func (m messages) ByName(n protoreflect.Name) protoreflect.MessageDescriptor { + return messageDescriptor{m.MessageDescriptors.ByName(n)} +} + +type enums struct { + protoreflect.EnumDescriptors +} + +func (e enums) Get(i int) protoreflect.EnumDescriptor { + return enumDescriptor{e.EnumDescriptors.Get(i)} +} + +func (e enums) ByName(n protoreflect.Name) protoreflect.EnumDescriptor { + return enumDescriptor{e.EnumDescriptors.ByName(n)} +} + +type extensions 
struct { + protoreflect.ExtensionDescriptors +} + +func (e extensions) Get(i int) protoreflect.ExtensionDescriptor { + d := e.ExtensionDescriptors.Get(i) + if ed, ok := d.(protoreflect.ExtensionTypeDescriptor); ok { + return extensionDescriptor{ed} + } + return fieldDescriptor{d} +} + +func (e extensions) ByName(n protoreflect.Name) protoreflect.ExtensionDescriptor { + d := e.ExtensionDescriptors.ByName(n) + if ed, ok := d.(protoreflect.ExtensionTypeDescriptor); ok { + return extensionDescriptor{ed} + } + return fieldDescriptor{d} +} + +type services struct { + protoreflect.ServiceDescriptors +} + +func (s services) Get(i int) protoreflect.ServiceDescriptor { + return serviceDescriptor{s.ServiceDescriptors.Get(i)} +} + +func (s services) ByName(n protoreflect.Name) protoreflect.ServiceDescriptor { + return serviceDescriptor{s.ServiceDescriptors.ByName(n)} +} + +type messageDescriptor struct { + protoreflect.MessageDescriptor +} + +func (m messageDescriptor) ParentFile() protoreflect.FileDescriptor { + return getFile(m.MessageDescriptor.ParentFile()) +} + +func (m messageDescriptor) Parent() protoreflect.Descriptor { + d := m.MessageDescriptor.Parent() + switch d := d.(type) { + case protoreflect.MessageDescriptor: + return messageDescriptor{d} + case protoreflect.FileDescriptor: + return getFile(d) + case nil: + return nil + default: + panic(fmt.Sprintf("unexpected descriptor type %T", d)) + } +} + +func (m messageDescriptor) Fields() protoreflect.FieldDescriptors { + return fields{m.MessageDescriptor.Fields()} +} + +func (m messageDescriptor) Oneofs() protoreflect.OneofDescriptors { + return oneOfs{m.MessageDescriptor.Oneofs()} +} + +func (m messageDescriptor) Enums() protoreflect.EnumDescriptors { + return enums{m.MessageDescriptor.Enums()} +} + +func (m messageDescriptor) Messages() protoreflect.MessageDescriptors { + return messages{m.MessageDescriptor.Messages()} +} + +func (m messageDescriptor) Extensions() protoreflect.ExtensionDescriptors { + return extensions{m.MessageDescriptor.Extensions()} +} + +type fields struct { + protoreflect.FieldDescriptors +} + +func (f fields) Get(i int) protoreflect.FieldDescriptor { + return fieldDescriptor{f.FieldDescriptors.Get(i)} +} + +func (f fields) ByName(n protoreflect.Name) protoreflect.FieldDescriptor { + return fieldDescriptor{f.FieldDescriptors.ByName(n)} +} + +func (f fields) ByJSONName(n string) protoreflect.FieldDescriptor { + return fieldDescriptor{f.FieldDescriptors.ByJSONName(n)} +} + +func (f fields) ByTextName(n string) protoreflect.FieldDescriptor { + return fieldDescriptor{f.FieldDescriptors.ByTextName(n)} +} + +func (f fields) ByNumber(n protoreflect.FieldNumber) protoreflect.FieldDescriptor { + return fieldDescriptor{f.FieldDescriptors.ByNumber(n)} +} + +type oneOfs struct { + protoreflect.OneofDescriptors +} + +func (o oneOfs) Get(i int) protoreflect.OneofDescriptor { + return oneOfDescriptor{o.OneofDescriptors.Get(i)} +} + +func (o oneOfs) ByName(n protoreflect.Name) protoreflect.OneofDescriptor { + return oneOfDescriptor{o.OneofDescriptors.ByName(n)} +} + +type fieldDescriptor struct { + protoreflect.FieldDescriptor +} + +func (f fieldDescriptor) ParentFile() protoreflect.FileDescriptor { + return getFile(f.FieldDescriptor.ParentFile()) +} + +func (f fieldDescriptor) Parent() protoreflect.Descriptor { + d := f.FieldDescriptor.Parent() + switch d := d.(type) { + case protoreflect.MessageDescriptor: + return messageDescriptor{d} + case protoreflect.FileDescriptor: + return getFile(d) + case nil: + return nil + default: + 
panic(fmt.Sprintf("unexpected descriptor type %T", d)) + } +} + +func (f fieldDescriptor) MapKey() protoreflect.FieldDescriptor { + fd := f.FieldDescriptor.MapKey() + if fd == nil { + return nil + } + return fieldDescriptor{fd} +} + +func (f fieldDescriptor) MapValue() protoreflect.FieldDescriptor { + fd := f.FieldDescriptor.MapValue() + if fd == nil { + return nil + } + return fieldDescriptor{fd} +} + +func (f fieldDescriptor) DefaultEnumValue() protoreflect.EnumValueDescriptor { + ed := f.FieldDescriptor.DefaultEnumValue() + if ed == nil { + return nil + } + return enumValueDescriptor{ed} +} + +func (f fieldDescriptor) ContainingOneof() protoreflect.OneofDescriptor { + od := f.FieldDescriptor.ContainingOneof() + if od == nil { + return nil + } + return oneOfDescriptor{od} +} + +func (f fieldDescriptor) ContainingMessage() protoreflect.MessageDescriptor { + return messageDescriptor{f.FieldDescriptor.ContainingMessage()} +} + +func (f fieldDescriptor) Enum() protoreflect.EnumDescriptor { + ed := f.FieldDescriptor.Enum() + if ed == nil { + return nil + } + return enumDescriptor{ed} +} + +func (f fieldDescriptor) Message() protoreflect.MessageDescriptor { + md := f.FieldDescriptor.Message() + if md == nil { + return nil + } + return messageDescriptor{md} +} + +type oneOfDescriptor struct { + protoreflect.OneofDescriptor +} + +func (o oneOfDescriptor) ParentFile() protoreflect.FileDescriptor { + return getFile(o.OneofDescriptor.ParentFile()) +} + +func (o oneOfDescriptor) Parent() protoreflect.Descriptor { + d := o.OneofDescriptor.Parent() + switch d := d.(type) { + case protoreflect.MessageDescriptor: + return messageDescriptor{d} + case nil: + return nil + default: + panic(fmt.Sprintf("unexpected descriptor type %T", d)) + } +} + +func (o oneOfDescriptor) Fields() protoreflect.FieldDescriptors { + return fields{o.OneofDescriptor.Fields()} +} + +type enumDescriptor struct { + protoreflect.EnumDescriptor +} + +func (e enumDescriptor) ParentFile() protoreflect.FileDescriptor { + return getFile(e.EnumDescriptor.ParentFile()) +} + +func (e enumDescriptor) Parent() protoreflect.Descriptor { + d := e.EnumDescriptor.Parent() + switch d := d.(type) { + case protoreflect.MessageDescriptor: + return messageDescriptor{d} + case protoreflect.FileDescriptor: + return getFile(d) + case nil: + return nil + default: + panic(fmt.Sprintf("unexpected descriptor type %T", d)) + } +} + +func (e enumDescriptor) Values() protoreflect.EnumValueDescriptors { + return enumValues{e.EnumDescriptor.Values()} +} + +type enumValues struct { + protoreflect.EnumValueDescriptors +} + +func (e enumValues) Get(i int) protoreflect.EnumValueDescriptor { + return enumValueDescriptor{e.EnumValueDescriptors.Get(i)} +} + +func (e enumValues) ByName(n protoreflect.Name) protoreflect.EnumValueDescriptor { + return enumValueDescriptor{e.EnumValueDescriptors.ByName(n)} +} + +func (e enumValues) ByNumber(n protoreflect.EnumNumber) protoreflect.EnumValueDescriptor { + return enumValueDescriptor{e.EnumValueDescriptors.ByNumber(n)} +} + +type enumValueDescriptor struct { + protoreflect.EnumValueDescriptor +} + +func (e enumValueDescriptor) ParentFile() protoreflect.FileDescriptor { + return getFile(e.EnumValueDescriptor.ParentFile()) +} + +func (e enumValueDescriptor) Parent() protoreflect.Descriptor { + d := e.EnumValueDescriptor.Parent() + switch d := d.(type) { + case protoreflect.EnumDescriptor: + return enumDescriptor{d} + case nil: + return nil + default: + panic(fmt.Sprintf("unexpected descriptor type %T", d)) + } +} + +type 
extensionDescriptor struct { + protoreflect.ExtensionTypeDescriptor +} + +func (e extensionDescriptor) ParentFile() protoreflect.FileDescriptor { + return getFile(e.ExtensionTypeDescriptor.ParentFile()) +} + +func (e extensionDescriptor) Parent() protoreflect.Descriptor { + d := e.ExtensionTypeDescriptor.Parent() + switch d := d.(type) { + case protoreflect.MessageDescriptor: + return messageDescriptor{d} + case protoreflect.FileDescriptor: + return getFile(d) + case nil: + return nil + default: + panic(fmt.Sprintf("unexpected descriptor type %T", d)) + } +} + +func (e extensionDescriptor) MapKey() protoreflect.FieldDescriptor { + fd := e.ExtensionTypeDescriptor.MapKey() + if fd == nil { + return nil + } + return fieldDescriptor{fd} +} + +func (e extensionDescriptor) MapValue() protoreflect.FieldDescriptor { + fd := e.ExtensionTypeDescriptor.MapValue() + if fd == nil { + return nil + } + return fieldDescriptor{fd} +} + +func (e extensionDescriptor) DefaultEnumValue() protoreflect.EnumValueDescriptor { + ed := e.ExtensionTypeDescriptor.DefaultEnumValue() + if ed == nil { + return nil + } + return enumValueDescriptor{ed} +} + +func (e extensionDescriptor) ContainingOneof() protoreflect.OneofDescriptor { + od := e.ExtensionTypeDescriptor.ContainingOneof() + if od == nil { + return nil + } + return oneOfDescriptor{od} +} + +func (e extensionDescriptor) ContainingMessage() protoreflect.MessageDescriptor { + return messageDescriptor{e.ExtensionTypeDescriptor.ContainingMessage()} +} + +func (e extensionDescriptor) Enum() protoreflect.EnumDescriptor { + ed := e.ExtensionTypeDescriptor.Enum() + if ed == nil { + return nil + } + return enumDescriptor{ed} +} + +func (e extensionDescriptor) Message() protoreflect.MessageDescriptor { + md := e.ExtensionTypeDescriptor.Message() + if md == nil { + return nil + } + return messageDescriptor{md} +} + +func (e extensionDescriptor) Descriptor() protoreflect.ExtensionDescriptor { + return e +} + +var _ protoreflect.ExtensionTypeDescriptor = extensionDescriptor{} + +type serviceDescriptor struct { + protoreflect.ServiceDescriptor +} + +func (s serviceDescriptor) ParentFile() protoreflect.FileDescriptor { + return getFile(s.ServiceDescriptor.ParentFile()) +} + +func (s serviceDescriptor) Parent() protoreflect.Descriptor { + d := s.ServiceDescriptor.Parent() + switch d := d.(type) { + case protoreflect.FileDescriptor: + return getFile(d) + case nil: + return nil + default: + panic(fmt.Sprintf("unexpected descriptor type %T", d)) + } +} + +func (s serviceDescriptor) Methods() protoreflect.MethodDescriptors { + return methods{s.ServiceDescriptor.Methods()} +} + +type methods struct { + protoreflect.MethodDescriptors +} + +func (m methods) Get(i int) protoreflect.MethodDescriptor { + return methodDescriptor{m.MethodDescriptors.Get(i)} +} + +func (m methods) ByName(n protoreflect.Name) protoreflect.MethodDescriptor { + return methodDescriptor{m.MethodDescriptors.ByName(n)} +} + +type methodDescriptor struct { + protoreflect.MethodDescriptor +} + +func (m methodDescriptor) ParentFile() protoreflect.FileDescriptor { + return getFile(m.MethodDescriptor.ParentFile()) +} + +func (m methodDescriptor) Parent() protoreflect.Descriptor { + d := m.MethodDescriptor.Parent() + switch d := d.(type) { + case protoreflect.ServiceDescriptor: + return serviceDescriptor{d} + case nil: + return nil + default: + panic(fmt.Sprintf("unexpected descriptor type %T", d)) + } +} + +func (m methodDescriptor) Input() protoreflect.MessageDescriptor { + return 
messageDescriptor{m.MethodDescriptor.Input()} +} + +func (m methodDescriptor) Output() protoreflect.MessageDescriptor { + return messageDescriptor{m.MethodDescriptor.Output()} +} + +type extensionType struct { + protoreflect.ExtensionType +} + +func (e extensionType) TypeDescriptor() protoreflect.ExtensionTypeDescriptor { + return extensionDescriptor{e.ExtensionType.TypeDescriptor()} +} + +type messageType struct { + protoreflect.MessageType +} + +func (m messageType) Descriptor() protoreflect.MessageDescriptor { + return messageDescriptor{m.MessageType.Descriptor()} +} + +// WrapFile wraps the given file descriptor so that it will include source +// code info that was registered with this package if the given file was +// processed with protoc-gen-gosrcinfo. Returns fd without wrapping if fd +// already contains source code info. +func WrapFile(fd protoreflect.FileDescriptor) protoreflect.FileDescriptor { + if wrapper, ok := fd.(fileDescriptor); ok { + // already wrapped + return wrapper + } + if fd.SourceLocations().Len() > 0 { + // no need to wrap since it includes source info already + return fd + } + return getFile(fd) +} + +// WrapMessage wraps the given message descriptor so that it will include source +// code info that was registered with this package if the file it is defined in +// was processed with protoc-gen-gosrcinfo. Returns md without wrapping if md's +// parent file already contains source code info. +func WrapMessage(md protoreflect.MessageDescriptor) protoreflect.MessageDescriptor { + if wrapper, ok := md.(messageDescriptor); ok { + // already wrapped + return wrapper + } + if md.ParentFile().SourceLocations().Len() > 0 { + // no need to wrap since it includes source info already + return md + } + if !canWrap(md) { + return md + } + return messageDescriptor{md} +} + +// WrapEnum wraps the given enum descriptor so that it will include source +// code info that was registered with this package if the file it is defined in +// was processed with protoc-gen-gosrcinfo. Returns ed without wrapping if ed's +// parent file already contains source code info. +func WrapEnum(ed protoreflect.EnumDescriptor) protoreflect.EnumDescriptor { + if wrapper, ok := ed.(enumDescriptor); ok { + // already wrapped + return wrapper + } + if ed.ParentFile().SourceLocations().Len() > 0 { + // no need to wrap since it includes source info already + return ed + } + if !canWrap(ed) { + return ed + } + return enumDescriptor{ed} +} + +// WrapService wraps the given service descriptor so that it will include source +// code info that was registered with this package if the file it is defined in +// was processed with protoc-gen-gosrcinfo. Returns sd without wrapping if sd's +// parent file already contains source code info. +func WrapService(sd protoreflect.ServiceDescriptor) protoreflect.ServiceDescriptor { + if wrapper, ok := sd.(serviceDescriptor); ok { + // already wrapped + return wrapper + } + if sd.ParentFile().SourceLocations().Len() > 0 { + // no need to wrap since it includes source info already + return sd + } + if !canWrap(sd) { + return sd + } + return serviceDescriptor{sd} +} + +// WrapExtensionType wraps the given extension type so that its associated +// descriptor will include source code info that was registered with this package +// if the file it is defined in was processed with protoc-gen-gosrcinfo. Returns +// xt without wrapping if the parent file of xt's descriptor already contains +// source code info. 
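As a minimal sketch of the Wrap* helpers above (assuming the message's file was processed with protoc-gen-gosrcinfo, so source info is registered), a caller holding a descriptor from generated code can wrap it and read back its leading comment:

package main

import (
	"google.golang.org/protobuf/reflect/protoreflect"

	"github.com/jhump/protoreflect/desc/sourceinfo"
)

// leadingComment returns the leading comment registered for md's definition,
// or "" if no source info was registered for its file.
func leadingComment(md protoreflect.MessageDescriptor) string {
	wrapped := sourceinfo.WrapMessage(md)
	loc := wrapped.ParentFile().SourceLocations().ByDescriptor(wrapped)
	return loc.LeadingComments
}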
+func WrapExtensionType(xt protoreflect.ExtensionType) protoreflect.ExtensionType { + if wrapper, ok := xt.(extensionType); ok { + // already wrapped + return wrapper + } + if xt.TypeDescriptor().ParentFile().SourceLocations().Len() > 0 { + // no need to wrap since it includes source info already + return xt + } + if !canWrap(xt.TypeDescriptor()) { + return xt + } + return extensionType{xt} +} + +// WrapMessageType wraps the given message type so that its associated +// descriptor will include source code info that was registered with this package +// if the file it is defined in was processed with protoc-gen-gosrcinfo. Returns +// mt without wrapping if the parent file of mt's descriptor already contains +// source code info. +func WrapMessageType(mt protoreflect.MessageType) protoreflect.MessageType { + if wrapper, ok := mt.(messageType); ok { + // already wrapped + return wrapper + } + if mt.Descriptor().ParentFile().SourceLocations().Len() > 0 { + // no need to wrap since it includes source info already + return mt + } + if !canWrap(mt.Descriptor()) { + return mt + } + return messageType{mt} +} diff --git a/vendor/github.com/jhump/protoreflect/desc/wrap.go b/vendor/github.com/jhump/protoreflect/desc/wrap.go new file mode 100644 index 00000000..5491afda --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/wrap.go @@ -0,0 +1,211 @@ +package desc + +import ( + "fmt" + + "github.com/bufbuild/protocompile/protoutil" + "google.golang.org/protobuf/reflect/protoreflect" +) + +// DescriptorWrapper wraps a protoreflect.Descriptor. All of the Descriptor +// implementations in this package implement this interface. This can be +// used to recover the underlying descriptor. Each descriptor type in this +// package also provides a strongly-typed form of this method, such as the +// following method for *FileDescriptor: +// +// UnwrapFile() protoreflect.FileDescriptor +type DescriptorWrapper interface { + Unwrap() protoreflect.Descriptor +} + +// WrapDescriptor wraps the given descriptor, returning a desc.Descriptor +// value that represents the same element. +func WrapDescriptor(d protoreflect.Descriptor) (Descriptor, error) { + return wrapDescriptor(d, mapCache{}) +} + +func wrapDescriptor(d protoreflect.Descriptor, cache descriptorCache) (Descriptor, error) { + switch d := d.(type) { + case protoreflect.FileDescriptor: + return wrapFile(d, cache) + case protoreflect.MessageDescriptor: + return wrapMessage(d, cache) + case protoreflect.FieldDescriptor: + return wrapField(d, cache) + case protoreflect.OneofDescriptor: + return wrapOneOf(d, cache) + case protoreflect.EnumDescriptor: + return wrapEnum(d, cache) + case protoreflect.EnumValueDescriptor: + return wrapEnumValue(d, cache) + case protoreflect.ServiceDescriptor: + return wrapService(d, cache) + case protoreflect.MethodDescriptor: + return wrapMethod(d, cache) + default: + return nil, fmt.Errorf("unknown descriptor type: %T", d) + } +} + +// WrapFiles wraps the given file descriptors, returning a slice of *desc.FileDescriptor +// values that represent the same files. +func WrapFiles(d []protoreflect.FileDescriptor) ([]*FileDescriptor, error) { + cache := mapCache{} + results := make([]*FileDescriptor, len(d)) + for i := range d { + var err error + results[i], err = wrapFile(d[i], cache) + if err != nil { + return nil, err + } + } + return results, nil +} + +// WrapFile wraps the given file descriptor, returning a *desc.FileDescriptor +// value that represents the same file. 
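For example, a minimal sketch of WrapFile in use (the well-known timestamppb type is chosen purely for illustration):

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/timestamppb"

	"github.com/jhump/protoreflect/desc"
)

func wrapTimestampFile() (*desc.FileDescriptor, error) {
	// Start from the protoreflect descriptor behind a generated type and
	// convert it to this package's richer representation.
	prFile := (&timestamppb.Timestamp{}).ProtoReflect().Descriptor().ParentFile()
	fd, err := desc.WrapFile(prFile)
	if err != nil {
		return nil, err
	}
	fmt.Println(fd.GetName(), len(fd.GetMessageTypes()))
	return fd, nil
}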
+func WrapFile(d protoreflect.FileDescriptor) (*FileDescriptor, error) { + return wrapFile(d, mapCache{}) +} + +func wrapFile(d protoreflect.FileDescriptor, cache descriptorCache) (*FileDescriptor, error) { + if res := cache.get(d); res != nil { + return res.(*FileDescriptor), nil + } + fdp := protoutil.ProtoFromFileDescriptor(d) + return convertFile(d, fdp, cache) +} + +// WrapMessage wraps the given message descriptor, returning a *desc.MessageDescriptor +// value that represents the same message. +func WrapMessage(d protoreflect.MessageDescriptor) (*MessageDescriptor, error) { + return wrapMessage(d, mapCache{}) +} + +func wrapMessage(d protoreflect.MessageDescriptor, cache descriptorCache) (*MessageDescriptor, error) { + parent, err := wrapDescriptor(d.Parent(), cache) + if err != nil { + return nil, err + } + switch p := parent.(type) { + case *FileDescriptor: + return p.messages[d.Index()], nil + case *MessageDescriptor: + return p.nested[d.Index()], nil + default: + return nil, fmt.Errorf("message has unexpected parent type: %T", parent) + } +} + +// WrapField wraps the given field descriptor, returning a *desc.FieldDescriptor +// value that represents the same field. +func WrapField(d protoreflect.FieldDescriptor) (*FieldDescriptor, error) { + return wrapField(d, mapCache{}) +} + +func wrapField(d protoreflect.FieldDescriptor, cache descriptorCache) (*FieldDescriptor, error) { + parent, err := wrapDescriptor(d.Parent(), cache) + if err != nil { + return nil, err + } + switch p := parent.(type) { + case *FileDescriptor: + return p.extensions[d.Index()], nil + case *MessageDescriptor: + if d.IsExtension() { + return p.extensions[d.Index()], nil + } + return p.fields[d.Index()], nil + default: + return nil, fmt.Errorf("field has unexpected parent type: %T", parent) + } +} + +// WrapOneOf wraps the given oneof descriptor, returning a *desc.OneOfDescriptor +// value that represents the same oneof. +func WrapOneOf(d protoreflect.OneofDescriptor) (*OneOfDescriptor, error) { + return wrapOneOf(d, mapCache{}) +} + +func wrapOneOf(d protoreflect.OneofDescriptor, cache descriptorCache) (*OneOfDescriptor, error) { + parent, err := wrapDescriptor(d.Parent(), cache) + if err != nil { + return nil, err + } + if p, ok := parent.(*MessageDescriptor); ok { + return p.oneOfs[d.Index()], nil + } + return nil, fmt.Errorf("oneof has unexpected parent type: %T", parent) +} + +// WrapEnum wraps the given enum descriptor, returning a *desc.EnumDescriptor +// value that represents the same enum. +func WrapEnum(d protoreflect.EnumDescriptor) (*EnumDescriptor, error) { + return wrapEnum(d, mapCache{}) +} + +func wrapEnum(d protoreflect.EnumDescriptor, cache descriptorCache) (*EnumDescriptor, error) { + parent, err := wrapDescriptor(d.Parent(), cache) + if err != nil { + return nil, err + } + switch p := parent.(type) { + case *FileDescriptor: + return p.enums[d.Index()], nil + case *MessageDescriptor: + return p.enums[d.Index()], nil + default: + return nil, fmt.Errorf("enum has unexpected parent type: %T", parent) + } +} + +// WrapEnumValue wraps the given enum value descriptor, returning a *desc.EnumValueDescriptor +// value that represents the same enum value. 
+func WrapEnumValue(d protoreflect.EnumValueDescriptor) (*EnumValueDescriptor, error) { + return wrapEnumValue(d, mapCache{}) +} + +func wrapEnumValue(d protoreflect.EnumValueDescriptor, cache descriptorCache) (*EnumValueDescriptor, error) { + parent, err := wrapDescriptor(d.Parent(), cache) + if err != nil { + return nil, err + } + if p, ok := parent.(*EnumDescriptor); ok { + return p.values[d.Index()], nil + } + return nil, fmt.Errorf("enum value has unexpected parent type: %T", parent) +} + +// WrapService wraps the given service descriptor, returning a *desc.ServiceDescriptor +// value that represents the same service. +func WrapService(d protoreflect.ServiceDescriptor) (*ServiceDescriptor, error) { + return wrapService(d, mapCache{}) +} + +func wrapService(d protoreflect.ServiceDescriptor, cache descriptorCache) (*ServiceDescriptor, error) { + parent, err := wrapDescriptor(d.Parent(), cache) + if err != nil { + return nil, err + } + if p, ok := parent.(*FileDescriptor); ok { + return p.services[d.Index()], nil + } + return nil, fmt.Errorf("service has unexpected parent type: %T", parent) +} + +// WrapMethod wraps the given method descriptor, returning a *desc.MethodDescriptor +// value that represents the same method. +func WrapMethod(d protoreflect.MethodDescriptor) (*MethodDescriptor, error) { + return wrapMethod(d, mapCache{}) +} + +func wrapMethod(d protoreflect.MethodDescriptor, cache descriptorCache) (*MethodDescriptor, error) { + parent, err := wrapDescriptor(d.Parent(), cache) + if err != nil { + return nil, err + } + if p, ok := parent.(*ServiceDescriptor); ok { + return p.methods[d.Index()], nil + } + return nil, fmt.Errorf("method has unexpected parent type: %T", parent) +} diff --git a/vendor/github.com/jhump/protoreflect/dynamic/binary.go b/vendor/github.com/jhump/protoreflect/dynamic/binary.go new file mode 100644 index 00000000..39e077a4 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/dynamic/binary.go @@ -0,0 +1,193 @@ +package dynamic + +// Binary serialization and de-serialization for dynamic messages + +import ( + "fmt" + "io" + + "github.com/golang/protobuf/proto" + + "github.com/jhump/protoreflect/codec" +) + +// defaultDeterminism, if true, will mean that calls to Marshal will produce +// deterministic output. This is used to make the output of proto.Marshal(...) +// deterministic (since there is no way to have that convey determinism intent). +// **This is only used from tests.** +var defaultDeterminism = false + +// Marshal serializes this message to bytes, returning an error if the operation +// fails. The resulting bytes are in the standard protocol buffer binary format. +func (m *Message) Marshal() ([]byte, error) { + var b codec.Buffer + b.SetDeterministic(defaultDeterminism) + if err := m.marshal(&b); err != nil { + return nil, err + } + return b.Bytes(), nil +} + +// MarshalAppend behaves exactly the same as Marshal, except instead of allocating a +// new byte slice to marshal into, it uses the provided byte slice. The backing array +// for the returned byte slice *may* be the same as the one that was passed in, but +// it's not guaranteed as a new backing array will automatically be allocated if +// more bytes need to be written than the provided buffer has capacity for. 
+func (m *Message) MarshalAppend(b []byte) ([]byte, error) { + codedBuf := codec.NewBuffer(b) + codedBuf.SetDeterministic(defaultDeterminism) + if err := m.marshal(codedBuf); err != nil { + return nil, err + } + return codedBuf.Bytes(), nil +} + +// MarshalDeterministic serializes this message to bytes in a deterministic way, +// returning an error if the operation fails. This differs from Marshal in that +// map keys will be sorted before serializing to bytes. The protobuf spec does +// not define ordering for map entries, so Marshal will use standard Go map +// iteration order (which will be random). But for cases where determinism is +// more important than performance, use this method instead. +func (m *Message) MarshalDeterministic() ([]byte, error) { + var b codec.Buffer + b.SetDeterministic(true) + if err := m.marshal(&b); err != nil { + return nil, err + } + return b.Bytes(), nil +} + +// MarshalAppendDeterministic behaves exactly the same as MarshalDeterministic, +// except instead of allocating a new byte slice to marshal into, it uses the +// provided byte slice. The backing array for the returned byte slice *may* be +// the same as the one that was passed in, but it's not guaranteed as a new +// backing array will automatically be allocated if more bytes need to be written +// than the provided buffer has capacity for. +func (m *Message) MarshalAppendDeterministic(b []byte) ([]byte, error) { + codedBuf := codec.NewBuffer(b) + codedBuf.SetDeterministic(true) + if err := m.marshal(codedBuf); err != nil { + return nil, err + } + return codedBuf.Bytes(), nil +} + +func (m *Message) marshal(b *codec.Buffer) error { + if m.GetMessageDescriptor().GetMessageOptions().GetMessageSetWireFormat() { + return fmt.Errorf("%s is a message set; marshaling message sets is not implemented", m.GetMessageDescriptor().GetFullyQualifiedName()) + } + if err := m.marshalKnownFields(b); err != nil { + return err + } + return m.marshalUnknownFields(b) +} + +func (m *Message) marshalKnownFields(b *codec.Buffer) error { + for _, tag := range m.knownFieldTags() { + itag := int32(tag) + val := m.values[itag] + fd := m.FindFieldDescriptor(itag) + if fd == nil { + panic(fmt.Sprintf("Couldn't find field for tag %d", itag)) + } + if err := b.EncodeFieldValue(fd, val); err != nil { + return err + } + } + return nil +} + +func (m *Message) marshalUnknownFields(b *codec.Buffer) error { + for _, tag := range m.unknownFieldTags() { + itag := int32(tag) + sl := m.unknownFields[itag] + for _, u := range sl { + if err := b.EncodeTagAndWireType(itag, u.Encoding); err != nil { + return err + } + switch u.Encoding { + case proto.WireBytes: + if err := b.EncodeRawBytes(u.Contents); err != nil { + return err + } + case proto.WireStartGroup: + _, _ = b.Write(u.Contents) + if err := b.EncodeTagAndWireType(itag, proto.WireEndGroup); err != nil { + return err + } + case proto.WireFixed32: + if err := b.EncodeFixed32(u.Value); err != nil { + return err + } + case proto.WireFixed64: + if err := b.EncodeFixed64(u.Value); err != nil { + return err + } + case proto.WireVarint: + if err := b.EncodeVarint(u.Value); err != nil { + return err + } + default: + return codec.ErrBadWireType + } + } + } + return nil +} + +// Unmarshal de-serializes the message that is present in the given bytes into +// this message. It first resets the current message. It returns an error if the +// given bytes do not contain a valid encoding of this message type. 
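A short round-trip sketch using the marshaling and unmarshaling methods above; NewMessage and the TrySetFieldByName/GetFieldByName accessors are assumed from the rest of this package (not shown in this hunk), and the "name" field is hypothetical:

package main

import (
	"fmt"

	"github.com/jhump/protoreflect/desc"
	"github.com/jhump/protoreflect/dynamic"
)

func roundTrip(md *desc.MessageDescriptor) error {
	msg := dynamic.NewMessage(md)
	if err := msg.TrySetFieldByName("name", "demo"); err != nil { // "name" is hypothetical
		return err
	}
	data, err := msg.Marshal() // standard protobuf binary format
	if err != nil {
		return err
	}
	clone := dynamic.NewMessage(md)
	if err := clone.Unmarshal(data); err != nil { // resets, merges, then validates
		return err
	}
	fmt.Println(clone.GetFieldByName("name"))
	return nil
}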
+func (m *Message) Unmarshal(b []byte) error { + m.Reset() + if err := m.UnmarshalMerge(b); err != nil { + return err + } + return m.Validate() +} + +// UnmarshalMerge de-serializes the message that is present in the given bytes +// into this message. Unlike Unmarshal, it does not first reset the message, +// instead merging the data in the given bytes into the existing data in this +// message. +func (m *Message) UnmarshalMerge(b []byte) error { + return m.unmarshal(codec.NewBuffer(b), false) +} + +func (m *Message) unmarshal(buf *codec.Buffer, isGroup bool) error { + if m.GetMessageDescriptor().GetMessageOptions().GetMessageSetWireFormat() { + return fmt.Errorf("%s is a message set; unmarshaling message sets is not implemented", m.GetMessageDescriptor().GetFullyQualifiedName()) + } + for !buf.EOF() { + fd, val, err := buf.DecodeFieldValue(m.FindFieldDescriptor, m.mf) + if err != nil { + if err == codec.ErrWireTypeEndGroup { + if isGroup { + // finished parsing group + return nil + } + return codec.ErrBadWireType + } + return err + } + + if fd == nil { + if m.unknownFields == nil { + m.unknownFields = map[int32][]UnknownField{} + } + uv := val.(codec.UnknownField) + u := UnknownField{ + Encoding: uv.Encoding, + Value: uv.Value, + Contents: uv.Contents, + } + m.unknownFields[uv.Tag] = append(m.unknownFields[uv.Tag], u) + } else if err := mergeField(m, fd, val); err != nil { + return err + } + } + if isGroup { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/jhump/protoreflect/dynamic/doc.go b/vendor/github.com/jhump/protoreflect/dynamic/doc.go new file mode 100644 index 00000000..5d7f45e4 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/dynamic/doc.go @@ -0,0 +1,159 @@ +// Package dynamic provides an implementation for a dynamic protobuf message. +// +// The dynamic message is essentially a message descriptor along with a map of +// tag numbers to values. It has a broad API for interacting with the message, +// including inspection and modification. Generally, most operations have two +// forms: a regular method that panics on bad input or error and a "Try" form +// of the method that will instead return an error. +// +// A dynamic message can optionally be constructed with a MessageFactory. The +// MessageFactory has various registries that may be used by the dynamic message, +// such as during de-serialization. The message factory is "inherited" by any +// other dynamic messages created, such as nested messages that are created +// during de-serialization. Similarly, any dynamic message created using +// MessageFactory.NewMessage will be associated with that factory, which in turn +// will be used to create other messages or parse extension fields during +// de-serialization. +// +// # Field Types +// +// The types of values expected by setters and returned by getters are the +// same as protoc generates for scalar fields. For repeated fields, there are +// methods for getting and setting values at a particular index or for adding +// an element. Similarly, for map fields, there are methods for getting and +// setting values for a particular key. +// +// If you use GetField for a repeated field, it will return a copy of all +// elements as a slice []interface{}. Similarly, using GetField for a map field +// will return a copy of all mappings as a map[interface{}]interface{}. You can +// also use SetField to supply an entire slice or map for repeated or map fields. 
+// The slice need not be []interface{} but can actually be typed according to +// the field's expected type. For example, a repeated uint64 field can be set +// using a slice of type []uint64. +// +// Descriptors for map fields describe them as repeated fields with a nested +// message type. The nested message type is a special generated type that +// represents a single mapping: key and value pair. The dynamic message has some +// special affordances for this representation. For example, you can use +// SetField to set a map field using a slice of these entry messages. Internally, +// the slice of entries will be converted to an actual map. Similarly, you can +// use AddRepeatedField with an entry message to add (or overwrite) a mapping. +// However, you cannot use GetRepeatedField or SetRepeatedField to modify maps, +// since those take numeric index arguments which are not relevant to maps +// (since maps in Go have no defined ordering). +// +// When setting field values in dynamic messages, the type-checking is lenient +// in that it accepts any named type with the right kind. So a string field can +// be assigned to any type that is defined as a string. Enum fields require +// int32 values (or any type that is defined as an int32). +// +// Unlike normal use of numeric values in Go, values will be automatically +// widened when assigned. So, for example, an int64 field can be set using an +// int32 value since it can be safely widened without truncation or loss of +// precision. Similar goes for uint32 values being converted to uint64 and +// float32 being converted to float64. Narrowing conversions are not done, +// however. Also, unsigned values will never be automatically converted to +// signed (and vice versa), and floating point values will never be +// automatically converted to integral values (and vice versa). Since the bit +// width of int and uint fields is allowed to be platform dependent, but will +// always be less than or equal to 64, they can only be used as values for +// int64 and uint64 fields, respectively. They cannot be used to set int32 or +// uint32 fields, which includes enums fields. +// +// Fields whose type is a nested message can have values set to either other +// dynamic messages or generated messages (e.g. pointers to structs generated by +// protoc). Getting a value for such a field will return the actual type it is +// set to (e.g. either a dynamic message or a generated message). If the value +// is not set and the message uses proto2 syntax, the default message returned +// will be whatever is returned by the dynamic message's MessageFactory (if the +// dynamic message was not created with a factory, it will use the logic of the +// zero value factory). In most typical cases, it will return a dynamic message, +// but if the factory is configured with a KnownTypeRegistry, or if the field's +// type is a well-known type, it will return a zero value generated message. +// +// # Unrecognized Fields +// +// Unrecognized fields are preserved by the dynamic message when unmarshaling +// from the standard binary format. If the message's MessageFactory was +// configured with an ExtensionRegistry, it will be used to identify and parse +// extension fields for the message. +// +// Unrecognized fields can dynamically become recognized fields if the +// application attempts to retrieve an unrecognized field's value using a +// FieldDescriptor. 
In this case, the given FieldDescriptor is used to parse the +// unknown field and move the parsed value into the message's set of known +// fields. This behavior is most suited to the use of extensions, where an +// ExtensionRegistry is not setup with all known extensions ahead of time. But +// it can even happen for non-extension fields! Here's an example scenario where +// a non-extension field can initially be unknown and become known: +// +// 1. A dynamic message is created with a descriptor, A, and then +// de-serialized from a stream of bytes. The stream includes an +// unrecognized tag T. The message will include tag T in its unrecognized +// field set. +// 2. Another call site retrieves a newer descriptor, A', which includes a +// newly added field with tag T. +// 3. That other call site then uses a FieldDescriptor to access the value of +// the new field. This will cause the dynamic message to parse the bytes +// for the unknown tag T and store them as a known field. +// 4. Subsequent operations for tag T, including setting the field using only +// tag number or de-serializing a stream that includes tag T, will operate +// as if that tag were part of the original descriptor, A. +// +// # Compatibility +// +// In addition to implementing the proto.Message interface, the included +// Message type also provides an XXX_MessageName() method, so it can work with +// proto.MessageName. And it provides a Descriptor() method that behaves just +// like the method of the same signature in messages generated by protoc. +// Because of this, it is actually compatible with proto.Message in many (though +// not all) contexts. In particular, it is compatible with proto.Marshal and +// proto.Unmarshal for serializing and de-serializing messages. +// +// The dynamic message supports binary and text marshaling, using protobuf's +// well-defined binary format and the same text format that protoc-generated +// types use. It also supports JSON serialization/de-serialization by +// implementing the json.Marshaler and json.Unmarshaler interfaces. And dynamic +// messages can safely be used with the jsonpb package for JSON serialization +// and de-serialization. +// +// In addition to implementing the proto.Message interface and numerous related +// methods, it also provides inter-op with generated messages via conversion. +// The ConvertTo, ConvertFrom, MergeInto, and MergeFrom methods copy message +// contents from a dynamic message to a generated message and vice versa. +// +// When copying from a generated message into a dynamic message, if the +// generated message contains fields unknown to the dynamic message (e.g. not +// present in the descriptor used to create the dynamic message), these fields +// become known to the dynamic message (as per behavior described above in +// "Unrecognized Fields"). If the generated message has unrecognized fields of +// its own, including unrecognized extensions, they are preserved in the dynamic +// message. It is possible that the dynamic message knows about fields that the +// generated message did not, like if it has a different version of the +// descriptor or its MessageFactory has an ExtensionRegistry that knows about +// different extensions than were linked into the program. In this case, these +// unrecognized fields in the generated message will be known fields in the +// dynamic message. 
+// +// Similarly, when copying from a dynamic message into a generated message, if +// the dynamic message has unrecognized fields they can be preserved in the +// generated message (currently only for syntax proto2 since proto3 generated +// messages do not preserve unrecognized fields). If the generated message knows +// about fields that the dynamic message does not, these unrecognized fields may +// become known fields in the generated message. +// +// # Registries +// +// This package also contains a couple of registries, for managing known types +// and descriptors. +// +// The KnownTypeRegistry allows de-serialization of a dynamic message to use +// generated message types, instead of dynamic messages, for some kinds of +// nested message fields. This is particularly useful for working with proto +// messages that have special encodings as JSON (e.g. the well-known types), +// since the dynamic message does not try to handle these special cases in its +// JSON marshaling facilities. +// +// The ExtensionRegistry allows for recognizing and parsing extensions fields +// (for proto2 messages). +package dynamic diff --git a/vendor/github.com/jhump/protoreflect/dynamic/dynamic_message.go b/vendor/github.com/jhump/protoreflect/dynamic/dynamic_message.go new file mode 100644 index 00000000..ff136b0e --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/dynamic/dynamic_message.go @@ -0,0 +1,2830 @@ +package dynamic + +import ( + "bytes" + "compress/gzip" + "errors" + "fmt" + "reflect" + "sort" + "strings" + + "github.com/golang/protobuf/proto" + protov2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/types/descriptorpb" + + "github.com/jhump/protoreflect/codec" + "github.com/jhump/protoreflect/desc" + "github.com/jhump/protoreflect/internal" +) + +// ErrUnknownTagNumber is an error that is returned when an operation refers +// to an unknown tag number. +var ErrUnknownTagNumber = errors.New("unknown tag number") + +// UnknownTagNumberError is the same as ErrUnknownTagNumber. +// Deprecated: use ErrUnknownTagNumber +var UnknownTagNumberError = ErrUnknownTagNumber + +// ErrUnknownFieldName is an error that is returned when an operation refers +// to an unknown field name. +var ErrUnknownFieldName = errors.New("unknown field name") + +// UnknownFieldNameError is the same as ErrUnknownFieldName. +// Deprecated: use ErrUnknownFieldName +var UnknownFieldNameError = ErrUnknownFieldName + +// ErrFieldIsNotMap is an error that is returned when map-related operations +// are attempted with fields that are not maps. +var ErrFieldIsNotMap = errors.New("field is not a map type") + +// FieldIsNotMapError is the same as ErrFieldIsNotMap. +// Deprecated: use ErrFieldIsNotMap +var FieldIsNotMapError = ErrFieldIsNotMap + +// ErrFieldIsNotRepeated is an error that is returned when repeated field +// operations are attempted with fields that are not repeated. +var ErrFieldIsNotRepeated = errors.New("field is not repeated") + +// FieldIsNotRepeatedError is the same as ErrFieldIsNotRepeated. +// Deprecated: use ErrFieldIsNotRepeated +var FieldIsNotRepeatedError = ErrFieldIsNotRepeated + +// ErrIndexOutOfRange is an error that is returned when an invalid index is +// provided when access a single element of a repeated field. +var ErrIndexOutOfRange = errors.New("index is out of range") + +// IndexOutOfRangeError is the same as ErrIndexOutOfRange. 
+// Deprecated: use ErrIndexOutOfRange +var IndexOutOfRangeError = ErrIndexOutOfRange + +// ErrNumericOverflow is an error returned by operations that encounter a +// numeric value that is too large, for example de-serializing a value into an +// int32 field when the value is larger that can fit into a 32-bit value. +var ErrNumericOverflow = errors.New("numeric value is out of range") + +// NumericOverflowError is the same as ErrNumericOverflow. +// Deprecated: use ErrNumericOverflow +var NumericOverflowError = ErrNumericOverflow + +var typeOfProtoMessage = reflect.TypeOf((*proto.Message)(nil)).Elem() +var typeOfDynamicMessage = reflect.TypeOf((*Message)(nil)) +var typeOfBytes = reflect.TypeOf(([]byte)(nil)) + +// Message is a dynamic protobuf message. Instead of a generated struct, +// like most protobuf messages, this is a map of field number to values and +// a message descriptor, which is used to validate the field values and +// also to de-serialize messages (from the standard binary format, as well +// as from the text format and from JSON). +type Message struct { + md *desc.MessageDescriptor + er *ExtensionRegistry + mf *MessageFactory + extraFields map[int32]*desc.FieldDescriptor + values map[int32]interface{} + unknownFields map[int32][]UnknownField +} + +// UnknownField represents a field that was parsed from the binary wire +// format for a message, but was not a recognized field number. Enough +// information is preserved so that re-serializing the message won't lose +// any of the unrecognized data. +type UnknownField struct { + // Encoding indicates how the unknown field was encoded on the wire. If it + // is proto.WireBytes or proto.WireGroupStart then Contents will be set to + // the raw bytes. If it is proto.WireTypeFixed32 then the data is in the least + // significant 32 bits of Value. Otherwise, the data is in all 64 bits of + // Value. + Encoding int8 + Contents []byte + Value uint64 +} + +// NewMessage creates a new dynamic message for the type represented by the given +// message descriptor. During de-serialization, a default MessageFactory is used to +// instantiate any nested message fields and no extension fields will be parsed. To +// use a custom MessageFactory or ExtensionRegistry, use MessageFactory.NewMessage. +func NewMessage(md *desc.MessageDescriptor) *Message { + return NewMessageWithMessageFactory(md, nil) +} + +// NewMessageWithExtensionRegistry creates a new dynamic message for the type +// represented by the given message descriptor. During de-serialization, the given +// ExtensionRegistry is used to parse extension fields and nested messages will be +// instantiated using dynamic.NewMessageFactoryWithExtensionRegistry(er). +func NewMessageWithExtensionRegistry(md *desc.MessageDescriptor, er *ExtensionRegistry) *Message { + mf := NewMessageFactoryWithExtensionRegistry(er) + return NewMessageWithMessageFactory(md, mf) +} + +// NewMessageWithMessageFactory creates a new dynamic message for the type +// represented by the given message descriptor. During de-serialization, the given +// MessageFactory is used to instantiate nested messages. +func NewMessageWithMessageFactory(md *desc.MessageDescriptor, mf *MessageFactory) *Message { + var er *ExtensionRegistry + if mf != nil { + er = mf.er + } + return &Message{ + md: md, + mf: mf, + er: er, + } +} + +// AsDynamicMessage converts the given message to a dynamic message. If the +// given message is dynamic, it is returned. Otherwise, a dynamic message is +// created using NewMessage. 
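+//
+// A minimal sketch using a generated message type that this package already
+// imports (the field value is illustrative only):
+//
+//	fdp := &descriptorpb.FileDescriptorProto{Name: proto.String("example.proto")}
+//	dm, err := AsDynamicMessage(fdp)
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = dm.GetFieldByName("name") // "example.proto"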
+func AsDynamicMessage(msg proto.Message) (*Message, error) { + return AsDynamicMessageWithMessageFactory(msg, nil) +} + +// AsDynamicMessageWithExtensionRegistry converts the given message to a dynamic +// message. If the given message is dynamic, it is returned. Otherwise, a +// dynamic message is created using NewMessageWithExtensionRegistry. +func AsDynamicMessageWithExtensionRegistry(msg proto.Message, er *ExtensionRegistry) (*Message, error) { + mf := NewMessageFactoryWithExtensionRegistry(er) + return AsDynamicMessageWithMessageFactory(msg, mf) +} + +// AsDynamicMessageWithMessageFactory converts the given message to a dynamic +// message. If the given message is dynamic, it is returned. Otherwise, a +// dynamic message is created using NewMessageWithMessageFactory. +func AsDynamicMessageWithMessageFactory(msg proto.Message, mf *MessageFactory) (*Message, error) { + if dm, ok := msg.(*Message); ok { + return dm, nil + } + md, err := desc.LoadMessageDescriptorForMessage(msg) + if err != nil { + return nil, err + } + dm := NewMessageWithMessageFactory(md, mf) + err = dm.mergeFrom(msg) + if err != nil { + return nil, err + } + return dm, nil +} + +// GetMessageDescriptor returns a descriptor for this message's type. +func (m *Message) GetMessageDescriptor() *desc.MessageDescriptor { + return m.md +} + +// GetKnownFields returns a slice of descriptors for all known fields. The +// fields will not be in any defined order. +func (m *Message) GetKnownFields() []*desc.FieldDescriptor { + if len(m.extraFields) == 0 { + return m.md.GetFields() + } + flds := make([]*desc.FieldDescriptor, len(m.md.GetFields()), len(m.md.GetFields())+len(m.extraFields)) + copy(flds, m.md.GetFields()) + for _, fld := range m.extraFields { + if !fld.IsExtension() { + flds = append(flds, fld) + } + } + return flds +} + +// GetKnownExtensions returns a slice of descriptors for all extensions known by +// the message's extension registry. The fields will not be in any defined order. +func (m *Message) GetKnownExtensions() []*desc.FieldDescriptor { + if !m.md.IsExtendable() { + return nil + } + exts := m.er.AllExtensionsForType(m.md.GetFullyQualifiedName()) + for _, fld := range m.extraFields { + if fld.IsExtension() { + exts = append(exts, fld) + } + } + return exts +} + +// GetUnknownFields returns a slice of tag numbers for all unknown fields that +// this message contains. The tags will not be in any defined order. +func (m *Message) GetUnknownFields() []int32 { + flds := make([]int32, 0, len(m.unknownFields)) + for tag := range m.unknownFields { + flds = append(flds, tag) + } + return flds +} + +// Descriptor returns the serialized form of the file descriptor in which the +// message was defined and a path to the message type therein. This mimics the +// method of the same name on message types generated by protoc. 
+func (m *Message) Descriptor() ([]byte, []int) { + // get encoded file descriptor + b, err := proto.Marshal(m.md.GetFile().AsProto()) + if err != nil { + panic(fmt.Sprintf("failed to get encoded descriptor for %s: %v", m.md.GetFile().GetName(), err)) + } + var zippedBytes bytes.Buffer + w := gzip.NewWriter(&zippedBytes) + if _, err := w.Write(b); err != nil { + panic(fmt.Sprintf("failed to get encoded descriptor for %s: %v", m.md.GetFile().GetName(), err)) + } + if err := w.Close(); err != nil { + panic(fmt.Sprintf("failed to get an encoded descriptor for %s: %v", m.md.GetFile().GetName(), err)) + } + + // and path to message + path := []int{} + var d desc.Descriptor + name := m.md.GetFullyQualifiedName() + for d = m.md.GetParent(); d != nil; name, d = d.GetFullyQualifiedName(), d.GetParent() { + found := false + switch d := d.(type) { + case (*desc.FileDescriptor): + for i, md := range d.GetMessageTypes() { + if md.GetFullyQualifiedName() == name { + found = true + path = append(path, i) + } + } + case (*desc.MessageDescriptor): + for i, md := range d.GetNestedMessageTypes() { + if md.GetFullyQualifiedName() == name { + found = true + path = append(path, i) + } + } + } + if !found { + panic(fmt.Sprintf("failed to compute descriptor path for %s", m.md.GetFullyQualifiedName())) + } + } + // reverse the path + i := 0 + j := len(path) - 1 + for i < j { + path[i], path[j] = path[j], path[i] + i++ + j-- + } + + return zippedBytes.Bytes(), path +} + +// XXX_MessageName returns the fully qualified name of this message's type. This +// allows dynamic messages to be used with proto.MessageName. +func (m *Message) XXX_MessageName() string { + return m.md.GetFullyQualifiedName() +} + +// FindFieldDescriptor returns a field descriptor for the given tag number. This +// searches known fields in the descriptor, known fields discovered during calls +// to GetField or SetField, and extension fields known by the message's extension +// registry. It returns nil if the tag is unknown. +func (m *Message) FindFieldDescriptor(tagNumber int32) *desc.FieldDescriptor { + fd := m.md.FindFieldByNumber(tagNumber) + if fd != nil { + return fd + } + fd = m.er.FindExtension(m.md.GetFullyQualifiedName(), tagNumber) + if fd != nil { + return fd + } + return m.extraFields[tagNumber] +} + +// FindFieldDescriptorByName returns a field descriptor for the given field +// name. This searches known fields in the descriptor, known fields discovered +// during calls to GetField or SetField, and extension fields known by the +// message's extension registry. It returns nil if the name is unknown. If the +// given name refers to an extension, it should be fully qualified and may be +// optionally enclosed in parentheses or brackets. 
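+//
+// A minimal sketch, assuming an extension named "foo.bar.my_ext" is known to
+// this message (the extension name is illustrative only); all three lookups
+// below resolve to the same descriptor, or nil if it is unknown:
+//
+//	fd1 := m.FindFieldDescriptorByName("foo.bar.my_ext")
+//	fd2 := m.FindFieldDescriptorByName("(foo.bar.my_ext)")
+//	fd3 := m.FindFieldDescriptorByName("[foo.bar.my_ext]")
+//	_, _, _ = fd1, fd2, fd3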
+func (m *Message) FindFieldDescriptorByName(name string) *desc.FieldDescriptor { + if name == "" { + return nil + } + fd := m.md.FindFieldByName(name) + if fd != nil { + return fd + } + mustBeExt := false + if name[0] == '(' { + if name[len(name)-1] != ')' { + // malformed name + return nil + } + mustBeExt = true + name = name[1 : len(name)-1] + } else if name[0] == '[' { + if name[len(name)-1] != ']' { + // malformed name + return nil + } + mustBeExt = true + name = name[1 : len(name)-1] + } + fd = m.er.FindExtensionByName(m.md.GetFullyQualifiedName(), name) + if fd != nil { + return fd + } + for _, fd := range m.extraFields { + if fd.IsExtension() && name == fd.GetFullyQualifiedName() { + return fd + } else if !mustBeExt && !fd.IsExtension() && name == fd.GetName() { + return fd + } + } + + return nil +} + +// FindFieldDescriptorByJSONName returns a field descriptor for the given JSON +// name. This searches known fields in the descriptor, known fields discovered +// during calls to GetField or SetField, and extension fields known by the +// message's extension registry. If no field matches the given JSON name, it +// will fall back to searching field names (e.g. FindFieldDescriptorByName). If +// this also yields no match, nil is returned. +func (m *Message) FindFieldDescriptorByJSONName(name string) *desc.FieldDescriptor { + if name == "" { + return nil + } + fd := m.md.FindFieldByJSONName(name) + if fd != nil { + return fd + } + mustBeExt := false + if name[0] == '(' { + if name[len(name)-1] != ')' { + // malformed name + return nil + } + mustBeExt = true + name = name[1 : len(name)-1] + } else if name[0] == '[' { + if name[len(name)-1] != ']' { + // malformed name + return nil + } + mustBeExt = true + name = name[1 : len(name)-1] + } + fd = m.er.FindExtensionByJSONName(m.md.GetFullyQualifiedName(), name) + if fd != nil { + return fd + } + for _, fd := range m.extraFields { + if fd.IsExtension() && name == fd.GetFullyQualifiedJSONName() { + return fd + } else if !mustBeExt && !fd.IsExtension() && name == fd.GetJSONName() { + return fd + } + } + + // try non-JSON names + return m.FindFieldDescriptorByName(name) +} + +func (m *Message) checkField(fd *desc.FieldDescriptor) error { + return checkField(fd, m.md) +} + +func checkField(fd *desc.FieldDescriptor, md *desc.MessageDescriptor) error { + if fd.GetOwner().GetFullyQualifiedName() != md.GetFullyQualifiedName() { + return fmt.Errorf("given field, %s, is for wrong message type: %s; expecting %s", fd.GetName(), fd.GetOwner().GetFullyQualifiedName(), md.GetFullyQualifiedName()) + } + if fd.IsExtension() && !md.IsExtension(fd.GetNumber()) { + return fmt.Errorf("given field, %s, is an extension but is not in message extension range: %v", fd.GetFullyQualifiedName(), md.GetExtensionRanges()) + } + return nil +} + +// GetField returns the value for the given field descriptor. It panics if an +// error is encountered. See TryGetField. +func (m *Message) GetField(fd *desc.FieldDescriptor) interface{} { + if v, err := m.TryGetField(fd); err != nil { + panic(err.Error()) + } else { + return v + } +} + +// TryGetField returns the value for the given field descriptor. An error is +// returned if the given field descriptor does not belong to the right message +// type. +// +// The Go type of the returned value, for scalar fields, is the same as protoc +// would generate for the field (in a non-dynamic message). The table below +// lists the scalar types and the corresponding Go types. 
+// +// +-------------------------+-----------+ +// | Declared Type | Go Type | +// +-------------------------+-----------+ +// | int32, sint32, sfixed32 | int32 | +// | int64, sint64, sfixed64 | int64 | +// | uint32, fixed32 | uint32 | +// | uint64, fixed64 | uint64 | +// | float | float32 | +// | double | double32 | +// | bool | bool | +// | string | string | +// | bytes | []byte | +// +-------------------------+-----------+ +// +// Values for enum fields will always be int32 values. You can use the enum +// descriptor associated with the field to lookup value names with those values. +// Values for message type fields may be an instance of the generated type *or* +// may be another *dynamic.Message that represents the type. +// +// If the given field is a map field, the returned type will be +// map[interface{}]interface{}. The actual concrete types of keys and values is +// as described above. If the given field is a (non-map) repeated field, the +// returned type is always []interface{}; the type of the actual elements is as +// described above. +// +// If this message has no value for the given field, its default value is +// returned. If the message is defined in a file with "proto3" syntax, the +// default is always the zero value for the field. The default value for map and +// repeated fields is a nil map or slice (respectively). For field's whose types +// is a message, the default value is an empty message for "proto2" syntax or a +// nil message for "proto3" syntax. Note that the in the latter case, a non-nil +// interface with a nil pointer is returned, not a nil interface. Also note that +// whether the returned value is an empty message or nil depends on if *this* +// message was defined as "proto3" syntax, not the message type referred to by +// the field's type. +// +// If the given field descriptor is not known (e.g. not present in the message +// descriptor) but corresponds to an unknown field, the unknown value will be +// parsed and become known. The parsed value will be returned, or an error will +// be returned if the unknown value cannot be parsed according to the field +// descriptor's type information. +func (m *Message) TryGetField(fd *desc.FieldDescriptor) (interface{}, error) { + if err := m.checkField(fd); err != nil { + return nil, err + } + return m.getField(fd) +} + +// GetFieldByName returns the value for the field with the given name. It panics +// if an error is encountered. See TryGetFieldByName. +func (m *Message) GetFieldByName(name string) interface{} { + if v, err := m.TryGetFieldByName(name); err != nil { + panic(err.Error()) + } else { + return v + } +} + +// TryGetFieldByName returns the value for the field with the given name. An +// error is returned if the given name is unknown. If the given name refers to +// an extension field, it should be fully qualified and optionally enclosed in +// parenthesis or brackets. +// +// If this message has no value for the given field, its default value is +// returned. (See TryGetField for more info on types and default field values.) +func (m *Message) TryGetFieldByName(name string) (interface{}, error) { + fd := m.FindFieldDescriptorByName(name) + if fd == nil { + return nil, UnknownFieldNameError + } + return m.getField(fd) +} + +// GetFieldByNumber returns the value for the field with the given tag number. +// It panics if an error is encountered. See TryGetFieldByNumber. 
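+//
+// A minimal sketch of asserting a returned value to its Go type, assuming the
+// message declares a double field with tag 3 (the tag is illustrative only);
+// a protobuf double arrives as a Go float64:
+//
+//	v, err := m.TryGetFieldByNumber(3)
+//	if err != nil {
+//		// handle error
+//	}
+//	ratio := v.(float64)
+//	_ = ratio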
+func (m *Message) GetFieldByNumber(tagNumber int) interface{} { + if v, err := m.TryGetFieldByNumber(tagNumber); err != nil { + panic(err.Error()) + } else { + return v + } +} + +// TryGetFieldByNumber returns the value for the field with the given tag +// number. An error is returned if the given tag is unknown. +// +// If this message has no value for the given field, its default value is +// returned. (See TryGetField for more info on types and default field values.) +func (m *Message) TryGetFieldByNumber(tagNumber int) (interface{}, error) { + fd := m.FindFieldDescriptor(int32(tagNumber)) + if fd == nil { + return nil, UnknownTagNumberError + } + return m.getField(fd) +} + +func (m *Message) getField(fd *desc.FieldDescriptor) (interface{}, error) { + return m.doGetField(fd, false) +} + +func (m *Message) doGetField(fd *desc.FieldDescriptor, nilIfAbsent bool) (interface{}, error) { + res := m.values[fd.GetNumber()] + if res == nil { + var err error + if res, err = m.parseUnknownField(fd); err != nil { + return nil, err + } + if res == nil { + if nilIfAbsent { + return nil, nil + } else { + def := fd.GetDefaultValue() + if def != nil { + return def, nil + } + // GetDefaultValue only returns nil for message types + md := fd.GetMessageType() + if m.md.IsProto3() { + return nilMessage(md), nil + } else { + // for proto2, return default instance of message + return m.mf.NewMessage(md), nil + } + } + } + } + rt := reflect.TypeOf(res) + if rt.Kind() == reflect.Map { + // make defensive copies to prevent caller from storing illegal keys and values + m := res.(map[interface{}]interface{}) + res := map[interface{}]interface{}{} + for k, v := range m { + res[k] = v + } + return res, nil + } else if rt.Kind() == reflect.Slice && rt != typeOfBytes { + // make defensive copies to prevent caller from storing illegal elements + sl := res.([]interface{}) + res := make([]interface{}, len(sl)) + copy(res, sl) + return res, nil + } + return res, nil +} + +func nilMessage(md *desc.MessageDescriptor) interface{} { + // try to return a proper nil pointer + msgType := proto.MessageType(md.GetFullyQualifiedName()) + if msgType != nil && msgType.Implements(typeOfProtoMessage) { + return reflect.Zero(msgType).Interface().(proto.Message) + } + // fallback to nil dynamic message pointer + return (*Message)(nil) +} + +// HasField returns true if this message has a value for the given field. If the +// given field is not valid (e.g. belongs to a different message type), false is +// returned. If this message is defined in a file with "proto3" syntax, this +// will return false even if a field was explicitly assigned its zero value (the +// zero values for a field are intentionally indistinguishable from absent). +func (m *Message) HasField(fd *desc.FieldDescriptor) bool { + if err := m.checkField(fd); err != nil { + return false + } + return m.HasFieldNumber(int(fd.GetNumber())) +} + +// HasFieldName returns true if this message has a value for a field with the +// given name. If the given name is unknown, this returns false. +func (m *Message) HasFieldName(name string) bool { + fd := m.FindFieldDescriptorByName(name) + if fd == nil { + return false + } + return m.HasFieldNumber(int(fd.GetNumber())) +} + +// HasFieldNumber returns true if this message has a value for a field with the +// given tag number. If the given tag is unknown, this returns false. 
+func (m *Message) HasFieldNumber(tagNumber int) bool { + if _, ok := m.values[int32(tagNumber)]; ok { + return true + } + _, ok := m.unknownFields[int32(tagNumber)] + return ok +} + +// SetField sets the value for the given field descriptor to the given value. It +// panics if an error is encountered. See TrySetField. +func (m *Message) SetField(fd *desc.FieldDescriptor, val interface{}) { + if err := m.TrySetField(fd, val); err != nil { + panic(err.Error()) + } +} + +// TrySetField sets the value for the given field descriptor to the given value. +// An error is returned if the given field descriptor does not belong to the +// right message type or if the given value is not a correct/compatible type for +// the given field. +// +// The Go type expected for a field is the same as TryGetField would return for +// the field. So message values can be supplied as either the correct generated +// message type or as a *dynamic.Message. +// +// Since it is cumbersome to work with dynamic messages, some concessions are +// made to simplify usage regarding types: +// +// 1. If a numeric type is provided that can be converted *without loss or +// overflow*, it is accepted. This allows for setting int64 fields using int +// or int32 values. Similarly for uint64 with uint and uint32 values and for +// float64 fields with float32 values. +// 2. The value can be a named type, as long as its underlying type is correct. +// 3. Map and repeated fields can be set using any kind of concrete map or +// slice type, as long as the values within are all of the correct type. So +// a field defined as a 'map` can be set using a +// map[string]int32, a map[string]interface{}, or even a +// map[interface{}]interface{}. +// 4. Finally, dynamic code that chooses to not treat maps as a special-case +// find that they can set map fields using a slice where each element is a +// message that matches the implicit map-entry field message type. +// +// If the given field descriptor is not known (e.g. not present in the message +// descriptor) it will become known. Subsequent operations using tag numbers or +// names will be able to resolve the newly-known type. If the message has a +// value for the unknown value, it is cleared, replaced by the given known +// value. +func (m *Message) TrySetField(fd *desc.FieldDescriptor, val interface{}) error { + if err := m.checkField(fd); err != nil { + return err + } + return m.setField(fd, val) +} + +// SetFieldByName sets the value for the field with the given name to the given +// value. It panics if an error is encountered. See TrySetFieldByName. +func (m *Message) SetFieldByName(name string, val interface{}) { + if err := m.TrySetFieldByName(name, val); err != nil { + panic(err.Error()) + } +} + +// TrySetFieldByName sets the value for the field with the given name to the +// given value. An error is returned if the given name is unknown or if the +// given value has an incorrect type. If the given name refers to an extension +// field, it should be fully qualified and optionally enclosed in parenthesis or +// brackets. +// +// (See TrySetField for more info on types.) +func (m *Message) TrySetFieldByName(name string, val interface{}) error { + fd := m.FindFieldDescriptorByName(name) + if fd == nil { + return UnknownFieldNameError + } + return m.setField(fd, val) +} + +// SetFieldByNumber sets the value for the field with the given tag number to +// the given value. It panics if an error is encountered. See +// TrySetFieldByNumber. 
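+//
+// A minimal sketch of the lenient typing described for TrySetField, assuming
+// the message has an int64 field "count", a string field "label", and a
+// repeated string field "tags" (all field names are illustrative only):
+//
+//	type Label string
+//	_ = m.TrySetFieldByName("count", 10)                // int widens to int64
+//	_ = m.TrySetFieldByName("label", Label("x"))        // named type with string underlying type
+//	_ = m.TrySetFieldByName("tags", []string{"a", "b"}) // typed slice for a repeated field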
+func (m *Message) SetFieldByNumber(tagNumber int, val interface{}) { + if err := m.TrySetFieldByNumber(tagNumber, val); err != nil { + panic(err.Error()) + } +} + +// TrySetFieldByNumber sets the value for the field with the given tag number to +// the given value. An error is returned if the given tag is unknown or if the +// given value has an incorrect type. +// +// (See TrySetField for more info on types.) +func (m *Message) TrySetFieldByNumber(tagNumber int, val interface{}) error { + fd := m.FindFieldDescriptor(int32(tagNumber)) + if fd == nil { + return UnknownTagNumberError + } + return m.setField(fd, val) +} + +func (m *Message) setField(fd *desc.FieldDescriptor, val interface{}) error { + var err error + if val, err = validFieldValue(fd, val); err != nil { + return err + } + m.internalSetField(fd, val) + return nil +} + +func (m *Message) internalSetField(fd *desc.FieldDescriptor, val interface{}) { + if fd.IsRepeated() { + // Unset fields and zero-length fields are indistinguishable, in both + // proto2 and proto3 syntax + if reflect.ValueOf(val).Len() == 0 { + if m.values != nil { + delete(m.values, fd.GetNumber()) + } + return + } + } else if m.md.IsProto3() && fd.GetOneOf() == nil { + // proto3 considers fields that are set to their zero value as unset + // (we already handled repeated fields above) + var equal bool + if b, ok := val.([]byte); ok { + // can't compare slices, so we have to special-case []byte values + equal = ok && bytes.Equal(b, fd.GetDefaultValue().([]byte)) + } else { + defVal := fd.GetDefaultValue() + equal = defVal == val + if !equal && defVal == nil { + // above just checks if value is the nil interface, + // but we should also test if the given value is a + // nil pointer + rv := reflect.ValueOf(val) + if rv.Kind() == reflect.Ptr && rv.IsNil() { + equal = true + } + } + } + if equal { + if m.values != nil { + delete(m.values, fd.GetNumber()) + } + return + } + } + if m.values == nil { + m.values = map[int32]interface{}{} + } + m.values[fd.GetNumber()] = val + // if this field is part of a one-of, make sure all other one-of choices are cleared + od := fd.GetOneOf() + if od != nil { + for _, other := range od.GetChoices() { + if other.GetNumber() != fd.GetNumber() { + delete(m.values, other.GetNumber()) + } + } + } + // also clear any unknown fields + if m.unknownFields != nil { + delete(m.unknownFields, fd.GetNumber()) + } + // and add this field if it was previously unknown + if existing := m.FindFieldDescriptor(fd.GetNumber()); existing == nil { + m.addField(fd) + } +} + +func (m *Message) addField(fd *desc.FieldDescriptor) { + if m.extraFields == nil { + m.extraFields = map[int32]*desc.FieldDescriptor{} + } + m.extraFields[fd.GetNumber()] = fd +} + +// ClearField removes any value for the given field. It panics if an error is +// encountered. See TryClearField. +func (m *Message) ClearField(fd *desc.FieldDescriptor) { + if err := m.TryClearField(fd); err != nil { + panic(err.Error()) + } +} + +// TryClearField removes any value for the given field. An error is returned if +// the given field descriptor does not belong to the right message type. +func (m *Message) TryClearField(fd *desc.FieldDescriptor) error { + if err := m.checkField(fd); err != nil { + return err + } + m.clearField(fd) + return nil +} + +// ClearFieldByName removes any value for the field with the given name. It +// panics if an error is encountered. See TryClearFieldByName. 
+func (m *Message) ClearFieldByName(name string) { + if err := m.TryClearFieldByName(name); err != nil { + panic(err.Error()) + } +} + +// TryClearFieldByName removes any value for the field with the given name. An +// error is returned if the given name is unknown. If the given name refers to +// an extension field, it should be fully qualified and optionally enclosed in +// parenthesis or brackets. +func (m *Message) TryClearFieldByName(name string) error { + fd := m.FindFieldDescriptorByName(name) + if fd == nil { + return UnknownFieldNameError + } + m.clearField(fd) + return nil +} + +// ClearFieldByNumber removes any value for the field with the given tag number. +// It panics if an error is encountered. See TryClearFieldByNumber. +func (m *Message) ClearFieldByNumber(tagNumber int) { + if err := m.TryClearFieldByNumber(tagNumber); err != nil { + panic(err.Error()) + } +} + +// TryClearFieldByNumber removes any value for the field with the given tag +// number. An error is returned if the given tag is unknown. +func (m *Message) TryClearFieldByNumber(tagNumber int) error { + fd := m.FindFieldDescriptor(int32(tagNumber)) + if fd == nil { + return UnknownTagNumberError + } + m.clearField(fd) + return nil +} + +func (m *Message) clearField(fd *desc.FieldDescriptor) { + // clear value + if m.values != nil { + delete(m.values, fd.GetNumber()) + } + // also clear any unknown fields + if m.unknownFields != nil { + delete(m.unknownFields, fd.GetNumber()) + } + // and add this field if it was previously unknown + if existing := m.FindFieldDescriptor(fd.GetNumber()); existing == nil { + m.addField(fd) + } +} + +// GetOneOfField returns which of the given one-of's fields is set and the +// corresponding value. It panics if an error is encountered. See +// TryGetOneOfField. +func (m *Message) GetOneOfField(od *desc.OneOfDescriptor) (*desc.FieldDescriptor, interface{}) { + if fd, val, err := m.TryGetOneOfField(od); err != nil { + panic(err.Error()) + } else { + return fd, val + } +} + +// TryGetOneOfField returns which of the given one-of's fields is set and the +// corresponding value. An error is returned if the given one-of belongs to the +// wrong message type. If the given one-of has no field set, this method will +// return nil, nil. +// +// The type of the value, if one is set, is the same as would be returned by +// TryGetField using the returned field descriptor. +// +// Like with TryGetField, if the given one-of contains any fields that are not +// known (e.g. not present in this message's descriptor), they will become known +// and any unknown value will be parsed (and become a known value on success). +func (m *Message) TryGetOneOfField(od *desc.OneOfDescriptor) (*desc.FieldDescriptor, interface{}, error) { + if od.GetOwner().GetFullyQualifiedName() != m.md.GetFullyQualifiedName() { + return nil, nil, fmt.Errorf("given one-of, %s, is for wrong message type: %s; expecting %s", od.GetName(), od.GetOwner().GetFullyQualifiedName(), m.md.GetFullyQualifiedName()) + } + for _, fd := range od.GetChoices() { + val, err := m.doGetField(fd, true) + if err != nil { + return nil, nil, err + } + if val != nil { + return fd, val, nil + } + } + return nil, nil, nil +} + +// ClearOneOfField removes any value for any of the given one-of's fields. It +// panics if an error is encountered. See TryClearOneOfField. 
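+//
+// A minimal sketch, assuming od is a *desc.OneOfDescriptor taken from this
+// message's descriptor:
+//
+//	fd, val, err := m.TryGetOneOfField(od)
+//	if err != nil {
+//		// handle error
+//	}
+//	if fd != nil {
+//		_ = val // value of whichever choice is currently set
+//	}
+//	m.ClearOneOfField(od) // afterwards no choice in od is set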
+func (m *Message) ClearOneOfField(od *desc.OneOfDescriptor) { + if err := m.TryClearOneOfField(od); err != nil { + panic(err.Error()) + } +} + +// TryClearOneOfField removes any value for any of the given one-of's fields. An +// error is returned if the given one-of descriptor does not belong to the right +// message type. +func (m *Message) TryClearOneOfField(od *desc.OneOfDescriptor) error { + if od.GetOwner().GetFullyQualifiedName() != m.md.GetFullyQualifiedName() { + return fmt.Errorf("given one-of, %s, is for wrong message type: %s; expecting %s", od.GetName(), od.GetOwner().GetFullyQualifiedName(), m.md.GetFullyQualifiedName()) + } + for _, fd := range od.GetChoices() { + m.clearField(fd) + } + return nil +} + +// GetMapField returns the value for the given map field descriptor and given +// key. It panics if an error is encountered. See TryGetMapField. +func (m *Message) GetMapField(fd *desc.FieldDescriptor, key interface{}) interface{} { + if v, err := m.TryGetMapField(fd, key); err != nil { + panic(err.Error()) + } else { + return v + } +} + +// TryGetMapField returns the value for the given map field descriptor and given +// key. An error is returned if the given field descriptor does not belong to +// the right message type or if it is not a map field. +// +// If the map field does not contain the requested key, this method returns +// nil, nil. The Go type of the value returned mirrors the type that protoc +// would generate for the field. (See TryGetField for more details on types). +// +// If the given field descriptor is not known (e.g. not present in the message +// descriptor) but corresponds to an unknown field, the unknown value will be +// parsed and become known. The parsed value will be searched for the requested +// key and any value returned. An error will be returned if the unknown value +// cannot be parsed according to the field descriptor's type information. +func (m *Message) TryGetMapField(fd *desc.FieldDescriptor, key interface{}) (interface{}, error) { + if err := m.checkField(fd); err != nil { + return nil, err + } + return m.getMapField(fd, key) +} + +// GetMapFieldByName returns the value for the map field with the given name and +// given key. It panics if an error is encountered. See TryGetMapFieldByName. +func (m *Message) GetMapFieldByName(name string, key interface{}) interface{} { + if v, err := m.TryGetMapFieldByName(name, key); err != nil { + panic(err.Error()) + } else { + return v + } +} + +// TryGetMapFieldByName returns the value for the map field with the given name +// and given key. An error is returned if the given name is unknown or if it +// names a field that is not a map field. +// +// If this message has no value for the given field or the value has no value +// for the requested key, then this method returns nil, nil. +// +// (See TryGetField for more info on types.) +func (m *Message) TryGetMapFieldByName(name string, key interface{}) (interface{}, error) { + fd := m.FindFieldDescriptorByName(name) + if fd == nil { + return nil, UnknownFieldNameError + } + return m.getMapField(fd, key) +} + +// GetMapFieldByNumber returns the value for the map field with the given tag +// number and given key. It panics if an error is encountered. See +// TryGetMapFieldByNumber. 
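+//
+// A minimal sketch, assuming the message has a map<string, int32> field named
+// "counts" (the field and key names are illustrative only):
+//
+//	v, err := m.TryGetMapFieldByName("counts", "requests")
+//	if err != nil {
+//		// handle error
+//	}
+//	if v != nil {
+//		n := v.(int32) // a nil result means no entry for "requests"
+//		_ = n
+//	}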
+func (m *Message) GetMapFieldByNumber(tagNumber int, key interface{}) interface{} { + if v, err := m.TryGetMapFieldByNumber(tagNumber, key); err != nil { + panic(err.Error()) + } else { + return v + } +} + +// TryGetMapFieldByNumber returns the value for the map field with the given tag +// number and given key. An error is returned if the given tag is unknown or if +// it indicates a field that is not a map field. +// +// If this message has no value for the given field or the value has no value +// for the requested key, then this method returns nil, nil. +// +// (See TryGetField for more info on types.) +func (m *Message) TryGetMapFieldByNumber(tagNumber int, key interface{}) (interface{}, error) { + fd := m.FindFieldDescriptor(int32(tagNumber)) + if fd == nil { + return nil, UnknownTagNumberError + } + return m.getMapField(fd, key) +} + +func (m *Message) getMapField(fd *desc.FieldDescriptor, key interface{}) (interface{}, error) { + if !fd.IsMap() { + return nil, FieldIsNotMapError + } + kfd := fd.GetMessageType().GetFields()[0] + ki, err := validElementFieldValue(kfd, key, false) + if err != nil { + return nil, err + } + mp := m.values[fd.GetNumber()] + if mp == nil { + if mp, err = m.parseUnknownField(fd); err != nil { + return nil, err + } else if mp == nil { + return nil, nil + } + } + return mp.(map[interface{}]interface{})[ki], nil +} + +// ForEachMapFieldEntry executes the given function for each entry in the map +// value for the given field descriptor. It stops iteration if the function +// returns false. It panics if an error is encountered. See +// TryForEachMapFieldEntry. +func (m *Message) ForEachMapFieldEntry(fd *desc.FieldDescriptor, fn func(key, val interface{}) bool) { + if err := m.TryForEachMapFieldEntry(fd, fn); err != nil { + panic(err.Error()) + } +} + +// TryForEachMapFieldEntry executes the given function for each entry in the map +// value for the given field descriptor. An error is returned if the given field +// descriptor does not belong to the right message type or if it is not a map +// field. +// +// Iteration ends either when all entries have been examined or when the given +// function returns false. So the function is expected to return true for normal +// iteration and false to break out. If this message has no value for the given +// field, it returns without invoking the given function. +// +// The Go type of the key and value supplied to the function mirrors the type +// that protoc would generate for the field. (See TryGetField for more details +// on types). +// +// If the given field descriptor is not known (e.g. not present in the message +// descriptor) but corresponds to an unknown field, the unknown value will be +// parsed and become known. The parsed value will be searched for the requested +// key and any value returned. An error will be returned if the unknown value +// cannot be parsed according to the field descriptor's type information. +func (m *Message) TryForEachMapFieldEntry(fd *desc.FieldDescriptor, fn func(key, val interface{}) bool) error { + if err := m.checkField(fd); err != nil { + return err + } + return m.forEachMapFieldEntry(fd, fn) +} + +// ForEachMapFieldEntryByName executes the given function for each entry in the +// map value for the field with the given name. It stops iteration if the +// function returns false. It panics if an error is encountered. See +// TryForEachMapFieldEntryByName. 
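+//
+// A minimal sketch, again assuming an illustrative map<string, int32> field
+// named "counts":
+//
+//	var total int32
+//	err := m.TryForEachMapFieldEntryByName("counts", func(k, v interface{}) bool {
+//		total += v.(int32)
+//		return true // keep iterating
+//	})
+//	if err != nil {
+//		// handle error
+//	}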
+func (m *Message) ForEachMapFieldEntryByName(name string, fn func(key, val interface{}) bool) { + if err := m.TryForEachMapFieldEntryByName(name, fn); err != nil { + panic(err.Error()) + } +} + +// TryForEachMapFieldEntryByName executes the given function for each entry in +// the map value for the field with the given name. It stops iteration if the +// function returns false. An error is returned if the given name is unknown or +// if it names a field that is not a map field. +// +// If this message has no value for the given field, it returns without ever +// invoking the given function. +// +// (See TryGetField for more info on types supplied to the function.) +func (m *Message) TryForEachMapFieldEntryByName(name string, fn func(key, val interface{}) bool) error { + fd := m.FindFieldDescriptorByName(name) + if fd == nil { + return UnknownFieldNameError + } + return m.forEachMapFieldEntry(fd, fn) +} + +// ForEachMapFieldEntryByNumber executes the given function for each entry in +// the map value for the field with the given tag number. It stops iteration if +// the function returns false. It panics if an error is encountered. See +// TryForEachMapFieldEntryByNumber. +func (m *Message) ForEachMapFieldEntryByNumber(tagNumber int, fn func(key, val interface{}) bool) { + if err := m.TryForEachMapFieldEntryByNumber(tagNumber, fn); err != nil { + panic(err.Error()) + } +} + +// TryForEachMapFieldEntryByNumber executes the given function for each entry in +// the map value for the field with the given tag number. It stops iteration if +// the function returns false. An error is returned if the given tag is unknown +// or if it indicates a field that is not a map field. +// +// If this message has no value for the given field, it returns without ever +// invoking the given function. +// +// (See TryGetField for more info on types supplied to the function.) +func (m *Message) TryForEachMapFieldEntryByNumber(tagNumber int, fn func(key, val interface{}) bool) error { + fd := m.FindFieldDescriptor(int32(tagNumber)) + if fd == nil { + return UnknownTagNumberError + } + return m.forEachMapFieldEntry(fd, fn) +} + +func (m *Message) forEachMapFieldEntry(fd *desc.FieldDescriptor, fn func(key, val interface{}) bool) error { + if !fd.IsMap() { + return FieldIsNotMapError + } + mp := m.values[fd.GetNumber()] + if mp == nil { + if mp, err := m.parseUnknownField(fd); err != nil { + return err + } else if mp == nil { + return nil + } + } + for k, v := range mp.(map[interface{}]interface{}) { + if !fn(k, v) { + break + } + } + return nil +} + +// PutMapField sets the value for the given map field descriptor and given key +// to the given value. It panics if an error is encountered. See TryPutMapField. +func (m *Message) PutMapField(fd *desc.FieldDescriptor, key interface{}, val interface{}) { + if err := m.TryPutMapField(fd, key, val); err != nil { + panic(err.Error()) + } +} + +// TryPutMapField sets the value for the given map field descriptor and given +// key to the given value. An error is returned if the given field descriptor +// does not belong to the right message type, if the given field is not a map +// field, or if the given value is not a correct/compatible type for the given +// field. +// +// The Go type expected for a field is the same as required by TrySetField for +// a field with the same type as the map's value type. +// +// If the given field descriptor is not known (e.g. not present in the message +// descriptor) it will become known. 
Subsequent operations using tag numbers or +// names will be able to resolve the newly-known type. If the message has a +// value for the unknown value, it is cleared, replaced by the given known +// value. +func (m *Message) TryPutMapField(fd *desc.FieldDescriptor, key interface{}, val interface{}) error { + if err := m.checkField(fd); err != nil { + return err + } + return m.putMapField(fd, key, val) +} + +// PutMapFieldByName sets the value for the map field with the given name and +// given key to the given value. It panics if an error is encountered. See +// TryPutMapFieldByName. +func (m *Message) PutMapFieldByName(name string, key interface{}, val interface{}) { + if err := m.TryPutMapFieldByName(name, key, val); err != nil { + panic(err.Error()) + } +} + +// TryPutMapFieldByName sets the value for the map field with the given name and +// the given key to the given value. An error is returned if the given name is +// unknown, if it names a field that is not a map, or if the given value has an +// incorrect type. +// +// (See TrySetField for more info on types.) +func (m *Message) TryPutMapFieldByName(name string, key interface{}, val interface{}) error { + fd := m.FindFieldDescriptorByName(name) + if fd == nil { + return UnknownFieldNameError + } + return m.putMapField(fd, key, val) +} + +// PutMapFieldByNumber sets the value for the map field with the given tag +// number and given key to the given value. It panics if an error is +// encountered. See TryPutMapFieldByNumber. +func (m *Message) PutMapFieldByNumber(tagNumber int, key interface{}, val interface{}) { + if err := m.TryPutMapFieldByNumber(tagNumber, key, val); err != nil { + panic(err.Error()) + } +} + +// TryPutMapFieldByNumber sets the value for the map field with the given tag +// number and the given key to the given value. An error is returned if the +// given tag is unknown, if it indicates a field that is not a map, or if the +// given value has an incorrect type. +// +// (See TrySetField for more info on types.) +func (m *Message) TryPutMapFieldByNumber(tagNumber int, key interface{}, val interface{}) error { + fd := m.FindFieldDescriptor(int32(tagNumber)) + if fd == nil { + return UnknownTagNumberError + } + return m.putMapField(fd, key, val) +} + +func (m *Message) putMapField(fd *desc.FieldDescriptor, key interface{}, val interface{}) error { + if !fd.IsMap() { + return FieldIsNotMapError + } + kfd := fd.GetMessageType().GetFields()[0] + ki, err := validElementFieldValue(kfd, key, false) + if err != nil { + return err + } + vfd := fd.GetMessageType().GetFields()[1] + vi, err := validElementFieldValue(vfd, val, true) + if err != nil { + return err + } + mp := m.values[fd.GetNumber()] + if mp == nil { + if mp, err = m.parseUnknownField(fd); err != nil { + return err + } else if mp == nil { + m.internalSetField(fd, map[interface{}]interface{}{ki: vi}) + return nil + } + } + mp.(map[interface{}]interface{})[ki] = vi + return nil +} + +// RemoveMapField changes the value for the given field descriptor by removing +// any value associated with the given key. It panics if an error is +// encountered. See TryRemoveMapField. +func (m *Message) RemoveMapField(fd *desc.FieldDescriptor, key interface{}) { + if err := m.TryRemoveMapField(fd, key); err != nil { + panic(err.Error()) + } +} + +// TryRemoveMapField changes the value for the given field descriptor by +// removing any value associated with the given key. 
An error is returned if the +// given field descriptor does not belong to the right message type or if the +// given field is not a map field. +// +// If the given field descriptor is not known (e.g. not present in the message +// descriptor) it will become known. Subsequent operations using tag numbers or +// names will be able to resolve the newly-known type. If the message has a +// value for the unknown value, it is parsed and any value for the given key +// removed. +func (m *Message) TryRemoveMapField(fd *desc.FieldDescriptor, key interface{}) error { + if err := m.checkField(fd); err != nil { + return err + } + return m.removeMapField(fd, key) +} + +// RemoveMapFieldByName changes the value for the field with the given name by +// removing any value associated with the given key. It panics if an error is +// encountered. See TryRemoveMapFieldByName. +func (m *Message) RemoveMapFieldByName(name string, key interface{}) { + if err := m.TryRemoveMapFieldByName(name, key); err != nil { + panic(err.Error()) + } +} + +// TryRemoveMapFieldByName changes the value for the field with the given name +// by removing any value associated with the given key. An error is returned if +// the given name is unknown or if it names a field that is not a map. +func (m *Message) TryRemoveMapFieldByName(name string, key interface{}) error { + fd := m.FindFieldDescriptorByName(name) + if fd == nil { + return UnknownFieldNameError + } + return m.removeMapField(fd, key) +} + +// RemoveMapFieldByNumber changes the value for the field with the given tag +// number by removing any value associated with the given key. It panics if an +// error is encountered. See TryRemoveMapFieldByNumber. +func (m *Message) RemoveMapFieldByNumber(tagNumber int, key interface{}) { + if err := m.TryRemoveMapFieldByNumber(tagNumber, key); err != nil { + panic(err.Error()) + } +} + +// TryRemoveMapFieldByNumber changes the value for the field with the given tag +// number by removing any value associated with the given key. An error is +// returned if the given tag is unknown or if it indicates a field that is not +// a map. +func (m *Message) TryRemoveMapFieldByNumber(tagNumber int, key interface{}) error { + fd := m.FindFieldDescriptor(int32(tagNumber)) + if fd == nil { + return UnknownTagNumberError + } + return m.removeMapField(fd, key) +} + +func (m *Message) removeMapField(fd *desc.FieldDescriptor, key interface{}) error { + if !fd.IsMap() { + return FieldIsNotMapError + } + kfd := fd.GetMessageType().GetFields()[0] + ki, err := validElementFieldValue(kfd, key, false) + if err != nil { + return err + } + mp := m.values[fd.GetNumber()] + if mp == nil { + if mp, err = m.parseUnknownField(fd); err != nil { + return err + } else if mp == nil { + return nil + } + } + res := mp.(map[interface{}]interface{}) + delete(res, ki) + if len(res) == 0 { + delete(m.values, fd.GetNumber()) + } + return nil +} + +// FieldLength returns the number of elements in this message for the given +// field descriptor. It panics if an error is encountered. See TryFieldLength. +func (m *Message) FieldLength(fd *desc.FieldDescriptor) int { + l, err := m.TryFieldLength(fd) + if err != nil { + panic(err.Error()) + } + return l +} + +// TryFieldLength returns the number of elements in this message for the given +// field descriptor. An error is returned if the given field descriptor does not +// belong to the right message type or if it is neither a map field nor a +// repeated field. 
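+//
+// A rough usage sketch for illustration only (the "tags" field name is an
+// assumption, not something defined by this package):
+//
+//    if fd := m.FindFieldDescriptorByName("tags"); fd != nil {
+//        if n, err := m.TryFieldLength(fd); err == nil {
+//            fmt.Printf("tags has %d elements\n", n)
+//        }
+//    }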
+func (m *Message) TryFieldLength(fd *desc.FieldDescriptor) (int, error) { + if err := m.checkField(fd); err != nil { + return 0, err + } + return m.fieldLength(fd) +} + +// FieldLengthByName returns the number of elements in this message for the +// field with the given name. It panics if an error is encountered. See +// TryFieldLengthByName. +func (m *Message) FieldLengthByName(name string) int { + l, err := m.TryFieldLengthByName(name) + if err != nil { + panic(err.Error()) + } + return l +} + +// TryFieldLengthByName returns the number of elements in this message for the +// field with the given name. An error is returned if the given name is unknown +// or if the named field is neither a map field nor a repeated field. +func (m *Message) TryFieldLengthByName(name string) (int, error) { + fd := m.FindFieldDescriptorByName(name) + if fd == nil { + return 0, UnknownFieldNameError + } + return m.fieldLength(fd) +} + +// FieldLengthByNumber returns the number of elements in this message for the +// field with the given tag number. It panics if an error is encountered. See +// TryFieldLengthByNumber. +func (m *Message) FieldLengthByNumber(tagNumber int32) int { + l, err := m.TryFieldLengthByNumber(tagNumber) + if err != nil { + panic(err.Error()) + } + return l +} + +// TryFieldLengthByNumber returns the number of elements in this message for the +// field with the given tag number. An error is returned if the given tag is +// unknown or if the named field is neither a map field nor a repeated field. +func (m *Message) TryFieldLengthByNumber(tagNumber int32) (int, error) { + fd := m.FindFieldDescriptor(int32(tagNumber)) + if fd == nil { + return 0, UnknownTagNumberError + } + return m.fieldLength(fd) +} + +func (m *Message) fieldLength(fd *desc.FieldDescriptor) (int, error) { + if !fd.IsRepeated() { + return 0, FieldIsNotRepeatedError + } + val := m.values[fd.GetNumber()] + if val == nil { + var err error + if val, err = m.parseUnknownField(fd); err != nil { + return 0, err + } else if val == nil { + return 0, nil + } + } + if sl, ok := val.([]interface{}); ok { + return len(sl), nil + } else if mp, ok := val.(map[interface{}]interface{}); ok { + return len(mp), nil + } + return 0, nil +} + +// GetRepeatedField returns the value for the given repeated field descriptor at +// the given index. It panics if an error is encountered. See +// TryGetRepeatedField. +func (m *Message) GetRepeatedField(fd *desc.FieldDescriptor, index int) interface{} { + if v, err := m.TryGetRepeatedField(fd, index); err != nil { + panic(err.Error()) + } else { + return v + } +} + +// TryGetRepeatedField returns the value for the given repeated field descriptor +// at the given index. An error is returned if the given field descriptor does +// not belong to the right message type, if it is not a repeated field, or if +// the given index is out of range (less than zero or greater than or equal to +// the length of the repeated field). Also, even though map fields technically +// are repeated fields, if the given field is a map field an error will result: +// map representation does not lend itself to random access by index. +// +// The Go type of the value returned mirrors the type that protoc would generate +// for the field's element type. (See TryGetField for more details on types). +// +// If the given field descriptor is not known (e.g. not present in the message +// descriptor) but corresponds to an unknown field, the unknown value will be +// parsed and become known. 
The value at the given index in the parsed value +// will be returned. An error will be returned if the unknown value cannot be +// parsed according to the field descriptor's type information. +func (m *Message) TryGetRepeatedField(fd *desc.FieldDescriptor, index int) (interface{}, error) { + if index < 0 { + return nil, IndexOutOfRangeError + } + if err := m.checkField(fd); err != nil { + return nil, err + } + return m.getRepeatedField(fd, index) +} + +// GetRepeatedFieldByName returns the value for the repeated field with the +// given name at the given index. It panics if an error is encountered. See +// TryGetRepeatedFieldByName. +func (m *Message) GetRepeatedFieldByName(name string, index int) interface{} { + if v, err := m.TryGetRepeatedFieldByName(name, index); err != nil { + panic(err.Error()) + } else { + return v + } +} + +// TryGetRepeatedFieldByName returns the value for the repeated field with the +// given name at the given index. An error is returned if the given name is +// unknown, if it names a field that is not a repeated field (or is a map +// field), or if the given index is out of range (less than zero or greater +// than or equal to the length of the repeated field). +// +// (See TryGetField for more info on types.) +func (m *Message) TryGetRepeatedFieldByName(name string, index int) (interface{}, error) { + if index < 0 { + return nil, IndexOutOfRangeError + } + fd := m.FindFieldDescriptorByName(name) + if fd == nil { + return nil, UnknownFieldNameError + } + return m.getRepeatedField(fd, index) +} + +// GetRepeatedFieldByNumber returns the value for the repeated field with the +// given tag number at the given index. It panics if an error is encountered. +// See TryGetRepeatedFieldByNumber. +func (m *Message) GetRepeatedFieldByNumber(tagNumber int, index int) interface{} { + if v, err := m.TryGetRepeatedFieldByNumber(tagNumber, index); err != nil { + panic(err.Error()) + } else { + return v + } +} + +// TryGetRepeatedFieldByNumber returns the value for the repeated field with the +// given tag number at the given index. An error is returned if the given tag is +// unknown, if it indicates a field that is not a repeated field (or is a map +// field), or if the given index is out of range (less than zero or greater than +// or equal to the length of the repeated field). +// +// (See TryGetField for more info on types.) +func (m *Message) TryGetRepeatedFieldByNumber(tagNumber int, index int) (interface{}, error) { + if index < 0 { + return nil, IndexOutOfRangeError + } + fd := m.FindFieldDescriptor(int32(tagNumber)) + if fd == nil { + return nil, UnknownTagNumberError + } + return m.getRepeatedField(fd, index) +} + +func (m *Message) getRepeatedField(fd *desc.FieldDescriptor, index int) (interface{}, error) { + if fd.IsMap() || !fd.IsRepeated() { + return nil, FieldIsNotRepeatedError + } + sl := m.values[fd.GetNumber()] + if sl == nil { + var err error + if sl, err = m.parseUnknownField(fd); err != nil { + return nil, err + } else if sl == nil { + return nil, IndexOutOfRangeError + } + } + res := sl.([]interface{}) + if index >= len(res) { + return nil, IndexOutOfRangeError + } + return res[index], nil +} + +// AddRepeatedField appends the given value to the given repeated field. It +// panics if an error is encountered. See TryAddRepeatedField. 
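+//
+// Illustrative sketch only (the "emails" field name is an assumed example):
+// the non-panicking ByName variant can be used to append elements one at a
+// time:
+//
+//    for _, addr := range []string{"a@example.com", "b@example.com"} {
+//        if err := m.TryAddRepeatedFieldByName("emails", addr); err != nil {
+//            return err
+//        }
+//    }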
+func (m *Message) AddRepeatedField(fd *desc.FieldDescriptor, val interface{}) { + if err := m.TryAddRepeatedField(fd, val); err != nil { + panic(err.Error()) + } +} + +// TryAddRepeatedField appends the given value to the given repeated field. An +// error is returned if the given field descriptor does not belong to the right +// message type, if the given field is not repeated, or if the given value is +// not a correct/compatible type for the given field. If the given field is a +// map field, the call will succeed if the given value is an instance of the +// map's entry message type. +// +// The Go type expected for a field is the same as required by TrySetField for +// a non-repeated field of the same type. +// +// If the given field descriptor is not known (e.g. not present in the message +// descriptor) it will become known. Subsequent operations using tag numbers or +// names will be able to resolve the newly-known type. If the message has a +// value for the unknown value, it is parsed and the given value is appended to +// it. +func (m *Message) TryAddRepeatedField(fd *desc.FieldDescriptor, val interface{}) error { + if err := m.checkField(fd); err != nil { + return err + } + return m.addRepeatedField(fd, val) +} + +// AddRepeatedFieldByName appends the given value to the repeated field with the +// given name. It panics if an error is encountered. See +// TryAddRepeatedFieldByName. +func (m *Message) AddRepeatedFieldByName(name string, val interface{}) { + if err := m.TryAddRepeatedFieldByName(name, val); err != nil { + panic(err.Error()) + } +} + +// TryAddRepeatedFieldByName appends the given value to the repeated field with +// the given name. An error is returned if the given name is unknown, if it +// names a field that is not repeated, or if the given value has an incorrect +// type. +// +// (See TrySetField for more info on types.) +func (m *Message) TryAddRepeatedFieldByName(name string, val interface{}) error { + fd := m.FindFieldDescriptorByName(name) + if fd == nil { + return UnknownFieldNameError + } + return m.addRepeatedField(fd, val) +} + +// AddRepeatedFieldByNumber appends the given value to the repeated field with +// the given tag number. It panics if an error is encountered. See +// TryAddRepeatedFieldByNumber. +func (m *Message) AddRepeatedFieldByNumber(tagNumber int, val interface{}) { + if err := m.TryAddRepeatedFieldByNumber(tagNumber, val); err != nil { + panic(err.Error()) + } +} + +// TryAddRepeatedFieldByNumber appends the given value to the repeated field +// with the given tag number. An error is returned if the given tag is unknown, +// if it indicates a field that is not repeated, or if the given value has an +// incorrect type. +// +// (See TrySetField for more info on types.) +func (m *Message) TryAddRepeatedFieldByNumber(tagNumber int, val interface{}) error { + fd := m.FindFieldDescriptor(int32(tagNumber)) + if fd == nil { + return UnknownTagNumberError + } + return m.addRepeatedField(fd, val) +} + +func (m *Message) addRepeatedField(fd *desc.FieldDescriptor, val interface{}) error { + if !fd.IsRepeated() { + return FieldIsNotRepeatedError + } + val, err := validElementFieldValue(fd, val, false) + if err != nil { + return err + } + + if fd.IsMap() { + // We're lenient. Just as we allow setting a map field to a slice of entry messages, we also allow + // adding entries one at a time (as if the field were a normal repeated field). 
+ msg := val.(proto.Message) + dm, err := asDynamicMessage(msg, fd.GetMessageType(), m.mf) + if err != nil { + return err + } + k, err := dm.TryGetFieldByNumber(1) + if err != nil { + return err + } + v, err := dm.TryGetFieldByNumber(2) + if err != nil { + return err + } + return m.putMapField(fd, k, v) + } + + sl := m.values[fd.GetNumber()] + if sl == nil { + if sl, err = m.parseUnknownField(fd); err != nil { + return err + } else if sl == nil { + sl = []interface{}{} + } + } + res := sl.([]interface{}) + res = append(res, val) + m.internalSetField(fd, res) + return nil +} + +// SetRepeatedField sets the value for the given repeated field descriptor and +// given index to the given value. It panics if an error is encountered. See +// SetRepeatedField. +func (m *Message) SetRepeatedField(fd *desc.FieldDescriptor, index int, val interface{}) { + if err := m.TrySetRepeatedField(fd, index, val); err != nil { + panic(err.Error()) + } +} + +// TrySetRepeatedField sets the value for the given repeated field descriptor +// and given index to the given value. An error is returned if the given field +// descriptor does not belong to the right message type, if the given field is +// not repeated, or if the given value is not a correct/compatible type for the +// given field. Also, even though map fields technically are repeated fields, if +// the given field is a map field an error will result: map representation does +// not lend itself to random access by index. +// +// The Go type expected for a field is the same as required by TrySetField for +// a non-repeated field of the same type. +// +// If the given field descriptor is not known (e.g. not present in the message +// descriptor) it will become known. Subsequent operations using tag numbers or +// names will be able to resolve the newly-known type. If the message has a +// value for the unknown value, it is parsed and the element at the given index +// is replaced with the given value. +func (m *Message) TrySetRepeatedField(fd *desc.FieldDescriptor, index int, val interface{}) error { + if index < 0 { + return IndexOutOfRangeError + } + if err := m.checkField(fd); err != nil { + return err + } + return m.setRepeatedField(fd, index, val) +} + +// SetRepeatedFieldByName sets the value for the repeated field with the given +// name and given index to the given value. It panics if an error is +// encountered. See TrySetRepeatedFieldByName. +func (m *Message) SetRepeatedFieldByName(name string, index int, val interface{}) { + if err := m.TrySetRepeatedFieldByName(name, index, val); err != nil { + panic(err.Error()) + } +} + +// TrySetRepeatedFieldByName sets the value for the repeated field with the +// given name and the given index to the given value. An error is returned if +// the given name is unknown, if it names a field that is not repeated (or is a +// map field), or if the given value has an incorrect type. +// +// (See TrySetField for more info on types.) +func (m *Message) TrySetRepeatedFieldByName(name string, index int, val interface{}) error { + if index < 0 { + return IndexOutOfRangeError + } + fd := m.FindFieldDescriptorByName(name) + if fd == nil { + return UnknownFieldNameError + } + return m.setRepeatedField(fd, index, val) +} + +// SetRepeatedFieldByNumber sets the value for the repeated field with the given +// tag number and given index to the given value. It panics if an error is +// encountered. See TrySetRepeatedFieldByNumber. 
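+//
+// Sketch for illustration (tag number 4 and the string value are assumptions):
+// the index must already exist, so this replaces an element rather than
+// growing the field:
+//
+//    if err := m.TrySetRepeatedFieldByNumber(4, 0, "first"); err != nil {
+//        return err // e.g. IndexOutOfRangeError when the field is empty
+//    }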
+func (m *Message) SetRepeatedFieldByNumber(tagNumber int, index int, val interface{}) { + if err := m.TrySetRepeatedFieldByNumber(tagNumber, index, val); err != nil { + panic(err.Error()) + } +} + +// TrySetRepeatedFieldByNumber sets the value for the repeated field with the +// given tag number and the given index to the given value. An error is returned +// if the given tag is unknown, if it indicates a field that is not repeated (or +// is a map field), or if the given value has an incorrect type. +// +// (See TrySetField for more info on types.) +func (m *Message) TrySetRepeatedFieldByNumber(tagNumber int, index int, val interface{}) error { + if index < 0 { + return IndexOutOfRangeError + } + fd := m.FindFieldDescriptor(int32(tagNumber)) + if fd == nil { + return UnknownTagNumberError + } + return m.setRepeatedField(fd, index, val) +} + +func (m *Message) setRepeatedField(fd *desc.FieldDescriptor, index int, val interface{}) error { + if fd.IsMap() || !fd.IsRepeated() { + return FieldIsNotRepeatedError + } + val, err := validElementFieldValue(fd, val, false) + if err != nil { + return err + } + sl := m.values[fd.GetNumber()] + if sl == nil { + if sl, err = m.parseUnknownField(fd); err != nil { + return err + } else if sl == nil { + return IndexOutOfRangeError + } + } + res := sl.([]interface{}) + if index >= len(res) { + return IndexOutOfRangeError + } + res[index] = val + return nil +} + +// GetUnknownField gets the value(s) for the given unknown tag number. If this +// message has no unknown fields with the given tag, nil is returned. +func (m *Message) GetUnknownField(tagNumber int32) []UnknownField { + if u, ok := m.unknownFields[tagNumber]; ok { + return u + } else { + return nil + } +} + +func (m *Message) parseUnknownField(fd *desc.FieldDescriptor) (interface{}, error) { + unks, ok := m.unknownFields[fd.GetNumber()] + if !ok { + return nil, nil + } + var v interface{} + var sl []interface{} + var mp map[interface{}]interface{} + if fd.IsMap() { + mp = map[interface{}]interface{}{} + } + var err error + for _, unk := range unks { + var val interface{} + if unk.Encoding == proto.WireBytes || unk.Encoding == proto.WireStartGroup { + val, err = codec.DecodeLengthDelimitedField(fd, unk.Contents, m.mf) + } else { + val, err = codec.DecodeScalarField(fd, unk.Value) + } + if err != nil { + return nil, err + } + if fd.IsMap() { + newEntry := val.(*Message) + kk, err := newEntry.TryGetFieldByNumber(1) + if err != nil { + return nil, err + } + vv, err := newEntry.TryGetFieldByNumber(2) + if err != nil { + return nil, err + } + mp[kk] = vv + v = mp + } else if fd.IsRepeated() { + t := reflect.TypeOf(val) + if t.Kind() == reflect.Slice && t != typeOfBytes { + // append slices if we unmarshalled a packed repeated field + newVals := val.([]interface{}) + sl = append(sl, newVals...) 
+ } else { + sl = append(sl, val) + } + v = sl + } else { + v = val + } + } + m.internalSetField(fd, v) + return v, nil +} + +func validFieldValue(fd *desc.FieldDescriptor, val interface{}) (interface{}, error) { + return validFieldValueForRv(fd, reflect.ValueOf(val)) +} + +func validFieldValueForRv(fd *desc.FieldDescriptor, val reflect.Value) (interface{}, error) { + if fd.IsMap() && val.Kind() == reflect.Map { + return validFieldValueForMapField(fd, val) + } + + if fd.IsRepeated() { // this will also catch map fields where given value was not a map + if val.Kind() != reflect.Array && val.Kind() != reflect.Slice { + if fd.IsMap() { + return nil, fmt.Errorf("value for map field must be a map; instead was %v", val.Type()) + } else { + return nil, fmt.Errorf("value for repeated field must be a slice; instead was %v", val.Type()) + } + } + + if fd.IsMap() { + // value should be a slice of entry messages that we need convert into a map[interface{}]interface{} + m := map[interface{}]interface{}{} + for i := 0; i < val.Len(); i++ { + e, err := validElementFieldValue(fd, val.Index(i).Interface(), false) + if err != nil { + return nil, err + } + msg := e.(proto.Message) + dm, err := asDynamicMessage(msg, fd.GetMessageType(), nil) + if err != nil { + return nil, err + } + k, err := dm.TryGetFieldByNumber(1) + if err != nil { + return nil, err + } + v, err := dm.TryGetFieldByNumber(2) + if err != nil { + return nil, err + } + m[k] = v + } + return m, nil + } + + // make a defensive copy while checking contents (also converts to []interface{}) + s := make([]interface{}, val.Len()) + for i := 0; i < val.Len(); i++ { + ev := val.Index(i) + if ev.Kind() == reflect.Interface { + // unwrap it + ev = reflect.ValueOf(ev.Interface()) + } + e, err := validElementFieldValueForRv(fd, ev, false) + if err != nil { + return nil, err + } + s[i] = e + } + + return s, nil + } + + return validElementFieldValueForRv(fd, val, false) +} + +func asDynamicMessage(m proto.Message, md *desc.MessageDescriptor, mf *MessageFactory) (*Message, error) { + if dm, ok := m.(*Message); ok { + return dm, nil + } + dm := NewMessageWithMessageFactory(md, mf) + if err := dm.mergeFrom(m); err != nil { + return nil, err + } + return dm, nil +} + +func validElementFieldValue(fd *desc.FieldDescriptor, val interface{}, allowNilMessage bool) (interface{}, error) { + return validElementFieldValueForRv(fd, reflect.ValueOf(val), allowNilMessage) +} + +func validElementFieldValueForRv(fd *desc.FieldDescriptor, val reflect.Value, allowNilMessage bool) (interface{}, error) { + t := fd.GetType() + if !val.IsValid() { + return nil, typeError(fd, nil) + } + + switch t { + case descriptorpb.FieldDescriptorProto_TYPE_SFIXED32, + descriptorpb.FieldDescriptorProto_TYPE_INT32, + descriptorpb.FieldDescriptorProto_TYPE_SINT32, + descriptorpb.FieldDescriptorProto_TYPE_ENUM: + return toInt32(reflect.Indirect(val), fd) + + case descriptorpb.FieldDescriptorProto_TYPE_SFIXED64, + descriptorpb.FieldDescriptorProto_TYPE_INT64, + descriptorpb.FieldDescriptorProto_TYPE_SINT64: + return toInt64(reflect.Indirect(val), fd) + + case descriptorpb.FieldDescriptorProto_TYPE_FIXED32, + descriptorpb.FieldDescriptorProto_TYPE_UINT32: + return toUint32(reflect.Indirect(val), fd) + + case descriptorpb.FieldDescriptorProto_TYPE_FIXED64, + descriptorpb.FieldDescriptorProto_TYPE_UINT64: + return toUint64(reflect.Indirect(val), fd) + + case descriptorpb.FieldDescriptorProto_TYPE_FLOAT: + return toFloat32(reflect.Indirect(val), fd) + + case 
descriptorpb.FieldDescriptorProto_TYPE_DOUBLE: + return toFloat64(reflect.Indirect(val), fd) + + case descriptorpb.FieldDescriptorProto_TYPE_BOOL: + return toBool(reflect.Indirect(val), fd) + + case descriptorpb.FieldDescriptorProto_TYPE_BYTES: + return toBytes(reflect.Indirect(val), fd) + + case descriptorpb.FieldDescriptorProto_TYPE_STRING: + return toString(reflect.Indirect(val), fd) + + case descriptorpb.FieldDescriptorProto_TYPE_MESSAGE, + descriptorpb.FieldDescriptorProto_TYPE_GROUP: + m, err := asMessage(val, fd.GetFullyQualifiedName()) + // check that message is correct type + if err != nil { + return nil, err + } + var msgType string + if dm, ok := m.(*Message); ok { + if allowNilMessage && dm == nil { + // if dm == nil, we'll panic below, so early out if that is allowed + // (only allowed for map values, to indicate an entry w/ no value) + return m, nil + } + msgType = dm.GetMessageDescriptor().GetFullyQualifiedName() + } else { + msgType = proto.MessageName(m) + } + if msgType != fd.GetMessageType().GetFullyQualifiedName() { + return nil, fmt.Errorf("message field %s requires value of type %s; received %s", fd.GetFullyQualifiedName(), fd.GetMessageType().GetFullyQualifiedName(), msgType) + } + return m, nil + + default: + return nil, fmt.Errorf("unable to handle unrecognized field type: %v", fd.GetType()) + } +} + +func toInt32(v reflect.Value, fd *desc.FieldDescriptor) (int32, error) { + if v.Kind() == reflect.Int32 { + return int32(v.Int()), nil + } + return 0, typeError(fd, v.Type()) +} + +func toUint32(v reflect.Value, fd *desc.FieldDescriptor) (uint32, error) { + if v.Kind() == reflect.Uint32 { + return uint32(v.Uint()), nil + } + return 0, typeError(fd, v.Type()) +} + +func toFloat32(v reflect.Value, fd *desc.FieldDescriptor) (float32, error) { + if v.Kind() == reflect.Float32 { + return float32(v.Float()), nil + } + return 0, typeError(fd, v.Type()) +} + +func toInt64(v reflect.Value, fd *desc.FieldDescriptor) (int64, error) { + if v.Kind() == reflect.Int64 || v.Kind() == reflect.Int || v.Kind() == reflect.Int32 { + return v.Int(), nil + } + return 0, typeError(fd, v.Type()) +} + +func toUint64(v reflect.Value, fd *desc.FieldDescriptor) (uint64, error) { + if v.Kind() == reflect.Uint64 || v.Kind() == reflect.Uint || v.Kind() == reflect.Uint32 { + return v.Uint(), nil + } + return 0, typeError(fd, v.Type()) +} + +func toFloat64(v reflect.Value, fd *desc.FieldDescriptor) (float64, error) { + if v.Kind() == reflect.Float64 || v.Kind() == reflect.Float32 { + return v.Float(), nil + } + return 0, typeError(fd, v.Type()) +} + +func toBool(v reflect.Value, fd *desc.FieldDescriptor) (bool, error) { + if v.Kind() == reflect.Bool { + return v.Bool(), nil + } + return false, typeError(fd, v.Type()) +} + +func toBytes(v reflect.Value, fd *desc.FieldDescriptor) ([]byte, error) { + if v.Kind() == reflect.Slice && v.Type().Elem().Kind() == reflect.Uint8 { + return v.Bytes(), nil + } + return nil, typeError(fd, v.Type()) +} + +func toString(v reflect.Value, fd *desc.FieldDescriptor) (string, error) { + if v.Kind() == reflect.String { + return v.String(), nil + } + return "", typeError(fd, v.Type()) +} + +func typeError(fd *desc.FieldDescriptor, t reflect.Type) error { + return fmt.Errorf( + "%s field %s is not compatible with value of type %v", + getTypeString(fd), fd.GetFullyQualifiedName(), t) +} + +func getTypeString(fd *desc.FieldDescriptor) string { + return strings.ToLower(fd.GetType().String()) +} + +func asMessage(v reflect.Value, fieldName string) (proto.Message, error) { + t := 
v.Type() + // we need a pointer to a struct that implements proto.Message + if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct || !t.Implements(typeOfProtoMessage) { + return nil, fmt.Errorf("message field %s requires is not compatible with value of type %v", fieldName, v.Type()) + } + return v.Interface().(proto.Message), nil +} + +// Reset resets this message to an empty message. It removes all values set in +// the message. +func (m *Message) Reset() { + for k := range m.values { + delete(m.values, k) + } + for k := range m.unknownFields { + delete(m.unknownFields, k) + } +} + +// String returns this message rendered in compact text format. +func (m *Message) String() string { + b, err := m.MarshalText() + if err != nil { + panic(fmt.Sprintf("Failed to create string representation of message: %s", err.Error())) + } + return string(b) +} + +// ProtoMessage is present to satisfy the proto.Message interface. +func (m *Message) ProtoMessage() { +} + +// ConvertTo converts this dynamic message into the given message. This is +// shorthand for resetting then merging: +// +// target.Reset() +// m.MergeInto(target) +func (m *Message) ConvertTo(target proto.Message) error { + if err := m.checkType(target); err != nil { + return err + } + + target.Reset() + return m.mergeInto(target, defaultDeterminism) +} + +// ConvertToDeterministic converts this dynamic message into the given message. +// It is just like ConvertTo, but it attempts to produce deterministic results. +// That means that if the target is a generated message (not another dynamic +// message) and the current runtime is unaware of any fields or extensions that +// are present in m, they will be serialized into the target's unrecognized +// fields deterministically. +func (m *Message) ConvertToDeterministic(target proto.Message) error { + if err := m.checkType(target); err != nil { + return err + } + + target.Reset() + return m.mergeInto(target, true) +} + +// ConvertFrom converts the given message into this dynamic message. This is +// shorthand for resetting then merging: +// +// m.Reset() +// m.MergeFrom(target) +func (m *Message) ConvertFrom(target proto.Message) error { + if err := m.checkType(target); err != nil { + return err + } + + m.Reset() + return m.mergeFrom(target) +} + +// MergeInto merges this dynamic message into the given message. All field +// values in this message will be set on the given message. For map fields, +// entries are added to the given message (if the given message has existing +// values for like keys, they are overwritten). For slice fields, elements are +// added. +// +// If the given message has a different set of known fields, it is possible for +// some known fields in this message to be represented as unknown fields in the +// given message after merging, and vice versa. +func (m *Message) MergeInto(target proto.Message) error { + if err := m.checkType(target); err != nil { + return err + } + return m.mergeInto(target, defaultDeterminism) +} + +// MergeIntoDeterministic merges this dynamic message into the given message. +// It is just like MergeInto, but it attempts to produce deterministic results. +// That means that if the target is a generated message (not another dynamic +// message) and the current runtime is unaware of any fields or extensions that +// are present in m, they will be serialized into the target's unrecognized +// fields deterministically. 
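+//
+// Rough sketch (assumes a generated type *foopb.Foo whose descriptor matches
+// m; that type is not part of this package):
+//
+//    target := &foopb.Foo{}
+//    if err := m.MergeIntoDeterministic(target); err != nil {
+//        return err
+//    }
+//    // fields m knows about that target's runtime does not are now stored
+//    // deterministically in target's unrecognized bytes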
+func (m *Message) MergeIntoDeterministic(target proto.Message) error { + if err := m.checkType(target); err != nil { + return err + } + return m.mergeInto(target, true) +} + +// MergeFrom merges the given message into this dynamic message. All field +// values in the given message will be set on this message. For map fields, +// entries are added to this message (if this message has existing values for +// like keys, they are overwritten). For slice fields, elements are added. +// +// If the given message has a different set of known fields, it is possible for +// some known fields in that message to be represented as unknown fields in this +// message after merging, and vice versa. +func (m *Message) MergeFrom(source proto.Message) error { + if err := m.checkType(source); err != nil { + return err + } + return m.mergeFrom(source) +} + +// Merge implements the proto.Merger interface so that dynamic messages are +// compatible with the proto.Merge function. It delegates to MergeFrom but will +// panic on error as the proto.Merger interface doesn't allow for returning an +// error. +// +// Unlike nearly all other methods, this method can work if this message's type +// is not defined (such as instantiating the message without using NewMessage). +// This is strictly so that dynamic message's are compatible with the +// proto.Clone function, which instantiates a new message via reflection (thus +// its message descriptor will not be set) and than calls Merge. +func (m *Message) Merge(source proto.Message) { + if m.md == nil { + // To support proto.Clone, initialize the descriptor from the source. + if dm, ok := source.(*Message); ok { + m.md = dm.md + // also make sure the clone uses the same message factory and + // extensions and also knows about the same extra fields (if any) + m.mf = dm.mf + m.er = dm.er + m.extraFields = dm.extraFields + } else if md, err := desc.LoadMessageDescriptorForMessage(source); err != nil { + panic(err.Error()) + } else { + m.md = md + } + } + + if err := m.MergeFrom(source); err != nil { + panic(err.Error()) + } +} + +func (m *Message) checkType(target proto.Message) error { + if dm, ok := target.(*Message); ok { + if dm.md.GetFullyQualifiedName() != m.md.GetFullyQualifiedName() { + return fmt.Errorf("given message has wrong type: %q; expecting %q", dm.md.GetFullyQualifiedName(), m.md.GetFullyQualifiedName()) + } + return nil + } + + msgName := proto.MessageName(target) + if msgName != m.md.GetFullyQualifiedName() { + return fmt.Errorf("given message has wrong type: %q; expecting %q", msgName, m.md.GetFullyQualifiedName()) + } + return nil +} + +func (m *Message) mergeInto(pm proto.Message, deterministic bool) error { + if dm, ok := pm.(*Message); ok { + return dm.mergeFrom(m) + } + + target := reflect.ValueOf(pm) + if target.Kind() == reflect.Ptr { + target = target.Elem() + } + + // track tags for which the dynamic message has data but the given + // message doesn't know about it + unknownTags := map[int32]struct{}{} + for tag := range m.values { + unknownTags[tag] = struct{}{} + } + + // check that we can successfully do the merge + structProps := proto.GetProperties(reflect.TypeOf(pm).Elem()) + for _, prop := range structProps.Prop { + if prop.Tag == 0 { + continue // one-of or special field (such as XXX_unrecognized, etc.) 
+ } + tag := int32(prop.Tag) + v, ok := m.values[tag] + if !ok { + continue + } + if unknownTags != nil { + delete(unknownTags, tag) + } + f := target.FieldByName(prop.Name) + ft := f.Type() + val := reflect.ValueOf(v) + if !canConvert(val, ft) { + return fmt.Errorf("cannot convert %v to %v", val.Type(), ft) + } + } + // check one-of fields + for _, oop := range structProps.OneofTypes { + prop := oop.Prop + tag := int32(prop.Tag) + v, ok := m.values[tag] + if !ok { + continue + } + if unknownTags != nil { + delete(unknownTags, tag) + } + stf, ok := oop.Type.Elem().FieldByName(prop.Name) + if !ok { + return fmt.Errorf("one-of field indicates struct field name %s, but type %v has no such field", prop.Name, oop.Type.Elem()) + } + ft := stf.Type + val := reflect.ValueOf(v) + if !canConvert(val, ft) { + return fmt.Errorf("cannot convert %v to %v", val.Type(), ft) + } + } + // and check extensions, too + for tag, ext := range proto.RegisteredExtensions(pm) { + v, ok := m.values[tag] + if !ok { + continue + } + if unknownTags != nil { + delete(unknownTags, tag) + } + ft := reflect.TypeOf(ext.ExtensionType) + val := reflect.ValueOf(v) + if !canConvert(val, ft) { + return fmt.Errorf("cannot convert %v to %v", val.Type(), ft) + } + } + + // now actually perform the merge + for _, prop := range structProps.Prop { + v, ok := m.values[int32(prop.Tag)] + if !ok { + continue + } + f := target.FieldByName(prop.Name) + if err := mergeVal(reflect.ValueOf(v), f, deterministic); err != nil { + return err + } + } + // merge one-ofs + for _, oop := range structProps.OneofTypes { + prop := oop.Prop + tag := int32(prop.Tag) + v, ok := m.values[tag] + if !ok { + continue + } + oov := reflect.New(oop.Type.Elem()) + f := oov.Elem().FieldByName(prop.Name) + if err := mergeVal(reflect.ValueOf(v), f, deterministic); err != nil { + return err + } + target.Field(oop.Field).Set(oov) + } + // merge extensions, too + for tag, ext := range proto.RegisteredExtensions(pm) { + v, ok := m.values[tag] + if !ok { + continue + } + e := reflect.New(reflect.TypeOf(ext.ExtensionType)).Elem() + if err := mergeVal(reflect.ValueOf(v), e, deterministic); err != nil { + return err + } + if err := proto.SetExtension(pm, ext, e.Interface()); err != nil { + // shouldn't happen since we already checked that the extension type was compatible above + return err + } + } + + // if we have fields that the given message doesn't know about, add to its unknown fields + if len(unknownTags) > 0 { + var b codec.Buffer + b.SetDeterministic(deterministic) + if deterministic { + // if we need to emit things deterministically, sort the + // extensions by their tag number + sortedUnknownTags := make([]int32, 0, len(unknownTags)) + for tag := range unknownTags { + sortedUnknownTags = append(sortedUnknownTags, tag) + } + sort.Slice(sortedUnknownTags, func(i, j int) bool { + return sortedUnknownTags[i] < sortedUnknownTags[j] + }) + for _, tag := range sortedUnknownTags { + fd := m.FindFieldDescriptor(tag) + if err := b.EncodeFieldValue(fd, m.values[tag]); err != nil { + return err + } + } + } else { + for tag := range unknownTags { + fd := m.FindFieldDescriptor(tag) + if err := b.EncodeFieldValue(fd, m.values[tag]); err != nil { + return err + } + } + } + + internal.SetUnrecognized(pm, b.Bytes()) + } + + // finally, convey unknown fields into the given message by letting it unmarshal them + // (this will append to its unknown fields if not known; if somehow the given message recognizes + // a field even though the dynamic message did not, it will get correctly 
unmarshalled) + if unknownTags != nil && len(m.unknownFields) > 0 { + var b codec.Buffer + _ = m.marshalUnknownFields(&b) + _ = proto.UnmarshalMerge(b.Bytes(), pm) + } + + return nil +} + +func canConvert(src reflect.Value, target reflect.Type) bool { + if src.Kind() == reflect.Interface { + src = reflect.ValueOf(src.Interface()) + } + srcType := src.Type() + // we allow convertible types instead of requiring exact types so that calling + // code can, for example, assign an enum constant to an enum field. In that case, + // one type is the enum type (a sub-type of int32) and the other may be the int32 + // type. So we automatically do the conversion in that case. + if srcType.ConvertibleTo(target) { + return true + } else if target.Kind() == reflect.Ptr && srcType.ConvertibleTo(target.Elem()) { + return true + } else if target.Kind() == reflect.Slice { + if srcType.Kind() != reflect.Slice { + return false + } + et := target.Elem() + for i := 0; i < src.Len(); i++ { + if !canConvert(src.Index(i), et) { + return false + } + } + return true + } else if target.Kind() == reflect.Map { + if srcType.Kind() != reflect.Map { + return false + } + return canConvertMap(src, target) + } else if srcType == typeOfDynamicMessage && target.Implements(typeOfProtoMessage) { + z := reflect.Zero(target).Interface() + msgType := proto.MessageName(z.(proto.Message)) + return msgType == src.Interface().(*Message).GetMessageDescriptor().GetFullyQualifiedName() + } else { + return false + } +} + +func mergeVal(src, target reflect.Value, deterministic bool) error { + if src.Kind() == reflect.Interface && !src.IsNil() { + src = src.Elem() + } + srcType := src.Type() + targetType := target.Type() + if srcType.ConvertibleTo(targetType) { + if targetType.Implements(typeOfProtoMessage) && !target.IsNil() { + Merge(target.Interface().(proto.Message), src.Convert(targetType).Interface().(proto.Message)) + } else { + target.Set(src.Convert(targetType)) + } + } else if targetType.Kind() == reflect.Ptr && srcType.ConvertibleTo(targetType.Elem()) { + if !src.CanAddr() { + target.Set(reflect.New(targetType.Elem())) + target.Elem().Set(src.Convert(targetType.Elem())) + } else { + target.Set(src.Addr().Convert(targetType)) + } + } else if targetType.Kind() == reflect.Slice { + l := target.Len() + newL := l + src.Len() + if target.Cap() < newL { + // expand capacity of the slice and copy + newSl := reflect.MakeSlice(targetType, newL, newL) + for i := 0; i < target.Len(); i++ { + newSl.Index(i).Set(target.Index(i)) + } + target.Set(newSl) + } else { + target.SetLen(newL) + } + for i := 0; i < src.Len(); i++ { + dest := target.Index(l + i) + if dest.Kind() == reflect.Ptr { + dest.Set(reflect.New(dest.Type().Elem())) + } + if err := mergeVal(src.Index(i), dest, deterministic); err != nil { + return err + } + } + } else if targetType.Kind() == reflect.Map { + return mergeMapVal(src, target, targetType, deterministic) + } else if srcType == typeOfDynamicMessage && targetType.Implements(typeOfProtoMessage) { + dm := src.Interface().(*Message) + if target.IsNil() { + target.Set(reflect.New(targetType.Elem())) + } + m := target.Interface().(proto.Message) + if err := dm.mergeInto(m, deterministic); err != nil { + return err + } + } else { + return fmt.Errorf("cannot convert %v to %v", srcType, targetType) + } + return nil +} + +func (m *Message) mergeFrom(pm proto.Message) error { + if dm, ok := pm.(*Message); ok { + // if given message is also a dynamic message, we merge differently + for tag, v := range dm.values { + fd := 
m.FindFieldDescriptor(tag) + if fd == nil { + fd = dm.FindFieldDescriptor(tag) + } + if err := mergeField(m, fd, v); err != nil { + return err + } + } + return nil + } + + pmrv := reflect.ValueOf(pm) + if pmrv.IsNil() { + // nil is an empty message, so nothing to do + return nil + } + + // check that we can successfully do the merge + src := pmrv.Elem() + values := map[*desc.FieldDescriptor]interface{}{} + props := proto.GetProperties(reflect.TypeOf(pm).Elem()) + if props == nil { + return fmt.Errorf("could not determine message properties to merge for %v", reflect.TypeOf(pm).Elem()) + } + + // regular fields + for _, prop := range props.Prop { + if prop.Tag == 0 { + continue // one-of or special field (such as XXX_unrecognized, etc.) + } + fd := m.FindFieldDescriptor(int32(prop.Tag)) + if fd == nil { + // Our descriptor has different fields than this message object. So + // try to reflect on the message object's fields. + md, err := desc.LoadMessageDescriptorForMessage(pm) + if err != nil { + return err + } + fd = md.FindFieldByNumber(int32(prop.Tag)) + if fd == nil { + return fmt.Errorf("message descriptor %q did not contain field for tag %d (%q)", md.GetFullyQualifiedName(), prop.Tag, prop.Name) + } + } + rv := src.FieldByName(prop.Name) + if (rv.Kind() == reflect.Ptr || rv.Kind() == reflect.Slice) && rv.IsNil() { + continue + } + if v, err := validFieldValueForRv(fd, rv); err != nil { + return err + } else { + values[fd] = v + } + } + + // one-of fields + for _, oop := range props.OneofTypes { + oov := src.Field(oop.Field).Elem() + if !oov.IsValid() || oov.Type() != oop.Type { + // this field is unset (in other words, one-of message field is not currently set to this option) + continue + } + prop := oop.Prop + rv := oov.Elem().FieldByName(prop.Name) + fd := m.FindFieldDescriptor(int32(prop.Tag)) + if fd == nil { + // Our descriptor has different fields than this message object. So + // try to reflect on the message object's fields. + md, err := desc.LoadMessageDescriptorForMessage(pm) + if err != nil { + return err + } + fd = md.FindFieldByNumber(int32(prop.Tag)) + if fd == nil { + return fmt.Errorf("message descriptor %q did not contain field for tag %d (%q in one-of %q)", md.GetFullyQualifiedName(), prop.Tag, prop.Name, src.Type().Field(oop.Field).Name) + } + } + if v, err := validFieldValueForRv(fd, rv); err != nil { + return err + } else { + values[fd] = v + } + } + + // extension fields + rexts, _ := proto.ExtensionDescs(pm) + for _, ed := range rexts { + v, _ := proto.GetExtension(pm, ed) + if v == nil { + continue + } + if ed.ExtensionType == nil { + // unrecognized extension: we'll handle that below when we + // handle other unrecognized fields + continue + } + fd := m.er.FindExtension(m.md.GetFullyQualifiedName(), ed.Field) + if fd == nil { + var err error + if fd, err = desc.LoadFieldDescriptorForExtension(ed); err != nil { + return err + } + } + if v, err := validFieldValue(fd, v); err != nil { + return err + } else { + values[fd] = v + } + } + + // With API v2, it is possible that the new protoreflect interfaces + // were used to store an extension, which means it can't be returned + // by proto.ExtensionDescs and it's also not in the unrecognized data. + // So we have a separate loop to trawl through it... + var err error + proto.MessageReflect(pm).Range(func(fld protoreflect.FieldDescriptor, val protoreflect.Value) bool { + if !fld.IsExtension() { + // normal field... 
we already got it above + return true + } + xt := fld.(protoreflect.ExtensionTypeDescriptor) + if _, ok := xt.Type().(*proto.ExtensionDesc); ok { + // known extension... we already got it above + return true + } + var fd *desc.FieldDescriptor + fd, err = desc.WrapField(fld) + if err != nil { + return false + } + v := convertProtoReflectValue(val) + if v, err = validFieldValue(fd, v); err != nil { + return false + } + values[fd] = v + return true + }) + if err != nil { + return err + } + + // unrecognized extensions fields: + // In API v2 of proto, some extensions may NEITHER be included in ExtensionDescs + // above NOR included in unrecognized fields below. These are extensions that use + // a custom extension type (not a generated one -- i.e. not a linked in extension). + mr := proto.MessageReflect(pm) + var extBytes []byte + var retErr error + mr.Range(func(fld protoreflect.FieldDescriptor, val protoreflect.Value) bool { + if !fld.IsExtension() { + // normal field, already processed above + return true + } + if extd, ok := fld.(protoreflect.ExtensionTypeDescriptor); ok { + if _, ok := extd.Type().(*proto.ExtensionDesc); ok { + // normal known extension, already processed above + return true + } + } + + // marshal the extension to bytes and then handle as unknown field below + mr.New() + mr.Set(fld, val) + extBytes, retErr = protov2.MarshalOptions{}.MarshalAppend(extBytes, mr.Interface()) + return retErr == nil + }) + if retErr != nil { + return retErr + } + + // now actually perform the merge + for fd, v := range values { + if err := mergeField(m, fd, v); err != nil { + return err + } + } + + if len(extBytes) > 0 { + // treating unrecognized extensions like unknown fields: best-effort + // ignore any error returned: pulling in unknown fields is best-effort + _ = m.UnmarshalMerge(extBytes) + } + + data := internal.GetUnrecognized(pm) + if len(data) > 0 { + // ignore any error returned: pulling in unknown fields is best-effort + _ = m.UnmarshalMerge(data) + } + + return nil +} + +func convertProtoReflectValue(v protoreflect.Value) interface{} { + val := v.Interface() + switch val := val.(type) { + case protoreflect.Message: + return val.Interface() + case protoreflect.Map: + mp := make(map[interface{}]interface{}, val.Len()) + val.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { + mp[convertProtoReflectValue(k.Value())] = convertProtoReflectValue(v) + return true + }) + return mp + case protoreflect.List: + sl := make([]interface{}, val.Len()) + for i := 0; i < val.Len(); i++ { + sl[i] = convertProtoReflectValue(val.Get(i)) + } + return sl + case protoreflect.EnumNumber: + return int32(val) + default: + return val + } +} + +// Validate checks that all required fields are present. It returns an error if any are absent. +func (m *Message) Validate() error { + missingFields := m.findMissingFields() + if len(missingFields) == 0 { + return nil + } + return fmt.Errorf("some required fields missing: %v", strings.Join(missingFields, ", ")) +} + +func (m *Message) findMissingFields() []string { + if m.md.IsProto3() { + // proto3 does not allow required fields + return nil + } + var missingFields []string + for _, fd := range m.md.GetFields() { + if fd.IsRequired() { + if _, ok := m.values[fd.GetNumber()]; !ok { + missingFields = append(missingFields, fd.GetName()) + } + } + } + return missingFields +} + +// ValidateRecursive checks that all required fields are present and also +// recursively validates all fields who are also messages. 
It returns an error +// if any required fields, in this message or nested within, are absent. +func (m *Message) ValidateRecursive() error { + return m.validateRecursive("") +} + +func (m *Message) validateRecursive(prefix string) error { + if missingFields := m.findMissingFields(); len(missingFields) > 0 { + for i := range missingFields { + missingFields[i] = fmt.Sprintf("%s%s", prefix, missingFields[i]) + } + return fmt.Errorf("some required fields missing: %v", strings.Join(missingFields, ", ")) + } + + for tag, fld := range m.values { + fd := m.FindFieldDescriptor(tag) + var chprefix string + var md *desc.MessageDescriptor + checkMsg := func(pm proto.Message) error { + var dm *Message + if d, ok := pm.(*Message); ok { + dm = d + } else if pm != nil { + dm = m.mf.NewDynamicMessage(md) + if err := dm.ConvertFrom(pm); err != nil { + return nil + } + } + if dm == nil { + return nil + } + if err := dm.validateRecursive(chprefix); err != nil { + return err + } + return nil + } + isMap := fd.IsMap() + if isMap && fd.GetMapValueType().GetMessageType() != nil { + md = fd.GetMapValueType().GetMessageType() + mp := fld.(map[interface{}]interface{}) + for k, v := range mp { + chprefix = fmt.Sprintf("%s%s[%v].", prefix, getName(fd), k) + if err := checkMsg(v.(proto.Message)); err != nil { + return err + } + } + } else if !isMap && fd.GetMessageType() != nil { + md = fd.GetMessageType() + if fd.IsRepeated() { + sl := fld.([]interface{}) + for i, v := range sl { + chprefix = fmt.Sprintf("%s%s[%d].", prefix, getName(fd), i) + if err := checkMsg(v.(proto.Message)); err != nil { + return err + } + } + } else { + chprefix = fmt.Sprintf("%s%s.", prefix, getName(fd)) + if err := checkMsg(fld.(proto.Message)); err != nil { + return err + } + } + } + } + + return nil +} + +func getName(fd *desc.FieldDescriptor) string { + if fd.IsExtension() { + return fmt.Sprintf("(%s)", fd.GetFullyQualifiedName()) + } else { + return fd.GetName() + } +} + +// knownFieldTags return tags of present and recognized fields, in sorted order. +func (m *Message) knownFieldTags() []int { + if len(m.values) == 0 { + return []int(nil) + } + + keys := make([]int, len(m.values)) + i := 0 + for k := range m.values { + keys[i] = int(k) + i++ + } + + sort.Ints(keys) + return keys +} + +// allKnownFieldTags return tags of present and recognized fields, including +// those that are unset, in sorted order. This only includes extensions that are +// present. Known but not-present extensions are not included in the returned +// set of tags. +func (m *Message) allKnownFieldTags() []int { + fds := m.md.GetFields() + keys := make([]int, 0, len(fds)+len(m.extraFields)) + + for k := range m.values { + keys = append(keys, int(k)) + } + + // also include known fields that are not present + for _, fd := range fds { + if _, ok := m.values[fd.GetNumber()]; !ok { + keys = append(keys, int(fd.GetNumber())) + } + } + for _, fd := range m.extraFields { + if !fd.IsExtension() { // skip extensions that are not present + if _, ok := m.values[fd.GetNumber()]; !ok { + keys = append(keys, int(fd.GetNumber())) + } + } + } + + sort.Ints(keys) + return keys +} + +// unknownFieldTags return tags of present but unrecognized fields, in sorted order. 
+func (m *Message) unknownFieldTags() []int { + if len(m.unknownFields) == 0 { + return []int(nil) + } + keys := make([]int, len(m.unknownFields)) + i := 0 + for k := range m.unknownFields { + keys[i] = int(k) + i++ + } + sort.Ints(keys) + return keys +} diff --git a/vendor/github.com/jhump/protoreflect/dynamic/equal.go b/vendor/github.com/jhump/protoreflect/dynamic/equal.go new file mode 100644 index 00000000..e44c6c53 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/dynamic/equal.go @@ -0,0 +1,157 @@ +package dynamic + +import ( + "bytes" + "reflect" + + "github.com/golang/protobuf/proto" + + "github.com/jhump/protoreflect/desc" +) + +// Equal returns true if the given two dynamic messages are equal. Two messages are equal when they +// have the same message type and same fields set to equal values. For proto3 messages, fields set +// to their zero value are considered unset. +func Equal(a, b *Message) bool { + if a == b { + return true + } + if (a == nil) != (b == nil) { + return false + } + if a.md.GetFullyQualifiedName() != b.md.GetFullyQualifiedName() { + return false + } + if len(a.values) != len(b.values) { + return false + } + if len(a.unknownFields) != len(b.unknownFields) { + return false + } + for tag, aval := range a.values { + bval, ok := b.values[tag] + if !ok { + return false + } + if !fieldsEqual(aval, bval) { + return false + } + } + for tag, au := range a.unknownFields { + bu, ok := b.unknownFields[tag] + if !ok { + return false + } + if len(au) != len(bu) { + return false + } + for i, aval := range au { + bval := bu[i] + if aval.Encoding != bval.Encoding { + return false + } + if aval.Encoding == proto.WireBytes || aval.Encoding == proto.WireStartGroup { + if !bytes.Equal(aval.Contents, bval.Contents) { + return false + } + } else if aval.Value != bval.Value { + return false + } + } + } + // all checks pass! + return true +} + +func fieldsEqual(aval, bval interface{}) bool { + arv := reflect.ValueOf(aval) + brv := reflect.ValueOf(bval) + if arv.Type() != brv.Type() { + // it is possible that one is a dynamic message and one is not + apm, ok := aval.(proto.Message) + if !ok { + return false + } + bpm, ok := bval.(proto.Message) + if !ok { + return false + } + return MessagesEqual(apm, bpm) + + } else { + switch arv.Kind() { + case reflect.Ptr: + apm, ok := aval.(proto.Message) + if !ok { + // Don't know how to compare pointer values that aren't messages! + // Maybe this should panic? + return false + } + bpm := bval.(proto.Message) // we know it will succeed because we know a and b have same type + return MessagesEqual(apm, bpm) + + case reflect.Map: + return mapsEqual(arv, brv) + + case reflect.Slice: + if arv.Type() == typeOfBytes { + return bytes.Equal(aval.([]byte), bval.([]byte)) + } else { + return slicesEqual(arv, brv) + } + + default: + return aval == bval + } + } +} + +func slicesEqual(a, b reflect.Value) bool { + if a.Len() != b.Len() { + return false + } + for i := 0; i < a.Len(); i++ { + ai := a.Index(i) + bi := b.Index(i) + if !fieldsEqual(ai.Interface(), bi.Interface()) { + return false + } + } + return true +} + +// MessagesEqual returns true if the given two messages are equal. Use this instead of proto.Equal +// when one or both of the messages might be a dynamic message. 
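+//
+// Rough sketch (the generated type *foopb.Foo is an assumed example):
+//
+//    gen := &foopb.Foo{Name: "x"}
+//    md, _ := desc.LoadMessageDescriptorForMessage(gen)
+//    dyn := NewMessageWithMessageFactory(md, nil)
+//    _ = dyn.ConvertFrom(gen)
+//    equal := MessagesEqual(gen, dyn) // expected to be true after a successful conversion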
+func MessagesEqual(a, b proto.Message) bool { + da, aok := a.(*Message) + db, bok := b.(*Message) + // Both dynamic messages + if aok && bok { + return Equal(da, db) + } + // Neither dynamic messages + if !aok && !bok { + return proto.Equal(a, b) + } + // Mixed + if bok { + // we want a to be the dynamic one + b, da = a, db + } + + // Instead of panic'ing below if we have a nil dynamic message, check + // now and return false if the input message is not also nil. + if da == nil { + return isNil(b) + } + + md, err := desc.LoadMessageDescriptorForMessage(b) + if err != nil { + return false + } + db = NewMessageWithMessageFactory(md, da.mf) + if db.ConvertFrom(b) != nil { + return false + } + return Equal(da, db) +} diff --git a/vendor/github.com/jhump/protoreflect/dynamic/extension.go b/vendor/github.com/jhump/protoreflect/dynamic/extension.go new file mode 100644 index 00000000..1d381610 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/dynamic/extension.go @@ -0,0 +1,46 @@ +package dynamic + +import ( + "fmt" + + "github.com/golang/protobuf/proto" + + "github.com/jhump/protoreflect/codec" + "github.com/jhump/protoreflect/desc" +) + +// SetExtension sets the given extension value. If the given message is not a +// dynamic message, the given extension may not be recognized (or may differ +// from the compiled and linked in version of the extension. So in that case, +// this function will serialize the given value to bytes and then use +// proto.SetRawExtension to set the value. +func SetExtension(msg proto.Message, extd *desc.FieldDescriptor, val interface{}) error { + if !extd.IsExtension() { + return fmt.Errorf("given field %s is not an extension", extd.GetFullyQualifiedName()) + } + + if dm, ok := msg.(*Message); ok { + return dm.TrySetField(extd, val) + } + + md, err := desc.LoadMessageDescriptorForMessage(msg) + if err != nil { + return err + } + if err := checkField(extd, md); err != nil { + return err + } + + val, err = validFieldValue(extd, val) + if err != nil { + return err + } + + var b codec.Buffer + b.SetDeterministic(defaultDeterminism) + if err := b.EncodeFieldValue(extd, val); err != nil { + return err + } + proto.SetRawExtension(msg, extd.GetNumber(), b.Bytes()) + return nil +} diff --git a/vendor/github.com/jhump/protoreflect/dynamic/extension_registry.go b/vendor/github.com/jhump/protoreflect/dynamic/extension_registry.go new file mode 100644 index 00000000..68768278 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/dynamic/extension_registry.go @@ -0,0 +1,241 @@ +package dynamic + +import ( + "fmt" + "reflect" + "sync" + + "github.com/golang/protobuf/proto" + + "github.com/jhump/protoreflect/desc" +) + +// ExtensionRegistry is a registry of known extension fields. This is used to parse +// extension fields encountered when de-serializing a dynamic message. +type ExtensionRegistry struct { + includeDefault bool + mu sync.RWMutex + exts map[string]map[int32]*desc.FieldDescriptor +} + +// NewExtensionRegistryWithDefaults is a registry that includes all "default" extensions, +// which are those that are statically linked into the current program (e.g. registered by +// protoc-generated code via proto.RegisterExtension). Extensions explicitly added to the +// registry will override any default extensions that are for the same extendee and have the +// same tag number and/or name. +func NewExtensionRegistryWithDefaults() *ExtensionRegistry { + return &ExtensionRegistry{includeDefault: true} +} + +// AddExtensionDesc adds the given extensions to the registry. 
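+//
+// Sketch (foopb.E_MyExt stands in for a generated *proto.ExtensionDesc and is
+// only an assumed example):
+//
+//    var er ExtensionRegistry
+//    if err := er.AddExtensionDesc(foopb.E_MyExt); err != nil {
+//        return err
+//    }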
+func (r *ExtensionRegistry) AddExtensionDesc(exts ...*proto.ExtensionDesc) error { + flds := make([]*desc.FieldDescriptor, len(exts)) + for i, ext := range exts { + fd, err := desc.LoadFieldDescriptorForExtension(ext) + if err != nil { + return err + } + flds[i] = fd + } + r.mu.Lock() + defer r.mu.Unlock() + if r.exts == nil { + r.exts = map[string]map[int32]*desc.FieldDescriptor{} + } + for _, fd := range flds { + r.putExtensionLocked(fd) + } + return nil +} + +// AddExtension adds the given extensions to the registry. The given extensions +// will overwrite any previously added extensions that are for the same extendee +// message and same extension tag number. +func (r *ExtensionRegistry) AddExtension(exts ...*desc.FieldDescriptor) error { + for _, ext := range exts { + if !ext.IsExtension() { + return fmt.Errorf("given field is not an extension: %s", ext.GetFullyQualifiedName()) + } + } + r.mu.Lock() + defer r.mu.Unlock() + if r.exts == nil { + r.exts = map[string]map[int32]*desc.FieldDescriptor{} + } + for _, ext := range exts { + r.putExtensionLocked(ext) + } + return nil +} + +// AddExtensionsFromFile adds to the registry all extension fields defined in the given file descriptor. +func (r *ExtensionRegistry) AddExtensionsFromFile(fd *desc.FileDescriptor) { + r.mu.Lock() + defer r.mu.Unlock() + r.addExtensionsFromFileLocked(fd, false, nil) +} + +// AddExtensionsFromFileRecursively adds to the registry all extension fields defined in the give file +// descriptor and also recursively adds all extensions defined in that file's dependencies. This adds +// extensions from the entire transitive closure for the given file. +func (r *ExtensionRegistry) AddExtensionsFromFileRecursively(fd *desc.FileDescriptor) { + r.mu.Lock() + defer r.mu.Unlock() + already := map[*desc.FileDescriptor]struct{}{} + r.addExtensionsFromFileLocked(fd, true, already) +} + +func (r *ExtensionRegistry) addExtensionsFromFileLocked(fd *desc.FileDescriptor, recursive bool, alreadySeen map[*desc.FileDescriptor]struct{}) { + if _, ok := alreadySeen[fd]; ok { + return + } + + if r.exts == nil { + r.exts = map[string]map[int32]*desc.FieldDescriptor{} + } + for _, ext := range fd.GetExtensions() { + r.putExtensionLocked(ext) + } + for _, msg := range fd.GetMessageTypes() { + r.addExtensionsFromMessageLocked(msg) + } + + if recursive { + alreadySeen[fd] = struct{}{} + for _, dep := range fd.GetDependencies() { + r.addExtensionsFromFileLocked(dep, recursive, alreadySeen) + } + } +} + +func (r *ExtensionRegistry) addExtensionsFromMessageLocked(md *desc.MessageDescriptor) { + for _, ext := range md.GetNestedExtensions() { + r.putExtensionLocked(ext) + } + for _, msg := range md.GetNestedMessageTypes() { + r.addExtensionsFromMessageLocked(msg) + } +} + +func (r *ExtensionRegistry) putExtensionLocked(fd *desc.FieldDescriptor) { + msgName := fd.GetOwner().GetFullyQualifiedName() + m := r.exts[msgName] + if m == nil { + m = map[int32]*desc.FieldDescriptor{} + r.exts[msgName] = m + } + m[fd.GetNumber()] = fd +} + +// FindExtension queries for the extension field with the given extendee name (must be a fully-qualified +// message name) and tag number. If no extension is known, nil is returned. 
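+//
+// For example (reg is an assumed, already-populated *ExtensionRegistry; the
+// message name and tag number are placeholders):
+//
+//	extFd := reg.FindExtension("my.pkg.MyMessage", 100)
+//	if extFd == nil {
+//		// the registry does not know this extension
+//	}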
+func (r *ExtensionRegistry) FindExtension(messageName string, tagNumber int32) *desc.FieldDescriptor { + if r == nil { + return nil + } + r.mu.RLock() + defer r.mu.RUnlock() + fd := r.exts[messageName][tagNumber] + if fd == nil && r.includeDefault { + ext := getDefaultExtensions(messageName)[tagNumber] + if ext != nil { + fd, _ = desc.LoadFieldDescriptorForExtension(ext) + } + } + return fd +} + +// FindExtensionByName queries for the extension field with the given extendee name (must be a fully-qualified +// message name) and field name (must also be a fully-qualified extension name). If no extension is known, nil +// is returned. +func (r *ExtensionRegistry) FindExtensionByName(messageName string, fieldName string) *desc.FieldDescriptor { + if r == nil { + return nil + } + r.mu.RLock() + defer r.mu.RUnlock() + for _, fd := range r.exts[messageName] { + if fd.GetFullyQualifiedName() == fieldName { + return fd + } + } + if r.includeDefault { + for _, ext := range getDefaultExtensions(messageName) { + fd, _ := desc.LoadFieldDescriptorForExtension(ext) + if fd.GetFullyQualifiedName() == fieldName { + return fd + } + } + } + return nil +} + +// FindExtensionByJSONName queries for the extension field with the given extendee name (must be a fully-qualified +// message name) and JSON field name (must also be a fully-qualified name). If no extension is known, nil is returned. +// The fully-qualified JSON name is the same as the extension's normal fully-qualified name except that the last +// component uses the field's JSON name (if present). +func (r *ExtensionRegistry) FindExtensionByJSONName(messageName string, fieldName string) *desc.FieldDescriptor { + if r == nil { + return nil + } + r.mu.RLock() + defer r.mu.RUnlock() + for _, fd := range r.exts[messageName] { + if fd.GetFullyQualifiedJSONName() == fieldName { + return fd + } + } + if r.includeDefault { + for _, ext := range getDefaultExtensions(messageName) { + fd, _ := desc.LoadFieldDescriptorForExtension(ext) + if fd.GetFullyQualifiedJSONName() == fieldName { + return fd + } + } + } + return nil +} + +func getDefaultExtensions(messageName string) map[int32]*proto.ExtensionDesc { + t := proto.MessageType(messageName) + if t != nil { + msg := reflect.Zero(t).Interface().(proto.Message) + return proto.RegisteredExtensions(msg) + } + return nil +} + +// AllExtensionsForType returns all known extension fields for the given extendee name (must be a +// fully-qualified message name). 
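+//
+// For example (reg and the message name are assumed placeholders):
+//
+//	for _, extFd := range reg.AllExtensionsForType("my.pkg.MyMessage") {
+//		fmt.Println(extFd.GetFullyQualifiedName())
+//	}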
+func (r *ExtensionRegistry) AllExtensionsForType(messageName string) []*desc.FieldDescriptor { + if r == nil { + return []*desc.FieldDescriptor(nil) + } + r.mu.RLock() + defer r.mu.RUnlock() + flds := r.exts[messageName] + var ret []*desc.FieldDescriptor + if r.includeDefault { + exts := getDefaultExtensions(messageName) + if len(exts) > 0 || len(flds) > 0 { + ret = make([]*desc.FieldDescriptor, 0, len(exts)+len(flds)) + } + for tag, ext := range exts { + if _, ok := flds[tag]; ok { + // skip default extension and use the one explicitly registered instead + continue + } + fd, _ := desc.LoadFieldDescriptorForExtension(ext) + if fd != nil { + ret = append(ret, fd) + } + } + } else if len(flds) > 0 { + ret = make([]*desc.FieldDescriptor, 0, len(flds)) + } + + for _, ext := range flds { + ret = append(ret, ext) + } + return ret +} diff --git a/vendor/github.com/jhump/protoreflect/dynamic/grpcdynamic/stub.go b/vendor/github.com/jhump/protoreflect/dynamic/grpcdynamic/stub.go new file mode 100644 index 00000000..6fca3937 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/dynamic/grpcdynamic/stub.go @@ -0,0 +1,310 @@ +// Package grpcdynamic provides a dynamic RPC stub. It can be used to invoke RPC +// method where only method descriptors are known. The actual request and response +// messages may be dynamic messages. +package grpcdynamic + +import ( + "context" + "fmt" + "io" + + "github.com/golang/protobuf/proto" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" + + "github.com/jhump/protoreflect/desc" + "github.com/jhump/protoreflect/dynamic" +) + +// Stub is an RPC client stub, used for dynamically dispatching RPCs to a server. +type Stub struct { + channel Channel + mf *dynamic.MessageFactory +} + +// Channel represents the operations necessary to issue RPCs via gRPC. The +// *grpc.ClientConn type provides this interface and will typically the concrete +// type used to construct Stubs. But the use of this interface allows +// construction of stubs that use alternate concrete types as the transport for +// RPC operations. +type Channel = grpc.ClientConnInterface + +// NewStub creates a new RPC stub that uses the given channel for dispatching RPCs. +func NewStub(channel Channel) Stub { + return NewStubWithMessageFactory(channel, nil) +} + +// NewStubWithMessageFactory creates a new RPC stub that uses the given channel for +// dispatching RPCs and the given MessageFactory for creating response messages. +func NewStubWithMessageFactory(channel Channel, mf *dynamic.MessageFactory) Stub { + return Stub{channel: channel, mf: mf} +} + +func requestMethod(md *desc.MethodDescriptor) string { + return fmt.Sprintf("/%s/%s", md.GetService().GetFullyQualifiedName(), md.GetName()) +} + +// InvokeRpc sends a unary RPC and returns the response. Use this for unary methods. +func (s Stub) InvokeRpc(ctx context.Context, method *desc.MethodDescriptor, request proto.Message, opts ...grpc.CallOption) (proto.Message, error) { + if method.IsClientStreaming() || method.IsServerStreaming() { + return nil, fmt.Errorf("InvokeRpc is for unary methods; %q is %s", method.GetFullyQualifiedName(), methodType(method)) + } + if err := checkMessageType(method.GetInputType(), request); err != nil { + return nil, err + } + resp := s.mf.NewMessage(method.GetOutputType()) + if err := s.channel.Invoke(ctx, requestMethod(method), request, resp, opts...); err != nil { + return nil, err + } + return resp, nil +} + +// InvokeRpcServerStream sends a unary RPC and returns the response stream. 
Use this for server-streaming methods. +func (s Stub) InvokeRpcServerStream(ctx context.Context, method *desc.MethodDescriptor, request proto.Message, opts ...grpc.CallOption) (*ServerStream, error) { + if method.IsClientStreaming() || !method.IsServerStreaming() { + return nil, fmt.Errorf("InvokeRpcServerStream is for server-streaming methods; %q is %s", method.GetFullyQualifiedName(), methodType(method)) + } + if err := checkMessageType(method.GetInputType(), request); err != nil { + return nil, err + } + ctx, cancel := context.WithCancel(ctx) + sd := grpc.StreamDesc{ + StreamName: method.GetName(), + ServerStreams: method.IsServerStreaming(), + ClientStreams: method.IsClientStreaming(), + } + if cs, err := s.channel.NewStream(ctx, &sd, requestMethod(method), opts...); err != nil { + cancel() + return nil, err + } else { + err = cs.SendMsg(request) + if err != nil { + cancel() + return nil, err + } + err = cs.CloseSend() + if err != nil { + cancel() + return nil, err + } + go func() { + // when the new stream is finished, also cleanup the parent context + <-cs.Context().Done() + cancel() + }() + return &ServerStream{cs, method.GetOutputType(), s.mf}, nil + } +} + +// InvokeRpcClientStream creates a new stream that is used to send request messages and, at the end, +// receive the response message. Use this for client-streaming methods. +func (s Stub) InvokeRpcClientStream(ctx context.Context, method *desc.MethodDescriptor, opts ...grpc.CallOption) (*ClientStream, error) { + if !method.IsClientStreaming() || method.IsServerStreaming() { + return nil, fmt.Errorf("InvokeRpcClientStream is for client-streaming methods; %q is %s", method.GetFullyQualifiedName(), methodType(method)) + } + ctx, cancel := context.WithCancel(ctx) + sd := grpc.StreamDesc{ + StreamName: method.GetName(), + ServerStreams: method.IsServerStreaming(), + ClientStreams: method.IsClientStreaming(), + } + if cs, err := s.channel.NewStream(ctx, &sd, requestMethod(method), opts...); err != nil { + cancel() + return nil, err + } else { + go func() { + // when the new stream is finished, also cleanup the parent context + <-cs.Context().Done() + cancel() + }() + return &ClientStream{cs, method, s.mf, cancel}, nil + } +} + +// InvokeRpcBidiStream creates a new stream that is used to both send request messages and receive response +// messages. Use this for bidi-streaming methods. 
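+//
+// A rough sketch (conn is an assumed *grpc.ClientConn, mtd an assumed
+// *desc.MethodDescriptor for a bidi-streaming method, and req an assumed
+// request message of the method's input type):
+//
+//	stub := NewStub(conn)
+//	str, err := stub.InvokeRpcBidiStream(ctx, mtd)
+//	if err != nil {
+//		return err
+//	}
+//	if err := str.SendMsg(req); err != nil {
+//		return err
+//	}
+//	if err := str.CloseSend(); err != nil {
+//		return err
+//	}
+//	// then read responses with str.RecvMsg() until it returns io.EOF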
+func (s Stub) InvokeRpcBidiStream(ctx context.Context, method *desc.MethodDescriptor, opts ...grpc.CallOption) (*BidiStream, error) { + if !method.IsClientStreaming() || !method.IsServerStreaming() { + return nil, fmt.Errorf("InvokeRpcBidiStream is for bidi-streaming methods; %q is %s", method.GetFullyQualifiedName(), methodType(method)) + } + sd := grpc.StreamDesc{ + StreamName: method.GetName(), + ServerStreams: method.IsServerStreaming(), + ClientStreams: method.IsClientStreaming(), + } + if cs, err := s.channel.NewStream(ctx, &sd, requestMethod(method), opts...); err != nil { + return nil, err + } else { + return &BidiStream{cs, method.GetInputType(), method.GetOutputType(), s.mf}, nil + } +} + +func methodType(md *desc.MethodDescriptor) string { + if md.IsClientStreaming() && md.IsServerStreaming() { + return "bidi-streaming" + } else if md.IsClientStreaming() { + return "client-streaming" + } else if md.IsServerStreaming() { + return "server-streaming" + } else { + return "unary" + } +} + +func checkMessageType(md *desc.MessageDescriptor, msg proto.Message) error { + var typeName string + if dm, ok := msg.(*dynamic.Message); ok { + typeName = dm.GetMessageDescriptor().GetFullyQualifiedName() + } else { + typeName = proto.MessageName(msg) + } + if typeName != md.GetFullyQualifiedName() { + return fmt.Errorf("expecting message of type %s; got %s", md.GetFullyQualifiedName(), typeName) + } + return nil +} + +// ServerStream represents a response stream from a server. Messages in the stream can be queried +// as can header and trailer metadata sent by the server. +type ServerStream struct { + stream grpc.ClientStream + respType *desc.MessageDescriptor + mf *dynamic.MessageFactory +} + +// Header returns any header metadata sent by the server (blocks if necessary until headers are +// received). +func (s *ServerStream) Header() (metadata.MD, error) { + return s.stream.Header() +} + +// Trailer returns the trailer metadata sent by the server. It must only be called after +// RecvMsg returns a non-nil error (which may be EOF for normal completion of stream). +func (s *ServerStream) Trailer() metadata.MD { + return s.stream.Trailer() +} + +// Context returns the context associated with this streaming operation. +func (s *ServerStream) Context() context.Context { + return s.stream.Context() +} + +// RecvMsg returns the next message in the response stream or an error. If the stream +// has completed normally, the error is io.EOF. Otherwise, the error indicates the +// nature of the abnormal termination of the stream. +func (s *ServerStream) RecvMsg() (proto.Message, error) { + resp := s.mf.NewMessage(s.respType) + if err := s.stream.RecvMsg(resp); err != nil { + return nil, err + } else { + return resp, nil + } +} + +// ClientStream represents a response stream from a client. Messages in the stream can be sent +// and, when done, the unary server message and header and trailer metadata can be queried. +type ClientStream struct { + stream grpc.ClientStream + method *desc.MethodDescriptor + mf *dynamic.MessageFactory + cancel context.CancelFunc +} + +// Header returns any header metadata sent by the server (blocks if necessary until headers are +// received). +func (s *ClientStream) Header() (metadata.MD, error) { + return s.stream.Header() +} + +// Trailer returns the trailer metadata sent by the server. It must only be called after +// RecvMsg returns a non-nil error (which may be EOF for normal completion of stream). 
+func (s *ClientStream) Trailer() metadata.MD { + return s.stream.Trailer() +} + +// Context returns the context associated with this streaming operation. +func (s *ClientStream) Context() context.Context { + return s.stream.Context() +} + +// SendMsg sends a request message to the server. +func (s *ClientStream) SendMsg(m proto.Message) error { + if err := checkMessageType(s.method.GetInputType(), m); err != nil { + return err + } + return s.stream.SendMsg(m) +} + +// CloseAndReceive closes the outgoing request stream and then blocks for the server's response. +func (s *ClientStream) CloseAndReceive() (proto.Message, error) { + if err := s.stream.CloseSend(); err != nil { + return nil, err + } + resp := s.mf.NewMessage(s.method.GetOutputType()) + if err := s.stream.RecvMsg(resp); err != nil { + return nil, err + } + // make sure we get EOF for a second message + if err := s.stream.RecvMsg(resp); err != io.EOF { + if err == nil { + s.cancel() + return nil, fmt.Errorf("client-streaming method %q returned more than one response message", s.method.GetFullyQualifiedName()) + } else { + return nil, err + } + } + return resp, nil +} + +// BidiStream represents a bi-directional stream for sending messages to and receiving +// messages from a server. The header and trailer metadata sent by the server can also be +// queried. +type BidiStream struct { + stream grpc.ClientStream + reqType *desc.MessageDescriptor + respType *desc.MessageDescriptor + mf *dynamic.MessageFactory +} + +// Header returns any header metadata sent by the server (blocks if necessary until headers are +// received). +func (s *BidiStream) Header() (metadata.MD, error) { + return s.stream.Header() +} + +// Trailer returns the trailer metadata sent by the server. It must only be called after +// RecvMsg returns a non-nil error (which may be EOF for normal completion of stream). +func (s *BidiStream) Trailer() metadata.MD { + return s.stream.Trailer() +} + +// Context returns the context associated with this streaming operation. +func (s *BidiStream) Context() context.Context { + return s.stream.Context() +} + +// SendMsg sends a request message to the server. +func (s *BidiStream) SendMsg(m proto.Message) error { + if err := checkMessageType(s.reqType, m); err != nil { + return err + } + return s.stream.SendMsg(m) +} + +// CloseSend indicates the request stream has ended. Invoke this after all request messages +// are sent (even if there are zero such messages). +func (s *BidiStream) CloseSend() error { + return s.stream.CloseSend() +} + +// RecvMsg returns the next message in the response stream or an error. If the stream +// has completed normally, the error is io.EOF. Otherwise, the error indicates the +// nature of the abnormal termination of the stream. 
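+//
+// A typical receive loop (a sketch; str is an assumed *BidiStream and process
+// an assumed handler):
+//
+//	for {
+//		resp, err := str.RecvMsg()
+//		if err == io.EOF {
+//			break // the stream finished normally
+//		}
+//		if err != nil {
+//			return err
+//		}
+//		process(resp)
+//	}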
+func (s *BidiStream) RecvMsg() (proto.Message, error) { + resp := s.mf.NewMessage(s.respType) + if err := s.stream.RecvMsg(resp); err != nil { + return nil, err + } else { + return resp, nil + } +} diff --git a/vendor/github.com/jhump/protoreflect/dynamic/indent.go b/vendor/github.com/jhump/protoreflect/dynamic/indent.go new file mode 100644 index 00000000..bd7fcaa5 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/dynamic/indent.go @@ -0,0 +1,76 @@ +package dynamic + +import "bytes" + +type indentBuffer struct { + bytes.Buffer + indent string + indentCount int + comma bool +} + +func (b *indentBuffer) start() error { + if b.indentCount >= 0 { + b.indentCount++ + return b.newLine(false) + } + return nil +} + +func (b *indentBuffer) sep() error { + if b.indentCount >= 0 { + _, err := b.WriteString(": ") + return err + } else { + return b.WriteByte(':') + } +} + +func (b *indentBuffer) end() error { + if b.indentCount >= 0 { + b.indentCount-- + return b.newLine(false) + } + return nil +} + +func (b *indentBuffer) maybeNext(first *bool) error { + if *first { + *first = false + return nil + } else { + return b.next() + } +} + +func (b *indentBuffer) next() error { + if b.indentCount >= 0 { + return b.newLine(b.comma) + } else if b.comma { + return b.WriteByte(',') + } else { + return b.WriteByte(' ') + } +} + +func (b *indentBuffer) newLine(comma bool) error { + if comma { + err := b.WriteByte(',') + if err != nil { + return err + } + } + + err := b.WriteByte('\n') + if err != nil { + return err + } + + for i := 0; i < b.indentCount; i++ { + _, err := b.WriteString(b.indent) + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/jhump/protoreflect/dynamic/json.go b/vendor/github.com/jhump/protoreflect/dynamic/json.go new file mode 100644 index 00000000..9081965f --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/dynamic/json.go @@ -0,0 +1,1256 @@ +package dynamic + +// JSON marshalling and unmarshalling for dynamic messages + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "math" + "reflect" + "sort" + "strconv" + "strings" + + "github.com/golang/protobuf/jsonpb" + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/types/descriptorpb" + // link in the well-known-types that have a special JSON format + _ "google.golang.org/protobuf/types/known/anypb" + _ "google.golang.org/protobuf/types/known/durationpb" + _ "google.golang.org/protobuf/types/known/emptypb" + _ "google.golang.org/protobuf/types/known/structpb" + _ "google.golang.org/protobuf/types/known/timestamppb" + _ "google.golang.org/protobuf/types/known/wrapperspb" + + "github.com/jhump/protoreflect/desc" +) + +var wellKnownTypeNames = map[string]struct{}{ + "google.protobuf.Any": {}, + "google.protobuf.Empty": {}, + "google.protobuf.Duration": {}, + "google.protobuf.Timestamp": {}, + // struct.proto + "google.protobuf.Struct": {}, + "google.protobuf.Value": {}, + "google.protobuf.ListValue": {}, + // wrappers.proto + "google.protobuf.DoubleValue": {}, + "google.protobuf.FloatValue": {}, + "google.protobuf.Int64Value": {}, + "google.protobuf.UInt64Value": {}, + "google.protobuf.Int32Value": {}, + "google.protobuf.UInt32Value": {}, + "google.protobuf.BoolValue": {}, + "google.protobuf.StringValue": {}, + "google.protobuf.BytesValue": {}, +} + +// MarshalJSON serializes this message to bytes in JSON format, returning an +// error if the operation fails. The resulting bytes will be a valid UTF8 +// string. 
+// +// This method uses a compact form: no newlines, and spaces between fields and +// between field identifiers and values are elided. +// +// This method is convenient shorthand for invoking MarshalJSONPB with a default +// (zero value) marshaler: +// +// m.MarshalJSONPB(&jsonpb.Marshaler{}) +// +// So enums are serialized using enum value name strings, and values that are +// not present (including those with default/zero value for messages defined in +// "proto3" syntax) are omitted. +func (m *Message) MarshalJSON() ([]byte, error) { + return m.MarshalJSONPB(&jsonpb.Marshaler{}) +} + +// MarshalJSONIndent serializes this message to bytes in JSON format, returning +// an error if the operation fails. The resulting bytes will be a valid UTF8 +// string. +// +// This method uses a "pretty-printed" form, with each field on its own line and +// spaces between field identifiers and values. Indentation of two spaces is +// used. +// +// This method is convenient shorthand for invoking MarshalJSONPB with a default +// (zero value) marshaler: +// +// m.MarshalJSONPB(&jsonpb.Marshaler{Indent: " "}) +// +// So enums are serialized using enum value name strings, and values that are +// not present (including those with default/zero value for messages defined in +// "proto3" syntax) are omitted. +func (m *Message) MarshalJSONIndent() ([]byte, error) { + return m.MarshalJSONPB(&jsonpb.Marshaler{Indent: " "}) +} + +// MarshalJSONPB serializes this message to bytes in JSON format, returning an +// error if the operation fails. The resulting bytes will be a valid UTF8 +// string. The given marshaler is used to convey options used during marshaling. +// +// If this message contains nested messages that are generated message types (as +// opposed to dynamic messages), the given marshaler is used to marshal it. +// +// When marshaling any nested messages, any jsonpb.AnyResolver configured in the +// given marshaler is augmented with knowledge of message types known to this +// message's descriptor (and its enclosing file and set of transitive +// dependencies). 
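+//
+// For example, to emit default/zero values and use the original proto field
+// names (a sketch; m is an assumed *Message):
+//
+//	js, err := m.MarshalJSONPB(&jsonpb.Marshaler{OrigName: true, EmitDefaults: true})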
+func (m *Message) MarshalJSONPB(opts *jsonpb.Marshaler) ([]byte, error) { + var b indentBuffer + b.indent = opts.Indent + if len(opts.Indent) == 0 { + b.indentCount = -1 + } + b.comma = true + if err := m.marshalJSON(&b, opts); err != nil { + return nil, err + } + return b.Bytes(), nil +} + +func (m *Message) marshalJSON(b *indentBuffer, opts *jsonpb.Marshaler) error { + if m == nil { + _, err := b.WriteString("null") + return err + } + if r, changed := wrapResolver(opts.AnyResolver, m.mf, m.md.GetFile()); changed { + newOpts := *opts + newOpts.AnyResolver = r + opts = &newOpts + } + + if ok, err := marshalWellKnownType(m, b, opts); ok { + return err + } + + err := b.WriteByte('{') + if err != nil { + return err + } + err = b.start() + if err != nil { + return err + } + + var tags []int + if opts.EmitDefaults { + tags = m.allKnownFieldTags() + } else { + tags = m.knownFieldTags() + } + + first := true + + for _, tag := range tags { + itag := int32(tag) + fd := m.FindFieldDescriptor(itag) + + v, ok := m.values[itag] + if !ok { + if fd.GetOneOf() != nil { + // don't print defaults for fields in a oneof + continue + } + v = fd.GetDefaultValue() + } + + err := b.maybeNext(&first) + if err != nil { + return err + } + err = marshalKnownFieldJSON(b, fd, v, opts) + if err != nil { + return err + } + } + + err = b.end() + if err != nil { + return err + } + err = b.WriteByte('}') + if err != nil { + return err + } + + return nil +} + +func marshalWellKnownType(m *Message, b *indentBuffer, opts *jsonpb.Marshaler) (bool, error) { + fqn := m.md.GetFullyQualifiedName() + if _, ok := wellKnownTypeNames[fqn]; !ok { + return false, nil + } + + msgType := proto.MessageType(fqn) + if msgType == nil { + // wtf? + panic(fmt.Sprintf("could not find registered message type for %q", fqn)) + } + + // convert dynamic message to well-known type and let jsonpb marshal it + msg := reflect.New(msgType.Elem()).Interface().(proto.Message) + if err := m.MergeInto(msg); err != nil { + return true, err + } + return true, opts.Marshal(b, msg) +} + +func marshalKnownFieldJSON(b *indentBuffer, fd *desc.FieldDescriptor, v interface{}, opts *jsonpb.Marshaler) error { + var jsonName string + if opts.OrigName { + jsonName = fd.GetName() + } else { + jsonName = fd.AsFieldDescriptorProto().GetJsonName() + if jsonName == "" { + jsonName = fd.GetName() + } + } + if fd.IsExtension() { + var scope string + switch parent := fd.GetParent().(type) { + case *desc.FileDescriptor: + scope = parent.GetPackage() + default: + scope = parent.GetFullyQualifiedName() + } + if scope == "" { + jsonName = fmt.Sprintf("[%s]", jsonName) + } else { + jsonName = fmt.Sprintf("[%s.%s]", scope, jsonName) + } + } + err := writeJsonString(b, jsonName) + if err != nil { + return err + } + err = b.sep() + if err != nil { + return err + } + + if isNil(v) { + _, err := b.WriteString("null") + return err + } + + if fd.IsMap() { + err = b.WriteByte('{') + if err != nil { + return err + } + err = b.start() + if err != nil { + return err + } + + md := fd.GetMessageType() + vfd := md.FindFieldByNumber(2) + + mp := v.(map[interface{}]interface{}) + keys := make([]interface{}, 0, len(mp)) + for k := range mp { + keys = append(keys, k) + } + sort.Sort(sortable(keys)) + first := true + for _, mk := range keys { + mv := mp[mk] + err := b.maybeNext(&first) + if err != nil { + return err + } + + err = marshalKnownFieldMapEntryJSON(b, mk, vfd, mv, opts) + if err != nil { + return err + } + } + + err = b.end() + if err != nil { + return err + } + return b.WriteByte('}') + + } 
else if fd.IsRepeated() { + err = b.WriteByte('[') + if err != nil { + return err + } + err = b.start() + if err != nil { + return err + } + + sl := v.([]interface{}) + first := true + for _, slv := range sl { + err := b.maybeNext(&first) + if err != nil { + return err + } + err = marshalKnownFieldValueJSON(b, fd, slv, opts) + if err != nil { + return err + } + } + + err = b.end() + if err != nil { + return err + } + return b.WriteByte(']') + + } else { + return marshalKnownFieldValueJSON(b, fd, v, opts) + } +} + +// sortable is used to sort map keys. Values will be integers (int32, int64, uint32, and uint64), +// bools, or strings. +type sortable []interface{} + +func (s sortable) Len() int { + return len(s) +} + +func (s sortable) Less(i, j int) bool { + vi := s[i] + vj := s[j] + switch reflect.TypeOf(vi).Kind() { + case reflect.Int32: + return vi.(int32) < vj.(int32) + case reflect.Int64: + return vi.(int64) < vj.(int64) + case reflect.Uint32: + return vi.(uint32) < vj.(uint32) + case reflect.Uint64: + return vi.(uint64) < vj.(uint64) + case reflect.String: + return vi.(string) < vj.(string) + case reflect.Bool: + return !vi.(bool) && vj.(bool) + default: + panic(fmt.Sprintf("cannot compare keys of type %v", reflect.TypeOf(vi))) + } +} + +func (s sortable) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func isNil(v interface{}) bool { + if v == nil { + return true + } + rv := reflect.ValueOf(v) + return rv.Kind() == reflect.Ptr && rv.IsNil() +} + +func marshalKnownFieldMapEntryJSON(b *indentBuffer, mk interface{}, vfd *desc.FieldDescriptor, mv interface{}, opts *jsonpb.Marshaler) error { + rk := reflect.ValueOf(mk) + var strkey string + switch rk.Kind() { + case reflect.Bool: + strkey = strconv.FormatBool(rk.Bool()) + case reflect.Int32, reflect.Int64: + strkey = strconv.FormatInt(rk.Int(), 10) + case reflect.Uint32, reflect.Uint64: + strkey = strconv.FormatUint(rk.Uint(), 10) + case reflect.String: + strkey = rk.String() + default: + return fmt.Errorf("invalid map key value: %v (%v)", mk, rk.Type()) + } + err := writeJsonString(b, strkey) + if err != nil { + return err + } + err = b.sep() + if err != nil { + return err + } + return marshalKnownFieldValueJSON(b, vfd, mv, opts) +} + +func marshalKnownFieldValueJSON(b *indentBuffer, fd *desc.FieldDescriptor, v interface{}, opts *jsonpb.Marshaler) error { + rv := reflect.ValueOf(v) + switch rv.Kind() { + case reflect.Int64: + return writeJsonString(b, strconv.FormatInt(rv.Int(), 10)) + case reflect.Int32: + ed := fd.GetEnumType() + if !opts.EnumsAsInts && ed != nil { + n := int32(rv.Int()) + vd := ed.FindValueByNumber(n) + if vd == nil { + _, err := b.WriteString(strconv.FormatInt(rv.Int(), 10)) + return err + } else { + return writeJsonString(b, vd.GetName()) + } + } else { + _, err := b.WriteString(strconv.FormatInt(rv.Int(), 10)) + return err + } + case reflect.Uint64: + return writeJsonString(b, strconv.FormatUint(rv.Uint(), 10)) + case reflect.Uint32: + _, err := b.WriteString(strconv.FormatUint(rv.Uint(), 10)) + return err + case reflect.Float32, reflect.Float64: + f := rv.Float() + var str string + if math.IsNaN(f) { + str = `"NaN"` + } else if math.IsInf(f, 1) { + str = `"Infinity"` + } else if math.IsInf(f, -1) { + str = `"-Infinity"` + } else { + var bits int + if rv.Kind() == reflect.Float32 { + bits = 32 + } else { + bits = 64 + } + str = strconv.FormatFloat(rv.Float(), 'g', -1, bits) + } + _, err := b.WriteString(str) + return err + case reflect.Bool: + _, err := b.WriteString(strconv.FormatBool(rv.Bool())) + return err + 
case reflect.Slice: + bstr := base64.StdEncoding.EncodeToString(rv.Bytes()) + return writeJsonString(b, bstr) + case reflect.String: + return writeJsonString(b, rv.String()) + default: + // must be a message + if isNil(v) { + _, err := b.WriteString("null") + return err + } + + if dm, ok := v.(*Message); ok { + return dm.marshalJSON(b, opts) + } + + var err error + if b.indentCount <= 0 || len(b.indent) == 0 { + err = opts.Marshal(b, v.(proto.Message)) + } else { + str, err := opts.MarshalToString(v.(proto.Message)) + if err != nil { + return err + } + indent := strings.Repeat(b.indent, b.indentCount) + pos := 0 + // add indention prefix to each line + for pos < len(str) { + start := pos + nextPos := strings.Index(str[pos:], "\n") + if nextPos == -1 { + nextPos = len(str) + } else { + nextPos = pos + nextPos + 1 // include newline + } + line := str[start:nextPos] + if pos > 0 { + _, err = b.WriteString(indent) + if err != nil { + return err + } + } + _, err = b.WriteString(line) + if err != nil { + return err + } + pos = nextPos + } + } + return err + } +} + +func writeJsonString(b *indentBuffer, s string) error { + if sbytes, err := json.Marshal(s); err != nil { + return err + } else { + _, err := b.Write(sbytes) + return err + } +} + +// UnmarshalJSON de-serializes the message that is present, in JSON format, in +// the given bytes into this message. It first resets the current message. It +// returns an error if the given bytes do not contain a valid encoding of this +// message type in JSON format. +// +// This method is shorthand for invoking UnmarshalJSONPB with a default (zero +// value) unmarshaler: +// +// m.UnmarshalMergeJSONPB(&jsonpb.Unmarshaler{}, js) +// +// So unknown fields will result in an error, and no provided jsonpb.AnyResolver +// will be used when parsing google.protobuf.Any messages. +func (m *Message) UnmarshalJSON(js []byte) error { + return m.UnmarshalJSONPB(&jsonpb.Unmarshaler{}, js) +} + +// UnmarshalMergeJSON de-serializes the message that is present, in JSON format, +// in the given bytes into this message. Unlike UnmarshalJSON, it does not first +// reset the message, instead merging the data in the given bytes into the +// existing data in this message. +func (m *Message) UnmarshalMergeJSON(js []byte) error { + return m.UnmarshalMergeJSONPB(&jsonpb.Unmarshaler{}, js) +} + +// UnmarshalJSONPB de-serializes the message that is present, in JSON format, in +// the given bytes into this message. The given unmarshaler conveys options used +// when parsing the JSON. This function first resets the current message. It +// returns an error if the given bytes do not contain a valid encoding of this +// message type in JSON format. +// +// The decoding is lenient: +// 1. The JSON can refer to fields either by their JSON name or by their +// declared name. +// 2. The JSON can use either numeric values or string names for enum values. +// +// When instantiating nested messages, if this message's associated factory +// returns a generated message type (as opposed to a dynamic message), the given +// unmarshaler is used to unmarshal it. +// +// When unmarshaling any nested messages, any jsonpb.AnyResolver configured in +// the given unmarshaler is augmented with knowledge of message types known to +// this message's descriptor (and its enclosing file and set of transitive +// dependencies). 
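+//
+// A small sketch (md is an assumed *desc.MessageDescriptor and js holds the
+// JSON-encoded input):
+//
+//	m := NewMessage(md)
+//	if err := m.UnmarshalJSONPB(&jsonpb.Unmarshaler{AllowUnknownFields: true}, js); err != nil {
+//		// js was not a valid JSON encoding of this message type
+//	}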
+func (m *Message) UnmarshalJSONPB(opts *jsonpb.Unmarshaler, js []byte) error { + m.Reset() + if err := m.UnmarshalMergeJSONPB(opts, js); err != nil { + return err + } + return m.Validate() +} + +// UnmarshalMergeJSONPB de-serializes the message that is present, in JSON +// format, in the given bytes into this message. The given unmarshaler conveys +// options used when parsing the JSON. Unlike UnmarshalJSONPB, it does not first +// reset the message, instead merging the data in the given bytes into the +// existing data in this message. +func (m *Message) UnmarshalMergeJSONPB(opts *jsonpb.Unmarshaler, js []byte) error { + r := newJsReader(js) + err := m.unmarshalJson(r, opts) + if err != nil { + return err + } + if t, err := r.poll(); err != io.EOF { + b, _ := ioutil.ReadAll(r.unread()) + s := fmt.Sprintf("%v%s", t, string(b)) + return fmt.Errorf("superfluous data found after JSON object: %q", s) + } + return nil +} + +func unmarshalWellKnownType(m *Message, r *jsReader, opts *jsonpb.Unmarshaler) (bool, error) { + fqn := m.md.GetFullyQualifiedName() + if _, ok := wellKnownTypeNames[fqn]; !ok { + return false, nil + } + + msgType := proto.MessageType(fqn) + if msgType == nil { + // wtf? + panic(fmt.Sprintf("could not find registered message type for %q", fqn)) + } + + // extract json value from r + var js json.RawMessage + if err := json.NewDecoder(r.unread()).Decode(&js); err != nil { + return true, err + } + if err := r.skip(); err != nil { + return true, err + } + + // unmarshal into well-known type and then convert to dynamic message + msg := reflect.New(msgType.Elem()).Interface().(proto.Message) + if err := opts.Unmarshal(bytes.NewReader(js), msg); err != nil { + return true, err + } + return true, m.MergeFrom(msg) +} + +func (m *Message) unmarshalJson(r *jsReader, opts *jsonpb.Unmarshaler) error { + if r, changed := wrapResolver(opts.AnyResolver, m.mf, m.md.GetFile()); changed { + newOpts := *opts + newOpts.AnyResolver = r + opts = &newOpts + } + + if ok, err := unmarshalWellKnownType(m, r, opts); ok { + return err + } + + t, err := r.peek() + if err != nil { + return err + } + if t == nil { + // if json is simply "null" we do nothing + r.poll() + return nil + } + + if err := r.beginObject(); err != nil { + return err + } + + for r.hasNext() { + f, err := r.nextObjectKey() + if err != nil { + return err + } + fd := m.FindFieldDescriptorByJSONName(f) + if fd == nil { + if opts.AllowUnknownFields { + r.skip() + continue + } + return fmt.Errorf("message type %s has no known field named %s", m.md.GetFullyQualifiedName(), f) + } + v, err := unmarshalJsField(fd, r, m.mf, opts) + if err != nil { + return err + } + if v != nil { + if err := mergeField(m, fd, v); err != nil { + return err + } + } else if fd.GetOneOf() != nil { + // preserve explicit null for oneof fields (this is a little odd but + // mimics the behavior of jsonpb with oneofs in generated message types) + if fd.GetMessageType() != nil { + typ := m.mf.GetKnownTypeRegistry().GetKnownType(fd.GetMessageType().GetFullyQualifiedName()) + if typ != nil { + // typed nil + if typ.Kind() != reflect.Ptr { + typ = reflect.PtrTo(typ) + } + v = reflect.Zero(typ).Interface() + } else { + // can't use nil dynamic message, so we just use empty one instead + v = m.mf.NewDynamicMessage(fd.GetMessageType()) + } + if err := m.setField(fd, v); err != nil { + return err + } + } else { + // not a message... 
explicit null makes no sense + return fmt.Errorf("message type %s cannot set field %s to null: it is not a message type", m.md.GetFullyQualifiedName(), f) + } + } else { + m.clearField(fd) + } + } + + if err := r.endObject(); err != nil { + return err + } + + return nil +} + +func isWellKnownValue(fd *desc.FieldDescriptor) bool { + return !fd.IsRepeated() && fd.GetType() == descriptorpb.FieldDescriptorProto_TYPE_MESSAGE && + fd.GetMessageType().GetFullyQualifiedName() == "google.protobuf.Value" +} + +func isWellKnownListValue(fd *desc.FieldDescriptor) bool { + // we look for ListValue; but we also look for Value, which can be assigned a ListValue + return !fd.IsRepeated() && fd.GetType() == descriptorpb.FieldDescriptorProto_TYPE_MESSAGE && + (fd.GetMessageType().GetFullyQualifiedName() == "google.protobuf.ListValue" || + fd.GetMessageType().GetFullyQualifiedName() == "google.protobuf.Value") +} + +func unmarshalJsField(fd *desc.FieldDescriptor, r *jsReader, mf *MessageFactory, opts *jsonpb.Unmarshaler) (interface{}, error) { + t, err := r.peek() + if err != nil { + return nil, err + } + if t == nil && !isWellKnownValue(fd) { + // if value is null, just return nil + // (unless field is google.protobuf.Value, in which case + // we fall through to parse it as an instance where its + // underlying value is set to a NullValue) + r.poll() + return nil, nil + } + + if t == json.Delim('{') && fd.IsMap() { + entryType := fd.GetMessageType() + keyType := entryType.FindFieldByNumber(1) + valueType := entryType.FindFieldByNumber(2) + mp := map[interface{}]interface{}{} + + // TODO: if there are just two map keys "key" and "value" and they have the right type of values, + // treat this JSON object as a single map entry message. (In keeping with support of map fields as + // if they were normal repeated field of entry messages as well as supporting a transition from + // optional to repeated...) + + if err := r.beginObject(); err != nil { + return nil, err + } + for r.hasNext() { + kk, err := unmarshalJsFieldElement(keyType, r, mf, opts, false) + if err != nil { + return nil, err + } + vv, err := unmarshalJsFieldElement(valueType, r, mf, opts, true) + if err != nil { + return nil, err + } + mp[kk] = vv + } + if err := r.endObject(); err != nil { + return nil, err + } + + return mp, nil + } else if t == json.Delim('[') && !isWellKnownListValue(fd) { + // We support parsing an array, even if field is not repeated, to mimic support in proto + // binary wire format that supports changing an optional field to repeated and vice versa. + // If the field is not repeated, we only keep the last value in the array. 
+ + if err := r.beginArray(); err != nil { + return nil, err + } + var sl []interface{} + var v interface{} + for r.hasNext() { + var err error + v, err = unmarshalJsFieldElement(fd, r, mf, opts, false) + if err != nil { + return nil, err + } + if fd.IsRepeated() && v != nil { + sl = append(sl, v) + } + } + if err := r.endArray(); err != nil { + return nil, err + } + if fd.IsMap() { + mp := map[interface{}]interface{}{} + for _, m := range sl { + msg := m.(*Message) + kk, err := msg.TryGetFieldByNumber(1) + if err != nil { + return nil, err + } + vv, err := msg.TryGetFieldByNumber(2) + if err != nil { + return nil, err + } + mp[kk] = vv + } + return mp, nil + } else if fd.IsRepeated() { + return sl, nil + } else { + return v, nil + } + } else { + // We support parsing a singular value, even if field is repeated, to mimic support in proto + // binary wire format that supports changing an optional field to repeated and vice versa. + // If the field is repeated, we store value as singleton slice of that one value. + + v, err := unmarshalJsFieldElement(fd, r, mf, opts, false) + if err != nil { + return nil, err + } + if v == nil { + return nil, nil + } + if fd.IsRepeated() { + return []interface{}{v}, nil + } else { + return v, nil + } + } +} + +func unmarshalJsFieldElement(fd *desc.FieldDescriptor, r *jsReader, mf *MessageFactory, opts *jsonpb.Unmarshaler, allowNilMessage bool) (interface{}, error) { + t, err := r.peek() + if err != nil { + return nil, err + } + + switch fd.GetType() { + case descriptorpb.FieldDescriptorProto_TYPE_MESSAGE, + descriptorpb.FieldDescriptorProto_TYPE_GROUP: + + if t == nil && allowNilMessage { + // if json is simply "null" return a nil pointer + r.poll() + return nilMessage(fd.GetMessageType()), nil + } + + m := mf.NewMessage(fd.GetMessageType()) + if dm, ok := m.(*Message); ok { + if err := dm.unmarshalJson(r, opts); err != nil { + return nil, err + } + } else { + var msg json.RawMessage + if err := json.NewDecoder(r.unread()).Decode(&msg); err != nil { + return nil, err + } + if err := r.skip(); err != nil { + return nil, err + } + if err := opts.Unmarshal(bytes.NewReader([]byte(msg)), m); err != nil { + return nil, err + } + } + return m, nil + + case descriptorpb.FieldDescriptorProto_TYPE_ENUM: + if e, err := r.nextNumber(); err != nil { + return nil, err + } else { + // value could be string or number + if i, err := e.Int64(); err != nil { + // number cannot be parsed, so see if it's an enum value name + vd := fd.GetEnumType().FindValueByName(string(e)) + if vd != nil { + return vd.GetNumber(), nil + } else { + return nil, fmt.Errorf("enum %q does not have value named %q", fd.GetEnumType().GetFullyQualifiedName(), e) + } + } else if i > math.MaxInt32 || i < math.MinInt32 { + return nil, NumericOverflowError + } else { + return int32(i), err + } + } + + case descriptorpb.FieldDescriptorProto_TYPE_INT32, + descriptorpb.FieldDescriptorProto_TYPE_SINT32, + descriptorpb.FieldDescriptorProto_TYPE_SFIXED32: + if i, err := r.nextInt(); err != nil { + return nil, err + } else if i > math.MaxInt32 || i < math.MinInt32 { + return nil, NumericOverflowError + } else { + return int32(i), err + } + + case descriptorpb.FieldDescriptorProto_TYPE_INT64, + descriptorpb.FieldDescriptorProto_TYPE_SINT64, + descriptorpb.FieldDescriptorProto_TYPE_SFIXED64: + return r.nextInt() + + case descriptorpb.FieldDescriptorProto_TYPE_UINT32, + descriptorpb.FieldDescriptorProto_TYPE_FIXED32: + if i, err := r.nextUint(); err != nil { + return nil, err + } else if i > math.MaxUint32 { + return 
nil, NumericOverflowError + } else { + return uint32(i), err + } + + case descriptorpb.FieldDescriptorProto_TYPE_UINT64, + descriptorpb.FieldDescriptorProto_TYPE_FIXED64: + return r.nextUint() + + case descriptorpb.FieldDescriptorProto_TYPE_BOOL: + if str, ok := t.(string); ok { + if str == "true" { + r.poll() // consume token + return true, err + } else if str == "false" { + r.poll() // consume token + return false, err + } + } + return r.nextBool() + + case descriptorpb.FieldDescriptorProto_TYPE_FLOAT: + if f, err := r.nextFloat(); err != nil { + return nil, err + } else { + return float32(f), nil + } + + case descriptorpb.FieldDescriptorProto_TYPE_DOUBLE: + return r.nextFloat() + + case descriptorpb.FieldDescriptorProto_TYPE_BYTES: + return r.nextBytes() + + case descriptorpb.FieldDescriptorProto_TYPE_STRING: + return r.nextString() + + default: + return nil, fmt.Errorf("unknown field type: %v", fd.GetType()) + } +} + +type jsReader struct { + reader *bytes.Reader + dec *json.Decoder + current json.Token + peeked bool +} + +func newJsReader(b []byte) *jsReader { + reader := bytes.NewReader(b) + dec := json.NewDecoder(reader) + dec.UseNumber() + return &jsReader{reader: reader, dec: dec} +} + +func (r *jsReader) unread() io.Reader { + bufs := make([]io.Reader, 3) + var peeked []byte + if r.peeked { + if _, ok := r.current.(json.Delim); ok { + peeked = []byte(fmt.Sprintf("%v", r.current)) + } else { + peeked, _ = json.Marshal(r.current) + } + } + readerCopy := *r.reader + decCopy := *r.dec + + bufs[0] = bytes.NewReader(peeked) + bufs[1] = decCopy.Buffered() + bufs[2] = &readerCopy + return &concatReader{bufs: bufs} +} + +func (r *jsReader) hasNext() bool { + return r.dec.More() +} + +func (r *jsReader) peek() (json.Token, error) { + if r.peeked { + return r.current, nil + } + t, err := r.dec.Token() + if err != nil { + return nil, err + } + r.peeked = true + r.current = t + return t, nil +} + +func (r *jsReader) poll() (json.Token, error) { + if r.peeked { + ret := r.current + r.current = nil + r.peeked = false + return ret, nil + } + return r.dec.Token() +} + +func (r *jsReader) beginObject() error { + _, err := r.expect(func(t json.Token) bool { return t == json.Delim('{') }, nil, "start of JSON object: '{'") + return err +} + +func (r *jsReader) endObject() error { + _, err := r.expect(func(t json.Token) bool { return t == json.Delim('}') }, nil, "end of JSON object: '}'") + return err +} + +func (r *jsReader) beginArray() error { + _, err := r.expect(func(t json.Token) bool { return t == json.Delim('[') }, nil, "start of array: '['") + return err +} + +func (r *jsReader) endArray() error { + _, err := r.expect(func(t json.Token) bool { return t == json.Delim(']') }, nil, "end of array: ']'") + return err +} + +func (r *jsReader) nextObjectKey() (string, error) { + return r.nextString() +} + +func (r *jsReader) nextString() (string, error) { + t, err := r.expect(func(t json.Token) bool { _, ok := t.(string); return ok }, "", "string") + if err != nil { + return "", err + } + return t.(string), nil +} + +func (r *jsReader) nextBytes() ([]byte, error) { + str, err := r.nextString() + if err != nil { + return nil, err + } + return base64.StdEncoding.DecodeString(str) +} + +func (r *jsReader) nextBool() (bool, error) { + t, err := r.expect(func(t json.Token) bool { _, ok := t.(bool); return ok }, false, "boolean") + if err != nil { + return false, err + } + return t.(bool), nil +} + +func (r *jsReader) nextInt() (int64, error) { + n, err := r.nextNumber() + if err != nil { + return 0, err 
+ } + return n.Int64() +} + +func (r *jsReader) nextUint() (uint64, error) { + n, err := r.nextNumber() + if err != nil { + return 0, err + } + return strconv.ParseUint(string(n), 10, 64) +} + +func (r *jsReader) nextFloat() (float64, error) { + n, err := r.nextNumber() + if err != nil { + return 0, err + } + return n.Float64() +} + +func (r *jsReader) nextNumber() (json.Number, error) { + t, err := r.expect(func(t json.Token) bool { return reflect.TypeOf(t).Kind() == reflect.String }, "0", "number") + if err != nil { + return "", err + } + switch t := t.(type) { + case json.Number: + return t, nil + case string: + return json.Number(t), nil + } + return "", fmt.Errorf("expecting a number but got %v", t) +} + +func (r *jsReader) skip() error { + t, err := r.poll() + if err != nil { + return err + } + if t == json.Delim('[') { + if err := r.skipArray(); err != nil { + return err + } + } else if t == json.Delim('{') { + if err := r.skipObject(); err != nil { + return err + } + } + return nil +} + +func (r *jsReader) skipArray() error { + for r.hasNext() { + if err := r.skip(); err != nil { + return err + } + } + if err := r.endArray(); err != nil { + return err + } + return nil +} + +func (r *jsReader) skipObject() error { + for r.hasNext() { + // skip object key + if err := r.skip(); err != nil { + return err + } + // and value + if err := r.skip(); err != nil { + return err + } + } + if err := r.endObject(); err != nil { + return err + } + return nil +} + +func (r *jsReader) expect(predicate func(json.Token) bool, ifNil interface{}, expected string) (interface{}, error) { + t, err := r.poll() + if err != nil { + return nil, err + } + if t == nil && ifNil != nil { + return ifNil, nil + } + if !predicate(t) { + return t, fmt.Errorf("bad input: expecting %s ; instead got %v", expected, t) + } + return t, nil +} + +type concatReader struct { + bufs []io.Reader + curr int +} + +func (r *concatReader) Read(p []byte) (n int, err error) { + for { + if r.curr >= len(r.bufs) { + err = io.EOF + return + } + var c int + c, err = r.bufs[r.curr].Read(p) + n += c + if err != io.EOF { + return + } + r.curr++ + p = p[c:] + } +} + +// AnyResolver returns a jsonpb.AnyResolver that uses the given file descriptors +// to resolve message names. It uses the given factory, which may be nil, to +// instantiate messages. The messages that it returns when resolving a type name +// may often be dynamic messages. +func AnyResolver(mf *MessageFactory, files ...*desc.FileDescriptor) jsonpb.AnyResolver { + return &anyResolver{mf: mf, files: files} +} + +type anyResolver struct { + mf *MessageFactory + files []*desc.FileDescriptor + ignored map[*desc.FileDescriptor]struct{} + other jsonpb.AnyResolver +} + +func wrapResolver(r jsonpb.AnyResolver, mf *MessageFactory, f *desc.FileDescriptor) (jsonpb.AnyResolver, bool) { + if r, ok := r.(*anyResolver); ok { + if _, ok := r.ignored[f]; ok { + // if the current resolver is ignoring this file, it's because another + // (upstream) resolver is already handling it, so nothing to do + return r, false + } + for _, file := range r.files { + if file == f { + // no need to wrap! 
+ return r, false + } + } + // ignore files that will be checked by the resolver we're wrapping + // (we'll just delegate and let it search those files) + ignored := map[*desc.FileDescriptor]struct{}{} + for i := range r.ignored { + ignored[i] = struct{}{} + } + ignore(r.files, ignored) + return &anyResolver{mf: mf, files: []*desc.FileDescriptor{f}, ignored: ignored, other: r}, true + } + return &anyResolver{mf: mf, files: []*desc.FileDescriptor{f}, other: r}, true +} + +func ignore(files []*desc.FileDescriptor, ignored map[*desc.FileDescriptor]struct{}) { + for _, f := range files { + if _, ok := ignored[f]; ok { + continue + } + ignored[f] = struct{}{} + ignore(f.GetDependencies(), ignored) + } +} + +func (r *anyResolver) Resolve(typeUrl string) (proto.Message, error) { + mname := typeUrl + if slash := strings.LastIndex(mname, "/"); slash >= 0 { + mname = mname[slash+1:] + } + + // see if the user-specified resolver is able to do the job + if r.other != nil { + msg, err := r.other.Resolve(typeUrl) + if err == nil { + return msg, nil + } + } + + // try to find the message in our known set of files + checked := map[*desc.FileDescriptor]struct{}{} + for _, f := range r.files { + md := r.findMessage(f, mname, checked) + if md != nil { + return r.mf.NewMessage(md), nil + } + } + // failing that, see if the message factory knows about this type + var ktr *KnownTypeRegistry + if r.mf != nil { + ktr = r.mf.ktr + } else { + ktr = (*KnownTypeRegistry)(nil) + } + m := ktr.CreateIfKnown(mname) + if m != nil { + return m, nil + } + + // no other resolver to fallback to? mimic default behavior + mt := proto.MessageType(mname) + if mt == nil { + return nil, fmt.Errorf("unknown message type %q", mname) + } + return reflect.New(mt.Elem()).Interface().(proto.Message), nil +} + +func (r *anyResolver) findMessage(fd *desc.FileDescriptor, msgName string, checked map[*desc.FileDescriptor]struct{}) *desc.MessageDescriptor { + // if this is an ignored descriptor, skip + if _, ok := r.ignored[fd]; ok { + return nil + } + + // bail if we've already checked this file + if _, ok := checked[fd]; ok { + return nil + } + checked[fd] = struct{}{} + + // see if this file has the message + md := fd.FindMessage(msgName) + if md != nil { + return md + } + + // if not, recursively search the file's imports + for _, dep := range fd.GetDependencies() { + md = r.findMessage(dep, msgName, checked) + if md != nil { + return md + } + } + return nil +} + +var _ jsonpb.AnyResolver = (*anyResolver)(nil) diff --git a/vendor/github.com/jhump/protoreflect/dynamic/maps_1.11.go b/vendor/github.com/jhump/protoreflect/dynamic/maps_1.11.go new file mode 100644 index 00000000..69969fc5 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/dynamic/maps_1.11.go @@ -0,0 +1,131 @@ +//go:build !go1.12 +// +build !go1.12 + +package dynamic + +import ( + "reflect" + + "github.com/jhump/protoreflect/desc" +) + +// Pre-Go-1.12, we must use reflect.Value.MapKeys to reflectively +// iterate a map. (We can be more efficient in Go 1.12 and up...) + +func mapsEqual(a, b reflect.Value) bool { + if a.Len() != b.Len() { + return false + } + if a.Len() == 0 && b.Len() == 0 { + // Optimize the case where maps are frequently empty because MapKeys() + // function allocates heavily. 
+ return true + } + + for _, k := range a.MapKeys() { + av := a.MapIndex(k) + bv := b.MapIndex(k) + if !bv.IsValid() { + return false + } + if !fieldsEqual(av.Interface(), bv.Interface()) { + return false + } + } + return true +} + +func validFieldValueForMapField(fd *desc.FieldDescriptor, val reflect.Value) (interface{}, error) { + // make a defensive copy while we check the contents + // (also converts to map[interface{}]interface{} if it's some other type) + keyField := fd.GetMessageType().GetFields()[0] + valField := fd.GetMessageType().GetFields()[1] + m := map[interface{}]interface{}{} + for _, k := range val.MapKeys() { + if k.Kind() == reflect.Interface { + // unwrap it + k = reflect.ValueOf(k.Interface()) + } + kk, err := validElementFieldValueForRv(keyField, k, false) + if err != nil { + return nil, err + } + v := val.MapIndex(k) + if v.Kind() == reflect.Interface { + // unwrap it + v = reflect.ValueOf(v.Interface()) + } + vv, err := validElementFieldValueForRv(valField, v, true) + if err != nil { + return nil, err + } + m[kk] = vv + } + return m, nil +} + +func canConvertMap(src reflect.Value, target reflect.Type) bool { + kt := target.Key() + vt := target.Elem() + for _, k := range src.MapKeys() { + if !canConvert(k, kt) { + return false + } + if !canConvert(src.MapIndex(k), vt) { + return false + } + } + return true +} + +func mergeMapVal(src, target reflect.Value, targetType reflect.Type, deterministic bool) error { + tkt := targetType.Key() + tvt := targetType.Elem() + for _, k := range src.MapKeys() { + v := src.MapIndex(k) + skt := k.Type() + svt := v.Type() + var nk, nv reflect.Value + if tkt == skt { + nk = k + } else if tkt.Kind() == reflect.Ptr && tkt.Elem() == skt { + nk = k.Addr() + } else { + nk = reflect.New(tkt).Elem() + if err := mergeVal(k, nk, deterministic); err != nil { + return err + } + } + if tvt == svt { + nv = v + } else if tvt.Kind() == reflect.Ptr && tvt.Elem() == svt { + nv = v.Addr() + } else { + nv = reflect.New(tvt).Elem() + if err := mergeVal(v, nv, deterministic); err != nil { + return err + } + } + if target.IsNil() { + target.Set(reflect.MakeMap(targetType)) + } + target.SetMapIndex(nk, nv) + } + return nil +} + +func mergeMapField(m *Message, fd *desc.FieldDescriptor, rv reflect.Value) error { + for _, k := range rv.MapKeys() { + if k.Kind() == reflect.Interface && !k.IsNil() { + k = k.Elem() + } + v := rv.MapIndex(k) + if v.Kind() == reflect.Interface && !v.IsNil() { + v = v.Elem() + } + if err := m.putMapField(fd, k.Interface(), v.Interface()); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/jhump/protoreflect/dynamic/maps_1.12.go b/vendor/github.com/jhump/protoreflect/dynamic/maps_1.12.go new file mode 100644 index 00000000..fb353cfc --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/dynamic/maps_1.12.go @@ -0,0 +1,139 @@ +//go:build go1.12 +// +build go1.12 + +package dynamic + +import ( + "reflect" + + "github.com/jhump/protoreflect/desc" +) + +// With Go 1.12 and above, we can use reflect.Value.MapRange to iterate +// over maps more efficiently than using reflect.Value.MapKeys. 
+ +func mapsEqual(a, b reflect.Value) bool { + if a.Len() != b.Len() { + return false + } + if a.Len() == 0 && b.Len() == 0 { + // Optimize the case where maps are frequently empty + return true + } + + iter := a.MapRange() + for iter.Next() { + k := iter.Key() + av := iter.Value() + bv := b.MapIndex(k) + if !bv.IsValid() { + return false + } + if !fieldsEqual(av.Interface(), bv.Interface()) { + return false + } + } + return true +} + +func validFieldValueForMapField(fd *desc.FieldDescriptor, val reflect.Value) (interface{}, error) { + // make a defensive copy while we check the contents + // (also converts to map[interface{}]interface{} if it's some other type) + keyField := fd.GetMessageType().GetFields()[0] + valField := fd.GetMessageType().GetFields()[1] + m := map[interface{}]interface{}{} + iter := val.MapRange() + for iter.Next() { + k := iter.Key() + if k.Kind() == reflect.Interface { + // unwrap it + k = reflect.ValueOf(k.Interface()) + } + kk, err := validElementFieldValueForRv(keyField, k, false) + if err != nil { + return nil, err + } + v := iter.Value() + if v.Kind() == reflect.Interface { + // unwrap it + v = reflect.ValueOf(v.Interface()) + } + vv, err := validElementFieldValueForRv(valField, v, true) + if err != nil { + return nil, err + } + m[kk] = vv + } + return m, nil +} + +func canConvertMap(src reflect.Value, target reflect.Type) bool { + kt := target.Key() + vt := target.Elem() + iter := src.MapRange() + for iter.Next() { + if !canConvert(iter.Key(), kt) { + return false + } + if !canConvert(iter.Value(), vt) { + return false + } + } + return true +} + +func mergeMapVal(src, target reflect.Value, targetType reflect.Type, deterministic bool) error { + tkt := targetType.Key() + tvt := targetType.Elem() + iter := src.MapRange() + for iter.Next() { + k := iter.Key() + v := iter.Value() + skt := k.Type() + svt := v.Type() + var nk, nv reflect.Value + if tkt == skt { + nk = k + } else if tkt.Kind() == reflect.Ptr && tkt.Elem() == skt { + nk = k.Addr() + } else { + nk = reflect.New(tkt).Elem() + if err := mergeVal(k, nk, deterministic); err != nil { + return err + } + } + if tvt == svt { + nv = v + } else if tvt.Kind() == reflect.Ptr && tvt.Elem() == svt { + nv = v.Addr() + } else { + nv = reflect.New(tvt).Elem() + if err := mergeVal(v, nv, deterministic); err != nil { + return err + } + } + if target.IsNil() { + target.Set(reflect.MakeMap(targetType)) + } + target.SetMapIndex(nk, nv) + } + return nil +} + +func mergeMapField(m *Message, fd *desc.FieldDescriptor, rv reflect.Value) error { + iter := rv.MapRange() + for iter.Next() { + k := iter.Key() + v := iter.Value() + if k.Kind() == reflect.Interface && !k.IsNil() { + k = k.Elem() + } + if v.Kind() == reflect.Interface && !v.IsNil() { + v = v.Elem() + } + if err := m.putMapField(fd, k.Interface(), v.Interface()); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/jhump/protoreflect/dynamic/merge.go b/vendor/github.com/jhump/protoreflect/dynamic/merge.go new file mode 100644 index 00000000..ce727fd5 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/dynamic/merge.go @@ -0,0 +1,100 @@ +package dynamic + +import ( + "errors" + "reflect" + + "github.com/golang/protobuf/proto" + + "github.com/jhump/protoreflect/desc" +) + +// Merge merges the given source message into the given destination message. Use +// use this instead of proto.Merge when one or both of the messages might be a +// a dynamic message. 
If there is a problem merging the messages, such as the +// two messages having different types, then this method will panic (just as +// proto.Merges does). +func Merge(dst, src proto.Message) { + if dm, ok := dst.(*Message); ok { + if err := dm.MergeFrom(src); err != nil { + panic(err.Error()) + } + } else if dm, ok := src.(*Message); ok { + if err := dm.MergeInto(dst); err != nil { + panic(err.Error()) + } + } else { + proto.Merge(dst, src) + } +} + +// TryMerge merges the given source message into the given destination message. +// You can use this instead of proto.Merge when one or both of the messages +// might be a dynamic message. Unlike proto.Merge, this method will return an +// error on failure instead of panic'ing. +func TryMerge(dst, src proto.Message) error { + if dm, ok := dst.(*Message); ok { + if err := dm.MergeFrom(src); err != nil { + return err + } + } else if dm, ok := src.(*Message); ok { + if err := dm.MergeInto(dst); err != nil { + return err + } + } else { + // proto.Merge panics on bad input, so we first verify + // inputs and return error instead of panic + out := reflect.ValueOf(dst) + if out.IsNil() { + return errors.New("proto: nil destination") + } + in := reflect.ValueOf(src) + if in.Type() != out.Type() { + return errors.New("proto: type mismatch") + } + proto.Merge(dst, src) + } + return nil +} + +func mergeField(m *Message, fd *desc.FieldDescriptor, val interface{}) error { + rv := reflect.ValueOf(val) + + if fd.IsMap() && rv.Kind() == reflect.Map { + return mergeMapField(m, fd, rv) + } + + if fd.IsRepeated() && rv.Kind() == reflect.Slice && rv.Type() != typeOfBytes { + for i := 0; i < rv.Len(); i++ { + e := rv.Index(i) + if e.Kind() == reflect.Interface && !e.IsNil() { + e = e.Elem() + } + if err := m.addRepeatedField(fd, e.Interface()); err != nil { + return err + } + } + return nil + } + + if fd.IsRepeated() { + return m.addRepeatedField(fd, val) + } else if fd.GetMessageType() == nil { + return m.setField(fd, val) + } + + // it's a message type, so we want to merge contents + var err error + if val, err = validFieldValue(fd, val); err != nil { + return err + } + + existing, _ := m.doGetField(fd, true) + if existing != nil && !reflect.ValueOf(existing).IsNil() { + return TryMerge(existing.(proto.Message), val.(proto.Message)) + } + + // no existing message, so just set field + m.internalSetField(fd, val) + return nil +} diff --git a/vendor/github.com/jhump/protoreflect/dynamic/message_factory.go b/vendor/github.com/jhump/protoreflect/dynamic/message_factory.go new file mode 100644 index 00000000..683e7b33 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/dynamic/message_factory.go @@ -0,0 +1,207 @@ +package dynamic + +import ( + "reflect" + "sync" + + "github.com/golang/protobuf/proto" + + "github.com/jhump/protoreflect/desc" +) + +// MessageFactory can be used to create new empty message objects. A default instance +// (without extension registry or known-type registry specified) will always return +// dynamic messages (e.g. type will be *dynamic.Message) except for "well-known" types. +// The well-known types include primitive wrapper types and a handful of other special +// types defined in standard protobuf definitions, like Any, Duration, and Timestamp. +type MessageFactory struct { + er *ExtensionRegistry + ktr *KnownTypeRegistry +} + +// NewMessageFactoryWithExtensionRegistry creates a new message factory where any +// dynamic messages produced will use the given extension registry to recognize and +// parse extension fields. 
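A minimal usage sketch of the TryMerge helper defined in merge.go above (illustrative only, not part of the vendored file; the message name "example.Ping" and the field "payload" are placeholders):

package example

import (
	"github.com/jhump/protoreflect/desc"
	"github.com/jhump/protoreflect/dynamic"
)

func mergeSketch() (*dynamic.Message, error) {
	// placeholder type name; any message type registered in the program works
	md, err := desc.LoadMessageDescriptor("example.Ping")
	if err != nil {
		return nil, err
	}
	dst := dynamic.NewMessage(md)
	src := dynamic.NewMessage(md)
	if err := src.TrySetFieldByName("payload", "hello"); err != nil { // placeholder field
		return nil, err
	}
	// unlike proto.Merge, TryMerge reports problems as an error instead of panicking
	if err := dynamic.TryMerge(dst, src); err != nil {
		return nil, err
	}
	return dst, nil
}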
+func NewMessageFactoryWithExtensionRegistry(er *ExtensionRegistry) *MessageFactory { + return NewMessageFactoryWithRegistries(er, nil) +} + +// NewMessageFactoryWithKnownTypeRegistry creates a new message factory where the +// known types, per the given registry, will be returned as normal protobuf messages +// (e.g. generated structs, instead of dynamic messages). +func NewMessageFactoryWithKnownTypeRegistry(ktr *KnownTypeRegistry) *MessageFactory { + return NewMessageFactoryWithRegistries(nil, ktr) +} + +// NewMessageFactoryWithDefaults creates a new message factory where all "default" types +// (those for which protoc-generated code is statically linked into the Go program) are +// known types. If any dynamic messages are produced, they will recognize and parse all +// "default" extension fields. This is the equivalent of: +// +// NewMessageFactoryWithRegistries( +// NewExtensionRegistryWithDefaults(), +// NewKnownTypeRegistryWithDefaults()) +func NewMessageFactoryWithDefaults() *MessageFactory { + return NewMessageFactoryWithRegistries(NewExtensionRegistryWithDefaults(), NewKnownTypeRegistryWithDefaults()) +} + +// NewMessageFactoryWithRegistries creates a new message factory with the given extension +// and known type registries. +func NewMessageFactoryWithRegistries(er *ExtensionRegistry, ktr *KnownTypeRegistry) *MessageFactory { + return &MessageFactory{ + er: er, + ktr: ktr, + } +} + +// NewMessage creates a new empty message that corresponds to the given descriptor. +// If the given descriptor describes a "known type" then that type is instantiated. +// Otherwise, an empty dynamic message is returned. +func (f *MessageFactory) NewMessage(md *desc.MessageDescriptor) proto.Message { + var ktr *KnownTypeRegistry + if f != nil { + ktr = f.ktr + } + if m := ktr.CreateIfKnown(md.GetFullyQualifiedName()); m != nil { + return m + } + return NewMessageWithMessageFactory(md, f) +} + +// NewDynamicMessage creates a new empty dynamic message that corresponds to the given +// descriptor. This is like f.NewMessage(md) except the known type registry is not +// consulted so the return value is always a dynamic message. +// +// This is also like dynamic.NewMessage(md) except that the returned message will use +// this factory when creating other messages, like during de-serialization of fields +// that are themselves message types. +func (f *MessageFactory) NewDynamicMessage(md *desc.MessageDescriptor) *Message { + return NewMessageWithMessageFactory(md, f) +} + +// GetKnownTypeRegistry returns the known type registry that this factory uses to +// instantiate known (e.g. generated) message types. +func (f *MessageFactory) GetKnownTypeRegistry() *KnownTypeRegistry { + if f == nil { + return nil + } + return f.ktr +} + +// GetExtensionRegistry returns the extension registry that this factory uses to +// create dynamic messages. The registry is used by dynamic messages to recognize +// and parse extension fields during de-serialization. +func (f *MessageFactory) GetExtensionRegistry() *ExtensionRegistry { + if f == nil { + return nil + } + return f.er +} + +type wkt interface { + XXX_WellKnownType() string +} + +var typeOfWkt = reflect.TypeOf((*wkt)(nil)).Elem() + +// KnownTypeRegistry is a registry of known message types, as identified by their +// fully-qualified name. A known message type is one for which a protoc-generated +// struct exists, so a dynamic message is not necessary to represent it. 
A +// MessageFactory uses a KnownTypeRegistry to decide whether to create a generated +// struct or a dynamic message. The zero-value registry (including the behavior of +// a nil pointer) only knows about the "well-known types" in protobuf. These +// include only the wrapper types and a handful of other special types like Any, +// Duration, and Timestamp. +type KnownTypeRegistry struct { + excludeWkt bool + includeDefault bool + mu sync.RWMutex + types map[string]reflect.Type +} + +// NewKnownTypeRegistryWithDefaults creates a new registry that knows about all +// "default" types (those for which protoc-generated code is statically linked +// into the Go program). +func NewKnownTypeRegistryWithDefaults() *KnownTypeRegistry { + return &KnownTypeRegistry{includeDefault: true} +} + +// NewKnownTypeRegistryWithoutWellKnownTypes creates a new registry that does *not* +// include the "well-known types" in protobuf. So even well-known types would be +// represented by a dynamic message. +func NewKnownTypeRegistryWithoutWellKnownTypes() *KnownTypeRegistry { + return &KnownTypeRegistry{excludeWkt: true} +} + +// AddKnownType adds the types of the given messages as known types. +func (r *KnownTypeRegistry) AddKnownType(kts ...proto.Message) { + r.mu.Lock() + defer r.mu.Unlock() + if r.types == nil { + r.types = map[string]reflect.Type{} + } + for _, kt := range kts { + r.types[proto.MessageName(kt)] = reflect.TypeOf(kt) + } +} + +// CreateIfKnown will construct an instance of the given message if it is a known type. +// If the given name is unknown, nil is returned. +func (r *KnownTypeRegistry) CreateIfKnown(messageName string) proto.Message { + msgType := r.GetKnownType(messageName) + if msgType == nil { + return nil + } + + if msgType.Kind() == reflect.Ptr { + return reflect.New(msgType.Elem()).Interface().(proto.Message) + } else { + return reflect.New(msgType).Elem().Interface().(proto.Message) + } +} + +func isWellKnownType(t reflect.Type) bool { + if t.Implements(typeOfWkt) { + return true + } + if msg, ok := reflect.Zero(t).Interface().(proto.Message); ok { + name := proto.MessageName(msg) + _, ok := wellKnownTypeNames[name] + return ok + } + return false +} + +// GetKnownType will return the reflect.Type for the given message name if it is +// known. If it is not known, nil is returned. 
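A short sketch of how the MessageFactory and KnownTypeRegistry above cooperate (illustrative only, not part of the vendored file; it assumes the generated timestamp package is linked into the program):

package example

import (
	"fmt"

	"github.com/golang/protobuf/ptypes/timestamp"
	"github.com/jhump/protoreflect/desc"
	"github.com/jhump/protoreflect/dynamic"
)

func factorySketch(md *desc.MessageDescriptor) error {
	mf := dynamic.NewMessageFactoryWithDefaults()

	// google.protobuf.Timestamp has generated code linked in, so the factory
	// hands back the generated struct rather than a dynamic message.
	tsMD, err := desc.LoadMessageDescriptorForMessage(&timestamp.Timestamp{})
	if err != nil {
		return err
	}
	fmt.Printf("%T\n", mf.NewMessage(tsMD)) // *timestamp.Timestamp

	// a descriptor with no generated code linked in falls back to a dynamic message
	fmt.Printf("%T\n", mf.NewMessage(md)) // *dynamic.Message (if md is not a known type)
	return nil
}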
+func (r *KnownTypeRegistry) GetKnownType(messageName string) reflect.Type { + if r == nil { + // a nil registry behaves the same as zero value instance: only know of well-known types + t := proto.MessageType(messageName) + if t != nil && isWellKnownType(t) { + return t + } + return nil + } + + if r.includeDefault { + t := proto.MessageType(messageName) + if t != nil && isMessage(t) { + return t + } + } else if !r.excludeWkt { + t := proto.MessageType(messageName) + if t != nil && isWellKnownType(t) { + return t + } + } + + r.mu.RLock() + defer r.mu.RUnlock() + return r.types[messageName] +} + +func isMessage(t reflect.Type) bool { + _, ok := reflect.Zero(t).Interface().(proto.Message) + return ok +} diff --git a/vendor/github.com/jhump/protoreflect/dynamic/text.go b/vendor/github.com/jhump/protoreflect/dynamic/text.go new file mode 100644 index 00000000..5680dc2d --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/dynamic/text.go @@ -0,0 +1,1177 @@ +package dynamic + +// Marshalling and unmarshalling of dynamic messages to/from proto's standard text format + +import ( + "bytes" + "fmt" + "io" + "math" + "reflect" + "sort" + "strconv" + "strings" + "text/scanner" + "unicode" + + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/types/descriptorpb" + + "github.com/jhump/protoreflect/codec" + "github.com/jhump/protoreflect/desc" +) + +// MarshalText serializes this message to bytes in the standard text format, +// returning an error if the operation fails. The resulting bytes will be a +// valid UTF8 string. +// +// This method uses a compact form: no newlines, and spaces between field +// identifiers and values are elided. +func (m *Message) MarshalText() ([]byte, error) { + var b indentBuffer + b.indentCount = -1 // no indentation + if err := m.marshalText(&b); err != nil { + return nil, err + } + return b.Bytes(), nil +} + +// MarshalTextIndent serializes this message to bytes in the standard text +// format, returning an error if the operation fails. The resulting bytes will +// be a valid UTF8 string. +// +// This method uses a "pretty-printed" form, with each field on its own line and +// spaces between field identifiers and values. +func (m *Message) MarshalTextIndent() ([]byte, error) { + var b indentBuffer + b.indent = " " // TODO: option for indent? + if err := m.marshalText(&b); err != nil { + return nil, err + } + return b.Bytes(), nil +} + +func (m *Message) marshalText(b *indentBuffer) error { + // TODO: option for emitting extended Any format? 
+ first := true + // first the known fields + for _, tag := range m.knownFieldTags() { + itag := int32(tag) + v := m.values[itag] + fd := m.FindFieldDescriptor(itag) + if fd.IsMap() { + md := fd.GetMessageType() + kfd := md.FindFieldByNumber(1) + vfd := md.FindFieldByNumber(2) + mp := v.(map[interface{}]interface{}) + keys := make([]interface{}, 0, len(mp)) + for k := range mp { + keys = append(keys, k) + } + sort.Sort(sortable(keys)) + for _, mk := range keys { + mv := mp[mk] + err := b.maybeNext(&first) + if err != nil { + return err + } + err = marshalKnownFieldMapEntryText(b, fd, kfd, mk, vfd, mv) + if err != nil { + return err + } + } + } else if fd.IsRepeated() { + sl := v.([]interface{}) + for _, slv := range sl { + err := b.maybeNext(&first) + if err != nil { + return err + } + err = marshalKnownFieldText(b, fd, slv) + if err != nil { + return err + } + } + } else { + err := b.maybeNext(&first) + if err != nil { + return err + } + err = marshalKnownFieldText(b, fd, v) + if err != nil { + return err + } + } + } + // then the unknown fields + for _, tag := range m.unknownFieldTags() { + itag := int32(tag) + ufs := m.unknownFields[itag] + for _, uf := range ufs { + err := b.maybeNext(&first) + if err != nil { + return err + } + _, err = fmt.Fprintf(b, "%d", tag) + if err != nil { + return err + } + if uf.Encoding == proto.WireStartGroup { + err = b.WriteByte('{') + if err != nil { + return err + } + err = b.start() + if err != nil { + return err + } + in := codec.NewBuffer(uf.Contents) + err = marshalUnknownGroupText(b, in, true) + if err != nil { + return err + } + err = b.end() + if err != nil { + return err + } + err = b.WriteByte('}') + if err != nil { + return err + } + } else { + err = b.sep() + if err != nil { + return err + } + if uf.Encoding == proto.WireBytes { + err = writeString(b, string(uf.Contents)) + if err != nil { + return err + } + } else { + _, err = b.WriteString(strconv.FormatUint(uf.Value, 10)) + if err != nil { + return err + } + } + } + } + } + return nil +} + +func marshalKnownFieldMapEntryText(b *indentBuffer, fd *desc.FieldDescriptor, kfd *desc.FieldDescriptor, mk interface{}, vfd *desc.FieldDescriptor, mv interface{}) error { + var name string + if fd.IsExtension() { + name = fmt.Sprintf("[%s]", fd.GetFullyQualifiedName()) + } else { + name = fd.GetName() + } + _, err := b.WriteString(name) + if err != nil { + return err + } + err = b.sep() + if err != nil { + return err + } + + err = b.WriteByte('<') + if err != nil { + return err + } + err = b.start() + if err != nil { + return err + } + + err = marshalKnownFieldText(b, kfd, mk) + if err != nil { + return err + } + err = b.next() + if err != nil { + return err + } + if !isNil(mv) { + err = marshalKnownFieldText(b, vfd, mv) + if err != nil { + return err + } + } + + err = b.end() + if err != nil { + return err + } + return b.WriteByte('>') +} + +func marshalKnownFieldText(b *indentBuffer, fd *desc.FieldDescriptor, v interface{}) error { + group := fd.GetType() == descriptorpb.FieldDescriptorProto_TYPE_GROUP + if group { + var name string + if fd.IsExtension() { + name = fmt.Sprintf("[%s]", fd.GetMessageType().GetFullyQualifiedName()) + } else { + name = fd.GetMessageType().GetName() + } + _, err := b.WriteString(name) + if err != nil { + return err + } + } else { + var name string + if fd.IsExtension() { + name = fmt.Sprintf("[%s]", fd.GetFullyQualifiedName()) + } else { + name = fd.GetName() + } + _, err := b.WriteString(name) + if err != nil { + return err + } + err = b.sep() + if err != nil { + return 
err + } + } + rv := reflect.ValueOf(v) + switch rv.Kind() { + case reflect.Int32, reflect.Int64: + ed := fd.GetEnumType() + if ed != nil { + n := int32(rv.Int()) + vd := ed.FindValueByNumber(n) + if vd == nil { + _, err := b.WriteString(strconv.FormatInt(rv.Int(), 10)) + return err + } else { + _, err := b.WriteString(vd.GetName()) + return err + } + } else { + _, err := b.WriteString(strconv.FormatInt(rv.Int(), 10)) + return err + } + case reflect.Uint32, reflect.Uint64: + _, err := b.WriteString(strconv.FormatUint(rv.Uint(), 10)) + return err + case reflect.Float32, reflect.Float64: + f := rv.Float() + var str string + if math.IsNaN(f) { + str = "nan" + } else if math.IsInf(f, 1) { + str = "inf" + } else if math.IsInf(f, -1) { + str = "-inf" + } else { + var bits int + if rv.Kind() == reflect.Float32 { + bits = 32 + } else { + bits = 64 + } + str = strconv.FormatFloat(rv.Float(), 'g', -1, bits) + } + _, err := b.WriteString(str) + return err + case reflect.Bool: + _, err := b.WriteString(strconv.FormatBool(rv.Bool())) + return err + case reflect.Slice: + return writeString(b, string(rv.Bytes())) + case reflect.String: + return writeString(b, rv.String()) + default: + var err error + if group { + err = b.WriteByte('{') + } else { + err = b.WriteByte('<') + } + if err != nil { + return err + } + err = b.start() + if err != nil { + return err + } + // must be a message + if dm, ok := v.(*Message); ok { + err = dm.marshalText(b) + if err != nil { + return err + } + } else { + err = proto.CompactText(b, v.(proto.Message)) + if err != nil { + return err + } + } + err = b.end() + if err != nil { + return err + } + if group { + return b.WriteByte('}') + } else { + return b.WriteByte('>') + } + } +} + +// writeString writes a string in the protocol buffer text format. +// It is similar to strconv.Quote except we don't use Go escape sequences, +// we treat the string as a byte sequence, and we use octal escapes. +// These differences are to maintain interoperability with the other +// languages' implementations of the text format. +func writeString(b *indentBuffer, s string) error { + // use WriteByte here to get any needed indent + if err := b.WriteByte('"'); err != nil { + return err + } + // Loop over the bytes, not the runes. + for i := 0; i < len(s); i++ { + var err error + // Divergence from C++: we don't escape apostrophes. + // There's no need to escape them, and the C++ parser + // copes with a naked apostrophe. + switch c := s[i]; c { + case '\n': + _, err = b.WriteString("\\n") + case '\r': + _, err = b.WriteString("\\r") + case '\t': + _, err = b.WriteString("\\t") + case '"': + _, err = b.WriteString("\\\"") + case '\\': + _, err = b.WriteString("\\\\") + default: + if c >= 0x20 && c < 0x7f { + err = b.WriteByte(c) + } else { + _, err = fmt.Fprintf(b, "\\%03o", c) + } + } + if err != nil { + return err + } + } + return b.WriteByte('"') +} + +func marshalUnknownGroupText(b *indentBuffer, in *codec.Buffer, topLevel bool) error { + first := true + for { + if in.EOF() { + if topLevel { + return nil + } + // this is a nested message: we are expecting an end-group tag, not EOF! 
+ return io.ErrUnexpectedEOF + } + tag, wireType, err := in.DecodeTagAndWireType() + if err != nil { + return err + } + if wireType == proto.WireEndGroup { + return nil + } + err = b.maybeNext(&first) + if err != nil { + return err + } + _, err = fmt.Fprintf(b, "%d", tag) + if err != nil { + return err + } + if wireType == proto.WireStartGroup { + err = b.WriteByte('{') + if err != nil { + return err + } + err = b.start() + if err != nil { + return err + } + err = marshalUnknownGroupText(b, in, false) + if err != nil { + return err + } + err = b.end() + if err != nil { + return err + } + err = b.WriteByte('}') + if err != nil { + return err + } + continue + } else { + err = b.sep() + if err != nil { + return err + } + if wireType == proto.WireBytes { + contents, err := in.DecodeRawBytes(false) + if err != nil { + return err + } + err = writeString(b, string(contents)) + if err != nil { + return err + } + } else { + var v uint64 + switch wireType { + case proto.WireVarint: + v, err = in.DecodeVarint() + case proto.WireFixed32: + v, err = in.DecodeFixed32() + case proto.WireFixed64: + v, err = in.DecodeFixed64() + default: + return proto.ErrInternalBadWireType + } + if err != nil { + return err + } + _, err = b.WriteString(strconv.FormatUint(v, 10)) + if err != nil { + return err + } + } + } + } +} + +// UnmarshalText de-serializes the message that is present, in text format, in +// the given bytes into this message. It first resets the current message. It +// returns an error if the given bytes do not contain a valid encoding of this +// message type in the standard text format +func (m *Message) UnmarshalText(text []byte) error { + m.Reset() + if err := m.UnmarshalMergeText(text); err != nil { + return err + } + return m.Validate() +} + +// UnmarshalMergeText de-serializes the message that is present, in text format, +// in the given bytes into this message. Unlike UnmarshalText, it does not first +// reset the message, instead merging the data in the given bytes into the +// existing data in this message. 
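A small sketch of the text-format round trip provided by MarshalTextIndent and UnmarshalText above (illustrative only, not part of the vendored file; the field name "name" is a placeholder):

package example

import (
	"github.com/jhump/protoreflect/desc"
	"github.com/jhump/protoreflect/dynamic"
)

func textRoundTrip(md *desc.MessageDescriptor) (*dynamic.Message, error) {
	src := dynamic.NewMessage(md)
	if err := src.TrySetFieldByName("name", "demo"); err != nil { // placeholder field
		return nil, err
	}
	txt, err := src.MarshalTextIndent() // pretty-printed standard text format
	if err != nil {
		return nil, err
	}
	dst := dynamic.NewMessage(md)
	// UnmarshalText resets dst first; UnmarshalMergeText would merge instead
	if err := dst.UnmarshalText(txt); err != nil {
		return nil, err
	}
	return dst, nil
}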
+func (m *Message) UnmarshalMergeText(text []byte) error { + return m.unmarshalText(newReader(text), tokenEOF) +} + +func (m *Message) unmarshalText(tr *txtReader, end tokenType) error { + for { + tok := tr.next() + if tok.tokTyp == end { + return nil + } + if tok.tokTyp == tokenEOF { + return io.ErrUnexpectedEOF + } + var fd *desc.FieldDescriptor + var extendedAnyType *desc.MessageDescriptor + if tok.tokTyp == tokenInt { + // tag number (indicates unknown field) + tag, err := strconv.ParseInt(tok.val.(string), 10, 32) + if err != nil { + return err + } + itag := int32(tag) + fd = m.FindFieldDescriptor(itag) + if fd == nil { + // can't parse the value w/out field descriptor, so skip it + tok = tr.next() + if tok.tokTyp == tokenEOF { + return io.ErrUnexpectedEOF + } else if tok.tokTyp == tokenOpenBrace { + if err := skipMessageText(tr, true); err != nil { + return err + } + } else if tok.tokTyp == tokenColon { + if err := skipFieldValueText(tr); err != nil { + return err + } + } else { + return textError(tok, "Expecting a colon ':' or brace '{'; instead got %q", tok.txt) + } + tok = tr.peek() + if tok.tokTyp.IsSep() { + tr.next() // consume separator + } + continue + } + } else { + fieldName, err := unmarshalFieldNameText(tr, tok) + if err != nil { + return err + } + fd = m.FindFieldDescriptorByName(fieldName) + if fd == nil { + // See if it's a group name + for _, field := range m.md.GetFields() { + if field.GetType() == descriptorpb.FieldDescriptorProto_TYPE_GROUP && field.GetMessageType().GetName() == fieldName { + fd = field + break + } + } + if fd == nil { + // maybe this is an extended Any + if m.md.GetFullyQualifiedName() == "google.protobuf.Any" && fieldName[0] == '[' && strings.Contains(fieldName, "/") { + // strip surrounding "[" and "]" and extract type name from URL + typeUrl := fieldName[1 : len(fieldName)-1] + mname := typeUrl + if slash := strings.LastIndex(mname, "/"); slash >= 0 { + mname = mname[slash+1:] + } + // TODO: add a way to weave an AnyResolver to this point + extendedAnyType = findMessageDescriptor(mname, m.md.GetFile()) + if extendedAnyType == nil { + return textError(tok, "could not parse Any with unknown type URL %q", fieldName) + } + // field 1 is "type_url" + typeUrlField := m.md.FindFieldByNumber(1) + if err := m.TrySetField(typeUrlField, typeUrl); err != nil { + return err + } + } else { + // TODO: add a flag to just ignore unrecognized field names + return textError(tok, "%q is not a recognized field name of %q", fieldName, m.md.GetFullyQualifiedName()) + } + } + } + } + tok = tr.next() + if tok.tokTyp == tokenEOF { + return io.ErrUnexpectedEOF + } + if extendedAnyType != nil { + // consume optional colon; make sure this is a "start message" token + if tok.tokTyp == tokenColon { + tok = tr.next() + if tok.tokTyp == tokenEOF { + return io.ErrUnexpectedEOF + } + } + if tok.tokTyp.EndToken() == tokenError { + return textError(tok, "Expecting a '<' or '{'; instead got %q", tok.txt) + } + + // TODO: use mf.NewMessage and, if not a dynamic message, use proto.UnmarshalText to unmarshal it + g := m.mf.NewDynamicMessage(extendedAnyType) + if err := g.unmarshalText(tr, tok.tokTyp.EndToken()); err != nil { + return err + } + // now we marshal the message to bytes and store in the Any + b, err := g.Marshal() + if err != nil { + return err + } + // field 2 is "value" + anyValueField := m.md.FindFieldByNumber(2) + if err := m.TrySetField(anyValueField, b); err != nil { + return err + } + + } else if (fd.GetType() == descriptorpb.FieldDescriptorProto_TYPE_GROUP || + 
fd.GetType() == descriptorpb.FieldDescriptorProto_TYPE_MESSAGE) && + tok.tokTyp.EndToken() != tokenError { + + // TODO: use mf.NewMessage and, if not a dynamic message, use proto.UnmarshalText to unmarshal it + g := m.mf.NewDynamicMessage(fd.GetMessageType()) + if err := g.unmarshalText(tr, tok.tokTyp.EndToken()); err != nil { + return err + } + if fd.IsRepeated() { + if err := m.TryAddRepeatedField(fd, g); err != nil { + return err + } + } else { + if err := m.TrySetField(fd, g); err != nil { + return err + } + } + } else { + if tok.tokTyp != tokenColon { + return textError(tok, "Expecting a colon ':'; instead got %q", tok.txt) + } + if err := m.unmarshalFieldValueText(fd, tr); err != nil { + return err + } + } + tok = tr.peek() + if tok.tokTyp.IsSep() { + tr.next() // consume separator + } + } +} +func findMessageDescriptor(name string, fd *desc.FileDescriptor) *desc.MessageDescriptor { + md := findMessageInTransitiveDeps(name, fd, map[*desc.FileDescriptor]struct{}{}) + if md == nil { + // couldn't find it; see if we have this message linked in + md, _ = desc.LoadMessageDescriptor(name) + } + return md +} + +func findMessageInTransitiveDeps(name string, fd *desc.FileDescriptor, seen map[*desc.FileDescriptor]struct{}) *desc.MessageDescriptor { + if _, ok := seen[fd]; ok { + // already checked this file + return nil + } + seen[fd] = struct{}{} + md := fd.FindMessage(name) + if md != nil { + return md + } + // not in this file so recursively search its deps + for _, dep := range fd.GetDependencies() { + md = findMessageInTransitiveDeps(name, dep, seen) + if md != nil { + return md + } + } + // couldn't find it + return nil +} + +func textError(tok *token, format string, args ...interface{}) error { + var msg string + if tok.tokTyp == tokenError { + msg = tok.val.(error).Error() + } else { + msg = fmt.Sprintf(format, args...) 
+ } + return fmt.Errorf("line %d, col %d: %s", tok.pos.Line, tok.pos.Column, msg) +} + +type setFunction func(*Message, *desc.FieldDescriptor, interface{}) error + +func (m *Message) unmarshalFieldValueText(fd *desc.FieldDescriptor, tr *txtReader) error { + var set setFunction + if fd.IsRepeated() { + set = (*Message).addRepeatedField + } else { + set = mergeField + } + tok := tr.peek() + if tok.tokTyp == tokenOpenBracket { + tr.next() // consume tok + for { + if err := m.unmarshalFieldElementText(fd, tr, set); err != nil { + return err + } + tok = tr.peek() + if tok.tokTyp == tokenCloseBracket { + tr.next() // consume tok + return nil + } else if tok.tokTyp.IsSep() { + tr.next() // consume separator + } + } + } + return m.unmarshalFieldElementText(fd, tr, set) +} + +func (m *Message) unmarshalFieldElementText(fd *desc.FieldDescriptor, tr *txtReader, set setFunction) error { + tok := tr.next() + if tok.tokTyp == tokenEOF { + return io.ErrUnexpectedEOF + } + + var expected string + switch fd.GetType() { + case descriptorpb.FieldDescriptorProto_TYPE_BOOL: + if tok.tokTyp == tokenIdent { + if tok.val.(string) == "true" { + return set(m, fd, true) + } else if tok.val.(string) == "false" { + return set(m, fd, false) + } + } + expected = "boolean value" + case descriptorpb.FieldDescriptorProto_TYPE_BYTES: + if tok.tokTyp == tokenString { + return set(m, fd, []byte(tok.val.(string))) + } + expected = "bytes string value" + case descriptorpb.FieldDescriptorProto_TYPE_STRING: + if tok.tokTyp == tokenString { + return set(m, fd, tok.val) + } + expected = "string value" + case descriptorpb.FieldDescriptorProto_TYPE_FLOAT: + switch tok.tokTyp { + case tokenFloat: + return set(m, fd, float32(tok.val.(float64))) + case tokenInt: + if f, err := strconv.ParseFloat(tok.val.(string), 32); err != nil { + return err + } else { + return set(m, fd, float32(f)) + } + case tokenIdent: + ident := strings.ToLower(tok.val.(string)) + if ident == "inf" { + return set(m, fd, float32(math.Inf(1))) + } else if ident == "nan" { + return set(m, fd, float32(math.NaN())) + } + case tokenMinus: + peeked := tr.peek() + if peeked.tokTyp == tokenIdent { + ident := strings.ToLower(peeked.val.(string)) + if ident == "inf" { + tr.next() // consume peeked token + return set(m, fd, float32(math.Inf(-1))) + } + } + } + expected = "float value" + case descriptorpb.FieldDescriptorProto_TYPE_DOUBLE: + switch tok.tokTyp { + case tokenFloat: + return set(m, fd, tok.val) + case tokenInt: + if f, err := strconv.ParseFloat(tok.val.(string), 64); err != nil { + return err + } else { + return set(m, fd, f) + } + case tokenIdent: + ident := strings.ToLower(tok.val.(string)) + if ident == "inf" { + return set(m, fd, math.Inf(1)) + } else if ident == "nan" { + return set(m, fd, math.NaN()) + } + case tokenMinus: + peeked := tr.peek() + if peeked.tokTyp == tokenIdent { + ident := strings.ToLower(peeked.val.(string)) + if ident == "inf" { + tr.next() // consume peeked token + return set(m, fd, math.Inf(-1)) + } + } + } + expected = "float value" + case descriptorpb.FieldDescriptorProto_TYPE_INT32, + descriptorpb.FieldDescriptorProto_TYPE_SINT32, + descriptorpb.FieldDescriptorProto_TYPE_SFIXED32: + if tok.tokTyp == tokenInt { + if i, err := strconv.ParseInt(tok.val.(string), 10, 32); err != nil { + return err + } else { + return set(m, fd, int32(i)) + } + } + expected = "int value" + case descriptorpb.FieldDescriptorProto_TYPE_INT64, + descriptorpb.FieldDescriptorProto_TYPE_SINT64, + descriptorpb.FieldDescriptorProto_TYPE_SFIXED64: + if tok.tokTyp 
== tokenInt { + if i, err := strconv.ParseInt(tok.val.(string), 10, 64); err != nil { + return err + } else { + return set(m, fd, i) + } + } + expected = "int value" + case descriptorpb.FieldDescriptorProto_TYPE_UINT32, + descriptorpb.FieldDescriptorProto_TYPE_FIXED32: + if tok.tokTyp == tokenInt { + if i, err := strconv.ParseUint(tok.val.(string), 10, 32); err != nil { + return err + } else { + return set(m, fd, uint32(i)) + } + } + expected = "unsigned int value" + case descriptorpb.FieldDescriptorProto_TYPE_UINT64, + descriptorpb.FieldDescriptorProto_TYPE_FIXED64: + if tok.tokTyp == tokenInt { + if i, err := strconv.ParseUint(tok.val.(string), 10, 64); err != nil { + return err + } else { + return set(m, fd, i) + } + } + expected = "unsigned int value" + case descriptorpb.FieldDescriptorProto_TYPE_ENUM: + if tok.tokTyp == tokenIdent { + // TODO: add a flag to just ignore unrecognized enum value names? + vd := fd.GetEnumType().FindValueByName(tok.val.(string)) + if vd != nil { + return set(m, fd, vd.GetNumber()) + } + } else if tok.tokTyp == tokenInt { + if i, err := strconv.ParseInt(tok.val.(string), 10, 32); err != nil { + return err + } else { + return set(m, fd, int32(i)) + } + } + expected = fmt.Sprintf("enum %s value", fd.GetEnumType().GetFullyQualifiedName()) + case descriptorpb.FieldDescriptorProto_TYPE_MESSAGE, + descriptorpb.FieldDescriptorProto_TYPE_GROUP: + + endTok := tok.tokTyp.EndToken() + if endTok != tokenError { + dm := m.mf.NewDynamicMessage(fd.GetMessageType()) + if err := dm.unmarshalText(tr, endTok); err != nil { + return err + } + // TODO: ideally we would use mf.NewMessage and, if not a dynamic message, use + // proto package to unmarshal it. But the text parser isn't particularly amenable + // to that, so we instead convert a dynamic message to a generated one if the + // known-type registry knows about the generated type... 
+ var ktr *KnownTypeRegistry + if m.mf != nil { + ktr = m.mf.ktr + } + pm := ktr.CreateIfKnown(fd.GetMessageType().GetFullyQualifiedName()) + if pm != nil { + if err := dm.ConvertTo(pm); err != nil { + return set(m, fd, pm) + } + } + return set(m, fd, dm) + } + expected = fmt.Sprintf("message %s value", fd.GetMessageType().GetFullyQualifiedName()) + default: + return fmt.Errorf("field %q of message %q has unrecognized type: %v", fd.GetFullyQualifiedName(), m.md.GetFullyQualifiedName(), fd.GetType()) + } + + // if we get here, token was wrong type; create error message + var article string + if strings.Contains("aieou", expected[0:1]) { + article = "an" + } else { + article = "a" + } + return textError(tok, "Expecting %s %s; got %q", article, expected, tok.txt) +} + +func unmarshalFieldNameText(tr *txtReader, tok *token) (string, error) { + if tok.tokTyp == tokenOpenBracket || tok.tokTyp == tokenOpenParen { + // extension name + var closeType tokenType + var closeChar string + if tok.tokTyp == tokenOpenBracket { + closeType = tokenCloseBracket + closeChar = "close bracket ']'" + } else { + closeType = tokenCloseParen + closeChar = "close paren ')'" + } + // must be followed by an identifier + idents := make([]string, 0, 1) + for { + tok = tr.next() + if tok.tokTyp == tokenEOF { + return "", io.ErrUnexpectedEOF + } else if tok.tokTyp != tokenIdent { + return "", textError(tok, "Expecting an identifier; instead got %q", tok.txt) + } + idents = append(idents, tok.val.(string)) + // and then close bracket/paren, or "/" to keep adding URL elements to name + tok = tr.next() + if tok.tokTyp == tokenEOF { + return "", io.ErrUnexpectedEOF + } else if tok.tokTyp == closeType { + break + } else if tok.tokTyp != tokenSlash { + return "", textError(tok, "Expecting a %s; instead got %q", closeChar, tok.txt) + } + } + return "[" + strings.Join(idents, "/") + "]", nil + } else if tok.tokTyp == tokenIdent { + // normal field name + return tok.val.(string), nil + } else { + return "", textError(tok, "Expecting an identifier or tag number; instead got %q", tok.txt) + } +} + +func skipFieldNameText(tr *txtReader) error { + tok := tr.next() + if tok.tokTyp == tokenEOF { + return io.ErrUnexpectedEOF + } else if tok.tokTyp == tokenInt || tok.tokTyp == tokenIdent { + return nil + } else { + _, err := unmarshalFieldNameText(tr, tok) + return err + } +} + +func skipFieldValueText(tr *txtReader) error { + tok := tr.peek() + if tok.tokTyp == tokenOpenBracket { + tr.next() // consume tok + for { + if err := skipFieldElementText(tr); err != nil { + return err + } + tok = tr.peek() + if tok.tokTyp == tokenCloseBracket { + tr.next() // consume tok + return nil + } else if tok.tokTyp.IsSep() { + tr.next() // consume separator + } + + } + } + return skipFieldElementText(tr) +} + +func skipFieldElementText(tr *txtReader) error { + tok := tr.next() + switch tok.tokTyp { + case tokenEOF: + return io.ErrUnexpectedEOF + case tokenInt, tokenFloat, tokenString, tokenIdent: + return nil + case tokenOpenAngle: + return skipMessageText(tr, false) + default: + return textError(tok, "Expecting an angle bracket '<' or a value; instead got %q", tok.txt) + } +} + +func skipMessageText(tr *txtReader, isGroup bool) error { + for { + tok := tr.peek() + if tok.tokTyp == tokenEOF { + return io.ErrUnexpectedEOF + } else if isGroup && tok.tokTyp == tokenCloseBrace { + return nil + } else if !isGroup && tok.tokTyp == tokenCloseAngle { + return nil + } + + // field name or tag + if err := skipFieldNameText(tr); err != nil { + return err + } + + // 
field value + tok = tr.next() + if tok.tokTyp == tokenEOF { + return io.ErrUnexpectedEOF + } else if tok.tokTyp == tokenOpenBrace { + if err := skipMessageText(tr, true); err != nil { + return err + } + } else if tok.tokTyp == tokenColon { + if err := skipFieldValueText(tr); err != nil { + return err + } + } else { + return textError(tok, "Expecting a colon ':' or brace '{'; instead got %q", tok.txt) + } + + tok = tr.peek() + if tok.tokTyp.IsSep() { + tr.next() // consume separator + } + } +} + +type tokenType int + +const ( + tokenError tokenType = iota + tokenEOF + tokenIdent + tokenString + tokenInt + tokenFloat + tokenColon + tokenComma + tokenSemiColon + tokenOpenBrace + tokenCloseBrace + tokenOpenBracket + tokenCloseBracket + tokenOpenAngle + tokenCloseAngle + tokenOpenParen + tokenCloseParen + tokenSlash + tokenMinus +) + +func (t tokenType) IsSep() bool { + return t == tokenComma || t == tokenSemiColon +} + +func (t tokenType) EndToken() tokenType { + switch t { + case tokenOpenAngle: + return tokenCloseAngle + case tokenOpenBrace: + return tokenCloseBrace + default: + return tokenError + } +} + +type token struct { + tokTyp tokenType + val interface{} + txt string + pos scanner.Position +} + +type txtReader struct { + scanner scanner.Scanner + peeked token + havePeeked bool +} + +func newReader(text []byte) *txtReader { + sc := scanner.Scanner{} + sc.Init(bytes.NewReader(text)) + sc.Mode = scanner.ScanIdents | scanner.ScanInts | scanner.ScanFloats | scanner.ScanChars | + scanner.ScanStrings | scanner.ScanComments | scanner.SkipComments + // identifiers are same restrictions as Go identifiers, except we also allow dots since + // we accept fully-qualified names + sc.IsIdentRune = func(ch rune, i int) bool { + return ch == '_' || unicode.IsLetter(ch) || + (i > 0 && unicode.IsDigit(ch)) || + (i > 0 && ch == '.') + } + // ignore errors; we handle them if/when we see malformed tokens + sc.Error = func(s *scanner.Scanner, msg string) {} + return &txtReader{scanner: sc} +} + +func (p *txtReader) peek() *token { + if p.havePeeked { + return &p.peeked + } + t := p.scanner.Scan() + if t == scanner.EOF { + p.peeked.tokTyp = tokenEOF + p.peeked.val = nil + p.peeked.txt = "" + p.peeked.pos = p.scanner.Position + } else if err := p.processToken(t, p.scanner.TokenText(), p.scanner.Position); err != nil { + p.peeked.tokTyp = tokenError + p.peeked.val = err + } + p.havePeeked = true + return &p.peeked +} + +func (p *txtReader) processToken(t rune, text string, pos scanner.Position) error { + p.peeked.pos = pos + p.peeked.txt = text + switch t { + case scanner.Ident: + p.peeked.tokTyp = tokenIdent + p.peeked.val = text + case scanner.Int: + p.peeked.tokTyp = tokenInt + p.peeked.val = text // can't parse the number because we don't know if it's signed or unsigned + case scanner.Float: + p.peeked.tokTyp = tokenFloat + var err error + if p.peeked.val, err = strconv.ParseFloat(text, 64); err != nil { + return err + } + case scanner.Char, scanner.String: + p.peeked.tokTyp = tokenString + var err error + if p.peeked.val, err = strconv.Unquote(text); err != nil { + return err + } + case '-': // unary minus, for negative ints and floats + ch := p.scanner.Peek() + if ch < '0' || ch > '9' { + p.peeked.tokTyp = tokenMinus + p.peeked.val = '-' + } else { + t := p.scanner.Scan() + if t == scanner.EOF { + return io.ErrUnexpectedEOF + } else if t == scanner.Float { + p.peeked.tokTyp = tokenFloat + text += p.scanner.TokenText() + p.peeked.txt = text + var err error + if p.peeked.val, err = strconv.ParseFloat(text, 
64); err != nil { + p.peeked.pos = p.scanner.Position + return err + } + } else if t == scanner.Int { + p.peeked.tokTyp = tokenInt + text += p.scanner.TokenText() + p.peeked.txt = text + p.peeked.val = text // can't parse the number because we don't know if it's signed or unsigned + } else { + p.peeked.pos = p.scanner.Position + return fmt.Errorf("expecting an int or float but got %q", p.scanner.TokenText()) + } + } + case ':': + p.peeked.tokTyp = tokenColon + p.peeked.val = ':' + case ',': + p.peeked.tokTyp = tokenComma + p.peeked.val = ',' + case ';': + p.peeked.tokTyp = tokenSemiColon + p.peeked.val = ';' + case '{': + p.peeked.tokTyp = tokenOpenBrace + p.peeked.val = '{' + case '}': + p.peeked.tokTyp = tokenCloseBrace + p.peeked.val = '}' + case '<': + p.peeked.tokTyp = tokenOpenAngle + p.peeked.val = '<' + case '>': + p.peeked.tokTyp = tokenCloseAngle + p.peeked.val = '>' + case '[': + p.peeked.tokTyp = tokenOpenBracket + p.peeked.val = '[' + case ']': + p.peeked.tokTyp = tokenCloseBracket + p.peeked.val = ']' + case '(': + p.peeked.tokTyp = tokenOpenParen + p.peeked.val = '(' + case ')': + p.peeked.tokTyp = tokenCloseParen + p.peeked.val = ')' + case '/': + // only allowed to separate URL components in expanded Any format + p.peeked.tokTyp = tokenSlash + p.peeked.val = '/' + default: + return fmt.Errorf("invalid character: %c", t) + } + return nil +} + +func (p *txtReader) next() *token { + t := p.peek() + if t.tokTyp != tokenEOF && t.tokTyp != tokenError { + p.havePeeked = false + } + return t +} diff --git a/vendor/github.com/jhump/protoreflect/internal/codec/buffer.go b/vendor/github.com/jhump/protoreflect/internal/codec/buffer.go new file mode 100644 index 00000000..09f8849e --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/internal/codec/buffer.go @@ -0,0 +1,118 @@ +package codec + +import ( + "fmt" + "io" +) + +// Buffer is a reader and a writer that wraps a slice of bytes and also +// provides API for decoding and encoding the protobuf binary format. +// +// Its operation is similar to that of a bytes.Buffer: writing pushes +// data to the end of the buffer while reading pops data from the head +// of the buffer. So the same buffer can be used to both read and write. +type Buffer struct { + buf []byte + index int + + // tmp is used when another byte slice is needed, such as when + // serializing messages, since we need to know the length before + // we can write the length prefix; by caching this, including + // after it is grown by serialization operations, we reduce the + // number of allocations needed + tmp []byte + + deterministic bool +} + +// NewBuffer creates a new buffer with the given slice of bytes as the +// buffer's initial contents. +func NewBuffer(buf []byte) *Buffer { + return &Buffer{buf: buf} +} + +// SetDeterministic sets this buffer to encode messages deterministically. This +// is useful for tests. But the overhead is non-zero, so it should not likely be +// used outside of tests. When true, map fields in a message must have their +// keys sorted before serialization to ensure deterministic output. Otherwise, +// values in a map field will be serialized in map iteration order. +func (cb *Buffer) SetDeterministic(deterministic bool) { + cb.deterministic = deterministic +} + +// IsDeterministic returns whether or not this buffer is configured to encode +// messages deterministically. +func (cb *Buffer) IsDeterministic() bool { + return cb.deterministic +} + +// Reset resets this buffer back to empty. 
Any subsequent writes/encodes +// to the buffer will allocate a new backing slice of bytes. +func (cb *Buffer) Reset() { + cb.buf = []byte(nil) + cb.index = 0 +} + +// Bytes returns the slice of bytes remaining in the buffer. Note that +// this does not perform a copy: if the contents of the returned slice +// are modified, the modifications will be visible to subsequent reads +// via the buffer. +func (cb *Buffer) Bytes() []byte { + return cb.buf[cb.index:] +} + +// String returns the remaining bytes in the buffer as a string. +func (cb *Buffer) String() string { + return string(cb.Bytes()) +} + +// EOF returns true if there are no more bytes remaining to read. +func (cb *Buffer) EOF() bool { + return cb.index >= len(cb.buf) +} + +// Skip attempts to skip the given number of bytes in the input. If +// the input has fewer bytes than the given count, io.ErrUnexpectedEOF +// is returned and the buffer is unchanged. Otherwise, the given number +// of bytes are skipped and nil is returned. +func (cb *Buffer) Skip(count int) error { + if count < 0 { + return fmt.Errorf("proto: bad byte length %d", count) + } + newIndex := cb.index + count + if newIndex < cb.index || newIndex > len(cb.buf) { + return io.ErrUnexpectedEOF + } + cb.index = newIndex + return nil +} + +// Len returns the remaining number of bytes in the buffer. +func (cb *Buffer) Len() int { + return len(cb.buf) - cb.index +} + +// Read implements the io.Reader interface. If there are no bytes +// remaining in the buffer, it will return 0, io.EOF. Otherwise, +// it reads max(len(dest), cb.Len()) bytes from input and copies +// them into dest. It returns the number of bytes copied and a nil +// error in this case. +func (cb *Buffer) Read(dest []byte) (int, error) { + if cb.index == len(cb.buf) { + return 0, io.EOF + } + copied := copy(dest, cb.buf[cb.index:]) + cb.index += copied + return copied, nil +} + +var _ io.Reader = (*Buffer)(nil) + +// Write implements the io.Writer interface. It always returns +// len(data), nil. +func (cb *Buffer) Write(data []byte) (int, error) { + cb.buf = append(cb.buf, data...) + return len(data), nil +} + +var _ io.Writer = (*Buffer)(nil) diff --git a/vendor/github.com/jhump/protoreflect/internal/codec/decode.go b/vendor/github.com/jhump/protoreflect/internal/codec/decode.go new file mode 100644 index 00000000..a25f680f --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/internal/codec/decode.go @@ -0,0 +1,346 @@ +package codec + +import ( + "errors" + "fmt" + "io" + "math" + + "github.com/golang/protobuf/proto" +) + +// ErrOverflow is returned when an integer is too large to be represented. +var ErrOverflow = errors.New("proto: integer overflow") + +// ErrBadWireType is returned when decoding a wire-type from a buffer that +// is not valid. +var ErrBadWireType = errors.New("proto: bad wiretype") + +func (cb *Buffer) decodeVarintSlow() (x uint64, err error) { + i := cb.index + l := len(cb.buf) + + for shift := uint(0); shift < 64; shift += 7 { + if i >= l { + err = io.ErrUnexpectedEOF + return + } + b := cb.buf[i] + i++ + x |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + cb.index = i + return + } + } + + // The number is too large to represent in a 64-bit value. + err = ErrOverflow + return +} + +// DecodeVarint reads a varint-encoded integer from the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. 
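A quick sketch of a varint round trip with this Buffer (illustrative only, not part of the vendored file; it assumes the public github.com/jhump/protoreflect/codec package exposes the same Buffer methods as this internal implementation):

package example

import (
	"fmt"

	"github.com/jhump/protoreflect/codec"
)

func varintRoundTrip() error {
	var b codec.Buffer
	if err := b.EncodeVarint(300); err != nil { // 300 encodes as 0xAC 0x02
		return err
	}
	v, err := b.DecodeVarint()
	if err != nil {
		return err
	}
	fmt.Println(v) // 300
	return nil
}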
+func (cb *Buffer) DecodeVarint() (uint64, error) { + i := cb.index + buf := cb.buf + + if i >= len(buf) { + return 0, io.ErrUnexpectedEOF + } else if buf[i] < 0x80 { + cb.index++ + return uint64(buf[i]), nil + } else if len(buf)-i < 10 { + return cb.decodeVarintSlow() + } + + var b uint64 + // we already checked the first byte + x := uint64(buf[i]) - 0x80 + i++ + + b = uint64(buf[i]) + i++ + x += b << 7 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 7 + + b = uint64(buf[i]) + i++ + x += b << 14 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 14 + + b = uint64(buf[i]) + i++ + x += b << 21 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 21 + + b = uint64(buf[i]) + i++ + x += b << 28 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 28 + + b = uint64(buf[i]) + i++ + x += b << 35 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 35 + + b = uint64(buf[i]) + i++ + x += b << 42 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 42 + + b = uint64(buf[i]) + i++ + x += b << 49 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 49 + + b = uint64(buf[i]) + i++ + x += b << 56 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 56 + + b = uint64(buf[i]) + i++ + x += b << 63 + if b&0x80 == 0 { + goto done + } + // x -= 0x80 << 63 // Always zero. + + return 0, ErrOverflow + +done: + cb.index = i + return x, nil +} + +// DecodeTagAndWireType decodes a field tag and wire type from input. +// This reads a varint and then extracts the two fields from the varint +// value read. +func (cb *Buffer) DecodeTagAndWireType() (tag int32, wireType int8, err error) { + var v uint64 + v, err = cb.DecodeVarint() + if err != nil { + return + } + // low 7 bits is wire type + wireType = int8(v & 7) + // rest is int32 tag number + v = v >> 3 + if v > math.MaxInt32 { + err = fmt.Errorf("tag number out of range: %d", v) + return + } + tag = int32(v) + return +} + +// DecodeFixed64 reads a 64-bit integer from the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (cb *Buffer) DecodeFixed64() (x uint64, err error) { + // x, err already 0 + i := cb.index + 8 + if i < 0 || i > len(cb.buf) { + err = io.ErrUnexpectedEOF + return + } + cb.index = i + + x = uint64(cb.buf[i-8]) + x |= uint64(cb.buf[i-7]) << 8 + x |= uint64(cb.buf[i-6]) << 16 + x |= uint64(cb.buf[i-5]) << 24 + x |= uint64(cb.buf[i-4]) << 32 + x |= uint64(cb.buf[i-3]) << 40 + x |= uint64(cb.buf[i-2]) << 48 + x |= uint64(cb.buf[i-1]) << 56 + return +} + +// DecodeFixed32 reads a 32-bit integer from the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (cb *Buffer) DecodeFixed32() (x uint64, err error) { + // x, err already 0 + i := cb.index + 4 + if i < 0 || i > len(cb.buf) { + err = io.ErrUnexpectedEOF + return + } + cb.index = i + + x = uint64(cb.buf[i-4]) + x |= uint64(cb.buf[i-3]) << 8 + x |= uint64(cb.buf[i-2]) << 16 + x |= uint64(cb.buf[i-1]) << 24 + return +} + +// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. 
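A sketch of walking an encoded message with DecodeTagAndWireType and SkipField (illustrative only, not part of the vendored file; it again assumes the public codec package exposes these Buffer methods):

package example

import (
	"github.com/golang/protobuf/proto"
	"github.com/jhump/protoreflect/codec"
)

// findBytesField returns the contents of the first field with the given tag
// that uses the length-delimited wire type, or nil if no such field is present.
func findBytesField(data []byte, wantTag int32) ([]byte, error) {
	b := codec.NewBuffer(data)
	for !b.EOF() {
		tag, wt, err := b.DecodeTagAndWireType()
		if err != nil {
			return nil, err
		}
		if tag == wantTag && wt == proto.WireBytes {
			return b.DecodeRawBytes(true) // copy the bytes out of the buffer
		}
		if err := b.SkipField(wt); err != nil {
			return nil, err
		}
	}
	return nil, nil
}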
+func (cb *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { + n, err := cb.DecodeVarint() + if err != nil { + return nil, err + } + + nb := int(n) + if nb < 0 { + return nil, fmt.Errorf("proto: bad byte length %d", nb) + } + end := cb.index + nb + if end < cb.index || end > len(cb.buf) { + return nil, io.ErrUnexpectedEOF + } + + if !alloc { + buf = cb.buf[cb.index:end] + cb.index = end + return + } + + buf = make([]byte, nb) + copy(buf, cb.buf[cb.index:]) + cb.index = end + return +} + +// ReadGroup reads the input until a "group end" tag is found +// and returns the data up to that point. Subsequent reads from +// the buffer will read data after the group end tag. If alloc +// is true, the data is copied to a new slice before being returned. +// Otherwise, the returned slice is a view into the buffer's +// underlying byte slice. +// +// This function correctly handles nested groups: if a "group start" +// tag is found, then that group's end tag will be included in the +// returned data. +func (cb *Buffer) ReadGroup(alloc bool) ([]byte, error) { + var groupEnd, dataEnd int + groupEnd, dataEnd, err := cb.findGroupEnd() + if err != nil { + return nil, err + } + var results []byte + if !alloc { + results = cb.buf[cb.index:dataEnd] + } else { + results = make([]byte, dataEnd-cb.index) + copy(results, cb.buf[cb.index:]) + } + cb.index = groupEnd + return results, nil +} + +// SkipGroup is like ReadGroup, except that it discards the +// data and just advances the buffer to point to the input +// right *after* the "group end" tag. +func (cb *Buffer) SkipGroup() error { + groupEnd, _, err := cb.findGroupEnd() + if err != nil { + return err + } + cb.index = groupEnd + return nil +} + +// SkipField attempts to skip the value of a field with the given wire +// type. When consuming a protobuf-encoded stream, it can be called immediately +// after DecodeTagAndWireType to discard the subsequent data for the field. +func (cb *Buffer) SkipField(wireType int8) error { + switch wireType { + case proto.WireFixed32: + if err := cb.Skip(4); err != nil { + return err + } + case proto.WireFixed64: + if err := cb.Skip(8); err != nil { + return err + } + case proto.WireVarint: + // skip varint by finding last byte (has high bit unset) + i := cb.index + limit := i + 10 // varint cannot be >10 bytes + for { + if i >= limit { + return ErrOverflow + } + if i >= len(cb.buf) { + return io.ErrUnexpectedEOF + } + if cb.buf[i]&0x80 == 0 { + break + } + i++ + } + // TODO: This would only overflow if buffer length was MaxInt and we + // read the last byte. This is not a real/feasible concern on 64-bit + // systems. Something to worry about for 32-bit systems? Do we care? 
+ cb.index = i + 1 + case proto.WireBytes: + l, err := cb.DecodeVarint() + if err != nil { + return err + } + if err := cb.Skip(int(l)); err != nil { + return err + } + case proto.WireStartGroup: + if err := cb.SkipGroup(); err != nil { + return err + } + default: + return ErrBadWireType + } + return nil +} + +func (cb *Buffer) findGroupEnd() (groupEnd int, dataEnd int, err error) { + start := cb.index + defer func() { + cb.index = start + }() + for { + fieldStart := cb.index + // read a field tag + _, wireType, err := cb.DecodeTagAndWireType() + if err != nil { + return 0, 0, err + } + if wireType == proto.WireEndGroup { + return cb.index, fieldStart, nil + } + // skip past the field's data + if err := cb.SkipField(wireType); err != nil { + return 0, 0, err + } + } +} diff --git a/vendor/github.com/jhump/protoreflect/internal/codec/encode.go b/vendor/github.com/jhump/protoreflect/internal/codec/encode.go new file mode 100644 index 00000000..524f1bcb --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/internal/codec/encode.go @@ -0,0 +1,147 @@ +package codec + +import ( + "github.com/golang/protobuf/proto" +) + +// EncodeVarint writes a varint-encoded integer to the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (cb *Buffer) EncodeVarint(x uint64) error { + for x >= 1<<7 { + cb.buf = append(cb.buf, uint8(x&0x7f|0x80)) + x >>= 7 + } + cb.buf = append(cb.buf, uint8(x)) + return nil +} + +// EncodeTagAndWireType encodes the given field tag and wire type to the +// buffer. This combines the two values and then writes them as a varint. +func (cb *Buffer) EncodeTagAndWireType(tag int32, wireType int8) error { + v := uint64((int64(tag) << 3) | int64(wireType)) + return cb.EncodeVarint(v) +} + +// EncodeFixed64 writes a 64-bit integer to the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (cb *Buffer) EncodeFixed64(x uint64) error { + cb.buf = append(cb.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24), + uint8(x>>32), + uint8(x>>40), + uint8(x>>48), + uint8(x>>56)) + return nil +} + +// EncodeFixed32 writes a 32-bit integer to the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (cb *Buffer) EncodeFixed32(x uint64) error { + cb.buf = append(cb.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24)) + return nil +} + +// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (cb *Buffer) EncodeRawBytes(b []byte) error { + if err := cb.EncodeVarint(uint64(len(b))); err != nil { + return err + } + cb.buf = append(cb.buf, b...) + return nil +} + +// EncodeMessage writes the given message to the buffer. +func (cb *Buffer) EncodeMessage(pm proto.Message) error { + bytes, err := marshalMessage(cb.buf, pm, cb.deterministic) + if err != nil { + return err + } + cb.buf = bytes + return nil +} + +// EncodeDelimitedMessage writes the given message to the buffer with a +// varint-encoded length prefix (the delimiter). 
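A sketch of writing and reading length-prefixed messages with EncodeDelimitedMessage and DecodeRawBytes (illustrative only, not part of the vendored file; it assumes the public codec package exposes these Buffer methods):

package example

import (
	"github.com/golang/protobuf/proto"
	"github.com/jhump/protoreflect/codec"
)

func writeDelimited(msgs []proto.Message) ([]byte, error) {
	var b codec.Buffer
	for _, m := range msgs {
		// each message is written with a varint length prefix
		if err := b.EncodeDelimitedMessage(m); err != nil {
			return nil, err
		}
	}
	return b.Bytes(), nil
}

func readFirstDelimited(data []byte, into proto.Message) error {
	b := codec.NewBuffer(data)
	raw, err := b.DecodeRawBytes(false) // reads the length prefix, then the payload
	if err != nil {
		return err
	}
	return proto.Unmarshal(raw, into)
}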
+func (cb *Buffer) EncodeDelimitedMessage(pm proto.Message) error { + bytes, err := marshalMessage(cb.tmp, pm, cb.deterministic) + if err != nil { + return err + } + // save truncated buffer if it was grown (so we can re-use it and + // curtail future allocations) + if cap(bytes) > cap(cb.tmp) { + cb.tmp = bytes[:0] + } + return cb.EncodeRawBytes(bytes) +} + +func marshalMessage(b []byte, pm proto.Message, deterministic bool) ([]byte, error) { + // We try to use the most efficient way to marshal to existing slice. + + if deterministic { + // see if the message has custom deterministic methods, preferring an + // "append" method over one that must always re-allocate + madm, ok := pm.(interface { + MarshalAppendDeterministic(b []byte) ([]byte, error) + }) + if ok { + return madm.MarshalAppendDeterministic(b) + } + + mdm, ok := pm.(interface { + MarshalDeterministic() ([]byte, error) + }) + if ok { + bytes, err := mdm.MarshalDeterministic() + if err != nil { + return nil, err + } + if len(b) == 0 { + return bytes, nil + } + return append(b, bytes...), nil + } + + var buf proto.Buffer + buf.SetDeterministic(true) + if err := buf.Marshal(pm); err != nil { + return nil, err + } + bytes := buf.Bytes() + if len(b) == 0 { + return bytes, nil + } + return append(b, bytes...), nil + } + + mam, ok := pm.(interface { + // see if we can append the message, vs. having to re-allocate + MarshalAppend(b []byte) ([]byte, error) + }) + if ok { + return mam.MarshalAppend(b) + } + + // lowest common denominator + bytes, err := proto.Marshal(pm) + if err != nil { + return nil, err + } + if len(b) == 0 { + return bytes, nil + } + return append(b, bytes...), nil +} diff --git a/vendor/github.com/jhump/protoreflect/internal/standard_files.go b/vendor/github.com/jhump/protoreflect/internal/standard_files.go new file mode 100644 index 00000000..777c3a43 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/internal/standard_files.go @@ -0,0 +1,127 @@ +// Package internal contains some code that should not be exported but needs to +// be shared across more than one of the protoreflect sub-packages. +package internal + +import ( + "bytes" + "compress/gzip" + "fmt" + "io/ioutil" + + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/types/descriptorpb" +) + +// TODO: replace this alias configuration with desc.RegisterImportPath? + +// StdFileAliases are the standard protos included with protoc, but older versions of +// their respective packages registered them using incorrect paths. +var StdFileAliases = map[string]string{ + // Files for the github.com/golang/protobuf/ptypes package at one point were + // registered using the path where the proto files are mirrored in GOPATH, + // inside the golang/protobuf repo. + // (Fixed as of https://github.com/golang/protobuf/pull/412) + "google/protobuf/any.proto": "github.com/golang/protobuf/ptypes/any/any.proto", + "google/protobuf/duration.proto": "github.com/golang/protobuf/ptypes/duration/duration.proto", + "google/protobuf/empty.proto": "github.com/golang/protobuf/ptypes/empty/empty.proto", + "google/protobuf/struct.proto": "github.com/golang/protobuf/ptypes/struct/struct.proto", + "google/protobuf/timestamp.proto": "github.com/golang/protobuf/ptypes/timestamp/timestamp.proto", + "google/protobuf/wrappers.proto": "github.com/golang/protobuf/ptypes/wrappers/wrappers.proto", + // Files for the google.golang.org/genproto/protobuf package at one point + // were registered with an anomalous "src/" prefix. 
+ // (Fixed as of https://github.com/google/go-genproto/pull/31) + "google/protobuf/api.proto": "src/google/protobuf/api.proto", + "google/protobuf/field_mask.proto": "src/google/protobuf/field_mask.proto", + "google/protobuf/source_context.proto": "src/google/protobuf/source_context.proto", + "google/protobuf/type.proto": "src/google/protobuf/type.proto", + + // Other standard files (descriptor.proto and compiler/plugin.proto) are + // registered correctly, so we don't need rules for them here. +} + +func init() { + // We provide aliasing in both directions, to support files with the + // proper import path linked against older versions of the generated + // files AND files that used the aliased import path but linked against + // newer versions of the generated files (which register with the + // correct path). + + // Get all files defined above + keys := make([]string, 0, len(StdFileAliases)) + for k := range StdFileAliases { + keys = append(keys, k) + } + // And add inverse mappings + for _, k := range keys { + alias := StdFileAliases[k] + StdFileAliases[alias] = k + } +} + +type ErrNoSuchFile string + +func (e ErrNoSuchFile) Error() string { + return fmt.Sprintf("no such file: %q", string(e)) +} + +// LoadFileDescriptor loads a registered descriptor and decodes it. If the given +// name cannot be loaded but is a known standard name, an alias will be tried, +// so the standard files can be loaded even if linked against older "known bad" +// versions of packages. +func LoadFileDescriptor(file string) (*descriptorpb.FileDescriptorProto, error) { + fdb := proto.FileDescriptor(file) + aliased := false + if fdb == nil { + var ok bool + alias, ok := StdFileAliases[file] + if ok { + aliased = true + if fdb = proto.FileDescriptor(alias); fdb == nil { + return nil, ErrNoSuchFile(file) + } + } else { + return nil, ErrNoSuchFile(file) + } + } + + fd, err := DecodeFileDescriptor(file, fdb) + if err != nil { + return nil, err + } + + if aliased { + // the file descriptor will have the alias used to load it, but + // we need it to have the specified name in order to link it + fd.Name = proto.String(file) + } + + return fd, nil +} + +// DecodeFileDescriptor decodes the bytes of a registered file descriptor. +// Registered file descriptors are first "proto encoded" (e.g. binary format +// for the descriptor protos) and then gzipped. So this function gunzips and +// then unmarshals into a descriptor proto. 
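
DecodeFileDescriptor, defined just below, implements the gunzip-then-unmarshal flow described above. For comparison, the newer protobuf runtime can hand back the same FileDescriptorProto straight from its global registry; a small sketch, assuming the file of interest has been registered (the durationpb import is just one convenient way to make that happen):

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protodesc"
	"google.golang.org/protobuf/reflect/protoregistry"
	_ "google.golang.org/protobuf/types/known/durationpb" // registers google/protobuf/duration.proto
)

func main() {
	fd, err := protoregistry.GlobalFiles.FindFileByPath("google/protobuf/duration.proto")
	if err != nil {
		panic(err)
	}
	// Convert the rich descriptor back into a FileDescriptorProto message.
	fdp := protodesc.ToFileDescriptorProto(fd)
	fmt.Println(fdp.GetName(), "messages:", len(fdp.GetMessageType()))
}
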
+func DecodeFileDescriptor(element string, fdb []byte) (*descriptorpb.FileDescriptorProto, error) { + raw, err := decompress(fdb) + if err != nil { + return nil, fmt.Errorf("failed to decompress %q descriptor: %v", element, err) + } + fd := descriptorpb.FileDescriptorProto{} + if err := proto.Unmarshal(raw, &fd); err != nil { + return nil, fmt.Errorf("bad descriptor for %q: %v", element, err) + } + return &fd, nil +} + +func decompress(b []byte) ([]byte, error) { + r, err := gzip.NewReader(bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("bad gzipped descriptor: %v", err) + } + out, err := ioutil.ReadAll(r) + if err != nil { + return nil, fmt.Errorf("bad gzipped descriptor: %v", err) + } + return out, nil +} diff --git a/vendor/github.com/jhump/protoreflect/internal/unrecognized.go b/vendor/github.com/jhump/protoreflect/internal/unrecognized.go new file mode 100644 index 00000000..25376c7b --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/internal/unrecognized.go @@ -0,0 +1,20 @@ +package internal + +import ( + "github.com/golang/protobuf/proto" +) + +// GetUnrecognized fetches the bytes of unrecognized fields for the given message. +func GetUnrecognized(msg proto.Message) []byte { + return proto.MessageReflect(msg).GetUnknown() +} + +// SetUnrecognized adds the given bytes to the unrecognized fields for the given message. +func SetUnrecognized(msg proto.Message, data []byte) { + refl := proto.MessageReflect(msg) + existing := refl.GetUnknown() + if len(existing) > 0 { + data = append(existing, data...) + } + refl.SetUnknown(data) +} diff --git a/vendor/golang.org/x/crypto/sha3/hashes_generic.go b/vendor/golang.org/x/crypto/sha3/hashes_generic.go index c74fc20f..fe8c8479 100644 --- a/vendor/golang.org/x/crypto/sha3/hashes_generic.go +++ b/vendor/golang.org/x/crypto/sha3/hashes_generic.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !gc || purego || !s390x -// +build !gc purego !s390x package sha3 diff --git a/vendor/golang.org/x/crypto/sha3/keccakf.go b/vendor/golang.org/x/crypto/sha3/keccakf.go index e5faa375..ce48b1dd 100644 --- a/vendor/golang.org/x/crypto/sha3/keccakf.go +++ b/vendor/golang.org/x/crypto/sha3/keccakf.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !amd64 || purego || !gc -// +build !amd64 purego !gc package sha3 diff --git a/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go index 248a3824..b908696b 100644 --- a/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go +++ b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && !purego && gc -// +build amd64,!purego,gc package sha3 diff --git a/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s index 4cfa5438..1f539388 100644 --- a/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s +++ b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build amd64 && !purego && gc -// +build amd64,!purego,gc // This code was translated into a form compatible with 6a from the public // domain sources at https://github.com/gvanas/KeccakCodePackage @@ -320,9 +319,9 @@ MOVQ rDi, _si(oState); \ MOVQ rDo, _so(oState) \ -// func keccakF1600(state *[25]uint64) +// func keccakF1600(a *[25]uint64) TEXT ·keccakF1600(SB), 0, $200-8 - MOVQ state+0(FP), rpState + MOVQ a+0(FP), rpState // Convert the user state into an internal state NOTQ _be(rpState) diff --git a/vendor/golang.org/x/crypto/sha3/register.go b/vendor/golang.org/x/crypto/sha3/register.go index 8b4453aa..addfd504 100644 --- a/vendor/golang.org/x/crypto/sha3/register.go +++ b/vendor/golang.org/x/crypto/sha3/register.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build go1.4 -// +build go1.4 package sha3 diff --git a/vendor/golang.org/x/crypto/sha3/sha3_s390x.go b/vendor/golang.org/x/crypto/sha3/sha3_s390x.go index ec26f147..b4fbbf86 100644 --- a/vendor/golang.org/x/crypto/sha3/sha3_s390x.go +++ b/vendor/golang.org/x/crypto/sha3/sha3_s390x.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc && !purego -// +build gc,!purego package sha3 @@ -144,6 +143,12 @@ func (s *asmState) Write(b []byte) (int, error) { // Read squeezes an arbitrary number of bytes from the sponge. func (s *asmState) Read(out []byte) (n int, err error) { + // The 'compute last message digest' instruction only stores the digest + // at the first operand (dst) for SHAKE functions. + if s.function != shake_128 && s.function != shake_256 { + panic("sha3: can only call Read for SHAKE functions") + } + n = len(out) // need to pad if we were absorbing @@ -203,8 +208,17 @@ func (s *asmState) Sum(b []byte) []byte { // Hash the buffer. Note that we don't clear it because we // aren't updating the state. - klmd(s.function, &a, nil, s.buf) - return append(b, a[:s.outputLen]...) + switch s.function { + case sha3_224, sha3_256, sha3_384, sha3_512: + klmd(s.function, &a, nil, s.buf) + return append(b, a[:s.outputLen]...) + case shake_128, shake_256: + d := make([]byte, s.outputLen, 64) + klmd(s.function, &a, d, s.buf) + return append(b, d[:s.outputLen]...) + default: + panic("sha3: unknown function") + } } // Reset resets the Hash to its initial state. diff --git a/vendor/golang.org/x/crypto/sha3/sha3_s390x.s b/vendor/golang.org/x/crypto/sha3/sha3_s390x.s index a0e051b0..826b862c 100644 --- a/vendor/golang.org/x/crypto/sha3/sha3_s390x.s +++ b/vendor/golang.org/x/crypto/sha3/sha3_s390x.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc && !purego -// +build gc,!purego #include "textflag.h" diff --git a/vendor/golang.org/x/crypto/sha3/shake_generic.go b/vendor/golang.org/x/crypto/sha3/shake_generic.go index 5c0710ef..8d31cf5b 100644 --- a/vendor/golang.org/x/crypto/sha3/shake_generic.go +++ b/vendor/golang.org/x/crypto/sha3/shake_generic.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !gc || purego || !s390x -// +build !gc purego !s390x package sha3 diff --git a/vendor/golang.org/x/crypto/sha3/xor.go b/vendor/golang.org/x/crypto/sha3/xor.go index 59c8eb94..7337cca8 100644 --- a/vendor/golang.org/x/crypto/sha3/xor.go +++ b/vendor/golang.org/x/crypto/sha3/xor.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build (!amd64 && !386 && !ppc64le) || purego -// +build !amd64,!386,!ppc64le purego package sha3 diff --git a/vendor/golang.org/x/crypto/sha3/xor_unaligned.go b/vendor/golang.org/x/crypto/sha3/xor_unaligned.go index 1ce60624..870e2d16 100644 --- a/vendor/golang.org/x/crypto/sha3/xor_unaligned.go +++ b/vendor/golang.org/x/crypto/sha3/xor_unaligned.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (amd64 || 386 || ppc64le) && !purego -// +build amd64 386 ppc64le -// +build !purego package sha3 diff --git a/vendor/golang.org/x/net/html/doc.go b/vendor/golang.org/x/net/html/doc.go index 2466ae3d..3a7e5ab1 100644 --- a/vendor/golang.org/x/net/html/doc.go +++ b/vendor/golang.org/x/net/html/doc.go @@ -104,7 +104,7 @@ tokenization, and tokenization and tree construction stages of the WHATWG HTML parsing specification respectively. While the tokenizer parses and normalizes individual HTML tokens, only the parser constructs the DOM tree from the tokenized HTML, as described in the tree construction stage of the -specification, dynamically modifying or extending the docuemnt's DOM tree. +specification, dynamically modifying or extending the document's DOM tree. If your use case requires semantically well-formed HTML documents, as defined by the WHATWG specification, the parser should be used rather than the tokenizer. diff --git a/vendor/golang.org/x/net/html/token.go b/vendor/golang.org/x/net/html/token.go index de67f938..3c57880d 100644 --- a/vendor/golang.org/x/net/html/token.go +++ b/vendor/golang.org/x/net/html/token.go @@ -910,9 +910,6 @@ func (z *Tokenizer) readTagAttrKey() { return } switch c { - case ' ', '\n', '\r', '\t', '\f', '/': - z.pendingAttr[0].end = z.raw.end - 1 - return case '=': if z.pendingAttr[0].start+1 == z.raw.end { // WHATWG 13.2.5.32, if we see an equals sign before the attribute name @@ -920,7 +917,9 @@ func (z *Tokenizer) readTagAttrKey() { continue } fallthrough - case '>': + case ' ', '\n', '\r', '\t', '\f', '/', '>': + // WHATWG 13.2.5.33 Attribute name state + // We need to reconsume the char in the after attribute name state to support the / character z.raw.end-- z.pendingAttr[0].end = z.raw.end return @@ -939,6 +938,11 @@ func (z *Tokenizer) readTagAttrVal() { if z.err != nil { return } + if c == '/' { + // WHATWG 13.2.5.34 After attribute name state + // U+002F SOLIDUS (/) - Switch to the self-closing start tag state. 
+ return + } if c != '=' { z.raw.end-- return diff --git a/vendor/golang.org/x/net/http/httpguts/httplex.go b/vendor/golang.org/x/net/http/httpguts/httplex.go index 6e071e85..9b4de940 100644 --- a/vendor/golang.org/x/net/http/httpguts/httplex.go +++ b/vendor/golang.org/x/net/http/httpguts/httplex.go @@ -12,7 +12,7 @@ import ( "golang.org/x/net/idna" ) -var isTokenTable = [127]bool{ +var isTokenTable = [256]bool{ '!': true, '#': true, '$': true, @@ -93,12 +93,7 @@ var isTokenTable = [127]bool{ } func IsTokenRune(r rune) bool { - i := int(r) - return i < len(isTokenTable) && isTokenTable[i] -} - -func isNotToken(r rune) bool { - return !IsTokenRune(r) + return r < utf8.RuneSelf && isTokenTable[byte(r)] } // HeaderValuesContainsToken reports whether any string in values @@ -202,8 +197,8 @@ func ValidHeaderFieldName(v string) bool { if len(v) == 0 { return false } - for _, r := range v { - if !IsTokenRune(r) { + for i := 0; i < len(v); i++ { + if !isTokenTable[v[i]] { return false } } diff --git a/vendor/golang.org/x/net/http2/databuffer.go b/vendor/golang.org/x/net/http2/databuffer.go index a3067f8d..e6f55cbd 100644 --- a/vendor/golang.org/x/net/http2/databuffer.go +++ b/vendor/golang.org/x/net/http2/databuffer.go @@ -20,41 +20,44 @@ import ( // TODO: Benchmark to determine if the pools are necessary. The GC may have // improved enough that we can instead allocate chunks like this: // make([]byte, max(16<<10, expectedBytesRemaining)) -var ( - dataChunkSizeClasses = []int{ - 1 << 10, - 2 << 10, - 4 << 10, - 8 << 10, - 16 << 10, - } - dataChunkPools = [...]sync.Pool{ - {New: func() interface{} { return make([]byte, 1<<10) }}, - {New: func() interface{} { return make([]byte, 2<<10) }}, - {New: func() interface{} { return make([]byte, 4<<10) }}, - {New: func() interface{} { return make([]byte, 8<<10) }}, - {New: func() interface{} { return make([]byte, 16<<10) }}, - } -) +var dataChunkPools = [...]sync.Pool{ + {New: func() interface{} { return new([1 << 10]byte) }}, + {New: func() interface{} { return new([2 << 10]byte) }}, + {New: func() interface{} { return new([4 << 10]byte) }}, + {New: func() interface{} { return new([8 << 10]byte) }}, + {New: func() interface{} { return new([16 << 10]byte) }}, +} func getDataBufferChunk(size int64) []byte { - i := 0 - for ; i < len(dataChunkSizeClasses)-1; i++ { - if size <= int64(dataChunkSizeClasses[i]) { - break - } + switch { + case size <= 1<<10: + return dataChunkPools[0].Get().(*[1 << 10]byte)[:] + case size <= 2<<10: + return dataChunkPools[1].Get().(*[2 << 10]byte)[:] + case size <= 4<<10: + return dataChunkPools[2].Get().(*[4 << 10]byte)[:] + case size <= 8<<10: + return dataChunkPools[3].Get().(*[8 << 10]byte)[:] + default: + return dataChunkPools[4].Get().(*[16 << 10]byte)[:] } - return dataChunkPools[i].Get().([]byte) } func putDataBufferChunk(p []byte) { - for i, n := range dataChunkSizeClasses { - if len(p) == n { - dataChunkPools[i].Put(p) - return - } + switch len(p) { + case 1 << 10: + dataChunkPools[0].Put((*[1 << 10]byte)(p)) + case 2 << 10: + dataChunkPools[1].Put((*[2 << 10]byte)(p)) + case 4 << 10: + dataChunkPools[2].Put((*[4 << 10]byte)(p)) + case 8 << 10: + dataChunkPools[3].Put((*[8 << 10]byte)(p)) + case 16 << 10: + dataChunkPools[4].Put((*[16 << 10]byte)(p)) + default: + panic(fmt.Sprintf("unexpected buffer len=%v", len(p))) } - panic(fmt.Sprintf("unexpected buffer len=%v", len(p))) } // dataBuffer is an io.ReadWriter backed by a list of data chunks. 
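
The databuffer change above stops storing []byte values in sync.Pool and pools pointers to fixed-size arrays instead, so Get and Put avoid the allocation that boxing a slice header in an interface would cause, and a simple switch picks the size class. A self-contained sketch of the same pattern for a single 4 KiB class; the size is illustrative:

package main

import (
	"fmt"
	"sync"
)

// Pooling pointers to fixed-size arrays (rather than slices) keeps the
// interface conversions in Get/Put allocation-free.
var chunkPool = sync.Pool{
	New: func() interface{} { return new([4096]byte) },
}

func getChunk() []byte { return chunkPool.Get().(*[4096]byte)[:] }

func putChunk(p []byte) {
	// Slice-to-array-pointer conversion needs Go 1.17+ and len(p) >= 4096.
	chunkPool.Put((*[4096]byte)(p))
}

func main() {
	buf := getChunk()
	n := copy(buf, "hello")
	fmt.Println(n, len(buf), cap(buf)) // 5 4096 4096
	putChunk(buf)
}
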
diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index c1f6b90d..105c3b27 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -490,6 +490,9 @@ func terminalReadFrameError(err error) bool { // returned error is ErrFrameTooLarge. Other errors may be of type // ConnectionError, StreamError, or anything else from the underlying // reader. +// +// If ReadFrame returns an error and a non-nil Frame, the Frame's StreamID +// indicates the stream responsible for the error. func (fr *Framer) ReadFrame() (Frame, error) { fr.errDetail = nil if fr.lastFrame != nil { @@ -1510,19 +1513,18 @@ func (mh *MetaHeadersFrame) checkPseudos() error { } func (fr *Framer) maxHeaderStringLen() int { - v := fr.maxHeaderListSize() - if uint32(int(v)) == v { - return int(v) + v := int(fr.maxHeaderListSize()) + if v < 0 { + // If maxHeaderListSize overflows an int, use no limit (0). + return 0 } - // They had a crazy big number for MaxHeaderBytes anyway, - // so give them unlimited header lengths: - return 0 + return v } // readMetaFrame returns 0 or more CONTINUATION frames from fr and // merge them into the provided hf and returns a MetaHeadersFrame // with the decoded hpack values. -func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { +func (fr *Framer) readMetaFrame(hf *HeadersFrame) (Frame, error) { if fr.AllowIllegalReads { return nil, errors.New("illegal use of AllowIllegalReads with ReadMetaHeaders") } @@ -1565,6 +1567,7 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { if size > remainSize { hdec.SetEmitEnabled(false) mh.Truncated = true + remainSize = 0 return } remainSize -= size @@ -1577,8 +1580,38 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { var hc headersOrContinuation = hf for { frag := hc.HeaderBlockFragment() + + // Avoid parsing large amounts of headers that we will then discard. + // If the sender exceeds the max header list size by too much, + // skip parsing the fragment and close the connection. + // + // "Too much" is either any CONTINUATION frame after we've already + // exceeded the max header list size (in which case remainSize is 0), + // or a frame whose encoded size is more than twice the remaining + // header list bytes we're willing to accept. + if int64(len(frag)) > int64(2*remainSize) { + if VerboseLogs { + log.Printf("http2: header list too large") + } + // It would be nice to send a RST_STREAM before sending the GOAWAY, + // but the structure of the server's frame writer makes this difficult. + return mh, ConnectionError(ErrCodeProtocol) + } + + // Also close the connection after any CONTINUATION frame following an + // invalid header, since we stop tracking the size of the headers after + // an invalid one. + if invalid != nil { + if VerboseLogs { + log.Printf("http2: invalid header: %v", invalid) + } + // It would be nice to send a RST_STREAM before sending the GOAWAY, + // but the structure of the server's frame writer makes this difficult. 
+ return mh, ConnectionError(ErrCodeProtocol) + } + if _, err := hdec.Write(frag); err != nil { - return nil, ConnectionError(ErrCodeCompression) + return mh, ConnectionError(ErrCodeCompression) } if hc.HeadersEnded() { @@ -1595,7 +1628,7 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { mh.HeadersFrame.invalidate() if err := hdec.Close(); err != nil { - return nil, ConnectionError(ErrCodeCompression) + return mh, ConnectionError(ErrCodeCompression) } if invalid != nil { fr.errDetail = invalid diff --git a/vendor/golang.org/x/net/http2/go111.go b/vendor/golang.org/x/net/http2/go111.go deleted file mode 100644 index 5bf62b03..00000000 --- a/vendor/golang.org/x/net/http2/go111.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.11 -// +build go1.11 - -package http2 - -import ( - "net/http/httptrace" - "net/textproto" -) - -func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { - return trace != nil && trace.WroteHeaderField != nil -} - -func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) { - if trace != nil && trace.WroteHeaderField != nil { - trace.WroteHeaderField(k, []string{v}) - } -} - -func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { - if trace != nil { - return trace.Got1xxResponse - } - return nil -} diff --git a/vendor/golang.org/x/net/http2/go115.go b/vendor/golang.org/x/net/http2/go115.go deleted file mode 100644 index 908af1ab..00000000 --- a/vendor/golang.org/x/net/http2/go115.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.15 -// +build go1.15 - -package http2 - -import ( - "context" - "crypto/tls" -) - -// dialTLSWithContext uses tls.Dialer, added in Go 1.15, to open a TLS -// connection. -func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) { - dialer := &tls.Dialer{ - Config: cfg, - } - cn, err := dialer.DialContext(ctx, network, addr) - if err != nil { - return nil, err - } - tlsCn := cn.(*tls.Conn) // DialContext comment promises this will always succeed - return tlsCn, nil -} diff --git a/vendor/golang.org/x/net/http2/go118.go b/vendor/golang.org/x/net/http2/go118.go deleted file mode 100644 index aca4b2b3..00000000 --- a/vendor/golang.org/x/net/http2/go118.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.18 -// +build go1.18 - -package http2 - -import ( - "crypto/tls" - "net" -) - -func tlsUnderlyingConn(tc *tls.Conn) net.Conn { - return tc.NetConn() -} diff --git a/vendor/golang.org/x/net/http2/not_go111.go b/vendor/golang.org/x/net/http2/not_go111.go deleted file mode 100644 index cc0baa81..00000000 --- a/vendor/golang.org/x/net/http2/not_go111.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !go1.11 -// +build !go1.11 - -package http2 - -import ( - "net/http/httptrace" - "net/textproto" -) - -func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { return false } - -func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {} - -func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { - return nil -} diff --git a/vendor/golang.org/x/net/http2/not_go115.go b/vendor/golang.org/x/net/http2/not_go115.go deleted file mode 100644 index e6c04cf7..00000000 --- a/vendor/golang.org/x/net/http2/not_go115.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.15 -// +build !go1.15 - -package http2 - -import ( - "context" - "crypto/tls" -) - -// dialTLSWithContext opens a TLS connection. -func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) { - cn, err := tls.Dial(network, addr, cfg) - if err != nil { - return nil, err - } - if err := cn.Handshake(); err != nil { - return nil, err - } - if cfg.InsecureSkipVerify { - return cn, nil - } - if err := cn.VerifyHostname(cfg.ServerName); err != nil { - return nil, err - } - return cn, nil -} diff --git a/vendor/golang.org/x/net/http2/not_go118.go b/vendor/golang.org/x/net/http2/not_go118.go deleted file mode 100644 index eab532c9..00000000 --- a/vendor/golang.org/x/net/http2/not_go118.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.18 -// +build !go1.18 - -package http2 - -import ( - "crypto/tls" - "net" -) - -func tlsUnderlyingConn(tc *tls.Conn) net.Conn { - return nil -} diff --git a/vendor/golang.org/x/net/http2/pipe.go b/vendor/golang.org/x/net/http2/pipe.go index 684d984f..3b9f06b9 100644 --- a/vendor/golang.org/x/net/http2/pipe.go +++ b/vendor/golang.org/x/net/http2/pipe.go @@ -77,7 +77,10 @@ func (p *pipe) Read(d []byte) (n int, err error) { } } -var errClosedPipeWrite = errors.New("write on closed buffer") +var ( + errClosedPipeWrite = errors.New("write on closed buffer") + errUninitializedPipeWrite = errors.New("write on uninitialized buffer") +) // Write copies bytes from p into the buffer and wakes a reader. // It is an error to write more data than the buffer can hold. @@ -91,6 +94,12 @@ func (p *pipe) Write(d []byte) (n int, err error) { if p.err != nil || p.breakErr != nil { return 0, errClosedPipeWrite } + // pipe.setBuffer is never invoked, leaving the buffer uninitialized. + // We shouldn't try to write to an uninitialized pipe, + // but returning an error is better than panicking. + if p.b == nil { + return 0, errUninitializedPipeWrite + } return p.b.Write(d) } diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index de60fa88..c5d08108 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -124,6 +124,7 @@ type Server struct { // IdleTimeout specifies how long until idle clients should be // closed with a GOAWAY frame. PING frames are not considered // activity for the purposes of IdleTimeout. + // If zero or negative, there is no timeout. 
IdleTimeout time.Duration // MaxUploadBufferPerConnection is the size of the initial flow @@ -434,7 +435,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { // passes the connection off to us with the deadline already set. // Write deadlines are set per stream in serverConn.newStream. // Disarm the net.Conn write deadline here. - if sc.hs.WriteTimeout != 0 { + if sc.hs.WriteTimeout > 0 { sc.conn.SetWriteDeadline(time.Time{}) } @@ -581,9 +582,11 @@ type serverConn struct { advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client curClientStreams uint32 // number of open streams initiated by the client curPushedStreams uint32 // number of open streams initiated by server push + curHandlers uint32 // number of running handler goroutines maxClientStreamID uint32 // max ever seen from client (odd), or 0 if there have been no client requests maxPushPromiseID uint32 // ID of the last push promise (even), or 0 if there have been no pushes streams map[uint32]*stream + unstartedHandlers []unstartedHandler initialStreamSendWindowSize int32 maxFrameSize int32 peerMaxHeaderListSize uint32 // zero means unknown (default) @@ -729,11 +732,7 @@ func isClosedConnError(err error) bool { return false } - // TODO: remove this string search and be more like the Windows - // case below. That might involve modifying the standard library - // to return better error types. - str := err.Error() - if strings.Contains(str, "use of closed network connection") { + if errors.Is(err, net.ErrClosed) { return true } @@ -922,7 +921,7 @@ func (sc *serverConn) serve() { sc.setConnState(http.StateActive) sc.setConnState(http.StateIdle) - if sc.srv.IdleTimeout != 0 { + if sc.srv.IdleTimeout > 0 { sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer) defer sc.idleTimer.Stop() } @@ -981,6 +980,8 @@ func (sc *serverConn) serve() { return case gracefulShutdownMsg: sc.startGracefulShutdownInternal() + case handlerDoneMsg: + sc.handlerDone() default: panic("unknown timer") } @@ -1020,6 +1021,7 @@ var ( idleTimerMsg = new(serverMessage) shutdownTimerMsg = new(serverMessage) gracefulShutdownMsg = new(serverMessage) + handlerDoneMsg = new(serverMessage) ) func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) } @@ -1476,6 +1478,11 @@ func (sc *serverConn) processFrameFromReader(res readFrameResult) bool { sc.goAway(ErrCodeFlowControl) return true case ConnectionError: + if res.f != nil { + if id := res.f.Header().StreamID; id > sc.maxClientStreamID { + sc.maxClientStreamID = id + } + } sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev) sc.goAway(ErrCode(ev)) return true // goAway will handle shutdown @@ -1632,7 +1639,7 @@ func (sc *serverConn) closeStream(st *stream, err error) { delete(sc.streams, st.id) if len(sc.streams) == 0 { sc.setConnState(http.StateIdle) - if sc.srv.IdleTimeout != 0 { + if sc.srv.IdleTimeout > 0 { sc.idleTimer.Reset(sc.srv.IdleTimeout) } if h1ServerKeepAlivesDisabled(sc.hs) { @@ -2012,13 +2019,12 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { // similar to how the http1 server works. Here it's // technically more like the http1 Server's ReadHeaderTimeout // (in Go 1.8), though. That's a more sane option anyway. 
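
The timeout checks in this and the surrounding hunks move from != 0 to > 0 (see the lines just below), so a zero or negative ReadTimeout, WriteTimeout, or IdleTimeout now consistently means no timeout. A minimal configuration sketch using the exported API; the address, certificate paths, and durations are placeholders:

package main

import (
	"log"
	"net/http"
	"time"

	"golang.org/x/net/http2"
)

func main() {
	srv := &http.Server{
		Addr:         ":8443",
		ReadTimeout:  10 * time.Second, // > 0, so per-stream read deadlines are armed
		WriteTimeout: 0,                // zero (or negative) means no write deadline
	}
	h2 := &http2.Server{
		IdleTimeout: 5 * time.Minute, // idle connections are closed with a GOAWAY
	}
	if err := http2.ConfigureServer(srv, h2); err != nil {
		log.Fatal(err)
	}
	// cert.pem and key.pem are placeholder paths.
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}
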
- if sc.hs.ReadTimeout != 0 { + if sc.hs.ReadTimeout > 0 { sc.conn.SetReadDeadline(time.Time{}) st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout) } - go sc.runHandler(rw, req, handler) - return nil + return sc.scheduleHandler(id, rw, req, handler) } func (sc *serverConn) upgradeRequest(req *http.Request) { @@ -2034,10 +2040,14 @@ func (sc *serverConn) upgradeRequest(req *http.Request) { // Disable any read deadline set by the net/http package // prior to the upgrade. - if sc.hs.ReadTimeout != 0 { + if sc.hs.ReadTimeout > 0 { sc.conn.SetReadDeadline(time.Time{}) } + // This is the first request on the connection, + // so start the handler directly rather than going + // through scheduleHandler. + sc.curHandlers++ go sc.runHandler(rw, req, sc.handler.ServeHTTP) } @@ -2108,7 +2118,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream st.flow.conn = &sc.flow // link to conn-level counter st.flow.add(sc.initialStreamSendWindowSize) st.inflow.init(sc.srv.initialStreamRecvWindowSize()) - if sc.hs.WriteTimeout != 0 { + if sc.hs.WriteTimeout > 0 { st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) } @@ -2278,8 +2288,62 @@ func (sc *serverConn) newResponseWriter(st *stream, req *http.Request) *response return &responseWriter{rws: rws} } +type unstartedHandler struct { + streamID uint32 + rw *responseWriter + req *http.Request + handler func(http.ResponseWriter, *http.Request) +} + +// scheduleHandler starts a handler goroutine, +// or schedules one to start as soon as an existing handler finishes. +func (sc *serverConn) scheduleHandler(streamID uint32, rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) error { + sc.serveG.check() + maxHandlers := sc.advMaxStreams + if sc.curHandlers < maxHandlers { + sc.curHandlers++ + go sc.runHandler(rw, req, handler) + return nil + } + if len(sc.unstartedHandlers) > int(4*sc.advMaxStreams) { + return sc.countError("too_many_early_resets", ConnectionError(ErrCodeEnhanceYourCalm)) + } + sc.unstartedHandlers = append(sc.unstartedHandlers, unstartedHandler{ + streamID: streamID, + rw: rw, + req: req, + handler: handler, + }) + return nil +} + +func (sc *serverConn) handlerDone() { + sc.serveG.check() + sc.curHandlers-- + i := 0 + maxHandlers := sc.advMaxStreams + for ; i < len(sc.unstartedHandlers); i++ { + u := sc.unstartedHandlers[i] + if sc.streams[u.streamID] == nil { + // This stream was reset before its goroutine had a chance to start. + continue + } + if sc.curHandlers >= maxHandlers { + break + } + sc.curHandlers++ + go sc.runHandler(u.rw, u.req, u.handler) + sc.unstartedHandlers[i] = unstartedHandler{} // don't retain references + } + sc.unstartedHandlers = sc.unstartedHandlers[i:] + if len(sc.unstartedHandlers) == 0 { + sc.unstartedHandlers = nil + } +} + // Run on its own goroutine. func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) { + defer sc.sendServeMsg(handlerDoneMsg) didPanic := true defer func() { rw.rws.stream.cancelCtx() @@ -2487,7 +2551,6 @@ type responseWriterState struct { wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet. sentHeader bool // have we sent the header frame? 
handlerDone bool // handler has finished - dirty bool // a Write failed; don't reuse this responseWriterState sentContentLen int64 // non-zero if handler set a Content-Length header wroteBytes int64 @@ -2607,7 +2670,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { date: date, }) if err != nil { - rws.dirty = true return 0, err } if endStream { @@ -2628,7 +2690,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { if len(p) > 0 || endStream { // only send a 0 byte DATA frame if we're ending the stream. if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil { - rws.dirty = true return 0, err } } @@ -2640,9 +2701,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { trailers: rws.trailers, endStream: true, }) - if err != nil { - rws.dirty = true - } return len(p), err } return len(p), nil @@ -2858,14 +2916,12 @@ func (rws *responseWriterState) writeHeader(code int) { h.Del("Transfer-Encoding") } - if rws.conn.writeHeaders(rws.stream, &writeResHeaders{ + rws.conn.writeHeaders(rws.stream, &writeResHeaders{ streamID: rws.stream.id, httpResCode: code, h: h, endStream: rws.handlerDone && !rws.hasTrailers(), - }) != nil { - rws.dirty = true - } + }) return } @@ -2930,19 +2986,10 @@ func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, func (w *responseWriter) handlerDone() { rws := w.rws - dirty := rws.dirty rws.handlerDone = true w.Flush() w.rws = nil - if !dirty { - // Only recycle the pool if all prior Write calls to - // the serverConn goroutine completed successfully. If - // they returned earlier due to resets from the peer - // there might still be write goroutines outstanding - // from the serverConn referencing the rws memory. See - // issue 20704. - responseWriterStatePool.Put(rws) - } + responseWriterStatePool.Put(rws) } // Push errors. @@ -3125,6 +3172,7 @@ func (sc *serverConn) startPush(msg *startPushRequest) { panic(fmt.Sprintf("newWriterAndRequestNoBody(%+v): %v", msg.url, err)) } + sc.curHandlers++ go sc.runHandler(rw, req, sc.handler.ServeHTTP) return promisedID, nil } diff --git a/vendor/golang.org/x/net/http2/testsync.go b/vendor/golang.org/x/net/http2/testsync.go new file mode 100644 index 00000000..61075bd1 --- /dev/null +++ b/vendor/golang.org/x/net/http2/testsync.go @@ -0,0 +1,331 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +package http2 + +import ( + "context" + "sync" + "time" +) + +// testSyncHooks coordinates goroutines in tests. +// +// For example, a call to ClientConn.RoundTrip involves several goroutines, including: +// - the goroutine running RoundTrip; +// - the clientStream.doRequest goroutine, which writes the request; and +// - the clientStream.readLoop goroutine, which reads the response. +// +// Using testSyncHooks, a test can start a RoundTrip and identify when all these goroutines +// are blocked waiting for some condition such as reading the Request.Body or waiting for +// flow control to become available. +// +// The testSyncHooks also manage timers and synthetic time in tests. +// This permits us to, for example, start a request and cause it to time out waiting for +// response headers without resorting to time.Sleep calls. +type testSyncHooks struct { + // active/inactive act as a mutex and condition variable. + // + // - neither chan contains a value: testSyncHooks is locked. 
+ // - active contains a value: unlocked, and at least one goroutine is not blocked + // - inactive contains a value: unlocked, and all goroutines are blocked + active chan struct{} + inactive chan struct{} + + // goroutine counts + total int // total goroutines + condwait map[*sync.Cond]int // blocked in sync.Cond.Wait + blocked []*testBlockedGoroutine // otherwise blocked + + // fake time + now time.Time + timers []*fakeTimer + + // Transport testing: Report various events. + newclientconn func(*ClientConn) + newstream func(*clientStream) +} + +// testBlockedGoroutine is a blocked goroutine. +type testBlockedGoroutine struct { + f func() bool // blocked until f returns true + ch chan struct{} // closed when unblocked +} + +func newTestSyncHooks() *testSyncHooks { + h := &testSyncHooks{ + active: make(chan struct{}, 1), + inactive: make(chan struct{}, 1), + condwait: map[*sync.Cond]int{}, + } + h.inactive <- struct{}{} + h.now = time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC) + return h +} + +// lock acquires the testSyncHooks mutex. +func (h *testSyncHooks) lock() { + select { + case <-h.active: + case <-h.inactive: + } +} + +// waitInactive waits for all goroutines to become inactive. +func (h *testSyncHooks) waitInactive() { + for { + <-h.inactive + if !h.unlock() { + break + } + } +} + +// unlock releases the testSyncHooks mutex. +// It reports whether any goroutines are active. +func (h *testSyncHooks) unlock() (active bool) { + // Look for a blocked goroutine which can be unblocked. + blocked := h.blocked[:0] + unblocked := false + for _, b := range h.blocked { + if !unblocked && b.f() { + unblocked = true + close(b.ch) + } else { + blocked = append(blocked, b) + } + } + h.blocked = blocked + + // Count goroutines blocked on condition variables. + condwait := 0 + for _, count := range h.condwait { + condwait += count + } + + if h.total > condwait+len(blocked) { + h.active <- struct{}{} + return true + } else { + h.inactive <- struct{}{} + return false + } +} + +// goRun starts a new goroutine. +func (h *testSyncHooks) goRun(f func()) { + h.lock() + h.total++ + h.unlock() + go func() { + defer func() { + h.lock() + h.total-- + h.unlock() + }() + f() + }() +} + +// blockUntil indicates that a goroutine is blocked waiting for some condition to become true. +// It waits until f returns true before proceeding. +// +// Example usage: +// +// h.blockUntil(func() bool { +// // Is the context done yet? +// select { +// case <-ctx.Done(): +// default: +// return false +// } +// return true +// }) +// // Wait for the context to become done. +// <-ctx.Done() +// +// The function f passed to blockUntil must be non-blocking and idempotent. +func (h *testSyncHooks) blockUntil(f func() bool) { + if f() { + return + } + ch := make(chan struct{}) + h.lock() + h.blocked = append(h.blocked, &testBlockedGoroutine{ + f: f, + ch: ch, + }) + h.unlock() + <-ch +} + +// broadcast is sync.Cond.Broadcast. +func (h *testSyncHooks) condBroadcast(cond *sync.Cond) { + h.lock() + delete(h.condwait, cond) + h.unlock() + cond.Broadcast() +} + +// broadcast is sync.Cond.Wait. +func (h *testSyncHooks) condWait(cond *sync.Cond) { + h.lock() + h.condwait[cond]++ + h.unlock() +} + +// newTimer creates a new fake timer. +func (h *testSyncHooks) newTimer(d time.Duration) timer { + h.lock() + defer h.unlock() + t := &fakeTimer{ + hooks: h, + when: h.now.Add(d), + c: make(chan time.Time), + } + h.timers = append(h.timers, t) + return t +} + +// afterFunc creates a new fake AfterFunc timer. 
+func (h *testSyncHooks) afterFunc(d time.Duration, f func()) timer { + h.lock() + defer h.unlock() + t := &fakeTimer{ + hooks: h, + when: h.now.Add(d), + f: f, + } + h.timers = append(h.timers, t) + return t +} + +func (h *testSyncHooks) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) { + ctx, cancel := context.WithCancel(ctx) + t := h.afterFunc(d, cancel) + return ctx, func() { + t.Stop() + cancel() + } +} + +func (h *testSyncHooks) timeUntilEvent() time.Duration { + h.lock() + defer h.unlock() + var next time.Time + for _, t := range h.timers { + if next.IsZero() || t.when.Before(next) { + next = t.when + } + } + if d := next.Sub(h.now); d > 0 { + return d + } + return 0 +} + +// advance advances time and causes synthetic timers to fire. +func (h *testSyncHooks) advance(d time.Duration) { + h.lock() + defer h.unlock() + h.now = h.now.Add(d) + timers := h.timers[:0] + for _, t := range h.timers { + t := t // remove after go.mod depends on go1.22 + t.mu.Lock() + switch { + case t.when.After(h.now): + timers = append(timers, t) + case t.when.IsZero(): + // stopped timer + default: + t.when = time.Time{} + if t.c != nil { + close(t.c) + } + if t.f != nil { + h.total++ + go func() { + defer func() { + h.lock() + h.total-- + h.unlock() + }() + t.f() + }() + } + } + t.mu.Unlock() + } + h.timers = timers +} + +// A timer wraps a time.Timer, or a synthetic equivalent in tests. +// Unlike time.Timer, timer is single-use: The timer channel is closed when the timer expires. +type timer interface { + C() <-chan time.Time + Stop() bool + Reset(d time.Duration) bool +} + +// timeTimer implements timer using real time. +type timeTimer struct { + t *time.Timer + c chan time.Time +} + +// newTimeTimer creates a new timer using real time. +func newTimeTimer(d time.Duration) timer { + ch := make(chan time.Time) + t := time.AfterFunc(d, func() { + close(ch) + }) + return &timeTimer{t, ch} +} + +// newTimeAfterFunc creates an AfterFunc timer using real time. +func newTimeAfterFunc(d time.Duration, f func()) timer { + return &timeTimer{ + t: time.AfterFunc(d, f), + } +} + +func (t timeTimer) C() <-chan time.Time { return t.c } +func (t timeTimer) Stop() bool { return t.t.Stop() } +func (t timeTimer) Reset(d time.Duration) bool { return t.t.Reset(d) } + +// fakeTimer implements timer using fake time. +type fakeTimer struct { + hooks *testSyncHooks + + mu sync.Mutex + when time.Time // when the timer will fire + c chan time.Time // closed when the timer fires; mutually exclusive with f + f func() // called when the timer fires; mutually exclusive with c +} + +func (t *fakeTimer) C() <-chan time.Time { return t.c } + +func (t *fakeTimer) Stop() bool { + t.mu.Lock() + defer t.mu.Unlock() + stopped := t.when.IsZero() + t.when = time.Time{} + return stopped +} + +func (t *fakeTimer) Reset(d time.Duration) bool { + if t.c != nil || t.f == nil { + panic("fakeTimer only supports Reset on AfterFunc timers") + } + t.mu.Lock() + defer t.mu.Unlock() + t.hooks.lock() + defer t.hooks.unlock() + active := !t.when.IsZero() + t.when = t.hooks.now.Add(d) + if !active { + t.hooks.timers = append(t.hooks.timers, t) + } + return active +} diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index 4515b22c..2fa49490 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -147,6 +147,12 @@ type Transport struct { // waiting for their turn. 
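
Outside of tests, newTimeTimer above builds its single-use timer from time.AfterFunc and closes the channel when the deadline passes, so any number of waiters observe expiry. A standalone sketch of that pattern:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Single-use timer in the style of newTimeTimer: the channel is closed
	// when the timer fires, so every receiver unblocks.
	ch := make(chan time.Time)
	time.AfterFunc(10*time.Millisecond, func() { close(ch) })

	<-ch
	<-ch // a closed channel keeps yielding zero values, unlike time.Timer.C
	fmt.Println("timer fired")
}
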
StrictMaxConcurrentStreams bool + // IdleConnTimeout is the maximum amount of time an idle + // (keep-alive) connection will remain idle before closing + // itself. + // Zero means no limit. + IdleConnTimeout time.Duration + // ReadIdleTimeout is the timeout after which a health check using ping // frame will be carried out if no frame is received on the connection. // Note that a ping response will is considered a received frame, so if @@ -178,6 +184,8 @@ type Transport struct { connPoolOnce sync.Once connPoolOrDef ClientConnPool // non-nil version of ConnPool + + syncHooks *testSyncHooks } func (t *Transport) maxHeaderListSize() uint32 { @@ -302,7 +310,7 @@ type ClientConn struct { readerErr error // set before readerDone is closed idleTimeout time.Duration // or 0 for never - idleTimer *time.Timer + idleTimer timer mu sync.Mutex // guards following cond *sync.Cond // hold mu; broadcast on flow/closed changes @@ -344,6 +352,60 @@ type ClientConn struct { werr error // first write error that has occurred hbuf bytes.Buffer // HPACK encoder writes into this henc *hpack.Encoder + + syncHooks *testSyncHooks // can be nil +} + +// Hook points used for testing. +// Outside of tests, cc.syncHooks is nil and these all have minimal implementations. +// Inside tests, see the testSyncHooks function docs. + +// goRun starts a new goroutine. +func (cc *ClientConn) goRun(f func()) { + if cc.syncHooks != nil { + cc.syncHooks.goRun(f) + return + } + go f() +} + +// condBroadcast is cc.cond.Broadcast. +func (cc *ClientConn) condBroadcast() { + if cc.syncHooks != nil { + cc.syncHooks.condBroadcast(cc.cond) + } + cc.cond.Broadcast() +} + +// condWait is cc.cond.Wait. +func (cc *ClientConn) condWait() { + if cc.syncHooks != nil { + cc.syncHooks.condWait(cc.cond) + } + cc.cond.Wait() +} + +// newTimer creates a new time.Timer, or a synthetic timer in tests. +func (cc *ClientConn) newTimer(d time.Duration) timer { + if cc.syncHooks != nil { + return cc.syncHooks.newTimer(d) + } + return newTimeTimer(d) +} + +// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests. +func (cc *ClientConn) afterFunc(d time.Duration, f func()) timer { + if cc.syncHooks != nil { + return cc.syncHooks.afterFunc(d, f) + } + return newTimeAfterFunc(d, f) +} + +func (cc *ClientConn) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) { + if cc.syncHooks != nil { + return cc.syncHooks.contextWithTimeout(ctx, d) + } + return context.WithTimeout(ctx, d) } // clientStream is the state for a single HTTP/2 stream. One of these @@ -425,7 +487,7 @@ func (cs *clientStream) abortStreamLocked(err error) { // TODO(dneil): Clean up tests where cs.cc.cond is nil. if cs.cc.cond != nil { // Wake up writeRequestBody if it is waiting on flow control. 
- cs.cc.cond.Broadcast() + cs.cc.condBroadcast() } } @@ -435,7 +497,7 @@ func (cs *clientStream) abortRequestBodyWrite() { defer cc.mu.Unlock() if cs.reqBody != nil && cs.reqBodyClosed == nil { cs.closeReqBodyLocked() - cc.cond.Broadcast() + cc.condBroadcast() } } @@ -445,10 +507,10 @@ func (cs *clientStream) closeReqBodyLocked() { } cs.reqBodyClosed = make(chan struct{}) reqBodyClosed := cs.reqBodyClosed - go func() { + cs.cc.goRun(func() { cs.reqBody.Close() close(reqBodyClosed) - }() + }) } type stickyErrWriter struct { @@ -537,15 +599,6 @@ func authorityAddr(scheme string, authority string) (addr string) { return net.JoinHostPort(host, port) } -var retryBackoffHook func(time.Duration) *time.Timer - -func backoffNewTimer(d time.Duration) *time.Timer { - if retryBackoffHook != nil { - return retryBackoffHook(d) - } - return time.NewTimer(d) -} - // RoundTripOpt is like RoundTrip, but takes options. func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) { @@ -573,13 +626,27 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res backoff := float64(uint(1) << (uint(retry) - 1)) backoff += backoff * (0.1 * mathrand.Float64()) d := time.Second * time.Duration(backoff) - timer := backoffNewTimer(d) + var tm timer + if t.syncHooks != nil { + tm = t.syncHooks.newTimer(d) + t.syncHooks.blockUntil(func() bool { + select { + case <-tm.C(): + case <-req.Context().Done(): + default: + return false + } + return true + }) + } else { + tm = newTimeTimer(d) + } select { - case <-timer.C: + case <-tm.C(): t.vlogf("RoundTrip retrying after failure: %v", roundTripErr) continue case <-req.Context().Done(): - timer.Stop() + tm.Stop() err = req.Context().Err() } } @@ -658,6 +725,9 @@ func canRetryError(err error) bool { } func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse bool) (*ClientConn, error) { + if t.syncHooks != nil { + return t.newClientConn(nil, singleUse, t.syncHooks) + } host, _, err := net.SplitHostPort(addr) if err != nil { return nil, err @@ -666,7 +736,7 @@ func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse b if err != nil { return nil, err } - return t.newClientConn(tconn, singleUse) + return t.newClientConn(tconn, singleUse, nil) } func (t *Transport) newTLSConfig(host string) *tls.Config { @@ -732,10 +802,10 @@ func (t *Transport) maxEncoderHeaderTableSize() uint32 { } func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { - return t.newClientConn(c, t.disableKeepAlives()) + return t.newClientConn(c, t.disableKeepAlives(), nil) } -func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { +func (t *Transport) newClientConn(c net.Conn, singleUse bool, hooks *testSyncHooks) (*ClientConn, error) { cc := &ClientConn{ t: t, tconn: c, @@ -750,10 +820,15 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro wantSettingsAck: true, pings: make(map[[8]byte]chan struct{}), reqHeaderMu: make(chan struct{}, 1), + syncHooks: hooks, + } + if hooks != nil { + hooks.newclientconn(cc) + c = cc.tconn } if d := t.idleConnTimeout(); d != 0 { cc.idleTimeout = d - cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout) + cc.idleTimer = cc.afterFunc(d, cc.onIdleTimeout) } if VerboseLogs { t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) @@ -818,7 +893,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) 
(*ClientConn, erro return nil, cc.werr } - go cc.readLoop() + cc.goRun(cc.readLoop) return cc, nil } @@ -826,7 +901,7 @@ func (cc *ClientConn) healthCheck() { pingTimeout := cc.t.pingTimeout() // We don't need to periodically ping in the health check, because the readLoop of ClientConn will // trigger the healthCheck again if there is no frame received. - ctx, cancel := context.WithTimeout(context.Background(), pingTimeout) + ctx, cancel := cc.contextWithTimeout(context.Background(), pingTimeout) defer cancel() cc.vlogf("http2: Transport sending health check") err := cc.Ping(ctx) @@ -861,7 +936,20 @@ func (cc *ClientConn) setGoAway(f *GoAwayFrame) { } last := f.LastStreamID for streamID, cs := range cc.streams { - if streamID > last { + if streamID <= last { + // The server's GOAWAY indicates that it received this stream. + // It will either finish processing it, or close the connection + // without doing so. Either way, leave the stream alone for now. + continue + } + if streamID == 1 && cc.goAway.ErrCode != ErrCodeNo { + // Don't retry the first stream on a connection if we get a non-NO error. + // If the server is sending an error on a new connection, + // retrying the request on a new one probably isn't going to work. + cs.abortStreamLocked(fmt.Errorf("http2: Transport received GOAWAY from server ErrCode:%v", cc.goAway.ErrCode)) + } else { + // Aborting the stream with errClentConnGotGoAway indicates that + // the request should be retried on a new connection. cs.abortStreamLocked(errClientConnGotGoAway) } } @@ -1018,7 +1106,7 @@ func (cc *ClientConn) forceCloseConn() { if !ok { return } - if nc := tlsUnderlyingConn(tc); nc != nil { + if nc := tc.NetConn(); nc != nil { nc.Close() } } @@ -1056,7 +1144,7 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { // Wait for all in-flight streams to complete or connection to close done := make(chan struct{}) cancelled := false // guarded by cc.mu - go func() { + cc.goRun(func() { cc.mu.Lock() defer cc.mu.Unlock() for { @@ -1068,9 +1156,9 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { if cancelled { break } - cc.cond.Wait() + cc.condWait() } - }() + }) shutdownEnterWaitStateHook() select { case <-done: @@ -1080,7 +1168,7 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { cc.mu.Lock() // Free the goroutine above cancelled = true - cc.cond.Broadcast() + cc.condBroadcast() cc.mu.Unlock() return ctx.Err() } @@ -1118,7 +1206,7 @@ func (cc *ClientConn) closeForError(err error) { for _, cs := range cc.streams { cs.abortStreamLocked(err) } - cc.cond.Broadcast() + cc.condBroadcast() cc.mu.Unlock() cc.closeConn() } @@ -1215,6 +1303,10 @@ func (cc *ClientConn) decrStreamReservationsLocked() { } func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { + return cc.roundTrip(req, nil) +} + +func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) (*http.Response, error) { ctx := req.Context() cs := &clientStream{ cc: cc, @@ -1229,9 +1321,23 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { respHeaderRecv: make(chan struct{}), donec: make(chan struct{}), } - go cs.doRequest(req) + cc.goRun(func() { + cs.doRequest(req) + }) waitDone := func() error { + if cc.syncHooks != nil { + cc.syncHooks.blockUntil(func() bool { + select { + case <-cs.donec: + case <-ctx.Done(): + case <-cs.reqCancel: + default: + return false + } + return true + }) + } select { case <-cs.donec: return nil @@ -1292,7 +1398,24 @@ func (cc *ClientConn) RoundTrip(req *http.Request) 
(*http.Response, error) { return err } + if streamf != nil { + streamf(cs) + } + for { + if cc.syncHooks != nil { + cc.syncHooks.blockUntil(func() bool { + select { + case <-cs.respHeaderRecv: + case <-cs.abort: + case <-ctx.Done(): + case <-cs.reqCancel: + default: + return false + } + return true + }) + } select { case <-cs.respHeaderRecv: return handleResponseHeaders() @@ -1348,6 +1471,21 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { if cc.reqHeaderMu == nil { panic("RoundTrip on uninitialized ClientConn") // for tests } + var newStreamHook func(*clientStream) + if cc.syncHooks != nil { + newStreamHook = cc.syncHooks.newstream + cc.syncHooks.blockUntil(func() bool { + select { + case cc.reqHeaderMu <- struct{}{}: + <-cc.reqHeaderMu + case <-cs.reqCancel: + case <-ctx.Done(): + default: + return false + } + return true + }) + } select { case cc.reqHeaderMu <- struct{}{}: case <-cs.reqCancel: @@ -1372,6 +1510,10 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { } cc.mu.Unlock() + if newStreamHook != nil { + newStreamHook(cs) + } + // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? if !cc.t.disableCompression() && req.Header.Get("Accept-Encoding") == "" && @@ -1452,15 +1594,30 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { var respHeaderTimer <-chan time.Time var respHeaderRecv chan struct{} if d := cc.responseHeaderTimeout(); d != 0 { - timer := time.NewTimer(d) + timer := cc.newTimer(d) defer timer.Stop() - respHeaderTimer = timer.C + respHeaderTimer = timer.C() respHeaderRecv = cs.respHeaderRecv } // Wait until the peer half-closes its end of the stream, // or until the request is aborted (via context, error, or otherwise), // whichever comes first. for { + if cc.syncHooks != nil { + cc.syncHooks.blockUntil(func() bool { + select { + case <-cs.peerClosed: + case <-respHeaderTimer: + case <-respHeaderRecv: + case <-cs.abort: + case <-ctx.Done(): + case <-cs.reqCancel: + default: + return false + } + return true + }) + } select { case <-cs.peerClosed: return nil @@ -1609,7 +1766,7 @@ func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error { return nil } cc.pendingRequests++ - cc.cond.Wait() + cc.condWait() cc.pendingRequests-- select { case <-cs.abort: @@ -1871,8 +2028,24 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) cs.flow.take(take) return take, nil } - cc.cond.Wait() + cc.condWait() + } +} + +func validateHeaders(hdrs http.Header) string { + for k, vv := range hdrs { + if !httpguts.ValidHeaderFieldName(k) { + return fmt.Sprintf("name %q", k) + } + for _, v := range vv { + if !httpguts.ValidHeaderFieldValue(v) { + // Don't include the value in the error, + // because it may be sensitive. + return fmt.Sprintf("value for header %q", k) + } + } } + return "" } var errNilRequestURL = errors.New("http2: Request.URI is nil") @@ -1912,19 +2085,14 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail } } - // Check for any invalid headers and return an error before we + // Check for any invalid headers+trailers and return an error before we // potentially pollute our hpack state. 
(We want to be able to // continue to reuse the hpack encoder for future requests) - for k, vv := range req.Header { - if !httpguts.ValidHeaderFieldName(k) { - return nil, fmt.Errorf("invalid HTTP header name %q", k) - } - for _, v := range vv { - if !httpguts.ValidHeaderFieldValue(v) { - // Don't include the value in the error, because it may be sensitive. - return nil, fmt.Errorf("invalid HTTP header value for header %q", k) - } - } + if err := validateHeaders(req.Header); err != "" { + return nil, fmt.Errorf("invalid HTTP header %s", err) + } + if err := validateHeaders(req.Trailer); err != "" { + return nil, fmt.Errorf("invalid HTTP trailer %s", err) } enumerateHeaders := func(f func(name, value string)) { @@ -2143,7 +2311,7 @@ func (cc *ClientConn) forgetStreamID(id uint32) { } // Wake up writeRequestBody via clientStream.awaitFlowControl and // wake up RoundTrip if there is a pending request. - cc.cond.Broadcast() + cc.condBroadcast() closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil if closeOnIdle && cc.streamsReserved == 0 && len(cc.streams) == 0 { @@ -2231,7 +2399,7 @@ func (rl *clientConnReadLoop) cleanup() { cs.abortStreamLocked(err) } } - cc.cond.Broadcast() + cc.condBroadcast() cc.mu.Unlock() } @@ -2266,10 +2434,9 @@ func (rl *clientConnReadLoop) run() error { cc := rl.cc gotSettings := false readIdleTimeout := cc.t.ReadIdleTimeout - var t *time.Timer + var t timer if readIdleTimeout != 0 { - t = time.AfterFunc(readIdleTimeout, cc.healthCheck) - defer t.Stop() + t = cc.afterFunc(readIdleTimeout, cc.healthCheck) } for { f, err := cc.fr.ReadFrame() @@ -2684,7 +2851,7 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error { }) return nil } - if !cs.firstByte { + if !cs.pastHeaders { cc.logf("protocol error: received DATA before a HEADERS frame") rl.endStreamError(cs, StreamError{ StreamID: f.StreamID, @@ -2867,7 +3034,7 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { for _, cs := range cc.streams { cs.flow.add(delta) } - cc.cond.Broadcast() + cc.condBroadcast() cc.initialWindowSize = s.Val case SettingHeaderTableSize: @@ -2911,9 +3078,18 @@ func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { fl = &cs.flow } if !fl.add(int32(f.Increment)) { + // For stream, the sender sends RST_STREAM with an error code of FLOW_CONTROL_ERROR + if cs != nil { + rl.endStreamError(cs, StreamError{ + StreamID: f.StreamID, + Code: ErrCodeFlowControl, + }) + return nil + } + return ConnectionError(ErrCodeFlowControl) } - cc.cond.Broadcast() + cc.condBroadcast() return nil } @@ -2955,24 +3131,38 @@ func (cc *ClientConn) Ping(ctx context.Context) error { } cc.mu.Unlock() } - errc := make(chan error, 1) - go func() { + var pingError error + errc := make(chan struct{}) + cc.goRun(func() { cc.wmu.Lock() defer cc.wmu.Unlock() - if err := cc.fr.WritePing(false, p); err != nil { - errc <- err + if pingError = cc.fr.WritePing(false, p); pingError != nil { + close(errc) return } - if err := cc.bw.Flush(); err != nil { - errc <- err + if pingError = cc.bw.Flush(); pingError != nil { + close(errc) return } - }() + }) + if cc.syncHooks != nil { + cc.syncHooks.blockUntil(func() bool { + select { + case <-c: + case <-errc: + case <-ctx.Done(): + case <-cc.readerDone: + default: + return false + } + return true + }) + } select { case <-c: return nil - case err := <-errc: - return err + case <-errc: + return pingError case <-ctx.Done(): return ctx.Err() case <-cc.readerDone: @@ -3141,9 +3331,17 @@ func (rt 
noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, err } func (t *Transport) idleConnTimeout() time.Duration { + // to keep things backwards compatible, we use non-zero values of + // IdleConnTimeout, followed by using the IdleConnTimeout on the underlying + // http1 transport, followed by 0 + if t.IdleConnTimeout != 0 { + return t.IdleConnTimeout + } + if t.t1 != nil { return t.t1.IdleConnTimeout } + return 0 } @@ -3201,3 +3399,34 @@ func traceFirstResponseByte(trace *httptrace.ClientTrace) { trace.GotFirstResponseByte() } } + +func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { + return trace != nil && trace.WroteHeaderField != nil +} + +func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) { + if trace != nil && trace.WroteHeaderField != nil { + trace.WroteHeaderField(k, []string{v}) + } +} + +func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { + if trace != nil { + return trace.Got1xxResponse + } + return nil +} + +// dialTLSWithContext uses tls.Dialer, added in Go 1.15, to open a TLS +// connection. +func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) { + dialer := &tls.Dialer{ + Config: cfg, + } + cn, err := dialer.DialContext(ctx, network, addr) + if err != nil { + return nil, err + } + tlsCn := cn.(*tls.Conn) // DialContext comment promises this will always succeed + return tlsCn, nil +} diff --git a/vendor/golang.org/x/net/icmp/helper_posix.go b/vendor/golang.org/x/net/icmp/helper_posix.go index 6c3ebfae..f625483f 100644 --- a/vendor/golang.org/x/net/icmp/helper_posix.go +++ b/vendor/golang.org/x/net/icmp/helper_posix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || windows -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows package icmp diff --git a/vendor/golang.org/x/net/icmp/listen_posix.go b/vendor/golang.org/x/net/icmp/listen_posix.go index 6aea8047..b7cb15b7 100644 --- a/vendor/golang.org/x/net/icmp/listen_posix.go +++ b/vendor/golang.org/x/net/icmp/listen_posix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || windows -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows package icmp diff --git a/vendor/golang.org/x/net/icmp/listen_stub.go b/vendor/golang.org/x/net/icmp/listen_stub.go index 1acfb74b..7b76be1c 100644 --- a/vendor/golang.org/x/net/icmp/listen_stub.go +++ b/vendor/golang.org/x/net/icmp/listen_stub.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows -// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows package icmp diff --git a/vendor/golang.org/x/net/idna/go118.go b/vendor/golang.org/x/net/idna/go118.go index c5c4338d..712f1ad8 100644 --- a/vendor/golang.org/x/net/idna/go118.go +++ b/vendor/golang.org/x/net/idna/go118.go @@ -5,7 +5,6 @@ // license that can be found in the LICENSE file. 
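The transport hunk above replaces the inline header-validation loop with a shared helper, validateHeaders, applied to both req.Header and req.Trailer. The helper itself is not part of the hunk; the sketch below is a plausible reconstruction inferred from its call sites (it returns a description string, where "" means all fields are valid), not necessarily the vendored source verbatim.

package http2sketch // hypothetical package name, for illustration only

import (
    "fmt"
    "net/http"

    "golang.org/x/net/http/httpguts"
)

// validateHeaders reports the first invalid header field as a short
// description ("name %q" or "value for header %q"), or "" if every field is
// valid, so the caller can wrap the result as either an "invalid HTTP header"
// or an "invalid HTTP trailer" error.
func validateHeaders(hdrs http.Header) string {
    for k, vv := range hdrs {
        if !httpguts.ValidHeaderFieldName(k) {
            return fmt.Sprintf("name %q", k)
        }
        for _, v := range vv {
            if !httpguts.ValidHeaderFieldValue(v) {
                // Don't include the value in the error, because it may be sensitive.
                return fmt.Sprintf("value for header %q", k)
            }
        }
    }
    return ""
}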
//go:build go1.18 -// +build go1.18 package idna diff --git a/vendor/golang.org/x/net/idna/idna10.0.0.go b/vendor/golang.org/x/net/idna/idna10.0.0.go index 64ccf85f..7b371788 100644 --- a/vendor/golang.org/x/net/idna/idna10.0.0.go +++ b/vendor/golang.org/x/net/idna/idna10.0.0.go @@ -5,7 +5,6 @@ // license that can be found in the LICENSE file. //go:build go1.10 -// +build go1.10 // Package idna implements IDNA2008 using the compatibility processing // defined by UTS (Unicode Technical Standard) #46, which defines a standard to diff --git a/vendor/golang.org/x/net/idna/idna9.0.0.go b/vendor/golang.org/x/net/idna/idna9.0.0.go index ee1698ce..cc6a892a 100644 --- a/vendor/golang.org/x/net/idna/idna9.0.0.go +++ b/vendor/golang.org/x/net/idna/idna9.0.0.go @@ -5,7 +5,6 @@ // license that can be found in the LICENSE file. //go:build !go1.10 -// +build !go1.10 // Package idna implements IDNA2008 using the compatibility processing // defined by UTS (Unicode Technical Standard) #46, which defines a standard to diff --git a/vendor/golang.org/x/net/idna/pre_go118.go b/vendor/golang.org/x/net/idna/pre_go118.go index 3aaccab1..40e74bb3 100644 --- a/vendor/golang.org/x/net/idna/pre_go118.go +++ b/vendor/golang.org/x/net/idna/pre_go118.go @@ -5,7 +5,6 @@ // license that can be found in the LICENSE file. //go:build !go1.18 -// +build !go1.18 package idna diff --git a/vendor/golang.org/x/net/idna/tables10.0.0.go b/vendor/golang.org/x/net/idna/tables10.0.0.go index d1d62ef4..c6c2bf10 100644 --- a/vendor/golang.org/x/net/idna/tables10.0.0.go +++ b/vendor/golang.org/x/net/idna/tables10.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.10 && !go1.13 -// +build go1.10,!go1.13 package idna diff --git a/vendor/golang.org/x/net/idna/tables11.0.0.go b/vendor/golang.org/x/net/idna/tables11.0.0.go index 167efba7..76789393 100644 --- a/vendor/golang.org/x/net/idna/tables11.0.0.go +++ b/vendor/golang.org/x/net/idna/tables11.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.13 && !go1.14 -// +build go1.13,!go1.14 package idna diff --git a/vendor/golang.org/x/net/idna/tables12.0.0.go b/vendor/golang.org/x/net/idna/tables12.0.0.go index ab40f7bc..0600cd2a 100644 --- a/vendor/golang.org/x/net/idna/tables12.0.0.go +++ b/vendor/golang.org/x/net/idna/tables12.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.14 && !go1.16 -// +build go1.14,!go1.16 package idna diff --git a/vendor/golang.org/x/net/idna/tables13.0.0.go b/vendor/golang.org/x/net/idna/tables13.0.0.go index 66701ead..2fb768ef 100644 --- a/vendor/golang.org/x/net/idna/tables13.0.0.go +++ b/vendor/golang.org/x/net/idna/tables13.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.16 && !go1.21 -// +build go1.16,!go1.21 package idna diff --git a/vendor/golang.org/x/net/idna/tables15.0.0.go b/vendor/golang.org/x/net/idna/tables15.0.0.go index 40033778..5ff05fe1 100644 --- a/vendor/golang.org/x/net/idna/tables15.0.0.go +++ b/vendor/golang.org/x/net/idna/tables15.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. 
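Two other transport changes above are worth noting: idleConnTimeout now prefers the HTTP/2 Transport's own IdleConnTimeout before falling back to the wrapped http/1 transport's value, and the read loop arms its health-check timer through the new timer abstraction driven by ReadIdleTimeout. A minimal configuration sketch, assuming the fields visible in the hunks (IdleConnTimeout, ReadIdleTimeout) plus the package's existing ConfigureTransports helper and PingTimeout field:

package main

import (
    "log"
    "net/http"
    "time"

    "golang.org/x/net/http2"
)

func main() {
    t1 := &http.Transport{IdleConnTimeout: 90 * time.Second}

    // Bind an HTTP/2 transport to the http/1 transport so the two share config.
    t2, err := http2.ConfigureTransports(t1)
    if err != nil {
        log.Fatal(err)
    }

    // Precedence from the idleConnTimeout hunk: a non-zero value here wins,
    // otherwise t1's 90s is used, otherwise 0 (no idle timeout).
    t2.IdleConnTimeout = 30 * time.Second

    // ReadIdleTimeout drives the health-check timer from the read-loop hunk:
    // after 15s with no frames, a PING is sent and must finish within PingTimeout.
    t2.ReadIdleTimeout = 15 * time.Second
    t2.PingTimeout = 5 * time.Second

    _ = &http.Client{Transport: t1} // HTTP/2 is negotiated via TLS ALPN
}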
//go:build go1.21 -// +build go1.21 package idna diff --git a/vendor/golang.org/x/net/idna/tables9.0.0.go b/vendor/golang.org/x/net/idna/tables9.0.0.go index 4074b533..0f25e84c 100644 --- a/vendor/golang.org/x/net/idna/tables9.0.0.go +++ b/vendor/golang.org/x/net/idna/tables9.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build !go1.10 -// +build !go1.10 package idna diff --git a/vendor/golang.org/x/net/idna/trie12.0.0.go b/vendor/golang.org/x/net/idna/trie12.0.0.go index bb63f904..8a75b966 100644 --- a/vendor/golang.org/x/net/idna/trie12.0.0.go +++ b/vendor/golang.org/x/net/idna/trie12.0.0.go @@ -5,7 +5,6 @@ // license that can be found in the LICENSE file. //go:build !go1.16 -// +build !go1.16 package idna diff --git a/vendor/golang.org/x/net/idna/trie13.0.0.go b/vendor/golang.org/x/net/idna/trie13.0.0.go index 7d68a8dc..fa45bb90 100644 --- a/vendor/golang.org/x/net/idna/trie13.0.0.go +++ b/vendor/golang.org/x/net/idna/trie13.0.0.go @@ -5,7 +5,6 @@ // license that can be found in the LICENSE file. //go:build go1.16 -// +build go1.16 package idna diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr.go b/vendor/golang.org/x/net/internal/socket/cmsghdr.go index 4bdaaaf1..33a5bf59 100644 --- a/vendor/golang.org/x/net/internal/socket/cmsghdr.go +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package socket diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr_bsd.go b/vendor/golang.org/x/net/internal/socket/cmsghdr_bsd.go index 0d30e0a0..68f438c8 100644 --- a/vendor/golang.org/x/net/internal/socket/cmsghdr_bsd.go +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr_bsd.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || netbsd || openbsd -// +build aix darwin dragonfly freebsd netbsd openbsd package socket diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_32bit.go b/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_32bit.go index 4936e8a6..058ea8de 100644 --- a/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_32bit.go +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_32bit.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (arm || mips || mipsle || 386 || ppc) && linux -// +build arm mips mipsle 386 ppc -// +build linux package socket diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_64bit.go b/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_64bit.go index f6877f98..3ca0d3a0 100644 --- a/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_64bit.go +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_64bit.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build (arm64 || amd64 || loong64 || ppc64 || ppc64le || mips64 || mips64le || riscv64 || s390x) && linux -// +build arm64 amd64 loong64 ppc64 ppc64le mips64 mips64le riscv64 s390x -// +build linux package socket diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr_solaris_64bit.go b/vendor/golang.org/x/net/internal/socket/cmsghdr_solaris_64bit.go index d3dbe1b8..6d0e426c 100644 --- a/vendor/golang.org/x/net/internal/socket/cmsghdr_solaris_64bit.go +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr_solaris_64bit.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && solaris -// +build amd64,solaris package socket diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr_stub.go b/vendor/golang.org/x/net/internal/socket/cmsghdr_stub.go index 1d9f2ed6..7ca9cb7e 100644 --- a/vendor/golang.org/x/net/internal/socket/cmsghdr_stub.go +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr_stub.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !zos -// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!zos package socket diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr_unix.go b/vendor/golang.org/x/net/internal/socket/cmsghdr_unix.go index 19d46789..0211f225 100644 --- a/vendor/golang.org/x/net/internal/socket/cmsghdr_unix.go +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr_unix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package socket diff --git a/vendor/golang.org/x/net/internal/socket/complete_dontwait.go b/vendor/golang.org/x/net/internal/socket/complete_dontwait.go index 5b1d50ae..2038f290 100644 --- a/vendor/golang.org/x/net/internal/socket/complete_dontwait.go +++ b/vendor/golang.org/x/net/internal/socket/complete_dontwait.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris -// +build darwin dragonfly freebsd linux netbsd openbsd solaris package socket diff --git a/vendor/golang.org/x/net/internal/socket/complete_nodontwait.go b/vendor/golang.org/x/net/internal/socket/complete_nodontwait.go index be634095..70e6f448 100644 --- a/vendor/golang.org/x/net/internal/socket/complete_nodontwait.go +++ b/vendor/golang.org/x/net/internal/socket/complete_nodontwait.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || windows || zos -// +build aix windows zos package socket diff --git a/vendor/golang.org/x/net/internal/socket/empty.s b/vendor/golang.org/x/net/internal/socket/empty.s index 90ab4ca3..49d79791 100644 --- a/vendor/golang.org/x/net/internal/socket/empty.s +++ b/vendor/golang.org/x/net/internal/socket/empty.s @@ -3,6 +3,5 @@ // license that can be found in the LICENSE file. //go:build darwin && go1.12 -// +build darwin,go1.12 // This exists solely so we can linkname in symbols from syscall. diff --git a/vendor/golang.org/x/net/internal/socket/error_unix.go b/vendor/golang.org/x/net/internal/socket/error_unix.go index 78f41290..7a5cc5c4 100644 --- a/vendor/golang.org/x/net/internal/socket/error_unix.go +++ b/vendor/golang.org/x/net/internal/socket/error_unix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
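Most of the remaining vendored hunks in this patch are mechanical: they delete the legacy // +build lines and keep only the //go:build expression introduced in Go 1.17 (gofmt had been keeping the two forms in sync, and these hunks drop the legacy form). The two notations are equivalent; for example, for a 32-bit Linux file such as cmsghdr_linux_32bit.go above (the package name below is illustrative):

// Package buildtagdemo illustrates the constraint change applied throughout
// the vendored golang.org/x/net files in this patch. The legacy form uses one
// line per AND term, with spaces meaning OR within a line:
//
//	// +build arm mips mipsle 386 ppc
//	// +build linux
//
// The //go:build form, which is all that remains after this patch, expresses
// the same constraint as a single boolean expression:
//
//	//go:build (arm || mips || mipsle || 386 || ppc) && linux
package buildtagdemo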
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package socket diff --git a/vendor/golang.org/x/net/internal/socket/iovec_32bit.go b/vendor/golang.org/x/net/internal/socket/iovec_32bit.go index 2b8fbb3f..340e53fb 100644 --- a/vendor/golang.org/x/net/internal/socket/iovec_32bit.go +++ b/vendor/golang.org/x/net/internal/socket/iovec_32bit.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (arm || mips || mipsle || 386 || ppc) && (darwin || dragonfly || freebsd || linux || netbsd || openbsd) -// +build arm mips mipsle 386 ppc -// +build darwin dragonfly freebsd linux netbsd openbsd package socket diff --git a/vendor/golang.org/x/net/internal/socket/iovec_64bit.go b/vendor/golang.org/x/net/internal/socket/iovec_64bit.go index 2e94e96f..26470c19 100644 --- a/vendor/golang.org/x/net/internal/socket/iovec_64bit.go +++ b/vendor/golang.org/x/net/internal/socket/iovec_64bit.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (arm64 || amd64 || loong64 || ppc64 || ppc64le || mips64 || mips64le || riscv64 || s390x) && (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || zos) -// +build arm64 amd64 loong64 ppc64 ppc64le mips64 mips64le riscv64 s390x -// +build aix darwin dragonfly freebsd linux netbsd openbsd zos package socket diff --git a/vendor/golang.org/x/net/internal/socket/iovec_solaris_64bit.go b/vendor/golang.org/x/net/internal/socket/iovec_solaris_64bit.go index f7da2bc4..8859ce10 100644 --- a/vendor/golang.org/x/net/internal/socket/iovec_solaris_64bit.go +++ b/vendor/golang.org/x/net/internal/socket/iovec_solaris_64bit.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && solaris -// +build amd64,solaris package socket diff --git a/vendor/golang.org/x/net/internal/socket/iovec_stub.go b/vendor/golang.org/x/net/internal/socket/iovec_stub.go index 14caf524..da886b03 100644 --- a/vendor/golang.org/x/net/internal/socket/iovec_stub.go +++ b/vendor/golang.org/x/net/internal/socket/iovec_stub.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !zos -// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!zos package socket diff --git a/vendor/golang.org/x/net/internal/socket/mmsghdr_stub.go b/vendor/golang.org/x/net/internal/socket/mmsghdr_stub.go index 113e773c..4825b21e 100644 --- a/vendor/golang.org/x/net/internal/socket/mmsghdr_stub.go +++ b/vendor/golang.org/x/net/internal/socket/mmsghdr_stub.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !aix && !linux && !netbsd -// +build !aix,!linux,!netbsd package socket diff --git a/vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go b/vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go index 41883c53..311fd2c7 100644 --- a/vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go +++ b/vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build aix || linux || netbsd -// +build aix linux netbsd package socket diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_bsd.go b/vendor/golang.org/x/net/internal/socket/msghdr_bsd.go index 25f6847f..ebff4f6e 100644 --- a/vendor/golang.org/x/net/internal/socket/msghdr_bsd.go +++ b/vendor/golang.org/x/net/internal/socket/msghdr_bsd.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || netbsd || openbsd -// +build aix darwin dragonfly freebsd netbsd openbsd package socket diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_bsdvar.go b/vendor/golang.org/x/net/internal/socket/msghdr_bsdvar.go index 5b8e00f1..62e6fe86 100644 --- a/vendor/golang.org/x/net/internal/socket/msghdr_bsdvar.go +++ b/vendor/golang.org/x/net/internal/socket/msghdr_bsdvar.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || netbsd -// +build aix darwin dragonfly freebsd netbsd package socket diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_linux_32bit.go b/vendor/golang.org/x/net/internal/socket/msghdr_linux_32bit.go index b4658fba..3dd07250 100644 --- a/vendor/golang.org/x/net/internal/socket/msghdr_linux_32bit.go +++ b/vendor/golang.org/x/net/internal/socket/msghdr_linux_32bit.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (arm || mips || mipsle || 386 || ppc) && linux -// +build arm mips mipsle 386 ppc -// +build linux package socket diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go b/vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go index 42411aff..5af9ddd6 100644 --- a/vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go +++ b/vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (arm64 || amd64 || loong64 || ppc64 || ppc64le || mips64 || mips64le || riscv64 || s390x) && linux -// +build arm64 amd64 loong64 ppc64 ppc64le mips64 mips64le riscv64 s390x -// +build linux package socket diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_solaris_64bit.go b/vendor/golang.org/x/net/internal/socket/msghdr_solaris_64bit.go index 3098f5d7..e212b50f 100644 --- a/vendor/golang.org/x/net/internal/socket/msghdr_solaris_64bit.go +++ b/vendor/golang.org/x/net/internal/socket/msghdr_solaris_64bit.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && solaris -// +build amd64,solaris package socket diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_stub.go b/vendor/golang.org/x/net/internal/socket/msghdr_stub.go index eb79151f..e8767764 100644 --- a/vendor/golang.org/x/net/internal/socket/msghdr_stub.go +++ b/vendor/golang.org/x/net/internal/socket/msghdr_stub.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !zos -// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!zos package socket diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_zos_s390x.go b/vendor/golang.org/x/net/internal/socket/msghdr_zos_s390x.go index 324e9ee7..529db68e 100644 --- a/vendor/golang.org/x/net/internal/socket/msghdr_zos_s390x.go +++ b/vendor/golang.org/x/net/internal/socket/msghdr_zos_s390x.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build s390x && zos -// +build s390x,zos package socket diff --git a/vendor/golang.org/x/net/internal/socket/norace.go b/vendor/golang.org/x/net/internal/socket/norace.go index de0ad420..8af30ecf 100644 --- a/vendor/golang.org/x/net/internal/socket/norace.go +++ b/vendor/golang.org/x/net/internal/socket/norace.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !race -// +build !race package socket diff --git a/vendor/golang.org/x/net/internal/socket/race.go b/vendor/golang.org/x/net/internal/socket/race.go index f0a28a62..9afa9580 100644 --- a/vendor/golang.org/x/net/internal/socket/race.go +++ b/vendor/golang.org/x/net/internal/socket/race.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build race -// +build race package socket diff --git a/vendor/golang.org/x/net/internal/socket/rawconn_mmsg.go b/vendor/golang.org/x/net/internal/socket/rawconn_mmsg.go index 8f79b38f..04313907 100644 --- a/vendor/golang.org/x/net/internal/socket/rawconn_mmsg.go +++ b/vendor/golang.org/x/net/internal/socket/rawconn_mmsg.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux -// +build linux package socket diff --git a/vendor/golang.org/x/net/internal/socket/rawconn_msg.go b/vendor/golang.org/x/net/internal/socket/rawconn_msg.go index f7d0b0d2..7c0d7410 100644 --- a/vendor/golang.org/x/net/internal/socket/rawconn_msg.go +++ b/vendor/golang.org/x/net/internal/socket/rawconn_msg.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || windows || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows zos package socket diff --git a/vendor/golang.org/x/net/internal/socket/rawconn_nommsg.go b/vendor/golang.org/x/net/internal/socket/rawconn_nommsg.go index 02f32855..e363fb5a 100644 --- a/vendor/golang.org/x/net/internal/socket/rawconn_nommsg.go +++ b/vendor/golang.org/x/net/internal/socket/rawconn_nommsg.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !linux -// +build !linux package socket diff --git a/vendor/golang.org/x/net/internal/socket/rawconn_nomsg.go b/vendor/golang.org/x/net/internal/socket/rawconn_nomsg.go index dd785877..ff7a8baf 100644 --- a/vendor/golang.org/x/net/internal/socket/rawconn_nomsg.go +++ b/vendor/golang.org/x/net/internal/socket/rawconn_nomsg.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos -// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos package socket diff --git a/vendor/golang.org/x/net/internal/socket/sys_bsd.go b/vendor/golang.org/x/net/internal/socket/sys_bsd.go index b258879d..e7664d48 100644 --- a/vendor/golang.org/x/net/internal/socket/sys_bsd.go +++ b/vendor/golang.org/x/net/internal/socket/sys_bsd.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build aix || darwin || dragonfly || freebsd || openbsd || solaris -// +build aix darwin dragonfly freebsd openbsd solaris package socket diff --git a/vendor/golang.org/x/net/internal/socket/sys_const_unix.go b/vendor/golang.org/x/net/internal/socket/sys_const_unix.go index 5d99f237..d7627f87 100644 --- a/vendor/golang.org/x/net/internal/socket/sys_const_unix.go +++ b/vendor/golang.org/x/net/internal/socket/sys_const_unix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package socket diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux.go b/vendor/golang.org/x/net/internal/socket/sys_linux.go index 76f5b8ae..08d49107 100644 --- a/vendor/golang.org/x/net/internal/socket/sys_linux.go +++ b/vendor/golang.org/x/net/internal/socket/sys_linux.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && !s390x && !386 -// +build linux,!s390x,!386 package socket diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_loong64.go b/vendor/golang.org/x/net/internal/socket/sys_linux_loong64.go index af964e61..1d182470 100644 --- a/vendor/golang.org/x/net/internal/socket/sys_linux_loong64.go +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_loong64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build loong64 -// +build loong64 package socket diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_riscv64.go b/vendor/golang.org/x/net/internal/socket/sys_linux_riscv64.go index 5b128fbb..0e407d12 100644 --- a/vendor/golang.org/x/net/internal/socket/sys_linux_riscv64.go +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_riscv64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build riscv64 -// +build riscv64 package socket diff --git a/vendor/golang.org/x/net/internal/socket/sys_posix.go b/vendor/golang.org/x/net/internal/socket/sys_posix.go index 42b8f234..58d86548 100644 --- a/vendor/golang.org/x/net/internal/socket/sys_posix.go +++ b/vendor/golang.org/x/net/internal/socket/sys_posix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || windows || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows zos package socket diff --git a/vendor/golang.org/x/net/internal/socket/sys_stub.go b/vendor/golang.org/x/net/internal/socket/sys_stub.go index 7cfb349c..2e5b473c 100644 --- a/vendor/golang.org/x/net/internal/socket/sys_stub.go +++ b/vendor/golang.org/x/net/internal/socket/sys_stub.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos -// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos package socket diff --git a/vendor/golang.org/x/net/internal/socket/sys_unix.go b/vendor/golang.org/x/net/internal/socket/sys_unix.go index de823932..93058db5 100644 --- a/vendor/golang.org/x/net/internal/socket/sys_unix.go +++ b/vendor/golang.org/x/net/internal/socket/sys_unix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package socket diff --git a/vendor/golang.org/x/net/internal/socket/zsys_aix_ppc64.go b/vendor/golang.org/x/net/internal/socket/zsys_aix_ppc64.go index 00691bd5..45bab004 100644 --- a/vendor/golang.org/x/net/internal/socket/zsys_aix_ppc64.go +++ b/vendor/golang.org/x/net/internal/socket/zsys_aix_ppc64.go @@ -3,7 +3,6 @@ // Added for go1.11 compatibility //go:build aix -// +build aix package socket diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_loong64.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_loong64.go index 6a94fec2..b6fc15a1 100644 --- a/vendor/golang.org/x/net/internal/socket/zsys_linux_loong64.go +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_loong64.go @@ -2,7 +2,6 @@ // cgo -godefs defs_linux.go //go:build loong64 -// +build loong64 package socket diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_riscv64.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_riscv64.go index c066272d..e67fc3cb 100644 --- a/vendor/golang.org/x/net/internal/socket/zsys_linux_riscv64.go +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_riscv64.go @@ -2,7 +2,6 @@ // cgo -godefs defs_linux.go //go:build riscv64 -// +build riscv64 package socket diff --git a/vendor/golang.org/x/net/internal/timeseries/timeseries.go b/vendor/golang.org/x/net/internal/timeseries/timeseries.go new file mode 100644 index 00000000..dc5225b6 --- /dev/null +++ b/vendor/golang.org/x/net/internal/timeseries/timeseries.go @@ -0,0 +1,525 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package timeseries implements a time series structure for stats collection. +package timeseries // import "golang.org/x/net/internal/timeseries" + +import ( + "fmt" + "log" + "time" +) + +const ( + timeSeriesNumBuckets = 64 + minuteHourSeriesNumBuckets = 60 +) + +var timeSeriesResolutions = []time.Duration{ + 1 * time.Second, + 10 * time.Second, + 1 * time.Minute, + 10 * time.Minute, + 1 * time.Hour, + 6 * time.Hour, + 24 * time.Hour, // 1 day + 7 * 24 * time.Hour, // 1 week + 4 * 7 * 24 * time.Hour, // 4 weeks + 16 * 7 * 24 * time.Hour, // 16 weeks +} + +var minuteHourSeriesResolutions = []time.Duration{ + 1 * time.Second, + 1 * time.Minute, +} + +// An Observable is a kind of data that can be aggregated in a time series. +type Observable interface { + Multiply(ratio float64) // Multiplies the data in self by a given ratio + Add(other Observable) // Adds the data from a different observation to self + Clear() // Clears the observation so it can be reused. + CopyFrom(other Observable) // Copies the contents of a given observation to self +} + +// Float attaches the methods of Observable to a float64. +type Float float64 + +// NewFloat returns a Float. +func NewFloat() Observable { + f := Float(0) + return &f +} + +// String returns the float as a string. +func (f *Float) String() string { return fmt.Sprintf("%g", f.Value()) } + +// Value returns the float's value. 
+func (f *Float) Value() float64 { return float64(*f) } + +func (f *Float) Multiply(ratio float64) { *f *= Float(ratio) } + +func (f *Float) Add(other Observable) { + o := other.(*Float) + *f += *o +} + +func (f *Float) Clear() { *f = 0 } + +func (f *Float) CopyFrom(other Observable) { + o := other.(*Float) + *f = *o +} + +// A Clock tells the current time. +type Clock interface { + Time() time.Time +} + +type defaultClock int + +var defaultClockInstance defaultClock + +func (defaultClock) Time() time.Time { return time.Now() } + +// Information kept per level. Each level consists of a circular list of +// observations. The start of the level may be derived from end and the +// len(buckets) * sizeInMillis. +type tsLevel struct { + oldest int // index to oldest bucketed Observable + newest int // index to newest bucketed Observable + end time.Time // end timestamp for this level + size time.Duration // duration of the bucketed Observable + buckets []Observable // collections of observations + provider func() Observable // used for creating new Observable +} + +func (l *tsLevel) Clear() { + l.oldest = 0 + l.newest = len(l.buckets) - 1 + l.end = time.Time{} + for i := range l.buckets { + if l.buckets[i] != nil { + l.buckets[i].Clear() + l.buckets[i] = nil + } + } +} + +func (l *tsLevel) InitLevel(size time.Duration, numBuckets int, f func() Observable) { + l.size = size + l.provider = f + l.buckets = make([]Observable, numBuckets) +} + +// Keeps a sequence of levels. Each level is responsible for storing data at +// a given resolution. For example, the first level stores data at a one +// minute resolution while the second level stores data at a one hour +// resolution. + +// Each level is represented by a sequence of buckets. Each bucket spans an +// interval equal to the resolution of the level. New observations are added +// to the last bucket. +type timeSeries struct { + provider func() Observable // make more Observable + numBuckets int // number of buckets in each level + levels []*tsLevel // levels of bucketed Observable + lastAdd time.Time // time of last Observable tracked + total Observable // convenient aggregation of all Observable + clock Clock // Clock for getting current time + pending Observable // observations not yet bucketed + pendingTime time.Time // what time are we keeping in pending + dirty bool // if there are pending observations +} + +// init initializes a level according to the supplied criteria. +func (ts *timeSeries) init(resolutions []time.Duration, f func() Observable, numBuckets int, clock Clock) { + ts.provider = f + ts.numBuckets = numBuckets + ts.clock = clock + ts.levels = make([]*tsLevel, len(resolutions)) + + for i := range resolutions { + if i > 0 && resolutions[i-1] >= resolutions[i] { + log.Print("timeseries: resolutions must be monotonically increasing") + break + } + newLevel := new(tsLevel) + newLevel.InitLevel(resolutions[i], ts.numBuckets, ts.provider) + ts.levels[i] = newLevel + } + + ts.Clear() +} + +// Clear removes all observations from the time series. +func (ts *timeSeries) Clear() { + ts.lastAdd = time.Time{} + ts.total = ts.resetObservation(ts.total) + ts.pending = ts.resetObservation(ts.pending) + ts.pendingTime = time.Time{} + ts.dirty = false + + for i := range ts.levels { + ts.levels[i].Clear() + } +} + +// Add records an observation at the current time. +func (ts *timeSeries) Add(observation Observable) { + ts.AddWithTime(observation, ts.clock.Time()) +} + +// AddWithTime records an observation at the specified time. 
+func (ts *timeSeries) AddWithTime(observation Observable, t time.Time) { + + smallBucketDuration := ts.levels[0].size + + if t.After(ts.lastAdd) { + ts.lastAdd = t + } + + if t.After(ts.pendingTime) { + ts.advance(t) + ts.mergePendingUpdates() + ts.pendingTime = ts.levels[0].end + ts.pending.CopyFrom(observation) + ts.dirty = true + } else if t.After(ts.pendingTime.Add(-1 * smallBucketDuration)) { + // The observation is close enough to go into the pending bucket. + // This compensates for clock skewing and small scheduling delays + // by letting the update stay in the fast path. + ts.pending.Add(observation) + ts.dirty = true + } else { + ts.mergeValue(observation, t) + } +} + +// mergeValue inserts the observation at the specified time in the past into all levels. +func (ts *timeSeries) mergeValue(observation Observable, t time.Time) { + for _, level := range ts.levels { + index := (ts.numBuckets - 1) - int(level.end.Sub(t)/level.size) + if 0 <= index && index < ts.numBuckets { + bucketNumber := (level.oldest + index) % ts.numBuckets + if level.buckets[bucketNumber] == nil { + level.buckets[bucketNumber] = level.provider() + } + level.buckets[bucketNumber].Add(observation) + } + } + ts.total.Add(observation) +} + +// mergePendingUpdates applies the pending updates into all levels. +func (ts *timeSeries) mergePendingUpdates() { + if ts.dirty { + ts.mergeValue(ts.pending, ts.pendingTime) + ts.pending = ts.resetObservation(ts.pending) + ts.dirty = false + } +} + +// advance cycles the buckets at each level until the latest bucket in +// each level can hold the time specified. +func (ts *timeSeries) advance(t time.Time) { + if !t.After(ts.levels[0].end) { + return + } + for i := 0; i < len(ts.levels); i++ { + level := ts.levels[i] + if !level.end.Before(t) { + break + } + + // If the time is sufficiently far, just clear the level and advance + // directly. + if !t.Before(level.end.Add(level.size * time.Duration(ts.numBuckets))) { + for _, b := range level.buckets { + ts.resetObservation(b) + } + level.end = time.Unix(0, (t.UnixNano()/level.size.Nanoseconds())*level.size.Nanoseconds()) + } + + for t.After(level.end) { + level.end = level.end.Add(level.size) + level.newest = level.oldest + level.oldest = (level.oldest + 1) % ts.numBuckets + ts.resetObservation(level.buckets[level.newest]) + } + + t = level.end + } +} + +// Latest returns the sum of the num latest buckets from the level. +func (ts *timeSeries) Latest(level, num int) Observable { + now := ts.clock.Time() + if ts.levels[0].end.Before(now) { + ts.advance(now) + } + + ts.mergePendingUpdates() + + result := ts.provider() + l := ts.levels[level] + index := l.newest + + for i := 0; i < num; i++ { + if l.buckets[index] != nil { + result.Add(l.buckets[index]) + } + if index == 0 { + index = ts.numBuckets + } + index-- + } + + return result +} + +// LatestBuckets returns a copy of the num latest buckets from level. 
+func (ts *timeSeries) LatestBuckets(level, num int) []Observable { + if level < 0 || level > len(ts.levels) { + log.Print("timeseries: bad level argument: ", level) + return nil + } + if num < 0 || num >= ts.numBuckets { + log.Print("timeseries: bad num argument: ", num) + return nil + } + + results := make([]Observable, num) + now := ts.clock.Time() + if ts.levels[0].end.Before(now) { + ts.advance(now) + } + + ts.mergePendingUpdates() + + l := ts.levels[level] + index := l.newest + + for i := 0; i < num; i++ { + result := ts.provider() + results[i] = result + if l.buckets[index] != nil { + result.CopyFrom(l.buckets[index]) + } + + if index == 0 { + index = ts.numBuckets + } + index -= 1 + } + return results +} + +// ScaleBy updates observations by scaling by factor. +func (ts *timeSeries) ScaleBy(factor float64) { + for _, l := range ts.levels { + for i := 0; i < ts.numBuckets; i++ { + l.buckets[i].Multiply(factor) + } + } + + ts.total.Multiply(factor) + ts.pending.Multiply(factor) +} + +// Range returns the sum of observations added over the specified time range. +// If start or finish times don't fall on bucket boundaries of the same +// level, then return values are approximate answers. +func (ts *timeSeries) Range(start, finish time.Time) Observable { + return ts.ComputeRange(start, finish, 1)[0] +} + +// Recent returns the sum of observations from the last delta. +func (ts *timeSeries) Recent(delta time.Duration) Observable { + now := ts.clock.Time() + return ts.Range(now.Add(-delta), now) +} + +// Total returns the total of all observations. +func (ts *timeSeries) Total() Observable { + ts.mergePendingUpdates() + return ts.total +} + +// ComputeRange computes a specified number of values into a slice using +// the observations recorded over the specified time period. The return +// values are approximate if the start or finish times don't fall on the +// bucket boundaries at the same level or if the number of buckets spanning +// the range is not an integral multiple of num. +func (ts *timeSeries) ComputeRange(start, finish time.Time, num int) []Observable { + if start.After(finish) { + log.Printf("timeseries: start > finish, %v>%v", start, finish) + return nil + } + + if num < 0 { + log.Printf("timeseries: num < 0, %v", num) + return nil + } + + results := make([]Observable, num) + + for _, l := range ts.levels { + if !start.Before(l.end.Add(-l.size * time.Duration(ts.numBuckets))) { + ts.extract(l, start, finish, num, results) + return results + } + } + + // Failed to find a level that covers the desired range. So just + // extract from the last level, even if it doesn't cover the entire + // desired range. + ts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results) + + return results +} + +// RecentList returns the specified number of values in slice over the most +// recent time period of the specified range. +func (ts *timeSeries) RecentList(delta time.Duration, num int) []Observable { + if delta < 0 { + return nil + } + now := ts.clock.Time() + return ts.ComputeRange(now.Add(-delta), now, num) +} + +// extract returns a slice of specified number of observations from a given +// level over a given range. +func (ts *timeSeries) extract(l *tsLevel, start, finish time.Time, num int, results []Observable) { + ts.mergePendingUpdates() + + srcInterval := l.size + dstInterval := finish.Sub(start) / time.Duration(num) + dstStart := start + srcStart := l.end.Add(-srcInterval * time.Duration(ts.numBuckets)) + + srcIndex := 0 + + // Where should scanning start? 
+ if dstStart.After(srcStart) { + advance := int(dstStart.Sub(srcStart) / srcInterval) + srcIndex += advance + srcStart = srcStart.Add(time.Duration(advance) * srcInterval) + } + + // The i'th value is computed as show below. + // interval = (finish/start)/num + // i'th value = sum of observation in range + // [ start + i * interval, + // start + (i + 1) * interval ) + for i := 0; i < num; i++ { + results[i] = ts.resetObservation(results[i]) + dstEnd := dstStart.Add(dstInterval) + for srcIndex < ts.numBuckets && srcStart.Before(dstEnd) { + srcEnd := srcStart.Add(srcInterval) + if srcEnd.After(ts.lastAdd) { + srcEnd = ts.lastAdd + } + + if !srcEnd.Before(dstStart) { + srcValue := l.buckets[(srcIndex+l.oldest)%ts.numBuckets] + if !srcStart.Before(dstStart) && !srcEnd.After(dstEnd) { + // dst completely contains src. + if srcValue != nil { + results[i].Add(srcValue) + } + } else { + // dst partially overlaps src. + overlapStart := maxTime(srcStart, dstStart) + overlapEnd := minTime(srcEnd, dstEnd) + base := srcEnd.Sub(srcStart) + fraction := overlapEnd.Sub(overlapStart).Seconds() / base.Seconds() + + used := ts.provider() + if srcValue != nil { + used.CopyFrom(srcValue) + } + used.Multiply(fraction) + results[i].Add(used) + } + + if srcEnd.After(dstEnd) { + break + } + } + srcIndex++ + srcStart = srcStart.Add(srcInterval) + } + dstStart = dstStart.Add(dstInterval) + } +} + +// resetObservation clears the content so the struct may be reused. +func (ts *timeSeries) resetObservation(observation Observable) Observable { + if observation == nil { + observation = ts.provider() + } else { + observation.Clear() + } + return observation +} + +// TimeSeries tracks data at granularities from 1 second to 16 weeks. +type TimeSeries struct { + timeSeries +} + +// NewTimeSeries creates a new TimeSeries using the function provided for creating new Observable. +func NewTimeSeries(f func() Observable) *TimeSeries { + return NewTimeSeriesWithClock(f, defaultClockInstance) +} + +// NewTimeSeriesWithClock creates a new TimeSeries using the function provided for creating new Observable and the clock for +// assigning timestamps. +func NewTimeSeriesWithClock(f func() Observable, clock Clock) *TimeSeries { + ts := new(TimeSeries) + ts.timeSeries.init(timeSeriesResolutions, f, timeSeriesNumBuckets, clock) + return ts +} + +// MinuteHourSeries tracks data at granularities of 1 minute and 1 hour. +type MinuteHourSeries struct { + timeSeries +} + +// NewMinuteHourSeries creates a new MinuteHourSeries using the function provided for creating new Observable. +func NewMinuteHourSeries(f func() Observable) *MinuteHourSeries { + return NewMinuteHourSeriesWithClock(f, defaultClockInstance) +} + +// NewMinuteHourSeriesWithClock creates a new MinuteHourSeries using the function provided for creating new Observable and the clock for +// assigning timestamps. 
+func NewMinuteHourSeriesWithClock(f func() Observable, clock Clock) *MinuteHourSeries { + ts := new(MinuteHourSeries) + ts.timeSeries.init(minuteHourSeriesResolutions, f, + minuteHourSeriesNumBuckets, clock) + return ts +} + +func (ts *MinuteHourSeries) Minute() Observable { + return ts.timeSeries.Latest(0, 60) +} + +func (ts *MinuteHourSeries) Hour() Observable { + return ts.timeSeries.Latest(1, 60) +} + +func minTime(a, b time.Time) time.Time { + if a.Before(b) { + return a + } + return b +} + +func maxTime(a, b time.Time) time.Time { + if a.After(b) { + return a + } + return b +} diff --git a/vendor/golang.org/x/net/ipv4/control_bsd.go b/vendor/golang.org/x/net/ipv4/control_bsd.go index b7385dfd..c88da8cb 100644 --- a/vendor/golang.org/x/net/ipv4/control_bsd.go +++ b/vendor/golang.org/x/net/ipv4/control_bsd.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || netbsd || openbsd -// +build aix darwin dragonfly freebsd netbsd openbsd package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/control_pktinfo.go b/vendor/golang.org/x/net/ipv4/control_pktinfo.go index 0e748dbd..14ae2dae 100644 --- a/vendor/golang.org/x/net/ipv4/control_pktinfo.go +++ b/vendor/golang.org/x/net/ipv4/control_pktinfo.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin || linux || solaris -// +build darwin linux solaris package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/control_stub.go b/vendor/golang.org/x/net/ipv4/control_stub.go index f27322c3..3ba66116 100644 --- a/vendor/golang.org/x/net/ipv4/control_stub.go +++ b/vendor/golang.org/x/net/ipv4/control_stub.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos -// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/control_unix.go b/vendor/golang.org/x/net/ipv4/control_unix.go index 2413e02f..2e765548 100644 --- a/vendor/golang.org/x/net/ipv4/control_unix.go +++ b/vendor/golang.org/x/net/ipv4/control_unix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/icmp_stub.go b/vendor/golang.org/x/net/ipv4/icmp_stub.go index cd4ee6e1..c2c4ce7f 100644 --- a/vendor/golang.org/x/net/ipv4/icmp_stub.go +++ b/vendor/golang.org/x/net/ipv4/icmp_stub.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !linux -// +build !linux package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/payload_cmsg.go b/vendor/golang.org/x/net/ipv4/payload_cmsg.go index 1bb370e2..91c685e8 100644 --- a/vendor/golang.org/x/net/ipv4/payload_cmsg.go +++ b/vendor/golang.org/x/net/ipv4/payload_cmsg.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
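The internal/timeseries file added above is a dependency of the x/net/trace package added later in this patch. A brief usage sketch of the API defined above; note the package is internal to golang.org/x/net, so the import only resolves from inside that module, and the test-style package name is illustrative:

package timeseries_test

import (
    "fmt"
    "time"

    "golang.org/x/net/internal/timeseries"
)

func Example() {
    // A float64 counter tracked at resolutions from 1 second up to 16 weeks.
    ts := timeseries.NewTimeSeries(timeseries.NewFloat)

    // Each Add lands in the current 1-second bucket and is folded into the
    // coarser levels as time advances.
    for i := 0; i < 3; i++ {
        v := timeseries.Float(1)
        ts.Add(&v)
    }

    total := ts.Total().(*timeseries.Float)               // sum of everything ever added
    recent := ts.Recent(time.Minute).(*timeseries.Float)  // approximate sum over the last minute
    fmt.Println(total.Value(), recent.Value())
}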
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/payload_nocmsg.go b/vendor/golang.org/x/net/ipv4/payload_nocmsg.go index 53f0794e..2afd4b50 100644 --- a/vendor/golang.org/x/net/ipv4/payload_nocmsg.go +++ b/vendor/golang.org/x/net/ipv4/payload_nocmsg.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !zos -// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!zos package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/sockopt_posix.go b/vendor/golang.org/x/net/ipv4/sockopt_posix.go index eb07c1c0..82e2c378 100644 --- a/vendor/golang.org/x/net/ipv4/sockopt_posix.go +++ b/vendor/golang.org/x/net/ipv4/sockopt_posix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || windows || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows zos package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/sockopt_stub.go b/vendor/golang.org/x/net/ipv4/sockopt_stub.go index cf036893..840108bf 100644 --- a/vendor/golang.org/x/net/ipv4/sockopt_stub.go +++ b/vendor/golang.org/x/net/ipv4/sockopt_stub.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos -// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/sys_aix.go b/vendor/golang.org/x/net/ipv4/sys_aix.go index 02730cdf..9244a68a 100644 --- a/vendor/golang.org/x/net/ipv4/sys_aix.go +++ b/vendor/golang.org/x/net/ipv4/sys_aix.go @@ -4,7 +4,6 @@ // Added for go1.11 compatibility //go:build aix -// +build aix package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/sys_asmreq.go b/vendor/golang.org/x/net/ipv4/sys_asmreq.go index 22322b38..645f254c 100644 --- a/vendor/golang.org/x/net/ipv4/sys_asmreq.go +++ b/vendor/golang.org/x/net/ipv4/sys_asmreq.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || netbsd || openbsd || solaris || windows -// +build aix darwin dragonfly freebsd netbsd openbsd solaris windows package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/sys_asmreq_stub.go b/vendor/golang.org/x/net/ipv4/sys_asmreq_stub.go index fde64014..48cfb6db 100644 --- a/vendor/golang.org/x/net/ipv4/sys_asmreq_stub.go +++ b/vendor/golang.org/x/net/ipv4/sys_asmreq_stub.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !aix && !darwin && !dragonfly && !freebsd && !netbsd && !openbsd && !solaris && !windows -// +build !aix,!darwin,!dragonfly,!freebsd,!netbsd,!openbsd,!solaris,!windows package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/sys_asmreqn.go b/vendor/golang.org/x/net/ipv4/sys_asmreqn.go index 54eb9901..0b27b632 100644 --- a/vendor/golang.org/x/net/ipv4/sys_asmreqn.go +++ b/vendor/golang.org/x/net/ipv4/sys_asmreqn.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build darwin || freebsd || linux -// +build darwin freebsd linux package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/sys_asmreqn_stub.go b/vendor/golang.org/x/net/ipv4/sys_asmreqn_stub.go index dcb15f25..303a5e2e 100644 --- a/vendor/golang.org/x/net/ipv4/sys_asmreqn_stub.go +++ b/vendor/golang.org/x/net/ipv4/sys_asmreqn_stub.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !darwin && !freebsd && !linux -// +build !darwin,!freebsd,!linux package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/sys_bpf.go b/vendor/golang.org/x/net/ipv4/sys_bpf.go index fb11e324..1b4780df 100644 --- a/vendor/golang.org/x/net/ipv4/sys_bpf.go +++ b/vendor/golang.org/x/net/ipv4/sys_bpf.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux -// +build linux package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/sys_bpf_stub.go b/vendor/golang.org/x/net/ipv4/sys_bpf_stub.go index fc53a0d3..b1f779b4 100644 --- a/vendor/golang.org/x/net/ipv4/sys_bpf_stub.go +++ b/vendor/golang.org/x/net/ipv4/sys_bpf_stub.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !linux -// +build !linux package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/sys_bsd.go b/vendor/golang.org/x/net/ipv4/sys_bsd.go index e191b2f1..b7b032d2 100644 --- a/vendor/golang.org/x/net/ipv4/sys_bsd.go +++ b/vendor/golang.org/x/net/ipv4/sys_bsd.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build netbsd || openbsd -// +build netbsd openbsd package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/sys_ssmreq.go b/vendor/golang.org/x/net/ipv4/sys_ssmreq.go index 6a4e7abf..a295e15e 100644 --- a/vendor/golang.org/x/net/ipv4/sys_ssmreq.go +++ b/vendor/golang.org/x/net/ipv4/sys_ssmreq.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin || freebsd || linux || solaris -// +build darwin freebsd linux solaris package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/sys_ssmreq_stub.go b/vendor/golang.org/x/net/ipv4/sys_ssmreq_stub.go index 157159fd..74bd454e 100644 --- a/vendor/golang.org/x/net/ipv4/sys_ssmreq_stub.go +++ b/vendor/golang.org/x/net/ipv4/sys_ssmreq_stub.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !darwin && !freebsd && !linux && !solaris -// +build !darwin,!freebsd,!linux,!solaris package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/sys_stub.go b/vendor/golang.org/x/net/ipv4/sys_stub.go index d5508516..20af4074 100644 --- a/vendor/golang.org/x/net/ipv4/sys_stub.go +++ b/vendor/golang.org/x/net/ipv4/sys_stub.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos -// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/zsys_aix_ppc64.go b/vendor/golang.org/x/net/ipv4/zsys_aix_ppc64.go index b7f2d6e5..dd454025 100644 --- a/vendor/golang.org/x/net/ipv4/zsys_aix_ppc64.go +++ b/vendor/golang.org/x/net/ipv4/zsys_aix_ppc64.go @@ -3,7 +3,6 @@ // Added for go1.11 compatibility //go:build aix -// +build aix package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_loong64.go b/vendor/golang.org/x/net/ipv4/zsys_linux_loong64.go index e15c22c7..54f9e139 100644 --- a/vendor/golang.org/x/net/ipv4/zsys_linux_loong64.go +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_loong64.go @@ -2,7 +2,6 @@ // cgo -godefs defs_linux.go //go:build loong64 -// +build loong64 package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_riscv64.go b/vendor/golang.org/x/net/ipv4/zsys_linux_riscv64.go index e2edebdb..78374a52 100644 --- a/vendor/golang.org/x/net/ipv4/zsys_linux_riscv64.go +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_riscv64.go @@ -2,7 +2,6 @@ // cgo -godefs defs_linux.go //go:build riscv64 -// +build riscv64 package ipv4 diff --git a/vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go b/vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go index 2733ddbe..a8f04e7b 100644 --- a/vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go +++ b/vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin -// +build darwin package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go b/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go index 9c90844a..51fbbb1f 100644 --- a/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go +++ b/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/control_stub.go b/vendor/golang.org/x/net/ipv6/control_stub.go index b7e8643f..eb28ce75 100644 --- a/vendor/golang.org/x/net/ipv6/control_stub.go +++ b/vendor/golang.org/x/net/ipv6/control_stub.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos -// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/control_unix.go b/vendor/golang.org/x/net/ipv6/control_unix.go index 63e475db..9c73b864 100644 --- a/vendor/golang.org/x/net/ipv6/control_unix.go +++ b/vendor/golang.org/x/net/ipv6/control_unix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/icmp_bsd.go b/vendor/golang.org/x/net/ipv6/icmp_bsd.go index 120bf877..2814534a 100644 --- a/vendor/golang.org/x/net/ipv6/icmp_bsd.go +++ b/vendor/golang.org/x/net/ipv6/icmp_bsd.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build aix || darwin || dragonfly || freebsd || netbsd || openbsd -// +build aix darwin dragonfly freebsd netbsd openbsd package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/icmp_stub.go b/vendor/golang.org/x/net/ipv6/icmp_stub.go index d60136a9..c92c9b51 100644 --- a/vendor/golang.org/x/net/ipv6/icmp_stub.go +++ b/vendor/golang.org/x/net/ipv6/icmp_stub.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos -// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/payload_cmsg.go b/vendor/golang.org/x/net/ipv6/payload_cmsg.go index b0692e43..be04e4d6 100644 --- a/vendor/golang.org/x/net/ipv6/payload_cmsg.go +++ b/vendor/golang.org/x/net/ipv6/payload_cmsg.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/payload_nocmsg.go b/vendor/golang.org/x/net/ipv6/payload_nocmsg.go index cd0ff508..29b9ccf6 100644 --- a/vendor/golang.org/x/net/ipv6/payload_nocmsg.go +++ b/vendor/golang.org/x/net/ipv6/payload_nocmsg.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !zos -// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!zos package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/sockopt_posix.go b/vendor/golang.org/x/net/ipv6/sockopt_posix.go index 37c62871..34dfed58 100644 --- a/vendor/golang.org/x/net/ipv6/sockopt_posix.go +++ b/vendor/golang.org/x/net/ipv6/sockopt_posix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || windows || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows zos package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/sockopt_stub.go b/vendor/golang.org/x/net/ipv6/sockopt_stub.go index 32fd8664..a09c3aaf 100644 --- a/vendor/golang.org/x/net/ipv6/sockopt_stub.go +++ b/vendor/golang.org/x/net/ipv6/sockopt_stub.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos -// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/sys_aix.go b/vendor/golang.org/x/net/ipv6/sys_aix.go index a47182af..93c8efc4 100644 --- a/vendor/golang.org/x/net/ipv6/sys_aix.go +++ b/vendor/golang.org/x/net/ipv6/sys_aix.go @@ -4,7 +4,6 @@ // Added for go1.11 compatibility //go:build aix -// +build aix package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/sys_asmreq.go b/vendor/golang.org/x/net/ipv6/sys_asmreq.go index 6ff9950d..5c9cb444 100644 --- a/vendor/golang.org/x/net/ipv6/sys_asmreq.go +++ b/vendor/golang.org/x/net/ipv6/sys_asmreq.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || windows -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/sys_asmreq_stub.go b/vendor/golang.org/x/net/ipv6/sys_asmreq_stub.go index 485290cb..dc704946 100644 --- a/vendor/golang.org/x/net/ipv6/sys_asmreq_stub.go +++ b/vendor/golang.org/x/net/ipv6/sys_asmreq_stub.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows -// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/sys_bpf.go b/vendor/golang.org/x/net/ipv6/sys_bpf.go index b5661fb8..e39f75f4 100644 --- a/vendor/golang.org/x/net/ipv6/sys_bpf.go +++ b/vendor/golang.org/x/net/ipv6/sys_bpf.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux -// +build linux package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/sys_bpf_stub.go b/vendor/golang.org/x/net/ipv6/sys_bpf_stub.go index cb006618..8532a8f5 100644 --- a/vendor/golang.org/x/net/ipv6/sys_bpf_stub.go +++ b/vendor/golang.org/x/net/ipv6/sys_bpf_stub.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !linux -// +build !linux package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/sys_bsd.go b/vendor/golang.org/x/net/ipv6/sys_bsd.go index bde41a6c..9f3bc2af 100644 --- a/vendor/golang.org/x/net/ipv6/sys_bsd.go +++ b/vendor/golang.org/x/net/ipv6/sys_bsd.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build dragonfly || netbsd || openbsd -// +build dragonfly netbsd openbsd package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/sys_ssmreq.go b/vendor/golang.org/x/net/ipv6/sys_ssmreq.go index 023488a4..b40f5c68 100644 --- a/vendor/golang.org/x/net/ipv6/sys_ssmreq.go +++ b/vendor/golang.org/x/net/ipv6/sys_ssmreq.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || freebsd || linux || solaris || zos -// +build aix darwin freebsd linux solaris zos package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go b/vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go index acdf2e5c..6526aad5 100644 --- a/vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go +++ b/vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !aix && !darwin && !freebsd && !linux && !solaris && !zos -// +build !aix,!darwin,!freebsd,!linux,!solaris,!zos package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/sys_stub.go b/vendor/golang.org/x/net/ipv6/sys_stub.go index 5807bba3..76602c34 100644 --- a/vendor/golang.org/x/net/ipv6/sys_stub.go +++ b/vendor/golang.org/x/net/ipv6/sys_stub.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos -// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/zsys_aix_ppc64.go b/vendor/golang.org/x/net/ipv6/zsys_aix_ppc64.go index f604b0f3..668716df 100644 --- a/vendor/golang.org/x/net/ipv6/zsys_aix_ppc64.go +++ b/vendor/golang.org/x/net/ipv6/zsys_aix_ppc64.go @@ -3,7 +3,6 @@ // Added for go1.11 compatibility //go:build aix -// +build aix package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_loong64.go b/vendor/golang.org/x/net/ipv6/zsys_linux_loong64.go index 598fbfa0..6a53284d 100644 --- a/vendor/golang.org/x/net/ipv6/zsys_linux_loong64.go +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_loong64.go @@ -2,7 +2,6 @@ // cgo -godefs defs_linux.go //go:build loong64 -// +build loong64 package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_riscv64.go b/vendor/golang.org/x/net/ipv6/zsys_linux_riscv64.go index d4f78e40..13b34720 100644 --- a/vendor/golang.org/x/net/ipv6/zsys_linux_riscv64.go +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_riscv64.go @@ -2,7 +2,6 @@ // cgo -godefs defs_linux.go //go:build riscv64 -// +build riscv64 package ipv6 diff --git a/vendor/golang.org/x/net/trace/events.go b/vendor/golang.org/x/net/trace/events.go new file mode 100644 index 00000000..c646a695 --- /dev/null +++ b/vendor/golang.org/x/net/trace/events.go @@ -0,0 +1,532 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +import ( + "bytes" + "fmt" + "html/template" + "io" + "log" + "net/http" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "text/tabwriter" + "time" +) + +const maxEventsPerLog = 100 + +type bucket struct { + MaxErrAge time.Duration + String string +} + +var buckets = []bucket{ + {0, "total"}, + {10 * time.Second, "errs<10s"}, + {1 * time.Minute, "errs<1m"}, + {10 * time.Minute, "errs<10m"}, + {1 * time.Hour, "errs<1h"}, + {10 * time.Hour, "errs<10h"}, + {24000 * time.Hour, "errors"}, +} + +// RenderEvents renders the HTML page typically served at /debug/events. +// It does not do any auth checking. The request may be nil. +// +// Most users will use the Events handler. +func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) { + now := time.Now() + data := &struct { + Families []string // family names + Buckets []bucket + Counts [][]int // eventLog count per family/bucket + + // Set when a bucket has been selected. + Family string + Bucket int + EventLogs eventLogs + Expanded bool + }{ + Buckets: buckets, + } + + data.Families = make([]string, 0, len(families)) + famMu.RLock() + for name := range families { + data.Families = append(data.Families, name) + } + famMu.RUnlock() + sort.Strings(data.Families) + + // Count the number of eventLogs in each family for each error age. + data.Counts = make([][]int, len(data.Families)) + for i, name := range data.Families { + // TODO(sameer): move this loop under the family lock. 
+ f := getEventFamily(name) + data.Counts[i] = make([]int, len(data.Buckets)) + for j, b := range data.Buckets { + data.Counts[i][j] = f.Count(now, b.MaxErrAge) + } + } + + if req != nil { + var ok bool + data.Family, data.Bucket, ok = parseEventsArgs(req) + if !ok { + // No-op + } else { + data.EventLogs = getEventFamily(data.Family).Copy(now, buckets[data.Bucket].MaxErrAge) + } + if data.EventLogs != nil { + defer data.EventLogs.Free() + sort.Sort(data.EventLogs) + } + if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { + data.Expanded = exp + } + } + + famMu.RLock() + defer famMu.RUnlock() + if err := eventsTmpl().Execute(w, data); err != nil { + log.Printf("net/trace: Failed executing template: %v", err) + } +} + +func parseEventsArgs(req *http.Request) (fam string, b int, ok bool) { + fam, bStr := req.FormValue("fam"), req.FormValue("b") + if fam == "" || bStr == "" { + return "", 0, false + } + b, err := strconv.Atoi(bStr) + if err != nil || b < 0 || b >= len(buckets) { + return "", 0, false + } + return fam, b, true +} + +// An EventLog provides a log of events associated with a specific object. +type EventLog interface { + // Printf formats its arguments with fmt.Sprintf and adds the + // result to the event log. + Printf(format string, a ...interface{}) + + // Errorf is like Printf, but it marks this event as an error. + Errorf(format string, a ...interface{}) + + // Finish declares that this event log is complete. + // The event log should not be used after calling this method. + Finish() +} + +// NewEventLog returns a new EventLog with the specified family name +// and title. +func NewEventLog(family, title string) EventLog { + el := newEventLog() + el.ref() + el.Family, el.Title = family, title + el.Start = time.Now() + el.events = make([]logEntry, 0, maxEventsPerLog) + el.stack = make([]uintptr, 32) + n := runtime.Callers(2, el.stack) + el.stack = el.stack[:n] + + getEventFamily(family).add(el) + return el +} + +func (el *eventLog) Finish() { + getEventFamily(el.Family).remove(el) + el.unref() // matches ref in New +} + +var ( + famMu sync.RWMutex + families = make(map[string]*eventFamily) // family name => family +) + +func getEventFamily(fam string) *eventFamily { + famMu.Lock() + defer famMu.Unlock() + f := families[fam] + if f == nil { + f = &eventFamily{} + families[fam] = f + } + return f +} + +type eventFamily struct { + mu sync.RWMutex + eventLogs eventLogs +} + +func (f *eventFamily) add(el *eventLog) { + f.mu.Lock() + f.eventLogs = append(f.eventLogs, el) + f.mu.Unlock() +} + +func (f *eventFamily) remove(el *eventLog) { + f.mu.Lock() + defer f.mu.Unlock() + for i, el0 := range f.eventLogs { + if el == el0 { + copy(f.eventLogs[i:], f.eventLogs[i+1:]) + f.eventLogs = f.eventLogs[:len(f.eventLogs)-1] + return + } + } +} + +func (f *eventFamily) Count(now time.Time, maxErrAge time.Duration) (n int) { + f.mu.RLock() + defer f.mu.RUnlock() + for _, el := range f.eventLogs { + if el.hasRecentError(now, maxErrAge) { + n++ + } + } + return +} + +func (f *eventFamily) Copy(now time.Time, maxErrAge time.Duration) (els eventLogs) { + f.mu.RLock() + defer f.mu.RUnlock() + els = make(eventLogs, 0, len(f.eventLogs)) + for _, el := range f.eventLogs { + if el.hasRecentError(now, maxErrAge) { + el.ref() + els = append(els, el) + } + } + return +} + +type eventLogs []*eventLog + +// Free calls unref on each element of the list. +func (els eventLogs) Free() { + for _, el := range els { + el.unref() + } +} + +// eventLogs may be sorted in reverse chronological order. 
+func (els eventLogs) Len() int { return len(els) } +func (els eventLogs) Less(i, j int) bool { return els[i].Start.After(els[j].Start) } +func (els eventLogs) Swap(i, j int) { els[i], els[j] = els[j], els[i] } + +// A logEntry is a timestamped log entry in an event log. +type logEntry struct { + When time.Time + Elapsed time.Duration // since previous event in log + NewDay bool // whether this event is on a different day to the previous event + What string + IsErr bool +} + +// WhenString returns a string representation of the elapsed time of the event. +// It will include the date if midnight was crossed. +func (e logEntry) WhenString() string { + if e.NewDay { + return e.When.Format("2006/01/02 15:04:05.000000") + } + return e.When.Format("15:04:05.000000") +} + +// An eventLog represents an active event log. +type eventLog struct { + // Family is the top-level grouping of event logs to which this belongs. + Family string + + // Title is the title of this event log. + Title string + + // Timing information. + Start time.Time + + // Call stack where this event log was created. + stack []uintptr + + // Append-only sequence of events. + // + // TODO(sameer): change this to a ring buffer to avoid the array copy + // when we hit maxEventsPerLog. + mu sync.RWMutex + events []logEntry + LastErrorTime time.Time + discarded int + + refs int32 // how many buckets this is in +} + +func (el *eventLog) reset() { + // Clear all but the mutex. Mutexes may not be copied, even when unlocked. + el.Family = "" + el.Title = "" + el.Start = time.Time{} + el.stack = nil + el.events = nil + el.LastErrorTime = time.Time{} + el.discarded = 0 + el.refs = 0 +} + +func (el *eventLog) hasRecentError(now time.Time, maxErrAge time.Duration) bool { + if maxErrAge == 0 { + return true + } + el.mu.RLock() + defer el.mu.RUnlock() + return now.Sub(el.LastErrorTime) < maxErrAge +} + +// delta returns the elapsed time since the last event or the log start, +// and whether it spans midnight. +// L >= el.mu +func (el *eventLog) delta(t time.Time) (time.Duration, bool) { + if len(el.events) == 0 { + return t.Sub(el.Start), false + } + prev := el.events[len(el.events)-1].When + return t.Sub(prev), prev.Day() != t.Day() + +} + +func (el *eventLog) Printf(format string, a ...interface{}) { + el.printf(false, format, a...) +} + +func (el *eventLog) Errorf(format string, a ...interface{}) { + el.printf(true, format, a...) +} + +func (el *eventLog) printf(isErr bool, format string, a ...interface{}) { + e := logEntry{When: time.Now(), IsErr: isErr, What: fmt.Sprintf(format, a...)} + el.mu.Lock() + e.Elapsed, e.NewDay = el.delta(e.When) + if len(el.events) < maxEventsPerLog { + el.events = append(el.events, e) + } else { + // Discard the oldest event. + if el.discarded == 0 { + // el.discarded starts at two to count for the event it + // is replacing, plus the next one that we are about to + // drop. + el.discarded = 2 + } else { + el.discarded++ + } + // TODO(sameer): if this causes allocations on a critical path, + // change eventLog.What to be a fmt.Stringer, as in trace.go. + el.events[0].What = fmt.Sprintf("(%d events discarded)", el.discarded) + // The timestamp of the discarded meta-event should be + // the time of the last event it is representing. 
+ el.events[0].When = el.events[1].When + copy(el.events[1:], el.events[2:]) + el.events[maxEventsPerLog-1] = e + } + if e.IsErr { + el.LastErrorTime = e.When + } + el.mu.Unlock() +} + +func (el *eventLog) ref() { + atomic.AddInt32(&el.refs, 1) +} + +func (el *eventLog) unref() { + if atomic.AddInt32(&el.refs, -1) == 0 { + freeEventLog(el) + } +} + +func (el *eventLog) When() string { + return el.Start.Format("2006/01/02 15:04:05.000000") +} + +func (el *eventLog) ElapsedTime() string { + elapsed := time.Since(el.Start) + return fmt.Sprintf("%.6f", elapsed.Seconds()) +} + +func (el *eventLog) Stack() string { + buf := new(bytes.Buffer) + tw := tabwriter.NewWriter(buf, 1, 8, 1, '\t', 0) + printStackRecord(tw, el.stack) + tw.Flush() + return buf.String() +} + +// printStackRecord prints the function + source line information +// for a single stack trace. +// Adapted from runtime/pprof/pprof.go. +func printStackRecord(w io.Writer, stk []uintptr) { + for _, pc := range stk { + f := runtime.FuncForPC(pc) + if f == nil { + continue + } + file, line := f.FileLine(pc) + name := f.Name() + // Hide runtime.goexit and any runtime functions at the beginning. + if strings.HasPrefix(name, "runtime.") { + continue + } + fmt.Fprintf(w, "# %s\t%s:%d\n", name, file, line) + } +} + +func (el *eventLog) Events() []logEntry { + el.mu.RLock() + defer el.mu.RUnlock() + return el.events +} + +// freeEventLogs is a freelist of *eventLog +var freeEventLogs = make(chan *eventLog, 1000) + +// newEventLog returns a event log ready to use. +func newEventLog() *eventLog { + select { + case el := <-freeEventLogs: + return el + default: + return new(eventLog) + } +} + +// freeEventLog adds el to freeEventLogs if there's room. +// This is non-blocking. +func freeEventLog(el *eventLog) { + el.reset() + select { + case freeEventLogs <- el: + default: + } +} + +var eventsTmplCache *template.Template +var eventsTmplOnce sync.Once + +func eventsTmpl() *template.Template { + eventsTmplOnce.Do(func() { + eventsTmplCache = template.Must(template.New("events").Funcs(template.FuncMap{ + "elapsed": elapsed, + "trimSpace": strings.TrimSpace, + }).Parse(eventsHTML)) + }) + return eventsTmplCache +} + +const eventsHTML = ` + + + events + + + + +

/debug/events

+ + + {{range $i, $fam := .Families}} + + + + {{range $j, $bucket := $.Buckets}} + {{$n := index $.Counts $i $j}} + + {{end}} + + {{end}} +
{{$fam}} + {{if $n}}{{end}} + [{{$n}} {{$bucket.String}}] + {{if $n}}{{end}} +
+ +{{if $.EventLogs}} +
+

Family: {{$.Family}}

+ +{{if $.Expanded}}{{end}} +[Summary]{{if $.Expanded}}{{end}} + +{{if not $.Expanded}}{{end}} +[Expanded]{{if not $.Expanded}}{{end}} + + + + {{range $el := $.EventLogs}} + + + + + {{if $.Expanded}} + + + + + + {{range $el.Events}} + + + + + + {{end}} + {{end}} + {{end}} +
WhenElapsed
{{$el.When}}{{$el.ElapsedTime}}{{$el.Title}} +
{{$el.Stack|trimSpace}}
{{.WhenString}}{{elapsed .Elapsed}}.{{if .IsErr}}E{{else}}.{{end}}. {{.What}}
+{{end}} + + +` diff --git a/vendor/golang.org/x/net/trace/histogram.go b/vendor/golang.org/x/net/trace/histogram.go new file mode 100644 index 00000000..d6c71101 --- /dev/null +++ b/vendor/golang.org/x/net/trace/histogram.go @@ -0,0 +1,365 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +// This file implements histogramming for RPC statistics collection. + +import ( + "bytes" + "fmt" + "html/template" + "log" + "math" + "sync" + + "golang.org/x/net/internal/timeseries" +) + +const ( + bucketCount = 38 +) + +// histogram keeps counts of values in buckets that are spaced +// out in powers of 2: 0-1, 2-3, 4-7... +// histogram implements timeseries.Observable +type histogram struct { + sum int64 // running total of measurements + sumOfSquares float64 // square of running total + buckets []int64 // bucketed values for histogram + value int // holds a single value as an optimization + valueCount int64 // number of values recorded for single value +} + +// addMeasurement records a value measurement observation to the histogram. +func (h *histogram) addMeasurement(value int64) { + // TODO: assert invariant + h.sum += value + h.sumOfSquares += float64(value) * float64(value) + + bucketIndex := getBucket(value) + + if h.valueCount == 0 || (h.valueCount > 0 && h.value == bucketIndex) { + h.value = bucketIndex + h.valueCount++ + } else { + h.allocateBuckets() + h.buckets[bucketIndex]++ + } +} + +func (h *histogram) allocateBuckets() { + if h.buckets == nil { + h.buckets = make([]int64, bucketCount) + h.buckets[h.value] = h.valueCount + h.value = 0 + h.valueCount = -1 + } +} + +func log2(i int64) int { + n := 0 + for ; i >= 0x100; i >>= 8 { + n += 8 + } + for ; i > 0; i >>= 1 { + n += 1 + } + return n +} + +func getBucket(i int64) (index int) { + index = log2(i) - 1 + if index < 0 { + index = 0 + } + if index >= bucketCount { + index = bucketCount - 1 + } + return +} + +// Total returns the number of recorded observations. +func (h *histogram) total() (total int64) { + if h.valueCount >= 0 { + total = h.valueCount + } + for _, val := range h.buckets { + total += int64(val) + } + return +} + +// Average returns the average value of recorded observations. +func (h *histogram) average() float64 { + t := h.total() + if t == 0 { + return 0 + } + return float64(h.sum) / float64(t) +} + +// Variance returns the variance of recorded observations. +func (h *histogram) variance() float64 { + t := float64(h.total()) + if t == 0 { + return 0 + } + s := float64(h.sum) / t + return h.sumOfSquares/t - s*s +} + +// StandardDeviation returns the standard deviation of recorded observations. +func (h *histogram) standardDeviation() float64 { + return math.Sqrt(h.variance()) +} + +// PercentileBoundary estimates the value that the given fraction of recorded +// observations are less than. +func (h *histogram) percentileBoundary(percentile float64) int64 { + total := h.total() + + // Corner cases (make sure result is strictly less than Total()) + if total == 0 { + return 0 + } else if total == 1 { + return int64(h.average()) + } + + percentOfTotal := round(float64(total) * percentile) + var runningTotal int64 + + for i := range h.buckets { + value := h.buckets[i] + runningTotal += value + if runningTotal == percentOfTotal { + // We hit an exact bucket boundary. If the next bucket has data, it is a + // good estimate of the value. 
If the bucket is empty, we interpolate the + // midpoint between the next bucket's boundary and the next non-zero + // bucket. If the remaining buckets are all empty, then we use the + // boundary for the next bucket as the estimate. + j := uint8(i + 1) + min := bucketBoundary(j) + if runningTotal < total { + for h.buckets[j] == 0 { + j++ + } + } + max := bucketBoundary(j) + return min + round(float64(max-min)/2) + } else if runningTotal > percentOfTotal { + // The value is in this bucket. Interpolate the value. + delta := runningTotal - percentOfTotal + percentBucket := float64(value-delta) / float64(value) + bucketMin := bucketBoundary(uint8(i)) + nextBucketMin := bucketBoundary(uint8(i + 1)) + bucketSize := nextBucketMin - bucketMin + return bucketMin + round(percentBucket*float64(bucketSize)) + } + } + return bucketBoundary(bucketCount - 1) +} + +// Median returns the estimated median of the observed values. +func (h *histogram) median() int64 { + return h.percentileBoundary(0.5) +} + +// Add adds other to h. +func (h *histogram) Add(other timeseries.Observable) { + o := other.(*histogram) + if o.valueCount == 0 { + // Other histogram is empty + } else if h.valueCount >= 0 && o.valueCount > 0 && h.value == o.value { + // Both have a single bucketed value, aggregate them + h.valueCount += o.valueCount + } else { + // Two different values necessitate buckets in this histogram + h.allocateBuckets() + if o.valueCount >= 0 { + h.buckets[o.value] += o.valueCount + } else { + for i := range h.buckets { + h.buckets[i] += o.buckets[i] + } + } + } + h.sumOfSquares += o.sumOfSquares + h.sum += o.sum +} + +// Clear resets the histogram to an empty state, removing all observed values. +func (h *histogram) Clear() { + h.buckets = nil + h.value = 0 + h.valueCount = 0 + h.sum = 0 + h.sumOfSquares = 0 +} + +// CopyFrom copies from other, which must be a *histogram, into h. +func (h *histogram) CopyFrom(other timeseries.Observable) { + o := other.(*histogram) + if o.valueCount == -1 { + h.allocateBuckets() + copy(h.buckets, o.buckets) + } + h.sum = o.sum + h.sumOfSquares = o.sumOfSquares + h.value = o.value + h.valueCount = o.valueCount +} + +// Multiply scales the histogram by the specified ratio. +func (h *histogram) Multiply(ratio float64) { + if h.valueCount == -1 { + for i := range h.buckets { + h.buckets[i] = int64(float64(h.buckets[i]) * ratio) + } + } else { + h.valueCount = int64(float64(h.valueCount) * ratio) + } + h.sum = int64(float64(h.sum) * ratio) + h.sumOfSquares = h.sumOfSquares * ratio +} + +// New creates a new histogram. +func (h *histogram) New() timeseries.Observable { + r := new(histogram) + r.Clear() + return r +} + +func (h *histogram) String() string { + return fmt.Sprintf("%d, %f, %d, %d, %v", + h.sum, h.sumOfSquares, h.value, h.valueCount, h.buckets) +} + +// round returns the closest int64 to the argument +func round(in float64) int64 { + return int64(math.Floor(in + 0.5)) +} + +// bucketBoundary returns the first value in the bucket. +func bucketBoundary(bucket uint8) int64 { + if bucket == 0 { + return 0 + } + return 1 << bucket +} + +// bucketData holds data about a specific bucket for use in distTmpl. +type bucketData struct { + Lower, Upper int64 + N int64 + Pct, CumulativePct float64 + GraphWidth int +} + +// data holds data about a Distribution for use in distTmpl. +type data struct { + Buckets []*bucketData + Count, Median int64 + Mean, StandardDeviation float64 +} + +// maxHTMLBarWidth is the maximum width of the HTML bar for visualizing buckets. 
+const maxHTMLBarWidth = 350.0 + +// newData returns data representing h for use in distTmpl. +func (h *histogram) newData() *data { + // Force the allocation of buckets to simplify the rendering implementation + h.allocateBuckets() + // We scale the bars on the right so that the largest bar is + // maxHTMLBarWidth pixels in width. + maxBucket := int64(0) + for _, n := range h.buckets { + if n > maxBucket { + maxBucket = n + } + } + total := h.total() + barsizeMult := maxHTMLBarWidth / float64(maxBucket) + var pctMult float64 + if total == 0 { + pctMult = 1.0 + } else { + pctMult = 100.0 / float64(total) + } + + buckets := make([]*bucketData, len(h.buckets)) + runningTotal := int64(0) + for i, n := range h.buckets { + if n == 0 { + continue + } + runningTotal += n + var upperBound int64 + if i < bucketCount-1 { + upperBound = bucketBoundary(uint8(i + 1)) + } else { + upperBound = math.MaxInt64 + } + buckets[i] = &bucketData{ + Lower: bucketBoundary(uint8(i)), + Upper: upperBound, + N: n, + Pct: float64(n) * pctMult, + CumulativePct: float64(runningTotal) * pctMult, + GraphWidth: int(float64(n) * barsizeMult), + } + } + return &data{ + Buckets: buckets, + Count: total, + Median: h.median(), + Mean: h.average(), + StandardDeviation: h.standardDeviation(), + } +} + +func (h *histogram) html() template.HTML { + buf := new(bytes.Buffer) + if err := distTmpl().Execute(buf, h.newData()); err != nil { + buf.Reset() + log.Printf("net/trace: couldn't execute template: %v", err) + } + return template.HTML(buf.String()) +} + +var distTmplCache *template.Template +var distTmplOnce sync.Once + +func distTmpl() *template.Template { + distTmplOnce.Do(func() { + // Input: data + distTmplCache = template.Must(template.New("distTmpl").Parse(` + + + + + + + +
Count: {{.Count}}Mean: {{printf "%.0f" .Mean}}StdDev: {{printf "%.0f" .StandardDeviation}}Median: {{.Median}}
+
+ +{{range $b := .Buckets}} +{{if $b}} + + + + + + + + + +{{end}} +{{end}} +
[{{.Lower}},{{.Upper}}){{.N}}{{printf "%#.3f" .Pct}}%{{printf "%#.3f" .CumulativePct}}%
+`)) + }) + return distTmplCache +} diff --git a/vendor/golang.org/x/net/trace/trace.go b/vendor/golang.org/x/net/trace/trace.go new file mode 100644 index 00000000..eae2a99f --- /dev/null +++ b/vendor/golang.org/x/net/trace/trace.go @@ -0,0 +1,1130 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package trace implements tracing of requests and long-lived objects. +It exports HTTP interfaces on /debug/requests and /debug/events. + +A trace.Trace provides tracing for short-lived objects, usually requests. +A request handler might be implemented like this: + + func fooHandler(w http.ResponseWriter, req *http.Request) { + tr := trace.New("mypkg.Foo", req.URL.Path) + defer tr.Finish() + ... + tr.LazyPrintf("some event %q happened", str) + ... + if err := somethingImportant(); err != nil { + tr.LazyPrintf("somethingImportant failed: %v", err) + tr.SetError() + } + } + +The /debug/requests HTTP endpoint organizes the traces by family, +errors, and duration. It also provides histogram of request duration +for each family. + +A trace.EventLog provides tracing for long-lived objects, such as RPC +connections. + + // A Fetcher fetches URL paths for a single domain. + type Fetcher struct { + domain string + events trace.EventLog + } + + func NewFetcher(domain string) *Fetcher { + return &Fetcher{ + domain, + trace.NewEventLog("mypkg.Fetcher", domain), + } + } + + func (f *Fetcher) Fetch(path string) (string, error) { + resp, err := http.Get("http://" + f.domain + "/" + path) + if err != nil { + f.events.Errorf("Get(%q) = %v", path, err) + return "", err + } + f.events.Printf("Get(%q) = %s", path, resp.Status) + ... + } + + func (f *Fetcher) Close() error { + f.events.Finish() + return nil + } + +The /debug/events HTTP endpoint organizes the event logs by family and +by time since the last error. The expanded view displays recent log +entries and the log's call stack. +*/ +package trace // import "golang.org/x/net/trace" + +import ( + "bytes" + "context" + "fmt" + "html/template" + "io" + "log" + "net" + "net/http" + "net/url" + "runtime" + "sort" + "strconv" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/internal/timeseries" +) + +// DebugUseAfterFinish controls whether to debug uses of Trace values after finishing. +// FOR DEBUGGING ONLY. This will slow down the program. +var DebugUseAfterFinish = false + +// HTTP ServeMux paths. +const ( + debugRequestsPath = "/debug/requests" + debugEventsPath = "/debug/events" +) + +// AuthRequest determines whether a specific request is permitted to load the +// /debug/requests or /debug/events pages. +// +// It returns two bools; the first indicates whether the page may be viewed at all, +// and the second indicates whether sensitive events will be shown. +// +// AuthRequest may be replaced by a program to customize its authorization requirements. +// +// The default AuthRequest function returns (true, true) if and only if the request +// comes from localhost/127.0.0.1/[::1]. +var AuthRequest = func(req *http.Request) (any, sensitive bool) { + // RemoteAddr is commonly in the form "IP" or "IP:port". + // If it is in the form "IP:port", split off the port. 
+ host, _, err := net.SplitHostPort(req.RemoteAddr) + if err != nil { + host = req.RemoteAddr + } + switch host { + case "localhost", "127.0.0.1", "::1": + return true, true + default: + return false, false + } +} + +func init() { + _, pat := http.DefaultServeMux.Handler(&http.Request{URL: &url.URL{Path: debugRequestsPath}}) + if pat == debugRequestsPath { + panic("/debug/requests is already registered. You may have two independent copies of " + + "golang.org/x/net/trace in your binary, trying to maintain separate state. This may " + + "involve a vendored copy of golang.org/x/net/trace.") + } + + // TODO(jbd): Serve Traces from /debug/traces in the future? + // There is no requirement for a request to be present to have traces. + http.HandleFunc(debugRequestsPath, Traces) + http.HandleFunc(debugEventsPath, Events) +} + +// NewContext returns a copy of the parent context +// and associates it with a Trace. +func NewContext(ctx context.Context, tr Trace) context.Context { + return context.WithValue(ctx, contextKey, tr) +} + +// FromContext returns the Trace bound to the context, if any. +func FromContext(ctx context.Context) (tr Trace, ok bool) { + tr, ok = ctx.Value(contextKey).(Trace) + return +} + +// Traces responds with traces from the program. +// The package initialization registers it in http.DefaultServeMux +// at /debug/requests. +// +// It performs authorization by running AuthRequest. +func Traces(w http.ResponseWriter, req *http.Request) { + any, sensitive := AuthRequest(req) + if !any { + http.Error(w, "not allowed", http.StatusUnauthorized) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + Render(w, req, sensitive) +} + +// Events responds with a page of events collected by EventLogs. +// The package initialization registers it in http.DefaultServeMux +// at /debug/events. +// +// It performs authorization by running AuthRequest. +func Events(w http.ResponseWriter, req *http.Request) { + any, sensitive := AuthRequest(req) + if !any { + http.Error(w, "not allowed", http.StatusUnauthorized) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + RenderEvents(w, req, sensitive) +} + +// Render renders the HTML page typically served at /debug/requests. +// It does not do any auth checking. The request may be nil. +// +// Most users will use the Traces handler. +func Render(w io.Writer, req *http.Request, sensitive bool) { + data := &struct { + Families []string + ActiveTraceCount map[string]int + CompletedTraces map[string]*family + + // Set when a bucket has been selected. + Traces traceList + Family string + Bucket int + Expanded bool + Traced bool + Active bool + ShowSensitive bool // whether to show sensitive events + + Histogram template.HTML + HistogramWindow string // e.g. "last minute", "last hour", "all time" + + // If non-zero, the set of traces is a partial set, + // and this is the total number. + Total int + }{ + CompletedTraces: completedTraces, + } + + data.ShowSensitive = sensitive + if req != nil { + // Allow show_sensitive=0 to force hiding of sensitive data for testing. + // This only goes one way; you can't use show_sensitive=1 to see things. 
+ if req.FormValue("show_sensitive") == "0" { + data.ShowSensitive = false + } + + if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { + data.Expanded = exp + } + if exp, err := strconv.ParseBool(req.FormValue("rtraced")); err == nil { + data.Traced = exp + } + } + + completedMu.RLock() + data.Families = make([]string, 0, len(completedTraces)) + for fam := range completedTraces { + data.Families = append(data.Families, fam) + } + completedMu.RUnlock() + sort.Strings(data.Families) + + // We are careful here to minimize the time spent locking activeMu, + // since that lock is required every time an RPC starts and finishes. + data.ActiveTraceCount = make(map[string]int, len(data.Families)) + activeMu.RLock() + for fam, s := range activeTraces { + data.ActiveTraceCount[fam] = s.Len() + } + activeMu.RUnlock() + + var ok bool + data.Family, data.Bucket, ok = parseArgs(req) + switch { + case !ok: + // No-op + case data.Bucket == -1: + data.Active = true + n := data.ActiveTraceCount[data.Family] + data.Traces = getActiveTraces(data.Family) + if len(data.Traces) < n { + data.Total = n + } + case data.Bucket < bucketsPerFamily: + if b := lookupBucket(data.Family, data.Bucket); b != nil { + data.Traces = b.Copy(data.Traced) + } + default: + if f := getFamily(data.Family, false); f != nil { + var obs timeseries.Observable + f.LatencyMu.RLock() + switch o := data.Bucket - bucketsPerFamily; o { + case 0: + obs = f.Latency.Minute() + data.HistogramWindow = "last minute" + case 1: + obs = f.Latency.Hour() + data.HistogramWindow = "last hour" + case 2: + obs = f.Latency.Total() + data.HistogramWindow = "all time" + } + f.LatencyMu.RUnlock() + if obs != nil { + data.Histogram = obs.(*histogram).html() + } + } + } + + if data.Traces != nil { + defer data.Traces.Free() + sort.Sort(data.Traces) + } + + completedMu.RLock() + defer completedMu.RUnlock() + if err := pageTmpl().ExecuteTemplate(w, "Page", data); err != nil { + log.Printf("net/trace: Failed executing template: %v", err) + } +} + +func parseArgs(req *http.Request) (fam string, b int, ok bool) { + if req == nil { + return "", 0, false + } + fam, bStr := req.FormValue("fam"), req.FormValue("b") + if fam == "" || bStr == "" { + return "", 0, false + } + b, err := strconv.Atoi(bStr) + if err != nil || b < -1 { + return "", 0, false + } + + return fam, b, true +} + +func lookupBucket(fam string, b int) *traceBucket { + f := getFamily(fam, false) + if f == nil || b < 0 || b >= len(f.Buckets) { + return nil + } + return f.Buckets[b] +} + +type contextKeyT string + +var contextKey = contextKeyT("golang.org/x/net/trace.Trace") + +// Trace represents an active request. +type Trace interface { + // LazyLog adds x to the event log. It will be evaluated each time the + // /debug/requests page is rendered. Any memory referenced by x will be + // pinned until the trace is finished and later discarded. + LazyLog(x fmt.Stringer, sensitive bool) + + // LazyPrintf evaluates its arguments with fmt.Sprintf each time the + // /debug/requests page is rendered. Any memory referenced by a will be + // pinned until the trace is finished and later discarded. + LazyPrintf(format string, a ...interface{}) + + // SetError declares that this trace resulted in an error. + SetError() + + // SetRecycler sets a recycler for the trace. + // f will be called for each event passed to LazyLog at a time when + // it is no longer required, whether while the trace is still active + // and the event is discarded, or when a completed trace is discarded. 
+ SetRecycler(f func(interface{})) + + // SetTraceInfo sets the trace info for the trace. + // This is currently unused. + SetTraceInfo(traceID, spanID uint64) + + // SetMaxEvents sets the maximum number of events that will be stored + // in the trace. This has no effect if any events have already been + // added to the trace. + SetMaxEvents(m int) + + // Finish declares that this trace is complete. + // The trace should not be used after calling this method. + Finish() +} + +type lazySprintf struct { + format string + a []interface{} +} + +func (l *lazySprintf) String() string { + return fmt.Sprintf(l.format, l.a...) +} + +// New returns a new Trace with the specified family and title. +func New(family, title string) Trace { + tr := newTrace() + tr.ref() + tr.Family, tr.Title = family, title + tr.Start = time.Now() + tr.maxEvents = maxEventsPerTrace + tr.events = tr.eventsBuf[:0] + + activeMu.RLock() + s := activeTraces[tr.Family] + activeMu.RUnlock() + if s == nil { + activeMu.Lock() + s = activeTraces[tr.Family] // check again + if s == nil { + s = new(traceSet) + activeTraces[tr.Family] = s + } + activeMu.Unlock() + } + s.Add(tr) + + // Trigger allocation of the completed trace structure for this family. + // This will cause the family to be present in the request page during + // the first trace of this family. We don't care about the return value, + // nor is there any need for this to run inline, so we execute it in its + // own goroutine, but only if the family isn't allocated yet. + completedMu.RLock() + if _, ok := completedTraces[tr.Family]; !ok { + go allocFamily(tr.Family) + } + completedMu.RUnlock() + + return tr +} + +func (tr *trace) Finish() { + elapsed := time.Since(tr.Start) + tr.mu.Lock() + tr.Elapsed = elapsed + tr.mu.Unlock() + + if DebugUseAfterFinish { + buf := make([]byte, 4<<10) // 4 KB should be enough + n := runtime.Stack(buf, false) + tr.finishStack = buf[:n] + } + + activeMu.RLock() + m := activeTraces[tr.Family] + activeMu.RUnlock() + m.Remove(tr) + + f := getFamily(tr.Family, true) + tr.mu.RLock() // protects tr fields in Cond.match calls + for _, b := range f.Buckets { + if b.Cond.match(tr) { + b.Add(tr) + } + } + tr.mu.RUnlock() + + // Add a sample of elapsed time as microseconds to the family's timeseries + h := new(histogram) + h.addMeasurement(elapsed.Nanoseconds() / 1e3) + f.LatencyMu.Lock() + f.Latency.Add(h) + f.LatencyMu.Unlock() + + tr.unref() // matches ref in New +} + +const ( + bucketsPerFamily = 9 + tracesPerBucket = 10 + maxActiveTraces = 20 // Maximum number of active traces to show. + maxEventsPerTrace = 10 + numHistogramBuckets = 38 +) + +var ( + // The active traces. + activeMu sync.RWMutex + activeTraces = make(map[string]*traceSet) // family -> traces + + // Families of completed traces. + completedMu sync.RWMutex + completedTraces = make(map[string]*family) // family -> traces +) + +type traceSet struct { + mu sync.RWMutex + m map[*trace]bool + + // We could avoid the entire map scan in FirstN by having a slice of all the traces + // ordered by start time, and an index into that from the trace struct, with a periodic + // repack of the slice after enough traces finish; we could also use a skip list or similar. + // However, that would shift some of the expense from /debug/requests time to RPC time, + // which is probably the wrong trade-off. 
+} + +func (ts *traceSet) Len() int { + ts.mu.RLock() + defer ts.mu.RUnlock() + return len(ts.m) +} + +func (ts *traceSet) Add(tr *trace) { + ts.mu.Lock() + if ts.m == nil { + ts.m = make(map[*trace]bool) + } + ts.m[tr] = true + ts.mu.Unlock() +} + +func (ts *traceSet) Remove(tr *trace) { + ts.mu.Lock() + delete(ts.m, tr) + ts.mu.Unlock() +} + +// FirstN returns the first n traces ordered by time. +func (ts *traceSet) FirstN(n int) traceList { + ts.mu.RLock() + defer ts.mu.RUnlock() + + if n > len(ts.m) { + n = len(ts.m) + } + trl := make(traceList, 0, n) + + // Fast path for when no selectivity is needed. + if n == len(ts.m) { + for tr := range ts.m { + tr.ref() + trl = append(trl, tr) + } + sort.Sort(trl) + return trl + } + + // Pick the oldest n traces. + // This is inefficient. See the comment in the traceSet struct. + for tr := range ts.m { + // Put the first n traces into trl in the order they occur. + // When we have n, sort trl, and thereafter maintain its order. + if len(trl) < n { + tr.ref() + trl = append(trl, tr) + if len(trl) == n { + // This is guaranteed to happen exactly once during this loop. + sort.Sort(trl) + } + continue + } + if tr.Start.After(trl[n-1].Start) { + continue + } + + // Find where to insert this one. + tr.ref() + i := sort.Search(n, func(i int) bool { return trl[i].Start.After(tr.Start) }) + trl[n-1].unref() + copy(trl[i+1:], trl[i:]) + trl[i] = tr + } + + return trl +} + +func getActiveTraces(fam string) traceList { + activeMu.RLock() + s := activeTraces[fam] + activeMu.RUnlock() + if s == nil { + return nil + } + return s.FirstN(maxActiveTraces) +} + +func getFamily(fam string, allocNew bool) *family { + completedMu.RLock() + f := completedTraces[fam] + completedMu.RUnlock() + if f == nil && allocNew { + f = allocFamily(fam) + } + return f +} + +func allocFamily(fam string) *family { + completedMu.Lock() + defer completedMu.Unlock() + f := completedTraces[fam] + if f == nil { + f = newFamily() + completedTraces[fam] = f + } + return f +} + +// family represents a set of trace buckets and associated latency information. +type family struct { + // traces may occur in multiple buckets. + Buckets [bucketsPerFamily]*traceBucket + + // latency time series + LatencyMu sync.RWMutex + Latency *timeseries.MinuteHourSeries +} + +func newFamily() *family { + return &family{ + Buckets: [bucketsPerFamily]*traceBucket{ + {Cond: minCond(0)}, + {Cond: minCond(50 * time.Millisecond)}, + {Cond: minCond(100 * time.Millisecond)}, + {Cond: minCond(200 * time.Millisecond)}, + {Cond: minCond(500 * time.Millisecond)}, + {Cond: minCond(1 * time.Second)}, + {Cond: minCond(10 * time.Second)}, + {Cond: minCond(100 * time.Second)}, + {Cond: errorCond{}}, + }, + Latency: timeseries.NewMinuteHourSeries(func() timeseries.Observable { return new(histogram) }), + } +} + +// traceBucket represents a size-capped bucket of historic traces, +// along with a condition for a trace to belong to the bucket. +type traceBucket struct { + Cond cond + + // Ring buffer implementation of a fixed-size FIFO queue. + mu sync.RWMutex + buf [tracesPerBucket]*trace + start int // < tracesPerBucket + length int // <= tracesPerBucket +} + +func (b *traceBucket) Add(tr *trace) { + b.mu.Lock() + defer b.mu.Unlock() + + i := b.start + b.length + if i >= tracesPerBucket { + i -= tracesPerBucket + } + if b.length == tracesPerBucket { + // "Remove" an element from the bucket. 
+ b.buf[i].unref() + b.start++ + if b.start == tracesPerBucket { + b.start = 0 + } + } + b.buf[i] = tr + if b.length < tracesPerBucket { + b.length++ + } + tr.ref() +} + +// Copy returns a copy of the traces in the bucket. +// If tracedOnly is true, only the traces with trace information will be returned. +// The logs will be ref'd before returning; the caller should call +// the Free method when it is done with them. +// TODO(dsymonds): keep track of traced requests in separate buckets. +func (b *traceBucket) Copy(tracedOnly bool) traceList { + b.mu.RLock() + defer b.mu.RUnlock() + + trl := make(traceList, 0, b.length) + for i, x := 0, b.start; i < b.length; i++ { + tr := b.buf[x] + if !tracedOnly || tr.spanID != 0 { + tr.ref() + trl = append(trl, tr) + } + x++ + if x == b.length { + x = 0 + } + } + return trl +} + +func (b *traceBucket) Empty() bool { + b.mu.RLock() + defer b.mu.RUnlock() + return b.length == 0 +} + +// cond represents a condition on a trace. +type cond interface { + match(t *trace) bool + String() string +} + +type minCond time.Duration + +func (m minCond) match(t *trace) bool { return t.Elapsed >= time.Duration(m) } +func (m minCond) String() string { return fmt.Sprintf("≥%gs", time.Duration(m).Seconds()) } + +type errorCond struct{} + +func (e errorCond) match(t *trace) bool { return t.IsError } +func (e errorCond) String() string { return "errors" } + +type traceList []*trace + +// Free calls unref on each element of the list. +func (trl traceList) Free() { + for _, t := range trl { + t.unref() + } +} + +// traceList may be sorted in reverse chronological order. +func (trl traceList) Len() int { return len(trl) } +func (trl traceList) Less(i, j int) bool { return trl[i].Start.After(trl[j].Start) } +func (trl traceList) Swap(i, j int) { trl[i], trl[j] = trl[j], trl[i] } + +// An event is a timestamped log entry in a trace. +type event struct { + When time.Time + Elapsed time.Duration // since previous event in trace + NewDay bool // whether this event is on a different day to the previous event + Recyclable bool // whether this event was passed via LazyLog + Sensitive bool // whether this event contains sensitive information + What interface{} // string or fmt.Stringer +} + +// WhenString returns a string representation of the elapsed time of the event. +// It will include the date if midnight was crossed. +func (e event) WhenString() string { + if e.NewDay { + return e.When.Format("2006/01/02 15:04:05.000000") + } + return e.When.Format("15:04:05.000000") +} + +// discarded represents a number of discarded events. +// It is stored as *discarded to make it easier to update in-place. +type discarded int + +func (d *discarded) String() string { + return fmt.Sprintf("(%d events discarded)", int(*d)) +} + +// trace represents an active or complete request, +// either sent or received by this program. +type trace struct { + // Family is the top-level grouping of traces to which this belongs. + Family string + + // Title is the title of this trace. + Title string + + // Start time of the this trace. + Start time.Time + + mu sync.RWMutex + events []event // Append-only sequence of events (modulo discards). + maxEvents int + recycler func(interface{}) + IsError bool // Whether this trace resulted in an error. + Elapsed time.Duration // Elapsed time for this trace, zero while active. + traceID uint64 // Trace information if non-zero. 
+ spanID uint64 + + refs int32 // how many buckets this is in + disc discarded // scratch space to avoid allocation + + finishStack []byte // where finish was called, if DebugUseAfterFinish is set + + eventsBuf [4]event // preallocated buffer in case we only log a few events +} + +func (tr *trace) reset() { + // Clear all but the mutex. Mutexes may not be copied, even when unlocked. + tr.Family = "" + tr.Title = "" + tr.Start = time.Time{} + + tr.mu.Lock() + tr.Elapsed = 0 + tr.traceID = 0 + tr.spanID = 0 + tr.IsError = false + tr.maxEvents = 0 + tr.events = nil + tr.recycler = nil + tr.mu.Unlock() + + tr.refs = 0 + tr.disc = 0 + tr.finishStack = nil + for i := range tr.eventsBuf { + tr.eventsBuf[i] = event{} + } +} + +// delta returns the elapsed time since the last event or the trace start, +// and whether it spans midnight. +// L >= tr.mu +func (tr *trace) delta(t time.Time) (time.Duration, bool) { + if len(tr.events) == 0 { + return t.Sub(tr.Start), false + } + prev := tr.events[len(tr.events)-1].When + return t.Sub(prev), prev.Day() != t.Day() +} + +func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) { + if DebugUseAfterFinish && tr.finishStack != nil { + buf := make([]byte, 4<<10) // 4 KB should be enough + n := runtime.Stack(buf, false) + log.Printf("net/trace: trace used after finish:\nFinished at:\n%s\nUsed at:\n%s", tr.finishStack, buf[:n]) + } + + /* + NOTE TO DEBUGGERS + + If you are here because your program panicked in this code, + it is almost definitely the fault of code using this package, + and very unlikely to be the fault of this code. + + The most likely scenario is that some code elsewhere is using + a trace.Trace after its Finish method is called. + You can temporarily set the DebugUseAfterFinish var + to help discover where that is; do not leave that var set, + since it makes this package much less efficient. + */ + + e := event{When: time.Now(), What: x, Recyclable: recyclable, Sensitive: sensitive} + tr.mu.Lock() + e.Elapsed, e.NewDay = tr.delta(e.When) + if len(tr.events) < tr.maxEvents { + tr.events = append(tr.events, e) + } else { + // Discard the middle events. + di := int((tr.maxEvents - 1) / 2) + if d, ok := tr.events[di].What.(*discarded); ok { + (*d)++ + } else { + // disc starts at two to count for the event it is replacing, + // plus the next one that we are about to drop. + tr.disc = 2 + if tr.recycler != nil && tr.events[di].Recyclable { + go tr.recycler(tr.events[di].What) + } + tr.events[di].What = &tr.disc + } + // The timestamp of the discarded meta-event should be + // the time of the last event it is representing. + tr.events[di].When = tr.events[di+1].When + + if tr.recycler != nil && tr.events[di+1].Recyclable { + go tr.recycler(tr.events[di+1].What) + } + copy(tr.events[di+1:], tr.events[di+2:]) + tr.events[tr.maxEvents-1] = e + } + tr.mu.Unlock() +} + +func (tr *trace) LazyLog(x fmt.Stringer, sensitive bool) { + tr.addEvent(x, true, sensitive) +} + +func (tr *trace) LazyPrintf(format string, a ...interface{}) { + tr.addEvent(&lazySprintf{format, a}, false, false) +} + +func (tr *trace) SetError() { + tr.mu.Lock() + tr.IsError = true + tr.mu.Unlock() +} + +func (tr *trace) SetRecycler(f func(interface{})) { + tr.mu.Lock() + tr.recycler = f + tr.mu.Unlock() +} + +func (tr *trace) SetTraceInfo(traceID, spanID uint64) { + tr.mu.Lock() + tr.traceID, tr.spanID = traceID, spanID + tr.mu.Unlock() +} + +func (tr *trace) SetMaxEvents(m int) { + tr.mu.Lock() + // Always keep at least three events: first, discarded count, last. 
+ if len(tr.events) == 0 && m > 3 { + tr.maxEvents = m + } + tr.mu.Unlock() +} + +func (tr *trace) ref() { + atomic.AddInt32(&tr.refs, 1) +} + +func (tr *trace) unref() { + if atomic.AddInt32(&tr.refs, -1) == 0 { + tr.mu.RLock() + if tr.recycler != nil { + // freeTrace clears tr, so we hold tr.recycler and tr.events here. + go func(f func(interface{}), es []event) { + for _, e := range es { + if e.Recyclable { + f(e.What) + } + } + }(tr.recycler, tr.events) + } + tr.mu.RUnlock() + + freeTrace(tr) + } +} + +func (tr *trace) When() string { + return tr.Start.Format("2006/01/02 15:04:05.000000") +} + +func (tr *trace) ElapsedTime() string { + tr.mu.RLock() + t := tr.Elapsed + tr.mu.RUnlock() + + if t == 0 { + // Active trace. + t = time.Since(tr.Start) + } + return fmt.Sprintf("%.6f", t.Seconds()) +} + +func (tr *trace) Events() []event { + tr.mu.RLock() + defer tr.mu.RUnlock() + return tr.events +} + +var traceFreeList = make(chan *trace, 1000) // TODO(dsymonds): Use sync.Pool? + +// newTrace returns a trace ready to use. +func newTrace() *trace { + select { + case tr := <-traceFreeList: + return tr + default: + return new(trace) + } +} + +// freeTrace adds tr to traceFreeList if there's room. +// This is non-blocking. +func freeTrace(tr *trace) { + if DebugUseAfterFinish { + return // never reuse + } + tr.reset() + select { + case traceFreeList <- tr: + default: + } +} + +func elapsed(d time.Duration) string { + b := []byte(fmt.Sprintf("%.6f", d.Seconds())) + + // For subsecond durations, blank all zeros before decimal point, + // and all zeros between the decimal point and the first non-zero digit. + if d < time.Second { + dot := bytes.IndexByte(b, '.') + for i := 0; i < dot; i++ { + b[i] = ' ' + } + for i := dot + 1; i < len(b); i++ { + if b[i] == '0' { + b[i] = ' ' + } else { + break + } + } + } + + return string(b) +} + +var pageTmplCache *template.Template +var pageTmplOnce sync.Once + +func pageTmpl() *template.Template { + pageTmplOnce.Do(func() { + pageTmplCache = template.Must(template.New("Page").Funcs(template.FuncMap{ + "elapsed": elapsed, + "add": func(a, b int) int { return a + b }, + }).Parse(pageHTML)) + }) + return pageTmplCache +} + +const pageHTML = ` +{{template "Prolog" .}} +{{template "StatusTable" .}} +{{template "Epilog" .}} + +{{define "Prolog"}} + + + /debug/requests + + + + +

/debug/requests

+{{end}} {{/* end of Prolog */}} + +{{define "StatusTable"}} + + {{range $fam := .Families}} + + + + {{$n := index $.ActiveTraceCount $fam}} + + + {{$f := index $.CompletedTraces $fam}} + {{range $i, $b := $f.Buckets}} + {{$empty := $b.Empty}} + + {{end}} + + {{$nb := len $f.Buckets}} + + + + + + {{end}} +
{{$fam}} + {{if $n}}{{end}} + [{{$n}} active] + {{if $n}}{{end}} + + {{if not $empty}}{{end}} + [{{.Cond}}] + {{if not $empty}}{{end}} + + [minute] + + [hour] + + [total] +
+{{end}} {{/* end of StatusTable */}} + +{{define "Epilog"}} +{{if $.Traces}} +
+

Family: {{$.Family}}

+ +{{if or $.Expanded $.Traced}} + [Normal/Summary] +{{else}} + [Normal/Summary] +{{end}} + +{{if or (not $.Expanded) $.Traced}} + [Normal/Expanded] +{{else}} + [Normal/Expanded] +{{end}} + +{{if not $.Active}} + {{if or $.Expanded (not $.Traced)}} + [Traced/Summary] + {{else}} + [Traced/Summary] + {{end}} + {{if or (not $.Expanded) (not $.Traced)}} + [Traced/Expanded] + {{else}} + [Traced/Expanded] + {{end}} +{{end}} + +{{if $.Total}} +

Showing {{len $.Traces}} of {{$.Total}} traces.

+{{end}} + + + + + {{range $tr := $.Traces}} + + + + + {{/* TODO: include traceID/spanID */}} + + {{if $.Expanded}} + {{range $tr.Events}} + + + + + + {{end}} + {{end}} + {{end}} +
+ {{if $.Active}}Active{{else}}Completed{{end}} Requests +
WhenElapsed (s)
{{$tr.When}}{{$tr.ElapsedTime}}{{$tr.Title}}
{{.WhenString}}{{elapsed .Elapsed}}{{if or $.ShowSensitive (not .Sensitive)}}... {{.What}}{{else}}[redacted]{{end}}
+{{end}} {{/* if $.Traces */}} + +{{if $.Histogram}} +

Latency (µs) of {{$.Family}} over {{$.HistogramWindow}}

+{{$.Histogram}} +{{end}} {{/* if $.Histogram */}} + + + +{{end}} {{/* end of Epilog */}} +` diff --git a/vendor/golang.org/x/sync/LICENSE b/vendor/golang.org/x/sync/LICENSE index 6a66aea5..2a7cf70d 100644 --- a/vendor/golang.org/x/sync/LICENSE +++ b/vendor/golang.org/x/sync/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go index b18efb74..948a3ee6 100644 --- a/vendor/golang.org/x/sync/errgroup/errgroup.go +++ b/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -4,6 +4,9 @@ // Package errgroup provides synchronization, error propagation, and Context // cancelation for groups of goroutines working on subtasks of a common task. +// +// [errgroup.Group] is related to [sync.WaitGroup] but adds handling of tasks +// returning errors. package errgroup import ( diff --git a/vendor/golang.org/x/sync/errgroup/go120.go b/vendor/golang.org/x/sync/errgroup/go120.go index 7d419d37..f93c740b 100644 --- a/vendor/golang.org/x/sync/errgroup/go120.go +++ b/vendor/golang.org/x/sync/errgroup/go120.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build go1.20 -// +build go1.20 package errgroup diff --git a/vendor/golang.org/x/sync/errgroup/pre_go120.go b/vendor/golang.org/x/sync/errgroup/pre_go120.go index 1795c18a..88ce3343 100644 --- a/vendor/golang.org/x/sync/errgroup/pre_go120.go +++ b/vendor/golang.org/x/sync/errgroup/pre_go120.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !go1.20 -// +build !go1.20 package errgroup diff --git a/vendor/golang.org/x/sync/semaphore/semaphore.go b/vendor/golang.org/x/sync/semaphore/semaphore.go new file mode 100644 index 00000000..b618162a --- /dev/null +++ b/vendor/golang.org/x/sync/semaphore/semaphore.go @@ -0,0 +1,160 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package semaphore provides a weighted semaphore implementation. +package semaphore // import "golang.org/x/sync/semaphore" + +import ( + "container/list" + "context" + "sync" +) + +type waiter struct { + n int64 + ready chan<- struct{} // Closed when semaphore acquired. +} + +// NewWeighted creates a new weighted semaphore with the given +// maximum combined weight for concurrent access. +func NewWeighted(n int64) *Weighted { + w := &Weighted{size: n} + return w +} + +// Weighted provides a way to bound concurrent access to a resource. +// The callers can request access with a given weight. +type Weighted struct { + size int64 + cur int64 + mu sync.Mutex + waiters list.List +} + +// Acquire acquires the semaphore with a weight of n, blocking until resources +// are available or ctx is done. On success, returns nil. 
On failure, returns +// ctx.Err() and leaves the semaphore unchanged. +func (s *Weighted) Acquire(ctx context.Context, n int64) error { + done := ctx.Done() + + s.mu.Lock() + select { + case <-done: + // ctx becoming done has "happened before" acquiring the semaphore, + // whether it became done before the call began or while we were + // waiting for the mutex. We prefer to fail even if we could acquire + // the mutex without blocking. + s.mu.Unlock() + return ctx.Err() + default: + } + if s.size-s.cur >= n && s.waiters.Len() == 0 { + // Since we hold s.mu and haven't synchronized since checking done, if + // ctx becomes done before we return here, it becoming done must have + // "happened concurrently" with this call - it cannot "happen before" + // we return in this branch. So, we're ok to always acquire here. + s.cur += n + s.mu.Unlock() + return nil + } + + if n > s.size { + // Don't make other Acquire calls block on one that's doomed to fail. + s.mu.Unlock() + <-done + return ctx.Err() + } + + ready := make(chan struct{}) + w := waiter{n: n, ready: ready} + elem := s.waiters.PushBack(w) + s.mu.Unlock() + + select { + case <-done: + s.mu.Lock() + select { + case <-ready: + // Acquired the semaphore after we were canceled. + // Pretend we didn't and put the tokens back. + s.cur -= n + s.notifyWaiters() + default: + isFront := s.waiters.Front() == elem + s.waiters.Remove(elem) + // If we're at the front and there're extra tokens left, notify other waiters. + if isFront && s.size > s.cur { + s.notifyWaiters() + } + } + s.mu.Unlock() + return ctx.Err() + + case <-ready: + // Acquired the semaphore. Check that ctx isn't already done. + // We check the done channel instead of calling ctx.Err because we + // already have the channel, and ctx.Err is O(n) with the nesting + // depth of ctx. + select { + case <-done: + s.Release(n) + return ctx.Err() + default: + } + return nil + } +} + +// TryAcquire acquires the semaphore with a weight of n without blocking. +// On success, returns true. On failure, returns false and leaves the semaphore unchanged. +func (s *Weighted) TryAcquire(n int64) bool { + s.mu.Lock() + success := s.size-s.cur >= n && s.waiters.Len() == 0 + if success { + s.cur += n + } + s.mu.Unlock() + return success +} + +// Release releases the semaphore with a weight of n. +func (s *Weighted) Release(n int64) { + s.mu.Lock() + s.cur -= n + if s.cur < 0 { + s.mu.Unlock() + panic("semaphore: released more than held") + } + s.notifyWaiters() + s.mu.Unlock() +} + +func (s *Weighted) notifyWaiters() { + for { + next := s.waiters.Front() + if next == nil { + break // No more waiters blocked. + } + + w := next.Value.(waiter) + if s.size-s.cur < w.n { + // Not enough tokens for the next waiter. We could keep going (to try to + // find a waiter with a smaller request), but under load that could cause + // starvation for large requests; instead, we leave all remaining waiters + // blocked. + // + // Consider a semaphore used as a read-write lock, with N tokens, N + // readers, and one writer. Each reader can Acquire(1) to obtain a read + // lock. The writer can Acquire(N) to obtain a write lock, excluding all + // of the readers. If we allow the readers to jump ahead in the queue, + // the writer will starve — there is always one token available for every + // reader. 
+ break + } + + s.cur += w.n + s.waiters.Remove(next) + close(w.ready) + } +} diff --git a/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s b/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s index db9171c2..269e173c 100644 --- a/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s +++ b/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go index 4756ad5f..8fa707aa 100644 --- a/vendor/golang.org/x/sys/cpu/cpu.go +++ b/vendor/golang.org/x/sys/cpu/cpu.go @@ -103,6 +103,7 @@ var ARM64 struct { HasASIMDDP bool // Advanced SIMD double precision instruction set HasSHA512 bool // SHA512 hardware implementation HasSVE bool // Scalable Vector Extensions + HasSVE2 bool // Scalable Vector Extensions 2 HasASIMDFHM bool // Advanced SIMD multiplication FP16 to FP32 _ CacheLinePad } diff --git a/vendor/golang.org/x/sys/cpu/cpu_aix.go b/vendor/golang.org/x/sys/cpu/cpu_aix.go index 8aaeef54..9bf0c32e 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_aix.go +++ b/vendor/golang.org/x/sys/cpu/cpu_aix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix -// +build aix package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_arm64.go index f3eb993b..0e27a21e 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.go @@ -28,6 +28,7 @@ func initOptions() { {Name: "sm3", Feature: &ARM64.HasSM3}, {Name: "sm4", Feature: &ARM64.HasSM4}, {Name: "sve", Feature: &ARM64.HasSVE}, + {Name: "sve2", Feature: &ARM64.HasSVE2}, {Name: "crc32", Feature: &ARM64.HasCRC32}, {Name: "atomics", Feature: &ARM64.HasATOMICS}, {Name: "asimdhp", Feature: &ARM64.HasASIMDHP}, @@ -164,6 +165,15 @@ func parseARM64SystemRegisters(isar0, isar1, pfr0 uint64) { switch extractBits(pfr0, 32, 35) { case 1: ARM64.HasSVE = true + + parseARM64SVERegister(getzfr0()) + } +} + +func parseARM64SVERegister(zfr0 uint64) { + switch extractBits(zfr0, 0, 3) { + case 1: + ARM64.HasSVE2 = true } } diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.s b/vendor/golang.org/x/sys/cpu/cpu_arm64.s index c61f95a0..22cc9984 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_arm64.s +++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc -// +build gc #include "textflag.h" @@ -30,3 +29,11 @@ TEXT ·getpfr0(SB),NOSPLIT,$0-8 WORD $0xd5380400 MOVD R0, ret+0(FP) RET + +// func getzfr0() uint64 +TEXT ·getzfr0(SB),NOSPLIT,$0-8 + // get SVE Feature Register 0 into x0 + // mrs x0, ID_AA64ZFR0_EL1 = d5380480 + WORD $0xd5380480 + MOVD R0, ret+0(FP) + RET diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go index ccf542a7..6ac6e1ef 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go @@ -3,10 +3,10 @@ // license that can be found in the LICENSE file. //go:build gc -// +build gc package cpu func getisar0() uint64 func getisar1() uint64 func getpfr0() uint64 +func getzfr0() uint64 diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go index 0af2f248..c8ae6ddc 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build gc -// +build gc package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go index fa7cdb9b..910728fb 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (386 || amd64 || amd64p32) && gc -// +build 386 amd64 amd64p32 -// +build gc package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go index 2aff3189..7f194678 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gccgo -// +build gccgo package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go index 4bfbda61..9526d2ce 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gccgo -// +build gccgo package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c index 6cc73109..3f73a05d 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (386 || amd64 || amd64p32) && gccgo -// +build 386 amd64 amd64p32 -// +build gccgo #include #include diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go index 863d415a..99c60fe9 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (386 || amd64 || amd64p32) && gccgo -// +build 386 amd64 amd64p32 -// +build gccgo package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux.go b/vendor/golang.org/x/sys/cpu/cpu_linux.go index 159a686f..743eb543 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_linux.go +++ b/vendor/golang.org/x/sys/cpu/cpu_linux.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !386 && !amd64 && !amd64p32 && !arm64 -// +build !386,!amd64,!amd64p32,!arm64 package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go index a968b80f..3d386d0f 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go @@ -35,6 +35,8 @@ const ( hwcap_SHA512 = 1 << 21 hwcap_SVE = 1 << 22 hwcap_ASIMDFHM = 1 << 23 + + hwcap2_SVE2 = 1 << 1 ) // linuxKernelCanEmulateCPUID reports whether we're running @@ -104,6 +106,9 @@ func doinit() { ARM64.HasSHA512 = isSet(hwCap, hwcap_SHA512) ARM64.HasSVE = isSet(hwCap, hwcap_SVE) ARM64.HasASIMDFHM = isSet(hwCap, hwcap_ASIMDFHM) + + // HWCAP2 feature bits + ARM64.HasSVE2 = isSet(hwCap2, hwcap2_SVE2) } func isSet(hwc uint, value uint) bool { diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go index 6000db4c..4686c1d5 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build linux && (mips64 || mips64le) -// +build linux -// +build mips64 mips64le package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go index f4992b1a..cd63e733 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && !arm && !arm64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !s390x -// +build linux,!arm,!arm64,!mips64,!mips64le,!ppc64,!ppc64le,!s390x package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go index 021356d6..197188e6 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && (ppc64 || ppc64le) -// +build linux -// +build ppc64 ppc64le package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_loong64.go b/vendor/golang.org/x/sys/cpu/cpu_loong64.go index 0f57b05b..55863585 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_loong64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_loong64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build loong64 -// +build loong64 package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_mips64x.go index f4063c66..fedb00cc 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_mips64x.go +++ b/vendor/golang.org/x/sys/cpu/cpu_mips64x.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build mips64 || mips64le -// +build mips64 mips64le package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_mipsx.go b/vendor/golang.org/x/sys/cpu/cpu_mipsx.go index 07c4e36d..ffb4ec7e 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_mipsx.go +++ b/vendor/golang.org/x/sys/cpu/cpu_mipsx.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build mips || mipsle -// +build mips mipsle package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_arm.go b/vendor/golang.org/x/sys/cpu/cpu_other_arm.go index d7b4fb4c..e9ecf2a4 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_other_arm.go +++ b/vendor/golang.org/x/sys/cpu/cpu_other_arm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !linux && arm -// +build !linux,arm package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go index f3cde129..5341e7f8 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !linux && !netbsd && !openbsd && arm64 -// +build !linux,!netbsd,!openbsd,arm64 package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go index 0dafe964..5f8f2419 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go +++ b/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build !linux && (mips64 || mips64le) -// +build !linux -// +build mips64 mips64le package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go index 060d46b6..89608fba 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go +++ b/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. //go:build !aix && !linux && (ppc64 || ppc64le) -// +build !aix -// +build !linux -// +build ppc64 ppc64le package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go index dd10eb79..5ab87808 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !linux && riscv64 -// +build !linux,riscv64 package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go index 4e8acd16..c14f12b1 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go +++ b/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build ppc64 || ppc64le -// +build ppc64 ppc64le package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go index ff7da60e..7f0c79c0 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_riscv64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build riscv64 -// +build riscv64 package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_s390x.s b/vendor/golang.org/x/sys/cpu/cpu_s390x.s index 96f81e20..1fb4b701 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_s390x.s +++ b/vendor/golang.org/x/sys/cpu/cpu_s390x.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/cpu/cpu_wasm.go b/vendor/golang.org/x/sys/cpu/cpu_wasm.go index 7747d888..384787ea 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_wasm.go +++ b/vendor/golang.org/x/sys/cpu/cpu_wasm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build wasm -// +build wasm package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.go b/vendor/golang.org/x/sys/cpu/cpu_x86.go index 2dcde828..c29f5e4c 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_x86.go +++ b/vendor/golang.org/x/sys/cpu/cpu_x86.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build 386 || amd64 || amd64p32 -// +build 386 amd64 amd64p32 package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.s b/vendor/golang.org/x/sys/cpu/cpu_x86.s index 39acab2f..7d7ba33e 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_x86.s +++ b/vendor/golang.org/x/sys/cpu/cpu_x86.s @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (386 || amd64 || amd64p32) && gc -// +build 386 amd64 amd64p32 -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/cpu/endian_big.go b/vendor/golang.org/x/sys/cpu/endian_big.go index 93ce03a3..7fe04b0a 100644 --- a/vendor/golang.org/x/sys/cpu/endian_big.go +++ b/vendor/golang.org/x/sys/cpu/endian_big.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build armbe || arm64be || m68k || mips || mips64 || mips64p32 || ppc || ppc64 || s390 || s390x || shbe || sparc || sparc64 -// +build armbe arm64be m68k mips mips64 mips64p32 ppc ppc64 s390 s390x shbe sparc sparc64 package cpu diff --git a/vendor/golang.org/x/sys/cpu/endian_little.go b/vendor/golang.org/x/sys/cpu/endian_little.go index 55db853e..48eccc4c 100644 --- a/vendor/golang.org/x/sys/cpu/endian_little.go +++ b/vendor/golang.org/x/sys/cpu/endian_little.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build 386 || amd64 || amd64p32 || alpha || arm || arm64 || loong64 || mipsle || mips64le || mips64p32le || nios2 || ppc64le || riscv || riscv64 || sh || wasm -// +build 386 amd64 amd64p32 alpha arm arm64 loong64 mipsle mips64le mips64p32le nios2 ppc64le riscv riscv64 sh wasm package cpu diff --git a/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go b/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go index d87bd6b3..4cd64c70 100644 --- a/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go +++ b/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && arm64 -// +build linux,arm64 package cpu diff --git a/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go b/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go index b975ea2a..4c9788ea 100644 --- a/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go +++ b/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build go1.21 -// +build go1.21 package cpu diff --git a/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go b/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go index 96134157..1b9ccb09 100644 --- a/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go +++ b/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go @@ -9,7 +9,6 @@ // gccgo's libgo and thus must not used a CGo method. //go:build aix && gccgo -// +build aix,gccgo package cpu diff --git a/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go b/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go index 904be42f..e8b6cdbe 100644 --- a/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go +++ b/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go @@ -7,7 +7,6 @@ // (See golang.org/issue/32102) //go:build aix && ppc64 && gc -// +build aix,ppc64,gc package cpu diff --git a/vendor/golang.org/x/sys/execabs/execabs_go118.go b/vendor/golang.org/x/sys/execabs/execabs_go118.go index 2000064a..5627d70e 100644 --- a/vendor/golang.org/x/sys/execabs/execabs_go118.go +++ b/vendor/golang.org/x/sys/execabs/execabs_go118.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !go1.19 -// +build !go1.19 package execabs diff --git a/vendor/golang.org/x/sys/execabs/execabs_go119.go b/vendor/golang.org/x/sys/execabs/execabs_go119.go index f364b341..d60ab1b4 100644 --- a/vendor/golang.org/x/sys/execabs/execabs_go119.go +++ b/vendor/golang.org/x/sys/execabs/execabs_go119.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build go1.19 -// +build go1.19 package execabs diff --git a/vendor/golang.org/x/sys/unix/aliases.go b/vendor/golang.org/x/sys/unix/aliases.go index abc89c10..b0e41985 100644 --- a/vendor/golang.org/x/sys/unix/aliases.go +++ b/vendor/golang.org/x/sys/unix/aliases.go @@ -2,9 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:build (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos) && go1.9 -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos -// +build go1.9 +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos package unix diff --git a/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s b/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s index db9171c2..269e173c 100644 --- a/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s +++ b/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_386.s b/vendor/golang.org/x/sys/unix/asm_bsd_386.s index e0fcd9b3..a4fcef0e 100644 --- a/vendor/golang.org/x/sys/unix/asm_bsd_386.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_386.s @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (freebsd || netbsd || openbsd) && gc -// +build freebsd netbsd openbsd -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s b/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s index 2b99c349..1e63615c 100644 --- a/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (darwin || dragonfly || freebsd || netbsd || openbsd) && gc -// +build darwin dragonfly freebsd netbsd openbsd -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_arm.s b/vendor/golang.org/x/sys/unix/asm_bsd_arm.s index d702d4ad..6496c310 100644 --- a/vendor/golang.org/x/sys/unix/asm_bsd_arm.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_arm.s @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (freebsd || netbsd || openbsd) && gc -// +build freebsd netbsd openbsd -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s b/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s index fe36a739..4fd1f54d 100644 --- a/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (darwin || freebsd || netbsd || openbsd) && gc -// +build darwin freebsd netbsd openbsd -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s b/vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s index e5b9a848..42f7eb9e 100644 --- a/vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (darwin || freebsd || netbsd || openbsd) && gc -// +build darwin freebsd netbsd openbsd -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s b/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s index d560019e..f8902667 100644 --- a/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build (darwin || freebsd || netbsd || openbsd) && gc -// +build darwin freebsd netbsd openbsd -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_386.s b/vendor/golang.org/x/sys/unix/asm_linux_386.s index 8fd101d0..3b473487 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_386.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_386.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_amd64.s b/vendor/golang.org/x/sys/unix/asm_linux_amd64.s index 7ed38e43..67e29f31 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_amd64.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_amd64.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_arm.s b/vendor/golang.org/x/sys/unix/asm_linux_arm.s index 8ef1d514..d6ae269c 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_arm.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_arm.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_arm64.s b/vendor/golang.org/x/sys/unix/asm_linux_arm64.s index 98ae0276..01e5e253 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_arm64.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_arm64.s @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && arm64 && gc -// +build linux -// +build arm64 -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_loong64.s b/vendor/golang.org/x/sys/unix/asm_linux_loong64.s index 56535728..2abf12f6 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_loong64.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_loong64.s @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && loong64 && gc -// +build linux -// +build loong64 -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s b/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s index 21231d2c..f84bae71 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && (mips64 || mips64le) && gc -// +build linux -// +build mips64 mips64le -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s b/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s index 6783b26c..f08f6280 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && (mips || mipsle) && gc -// +build linux -// +build mips mipsle -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s b/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s index 19d49893..bdfc024d 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build linux && (ppc64 || ppc64le) && gc -// +build linux -// +build ppc64 ppc64le -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s b/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s index e42eb81d..2e8c9961 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build riscv64 && gc -// +build riscv64 -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_s390x.s b/vendor/golang.org/x/sys/unix/asm_linux_s390x.s index c46aab33..2c394b11 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_s390x.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_s390x.s @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && s390x && gc -// +build linux -// +build s390x -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s b/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s index 5e7a1169..fab586a2 100644 --- a/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s +++ b/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s b/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s index f8c5394c..f949ec54 100644 --- a/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s +++ b/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_zos_s390x.s b/vendor/golang.org/x/sys/unix/asm_zos_s390x.s index 3b54e185..813dfad7 100644 --- a/vendor/golang.org/x/sys/unix/asm_zos_s390x.s +++ b/vendor/golang.org/x/sys/unix/asm_zos_s390x.s @@ -3,18 +3,17 @@ // license that can be found in the LICENSE file. //go:build zos && s390x && gc -// +build zos -// +build s390x -// +build gc #include "textflag.h" #define PSALAA 1208(R0) #define GTAB64(x) 80(x) #define LCA64(x) 88(x) +#define SAVSTACK_ASYNC(x) 336(x) // in the LCA #define CAA(x) 8(x) -#define EDCHPXV(x) 1016(x) // in the CAA -#define SAVSTACK_ASYNC(x) 336(x) // in the LCA +#define CEECAATHDID(x) 976(x) // in the CAA +#define EDCHPXV(x) 1016(x) // in the CAA +#define GOCB(x) 1104(x) // in the CAA // SS_*, where x=SAVSTACK_ASYNC #define SS_LE(x) 0(x) @@ -22,405 +21,362 @@ #define SS_ERRNO(x) 16(x) #define SS_ERRNOJR(x) 20(x) -#define LE_CALL BYTE $0x0D; BYTE $0x76; // BL R7, R6 +// Function Descriptor Offsets +#define __errno 0x156*16 +#define __err2ad 0x16C*16 -TEXT ·clearErrno(SB),NOSPLIT,$0-0 - BL addrerrno<>(SB) - MOVD $0, 0(R3) +// Call Instructions +#define LE_CALL BYTE $0x0D; BYTE $0x76 // BL R7, R6 +#define SVC_LOAD BYTE $0x0A; BYTE $0x08 // SVC 08 LOAD +#define SVC_DELETE BYTE $0x0A; BYTE $0x09 // SVC 09 DELETE + +DATA zosLibVec<>(SB)/8, $0 +GLOBL zosLibVec<>(SB), NOPTR, $8 + +TEXT ·initZosLibVec(SB), NOSPLIT|NOFRAME, $0-0 + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + MOVD CAA(R8), R8 + MOVD EDCHPXV(R8), R8 + MOVD R8, zosLibVec<>(SB) + RET + +TEXT ·GetZosLibVec(SB), NOSPLIT|NOFRAME, $0-0 + MOVD zosLibVec<>(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·clearErrno(SB), NOSPLIT, $0-0 + BL addrerrno<>(SB) + MOVD $0, 0(R3) RET // Returns the address of errno in R3. -TEXT addrerrno<>(SB),NOSPLIT|NOFRAME,$0-0 +TEXT addrerrno<>(SB), NOSPLIT|NOFRAME, $0-0 // Get library control area (LCA). 
- MOVW PSALAA, R8 - MOVD LCA64(R8), R8 + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 // Get __errno FuncDesc. - MOVD CAA(R8), R9 - MOVD EDCHPXV(R9), R9 - ADD $(0x156*16), R9 - LMG 0(R9), R5, R6 + MOVD CAA(R8), R9 + MOVD EDCHPXV(R9), R9 + ADD $(__errno), R9 + LMG 0(R9), R5, R6 // Switch to saved LE stack. - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD 0(R9), R4 - MOVD $0, 0(R9) + MOVD SAVSTACK_ASYNC(R8), R9 + MOVD 0(R9), R4 + MOVD $0, 0(R9) // Call __errno function. LE_CALL NOPH // Switch back to Go stack. - XOR R0, R0 // Restore R0 to $0. - MOVD R4, 0(R9) // Save stack pointer. + XOR R0, R0 // Restore R0 to $0. + MOVD R4, 0(R9) // Save stack pointer. RET -TEXT ·syscall_syscall(SB),NOSPLIT,$0-56 - BL runtime·entersyscall(SB) - MOVD a1+8(FP), R1 - MOVD a2+16(FP), R2 - MOVD a3+24(FP), R3 +// func svcCall(fnptr unsafe.Pointer, argv *unsafe.Pointer, dsa *uint64) +TEXT ·svcCall(SB), NOSPLIT, $0 + BL runtime·save_g(SB) // Save g and stack pointer + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + MOVD SAVSTACK_ASYNC(R8), R9 + MOVD R15, 0(R9) - // Get library control area (LCA). - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 + MOVD argv+8(FP), R1 // Move function arguments into registers + MOVD dsa+16(FP), g + MOVD fnptr+0(FP), R15 - // Get function. - MOVD CAA(R8), R9 - MOVD EDCHPXV(R9), R9 - MOVD trap+0(FP), R5 - SLD $4, R5 - ADD R5, R9 - LMG 0(R9), R5, R6 + BYTE $0x0D // Branch to function + BYTE $0xEF - // Restore LE stack. - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD 0(R9), R4 - MOVD $0, 0(R9) + BL runtime·load_g(SB) // Restore g and stack pointer + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + MOVD SAVSTACK_ASYNC(R8), R9 + MOVD 0(R9), R15 - // Call function. - LE_CALL - NOPH - XOR R0, R0 // Restore R0 to $0. - MOVD R4, 0(R9) // Save stack pointer. - - MOVD R3, r1+32(FP) - MOVD R0, r2+40(FP) - MOVD R0, err+48(FP) - MOVW R3, R4 - CMP R4, $-1 - BNE done - BL addrerrno<>(SB) - MOVWZ 0(R3), R3 - MOVD R3, err+48(FP) -done: - BL runtime·exitsyscall(SB) RET -TEXT ·syscall_rawsyscall(SB),NOSPLIT,$0-56 - MOVD a1+8(FP), R1 - MOVD a2+16(FP), R2 - MOVD a3+24(FP), R3 - - // Get library control area (LCA). - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 - - // Get function. - MOVD CAA(R8), R9 - MOVD EDCHPXV(R9), R9 - MOVD trap+0(FP), R5 - SLD $4, R5 - ADD R5, R9 - LMG 0(R9), R5, R6 +// func svcLoad(name *byte) unsafe.Pointer +TEXT ·svcLoad(SB), NOSPLIT, $0 + MOVD R15, R2 // Save go stack pointer + MOVD name+0(FP), R0 // Move SVC args into registers + MOVD $0x80000000, R1 + MOVD $0, R15 + SVC_LOAD + MOVW R15, R3 // Save return code from SVC + MOVD R2, R15 // Restore go stack pointer + CMP R3, $0 // Check SVC return code + BNE error + + MOVD $-2, R3 // Reset last bit of entry point to zero + AND R0, R3 + MOVD R3, ret+8(FP) // Return entry point returned by SVC + CMP R0, R3 // Check if last bit of entry point was set + BNE done + + MOVD R15, R2 // Save go stack pointer + MOVD $0, R15 // Move SVC args into registers (entry point still in r0 from SVC 08) + SVC_DELETE + MOVD R2, R15 // Restore go stack pointer - // Restore LE stack. - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD 0(R9), R4 - MOVD $0, 0(R9) +error: + MOVD $0, ret+8(FP) // Return 0 on failure - // Call function. - LE_CALL - NOPH - XOR R0, R0 // Restore R0 to $0. - MOVD R4, 0(R9) // Save stack pointer. 
- - MOVD R3, r1+32(FP) - MOVD R0, r2+40(FP) - MOVD R0, err+48(FP) - MOVW R3, R4 - CMP R4, $-1 - BNE done - BL addrerrno<>(SB) - MOVWZ 0(R3), R3 - MOVD R3, err+48(FP) done: + XOR R0, R0 // Reset r0 to 0 RET -TEXT ·syscall_syscall6(SB),NOSPLIT,$0-80 - BL runtime·entersyscall(SB) - MOVD a1+8(FP), R1 - MOVD a2+16(FP), R2 - MOVD a3+24(FP), R3 +// func svcUnload(name *byte, fnptr unsafe.Pointer) int64 +TEXT ·svcUnload(SB), NOSPLIT, $0 + MOVD R15, R2 // Save go stack pointer + MOVD name+0(FP), R0 // Move SVC args into registers + MOVD fnptr+8(FP), R15 + SVC_DELETE + XOR R0, R0 // Reset r0 to 0 + MOVD R15, R1 // Save SVC return code + MOVD R2, R15 // Restore go stack pointer + MOVD R1, ret+16(FP) // Return SVC return code + RET +// func gettid() uint64 +TEXT ·gettid(SB), NOSPLIT, $0 // Get library control area (LCA). - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 - // Get function. - MOVD CAA(R8), R9 - MOVD EDCHPXV(R9), R9 - MOVD trap+0(FP), R5 - SLD $4, R5 - ADD R5, R9 - LMG 0(R9), R5, R6 + // Get CEECAATHDID + MOVD CAA(R8), R9 + MOVD CEECAATHDID(R9), R9 + MOVD R9, ret+0(FP) - // Restore LE stack. - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD 0(R9), R4 - MOVD $0, 0(R9) - - // Fill in parameter list. - MOVD a4+32(FP), R12 - MOVD R12, (2176+24)(R4) - MOVD a5+40(FP), R12 - MOVD R12, (2176+32)(R4) - MOVD a6+48(FP), R12 - MOVD R12, (2176+40)(R4) - - // Call function. - LE_CALL - NOPH - XOR R0, R0 // Restore R0 to $0. - MOVD R4, 0(R9) // Save stack pointer. - - MOVD R3, r1+56(FP) - MOVD R0, r2+64(FP) - MOVD R0, err+72(FP) - MOVW R3, R4 - CMP R4, $-1 - BNE done - BL addrerrno<>(SB) - MOVWZ 0(R3), R3 - MOVD R3, err+72(FP) -done: - BL runtime·exitsyscall(SB) RET -TEXT ·syscall_rawsyscall6(SB),NOSPLIT,$0-80 - MOVD a1+8(FP), R1 - MOVD a2+16(FP), R2 - MOVD a3+24(FP), R3 - - // Get library control area (LCA). - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 - - // Get function. - MOVD CAA(R8), R9 - MOVD EDCHPXV(R9), R9 - MOVD trap+0(FP), R5 - SLD $4, R5 - ADD R5, R9 - LMG 0(R9), R5, R6 +// +// Call LE function, if the return is -1 +// errno and errno2 is retrieved +// +TEXT ·CallLeFuncWithErr(SB), NOSPLIT, $0 + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + MOVD CAA(R8), R9 + MOVD g, GOCB(R9) // Restore LE stack. - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD 0(R9), R4 - MOVD $0, 0(R9) - - // Fill in parameter list. - MOVD a4+32(FP), R12 - MOVD R12, (2176+24)(R4) - MOVD a5+40(FP), R12 - MOVD R12, (2176+32)(R4) - MOVD a6+48(FP), R12 - MOVD R12, (2176+40)(R4) - - // Call function. 
- LE_CALL + MOVD SAVSTACK_ASYNC(R8), R9 // R9-> LE stack frame saving address + MOVD 0(R9), R4 // R4-> restore previously saved stack frame pointer + + MOVD parms_base+8(FP), R7 // R7 -> argument array + MOVD parms_len+16(FP), R8 // R8 number of arguments + + // arg 1 ---> R1 + CMP R8, $0 + BEQ docall + SUB $1, R8 + MOVD 0(R7), R1 + + // arg 2 ---> R2 + CMP R8, $0 + BEQ docall + SUB $1, R8 + ADD $8, R7 + MOVD 0(R7), R2 + + // arg 3 --> R3 + CMP R8, $0 + BEQ docall + SUB $1, R8 + ADD $8, R7 + MOVD 0(R7), R3 + + CMP R8, $0 + BEQ docall + MOVD $2176+16, R6 // starting LE stack address-8 to store 4th argument + +repeat: + ADD $8, R7 + MOVD 0(R7), R0 // advance arg pointer by 8 byte + ADD $8, R6 // advance LE argument address by 8 byte + MOVD R0, (R4)(R6*1) // copy argument from go-slice to le-frame + SUB $1, R8 + CMP R8, $0 + BNE repeat + +docall: + MOVD funcdesc+0(FP), R8 // R8-> function descriptor + LMG 0(R8), R5, R6 + MOVD $0, 0(R9) // R9 address of SAVSTACK_ASYNC + LE_CALL // balr R7, R6 (return #1) + NOPH + MOVD R3, ret+32(FP) + CMP R3, $-1 // compare result to -1 + BNE done + + // retrieve errno and errno2 + MOVD zosLibVec<>(SB), R8 + ADD $(__errno), R8 + LMG 0(R8), R5, R6 + LE_CALL // balr R7, R6 __errno (return #3) NOPH - XOR R0, R0 // Restore R0 to $0. - MOVD R4, 0(R9) // Save stack pointer. - - MOVD R3, r1+56(FP) - MOVD R0, r2+64(FP) - MOVD R0, err+72(FP) - MOVW R3, R4 - CMP R4, $-1 - BNE done - BL ·rrno<>(SB) - MOVWZ 0(R3), R3 - MOVD R3, err+72(FP) + MOVWZ 0(R3), R3 + MOVD R3, err+48(FP) + MOVD zosLibVec<>(SB), R8 + ADD $(__err2ad), R8 + LMG 0(R8), R5, R6 + LE_CALL // balr R7, R6 __err2ad (return #2) + NOPH + MOVW (R3), R2 // retrieve errno2 + MOVD R2, errno2+40(FP) // store in return area + done: + MOVD R4, 0(R9) // Save stack pointer. RET -TEXT ·syscall_syscall9(SB),NOSPLIT,$0 - BL runtime·entersyscall(SB) - MOVD a1+8(FP), R1 - MOVD a2+16(FP), R2 - MOVD a3+24(FP), R3 - - // Get library control area (LCA). - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 - - // Get function. - MOVD CAA(R8), R9 - MOVD EDCHPXV(R9), R9 - MOVD trap+0(FP), R5 - SLD $4, R5 - ADD R5, R9 - LMG 0(R9), R5, R6 +// +// Call LE function, if the return is 0 +// errno and errno2 is retrieved +// +TEXT ·CallLeFuncWithPtrReturn(SB), NOSPLIT, $0 + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + MOVD CAA(R8), R9 + MOVD g, GOCB(R9) // Restore LE stack. - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD 0(R9), R4 - MOVD $0, 0(R9) - - // Fill in parameter list. - MOVD a4+32(FP), R12 - MOVD R12, (2176+24)(R4) - MOVD a5+40(FP), R12 - MOVD R12, (2176+32)(R4) - MOVD a6+48(FP), R12 - MOVD R12, (2176+40)(R4) - MOVD a7+56(FP), R12 - MOVD R12, (2176+48)(R4) - MOVD a8+64(FP), R12 - MOVD R12, (2176+56)(R4) - MOVD a9+72(FP), R12 - MOVD R12, (2176+64)(R4) - - // Call function. 
- LE_CALL + MOVD SAVSTACK_ASYNC(R8), R9 // R9-> LE stack frame saving address + MOVD 0(R9), R4 // R4-> restore previously saved stack frame pointer + + MOVD parms_base+8(FP), R7 // R7 -> argument array + MOVD parms_len+16(FP), R8 // R8 number of arguments + + // arg 1 ---> R1 + CMP R8, $0 + BEQ docall + SUB $1, R8 + MOVD 0(R7), R1 + + // arg 2 ---> R2 + CMP R8, $0 + BEQ docall + SUB $1, R8 + ADD $8, R7 + MOVD 0(R7), R2 + + // arg 3 --> R3 + CMP R8, $0 + BEQ docall + SUB $1, R8 + ADD $8, R7 + MOVD 0(R7), R3 + + CMP R8, $0 + BEQ docall + MOVD $2176+16, R6 // starting LE stack address-8 to store 4th argument + +repeat: + ADD $8, R7 + MOVD 0(R7), R0 // advance arg pointer by 8 byte + ADD $8, R6 // advance LE argument address by 8 byte + MOVD R0, (R4)(R6*1) // copy argument from go-slice to le-frame + SUB $1, R8 + CMP R8, $0 + BNE repeat + +docall: + MOVD funcdesc+0(FP), R8 // R8-> function descriptor + LMG 0(R8), R5, R6 + MOVD $0, 0(R9) // R9 address of SAVSTACK_ASYNC + LE_CALL // balr R7, R6 (return #1) NOPH - XOR R0, R0 // Restore R0 to $0. - MOVD R4, 0(R9) // Save stack pointer. - - MOVD R3, r1+80(FP) - MOVD R0, r2+88(FP) - MOVD R0, err+96(FP) - MOVW R3, R4 - CMP R4, $-1 - BNE done - BL addrerrno<>(SB) - MOVWZ 0(R3), R3 - MOVD R3, err+96(FP) -done: - BL runtime·exitsyscall(SB) - RET - -TEXT ·syscall_rawsyscall9(SB),NOSPLIT,$0 - MOVD a1+8(FP), R1 - MOVD a2+16(FP), R2 - MOVD a3+24(FP), R3 - - // Get library control area (LCA). - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 - - // Get function. - MOVD CAA(R8), R9 - MOVD EDCHPXV(R9), R9 - MOVD trap+0(FP), R5 - SLD $4, R5 - ADD R5, R9 - LMG 0(R9), R5, R6 - - // Restore LE stack. - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD 0(R9), R4 - MOVD $0, 0(R9) - - // Fill in parameter list. - MOVD a4+32(FP), R12 - MOVD R12, (2176+24)(R4) - MOVD a5+40(FP), R12 - MOVD R12, (2176+32)(R4) - MOVD a6+48(FP), R12 - MOVD R12, (2176+40)(R4) - MOVD a7+56(FP), R12 - MOVD R12, (2176+48)(R4) - MOVD a8+64(FP), R12 - MOVD R12, (2176+56)(R4) - MOVD a9+72(FP), R12 - MOVD R12, (2176+64)(R4) - - // Call function. - LE_CALL + MOVD R3, ret+32(FP) + CMP R3, $0 // compare result to 0 + BNE done + + // retrieve errno and errno2 + MOVD zosLibVec<>(SB), R8 + ADD $(__errno), R8 + LMG 0(R8), R5, R6 + LE_CALL // balr R7, R6 __errno (return #3) NOPH - XOR R0, R0 // Restore R0 to $0. - MOVD R4, 0(R9) // Save stack pointer. 
- - MOVD R3, r1+80(FP) - MOVD R0, r2+88(FP) - MOVD R0, err+96(FP) - MOVW R3, R4 - CMP R4, $-1 - BNE done - BL addrerrno<>(SB) - MOVWZ 0(R3), R3 - MOVD R3, err+96(FP) -done: - RET - -// func svcCall(fnptr unsafe.Pointer, argv *unsafe.Pointer, dsa *uint64) -TEXT ·svcCall(SB),NOSPLIT,$0 - BL runtime·save_g(SB) // Save g and stack pointer - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD R15, 0(R9) - - MOVD argv+8(FP), R1 // Move function arguments into registers - MOVD dsa+16(FP), g - MOVD fnptr+0(FP), R15 - - BYTE $0x0D // Branch to function - BYTE $0xEF - - BL runtime·load_g(SB) // Restore g and stack pointer - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD 0(R9), R15 - - RET - -// func svcLoad(name *byte) unsafe.Pointer -TEXT ·svcLoad(SB),NOSPLIT,$0 - MOVD R15, R2 // Save go stack pointer - MOVD name+0(FP), R0 // Move SVC args into registers - MOVD $0x80000000, R1 - MOVD $0, R15 - BYTE $0x0A // SVC 08 LOAD - BYTE $0x08 - MOVW R15, R3 // Save return code from SVC - MOVD R2, R15 // Restore go stack pointer - CMP R3, $0 // Check SVC return code - BNE error - - MOVD $-2, R3 // Reset last bit of entry point to zero - AND R0, R3 - MOVD R3, addr+8(FP) // Return entry point returned by SVC - CMP R0, R3 // Check if last bit of entry point was set - BNE done - - MOVD R15, R2 // Save go stack pointer - MOVD $0, R15 // Move SVC args into registers (entry point still in r0 from SVC 08) - BYTE $0x0A // SVC 09 DELETE - BYTE $0x09 - MOVD R2, R15 // Restore go stack pointer + MOVWZ 0(R3), R3 + MOVD R3, err+48(FP) + MOVD zosLibVec<>(SB), R8 + ADD $(__err2ad), R8 + LMG 0(R8), R5, R6 + LE_CALL // balr R7, R6 __err2ad (return #2) + NOPH + MOVW (R3), R2 // retrieve errno2 + MOVD R2, errno2+40(FP) // store in return area + XOR R2, R2 + MOVWZ R2, (R3) // clear errno2 -error: - MOVD $0, addr+8(FP) // Return 0 on failure done: - XOR R0, R0 // Reset r0 to 0 + MOVD R4, 0(R9) // Save stack pointer. 
RET -// func svcUnload(name *byte, fnptr unsafe.Pointer) int64 -TEXT ·svcUnload(SB),NOSPLIT,$0 - MOVD R15, R2 // Save go stack pointer - MOVD name+0(FP), R0 // Move SVC args into registers - MOVD addr+8(FP), R15 - BYTE $0x0A // SVC 09 - BYTE $0x09 - XOR R0, R0 // Reset r0 to 0 - MOVD R15, R1 // Save SVC return code - MOVD R2, R15 // Restore go stack pointer - MOVD R1, rc+0(FP) // Return SVC return code +// +// function to test if a pointer can be safely dereferenced (content read) +// return 0 for succces +// +TEXT ·ptrtest(SB), NOSPLIT, $0-16 + MOVD arg+0(FP), R10 // test pointer in R10 + + // set up R2 to point to CEECAADMC + BYTE $0xE3; BYTE $0x20; BYTE $0x04; BYTE $0xB8; BYTE $0x00; BYTE $0x17 // llgt 2,1208 + BYTE $0xB9; BYTE $0x17; BYTE $0x00; BYTE $0x22 // llgtr 2,2 + BYTE $0xA5; BYTE $0x26; BYTE $0x7F; BYTE $0xFF // nilh 2,32767 + BYTE $0xE3; BYTE $0x22; BYTE $0x00; BYTE $0x58; BYTE $0x00; BYTE $0x04 // lg 2,88(2) + BYTE $0xE3; BYTE $0x22; BYTE $0x00; BYTE $0x08; BYTE $0x00; BYTE $0x04 // lg 2,8(2) + BYTE $0x41; BYTE $0x22; BYTE $0x03; BYTE $0x68 // la 2,872(2) + + // set up R5 to point to the "shunt" path which set 1 to R3 (failure) + BYTE $0xB9; BYTE $0x82; BYTE $0x00; BYTE $0x33 // xgr 3,3 + BYTE $0xA7; BYTE $0x55; BYTE $0x00; BYTE $0x04 // bras 5,lbl1 + BYTE $0xA7; BYTE $0x39; BYTE $0x00; BYTE $0x01 // lghi 3,1 + + // if r3 is not zero (failed) then branch to finish + BYTE $0xB9; BYTE $0x02; BYTE $0x00; BYTE $0x33 // lbl1 ltgr 3,3 + BYTE $0xA7; BYTE $0x74; BYTE $0x00; BYTE $0x08 // brc b'0111',lbl2 + + // stomic store shunt address in R5 into CEECAADMC + BYTE $0xE3; BYTE $0x52; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x24 // stg 5,0(2) + + // now try reading from the test pointer in R10, if it fails it branches to the "lghi" instruction above + BYTE $0xE3; BYTE $0x9A; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x04 // lg 9,0(10) + + // finish here, restore 0 into CEECAADMC + BYTE $0xB9; BYTE $0x82; BYTE $0x00; BYTE $0x99 // lbl2 xgr 9,9 + BYTE $0xE3; BYTE $0x92; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x24 // stg 9,0(2) + MOVD R3, ret+8(FP) // result in R3 RET -// func gettid() uint64 -TEXT ·gettid(SB), NOSPLIT, $0 - // Get library control area (LCA). 
- MOVW PSALAA, R8 - MOVD LCA64(R8), R8 - - // Get CEECAATHDID - MOVD CAA(R8), R9 - MOVD 0x3D0(R9), R9 - MOVD R9, ret+0(FP) - +// +// function to test if a untptr can be loaded from a pointer +// return 1: the 8-byte content +// 2: 0 for success, 1 for failure +// +// func safeload(ptr uintptr) ( value uintptr, error uintptr) +TEXT ·safeload(SB), NOSPLIT, $0-24 + MOVD ptr+0(FP), R10 // test pointer in R10 + MOVD $0x0, R6 + BYTE $0xE3; BYTE $0x20; BYTE $0x04; BYTE $0xB8; BYTE $0x00; BYTE $0x17 // llgt 2,1208 + BYTE $0xB9; BYTE $0x17; BYTE $0x00; BYTE $0x22 // llgtr 2,2 + BYTE $0xA5; BYTE $0x26; BYTE $0x7F; BYTE $0xFF // nilh 2,32767 + BYTE $0xE3; BYTE $0x22; BYTE $0x00; BYTE $0x58; BYTE $0x00; BYTE $0x04 // lg 2,88(2) + BYTE $0xE3; BYTE $0x22; BYTE $0x00; BYTE $0x08; BYTE $0x00; BYTE $0x04 // lg 2,8(2) + BYTE $0x41; BYTE $0x22; BYTE $0x03; BYTE $0x68 // la 2,872(2) + BYTE $0xB9; BYTE $0x82; BYTE $0x00; BYTE $0x33 // xgr 3,3 + BYTE $0xA7; BYTE $0x55; BYTE $0x00; BYTE $0x04 // bras 5,lbl1 + BYTE $0xA7; BYTE $0x39; BYTE $0x00; BYTE $0x01 // lghi 3,1 + BYTE $0xB9; BYTE $0x02; BYTE $0x00; BYTE $0x33 // lbl1 ltgr 3,3 + BYTE $0xA7; BYTE $0x74; BYTE $0x00; BYTE $0x08 // brc b'0111',lbl2 + BYTE $0xE3; BYTE $0x52; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x24 // stg 5,0(2) + BYTE $0xE3; BYTE $0x6A; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x04 // lg 6,0(10) + BYTE $0xB9; BYTE $0x82; BYTE $0x00; BYTE $0x99 // lbl2 xgr 9,9 + BYTE $0xE3; BYTE $0x92; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x24 // stg 9,0(2) + MOVD R6, value+8(FP) // result in R6 + MOVD R3, error+16(FP) // error in R3 RET diff --git a/vendor/golang.org/x/sys/unix/bpxsvc_zos.go b/vendor/golang.org/x/sys/unix/bpxsvc_zos.go new file mode 100644 index 00000000..39d647d8 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/bpxsvc_zos.go @@ -0,0 +1,657 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build zos + +package unix + +import ( + "bytes" + "fmt" + "unsafe" +) + +//go:noescape +func bpxcall(plist []unsafe.Pointer, bpx_offset int64) + +//go:noescape +func A2e([]byte) + +//go:noescape +func E2a([]byte) + +const ( + BPX4STA = 192 // stat + BPX4FST = 104 // fstat + BPX4LST = 132 // lstat + BPX4OPN = 156 // open + BPX4CLO = 72 // close + BPX4CHR = 500 // chattr + BPX4FCR = 504 // fchattr + BPX4LCR = 1180 // lchattr + BPX4CTW = 492 // cond_timed_wait + BPX4GTH = 1056 // __getthent + BPX4PTQ = 412 // pthread_quiesc + BPX4PTR = 320 // ptrace +) + +const ( + //options + //byte1 + BPX_OPNFHIGH = 0x80 + //byte2 + BPX_OPNFEXEC = 0x80 + //byte3 + BPX_O_NOLARGEFILE = 0x08 + BPX_O_LARGEFILE = 0x04 + BPX_O_ASYNCSIG = 0x02 + BPX_O_SYNC = 0x01 + //byte4 + BPX_O_CREXCL = 0xc0 + BPX_O_CREAT = 0x80 + BPX_O_EXCL = 0x40 + BPX_O_NOCTTY = 0x20 + BPX_O_TRUNC = 0x10 + BPX_O_APPEND = 0x08 + BPX_O_NONBLOCK = 0x04 + BPX_FNDELAY = 0x04 + BPX_O_RDWR = 0x03 + BPX_O_RDONLY = 0x02 + BPX_O_WRONLY = 0x01 + BPX_O_ACCMODE = 0x03 + BPX_O_GETFL = 0x0f + + //mode + // byte1 (file type) + BPX_FT_DIR = 1 + BPX_FT_CHARSPEC = 2 + BPX_FT_REGFILE = 3 + BPX_FT_FIFO = 4 + BPX_FT_SYMLINK = 5 + BPX_FT_SOCKET = 6 + //byte3 + BPX_S_ISUID = 0x08 + BPX_S_ISGID = 0x04 + BPX_S_ISVTX = 0x02 + BPX_S_IRWXU1 = 0x01 + BPX_S_IRUSR = 0x01 + //byte4 + BPX_S_IRWXU2 = 0xc0 + BPX_S_IWUSR = 0x80 + BPX_S_IXUSR = 0x40 + BPX_S_IRWXG = 0x38 + BPX_S_IRGRP = 0x20 + BPX_S_IWGRP = 0x10 + BPX_S_IXGRP = 0x08 + BPX_S_IRWXOX = 0x07 + BPX_S_IROTH = 0x04 + BPX_S_IWOTH = 0x02 + BPX_S_IXOTH = 0x01 + + CW_INTRPT = 1 + CW_CONDVAR = 32 + CW_TIMEOUT = 64 + + PGTHA_NEXT = 2 + PGTHA_CURRENT = 1 + PGTHA_FIRST = 0 + PGTHA_LAST = 3 + PGTHA_PROCESS = 0x80 + PGTHA_CONTTY = 0x40 + PGTHA_PATH = 0x20 + PGTHA_COMMAND = 0x10 + PGTHA_FILEDATA = 0x08 + PGTHA_THREAD = 0x04 + PGTHA_PTAG = 0x02 + PGTHA_COMMANDLONG = 0x01 + PGTHA_THREADFAST = 0x80 + PGTHA_FILEPATH = 0x40 + PGTHA_THDSIGMASK = 0x20 + // thread quiece mode + QUIESCE_TERM int32 = 1 + QUIESCE_FORCE int32 = 2 + QUIESCE_QUERY int32 = 3 + QUIESCE_FREEZE int32 = 4 + QUIESCE_UNFREEZE int32 = 5 + FREEZE_THIS_THREAD int32 = 6 + FREEZE_EXIT int32 = 8 + QUIESCE_SRB int32 = 9 +) + +type Pgtha struct { + Pid uint32 // 0 + Tid0 uint32 // 4 + Tid1 uint32 + Accesspid byte // C + Accesstid byte // D + Accessasid uint16 // E + Loginname [8]byte // 10 + Flag1 byte // 18 + Flag1b2 byte // 19 +} + +type Bpxystat_t struct { // DSECT BPXYSTAT + St_id [4]uint8 // 0 + St_length uint16 // 0x4 + St_version uint16 // 0x6 + St_mode uint32 // 0x8 + St_ino uint32 // 0xc + St_dev uint32 // 0x10 + St_nlink uint32 // 0x14 + St_uid uint32 // 0x18 + St_gid uint32 // 0x1c + St_size uint64 // 0x20 + St_atime uint32 // 0x28 + St_mtime uint32 // 0x2c + St_ctime uint32 // 0x30 + St_rdev uint32 // 0x34 + St_auditoraudit uint32 // 0x38 + St_useraudit uint32 // 0x3c + St_blksize uint32 // 0x40 + St_createtime uint32 // 0x44 + St_auditid [4]uint32 // 0x48 + St_res01 uint32 // 0x58 + Ft_ccsid uint16 // 0x5c + Ft_flags uint16 // 0x5e + St_res01a [2]uint32 // 0x60 + St_res02 uint32 // 0x68 + St_blocks uint32 // 0x6c + St_opaque [3]uint8 // 0x70 + St_visible uint8 // 0x73 + St_reftime uint32 // 0x74 + St_fid uint64 // 0x78 + St_filefmt uint8 // 0x80 + St_fspflag2 uint8 // 0x81 + St_res03 [2]uint8 // 0x82 + St_ctimemsec uint32 // 0x84 + St_seclabel [8]uint8 // 0x88 + St_res04 [4]uint8 // 0x90 + // end of version 1 + _ uint32 // 0x94 + St_atime64 uint64 // 0x98 + St_mtime64 uint64 // 0xa0 + St_ctime64 uint64 // 0xa8 + St_createtime64 uint64 // 0xb0 + 
St_reftime64 uint64 // 0xb8 + _ uint64 // 0xc0 + St_res05 [16]uint8 // 0xc8 + // end of version 2 +} + +type BpxFilestatus struct { + Oflag1 byte + Oflag2 byte + Oflag3 byte + Oflag4 byte +} + +type BpxMode struct { + Ftype byte + Mode1 byte + Mode2 byte + Mode3 byte +} + +// Thr attribute structure for extended attributes +type Bpxyatt_t struct { // DSECT BPXYATT + Att_id [4]uint8 + Att_version uint16 + Att_res01 [2]uint8 + Att_setflags1 uint8 + Att_setflags2 uint8 + Att_setflags3 uint8 + Att_setflags4 uint8 + Att_mode uint32 + Att_uid uint32 + Att_gid uint32 + Att_opaquemask [3]uint8 + Att_visblmaskres uint8 + Att_opaque [3]uint8 + Att_visibleres uint8 + Att_size_h uint32 + Att_size_l uint32 + Att_atime uint32 + Att_mtime uint32 + Att_auditoraudit uint32 + Att_useraudit uint32 + Att_ctime uint32 + Att_reftime uint32 + // end of version 1 + Att_filefmt uint8 + Att_res02 [3]uint8 + Att_filetag uint32 + Att_res03 [8]uint8 + // end of version 2 + Att_atime64 uint64 + Att_mtime64 uint64 + Att_ctime64 uint64 + Att_reftime64 uint64 + Att_seclabel [8]uint8 + Att_ver3res02 [8]uint8 + // end of version 3 +} + +func BpxOpen(name string, options *BpxFilestatus, mode *BpxMode) (rv int32, rc int32, rn int32) { + if len(name) < 1024 { + var namebuf [1024]byte + sz := int32(copy(namebuf[:], name)) + A2e(namebuf[:sz]) + var parms [7]unsafe.Pointer + parms[0] = unsafe.Pointer(&sz) + parms[1] = unsafe.Pointer(&namebuf[0]) + parms[2] = unsafe.Pointer(options) + parms[3] = unsafe.Pointer(mode) + parms[4] = unsafe.Pointer(&rv) + parms[5] = unsafe.Pointer(&rc) + parms[6] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4OPN) + return rv, rc, rn + } + return -1, -1, -1 +} + +func BpxClose(fd int32) (rv int32, rc int32, rn int32) { + var parms [4]unsafe.Pointer + parms[0] = unsafe.Pointer(&fd) + parms[1] = unsafe.Pointer(&rv) + parms[2] = unsafe.Pointer(&rc) + parms[3] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4CLO) + return rv, rc, rn +} + +func BpxFileFStat(fd int32, st *Bpxystat_t) (rv int32, rc int32, rn int32) { + st.St_id = [4]uint8{0xe2, 0xe3, 0xc1, 0xe3} + st.St_version = 2 + stat_sz := uint32(unsafe.Sizeof(*st)) + var parms [6]unsafe.Pointer + parms[0] = unsafe.Pointer(&fd) + parms[1] = unsafe.Pointer(&stat_sz) + parms[2] = unsafe.Pointer(st) + parms[3] = unsafe.Pointer(&rv) + parms[4] = unsafe.Pointer(&rc) + parms[5] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4FST) + return rv, rc, rn +} + +func BpxFileStat(name string, st *Bpxystat_t) (rv int32, rc int32, rn int32) { + if len(name) < 1024 { + var namebuf [1024]byte + sz := int32(copy(namebuf[:], name)) + A2e(namebuf[:sz]) + st.St_id = [4]uint8{0xe2, 0xe3, 0xc1, 0xe3} + st.St_version = 2 + stat_sz := uint32(unsafe.Sizeof(*st)) + var parms [7]unsafe.Pointer + parms[0] = unsafe.Pointer(&sz) + parms[1] = unsafe.Pointer(&namebuf[0]) + parms[2] = unsafe.Pointer(&stat_sz) + parms[3] = unsafe.Pointer(st) + parms[4] = unsafe.Pointer(&rv) + parms[5] = unsafe.Pointer(&rc) + parms[6] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4STA) + return rv, rc, rn + } + return -1, -1, -1 +} + +func BpxFileLStat(name string, st *Bpxystat_t) (rv int32, rc int32, rn int32) { + if len(name) < 1024 { + var namebuf [1024]byte + sz := int32(copy(namebuf[:], name)) + A2e(namebuf[:sz]) + st.St_id = [4]uint8{0xe2, 0xe3, 0xc1, 0xe3} + st.St_version = 2 + stat_sz := uint32(unsafe.Sizeof(*st)) + var parms [7]unsafe.Pointer + parms[0] = unsafe.Pointer(&sz) + parms[1] = unsafe.Pointer(&namebuf[0]) + parms[2] = unsafe.Pointer(&stat_sz) + parms[3] = unsafe.Pointer(st) + parms[4] = 
unsafe.Pointer(&rv) + parms[5] = unsafe.Pointer(&rc) + parms[6] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4LST) + return rv, rc, rn + } + return -1, -1, -1 +} + +func BpxChattr(path string, attr *Bpxyatt_t) (rv int32, rc int32, rn int32) { + if len(path) >= 1024 { + return -1, -1, -1 + } + var namebuf [1024]byte + sz := int32(copy(namebuf[:], path)) + A2e(namebuf[:sz]) + attr_sz := uint32(unsafe.Sizeof(*attr)) + var parms [7]unsafe.Pointer + parms[0] = unsafe.Pointer(&sz) + parms[1] = unsafe.Pointer(&namebuf[0]) + parms[2] = unsafe.Pointer(&attr_sz) + parms[3] = unsafe.Pointer(attr) + parms[4] = unsafe.Pointer(&rv) + parms[5] = unsafe.Pointer(&rc) + parms[6] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4CHR) + return rv, rc, rn +} + +func BpxLchattr(path string, attr *Bpxyatt_t) (rv int32, rc int32, rn int32) { + if len(path) >= 1024 { + return -1, -1, -1 + } + var namebuf [1024]byte + sz := int32(copy(namebuf[:], path)) + A2e(namebuf[:sz]) + attr_sz := uint32(unsafe.Sizeof(*attr)) + var parms [7]unsafe.Pointer + parms[0] = unsafe.Pointer(&sz) + parms[1] = unsafe.Pointer(&namebuf[0]) + parms[2] = unsafe.Pointer(&attr_sz) + parms[3] = unsafe.Pointer(attr) + parms[4] = unsafe.Pointer(&rv) + parms[5] = unsafe.Pointer(&rc) + parms[6] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4LCR) + return rv, rc, rn +} + +func BpxFchattr(fd int32, attr *Bpxyatt_t) (rv int32, rc int32, rn int32) { + attr_sz := uint32(unsafe.Sizeof(*attr)) + var parms [6]unsafe.Pointer + parms[0] = unsafe.Pointer(&fd) + parms[1] = unsafe.Pointer(&attr_sz) + parms[2] = unsafe.Pointer(attr) + parms[3] = unsafe.Pointer(&rv) + parms[4] = unsafe.Pointer(&rc) + parms[5] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4FCR) + return rv, rc, rn +} + +func BpxCondTimedWait(sec uint32, nsec uint32, events uint32, secrem *uint32, nsecrem *uint32) (rv int32, rc int32, rn int32) { + var parms [8]unsafe.Pointer + parms[0] = unsafe.Pointer(&sec) + parms[1] = unsafe.Pointer(&nsec) + parms[2] = unsafe.Pointer(&events) + parms[3] = unsafe.Pointer(secrem) + parms[4] = unsafe.Pointer(nsecrem) + parms[5] = unsafe.Pointer(&rv) + parms[6] = unsafe.Pointer(&rc) + parms[7] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4CTW) + return rv, rc, rn +} +func BpxGetthent(in *Pgtha, outlen *uint32, out unsafe.Pointer) (rv int32, rc int32, rn int32) { + var parms [7]unsafe.Pointer + inlen := uint32(26) // nothing else will work. 
Go says Pgtha is 28-byte because of alignment, but Pgtha is "packed" and must be 26-byte + parms[0] = unsafe.Pointer(&inlen) + parms[1] = unsafe.Pointer(&in) + parms[2] = unsafe.Pointer(outlen) + parms[3] = unsafe.Pointer(&out) + parms[4] = unsafe.Pointer(&rv) + parms[5] = unsafe.Pointer(&rc) + parms[6] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4GTH) + return rv, rc, rn +} +func ZosJobname() (jobname string, err error) { + var pgtha Pgtha + pgtha.Pid = uint32(Getpid()) + pgtha.Accesspid = PGTHA_CURRENT + pgtha.Flag1 = PGTHA_PROCESS + var out [256]byte + var outlen uint32 + outlen = 256 + rv, rc, rn := BpxGetthent(&pgtha, &outlen, unsafe.Pointer(&out[0])) + if rv == 0 { + gthc := []byte{0x87, 0xa3, 0x88, 0x83} // 'gthc' in ebcdic + ix := bytes.Index(out[:], gthc) + if ix == -1 { + err = fmt.Errorf("BPX4GTH: gthc return data not found") + return + } + jn := out[ix+80 : ix+88] // we didn't declare Pgthc, but jobname is 8-byte at offset 80 + E2a(jn) + jobname = string(bytes.TrimRight(jn, " ")) + + } else { + err = fmt.Errorf("BPX4GTH: rc=%d errno=%d reason=code=0x%x", rv, rc, rn) + } + return +} +func Bpx4ptq(code int32, data string) (rv int32, rc int32, rn int32) { + var userdata [8]byte + var parms [5]unsafe.Pointer + copy(userdata[:], data+" ") + A2e(userdata[:]) + parms[0] = unsafe.Pointer(&code) + parms[1] = unsafe.Pointer(&userdata[0]) + parms[2] = unsafe.Pointer(&rv) + parms[3] = unsafe.Pointer(&rc) + parms[4] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4PTQ) + return rv, rc, rn +} + +const ( + PT_TRACE_ME = 0 // Debug this process + PT_READ_I = 1 // Read a full word + PT_READ_D = 2 // Read a full word + PT_READ_U = 3 // Read control info + PT_WRITE_I = 4 //Write a full word + PT_WRITE_D = 5 //Write a full word + PT_CONTINUE = 7 //Continue the process + PT_KILL = 8 //Terminate the process + PT_READ_GPR = 11 // Read GPR, CR, PSW + PT_READ_FPR = 12 // Read FPR + PT_READ_VR = 13 // Read VR + PT_WRITE_GPR = 14 // Write GPR, CR, PSW + PT_WRITE_FPR = 15 // Write FPR + PT_WRITE_VR = 16 // Write VR + PT_READ_BLOCK = 17 // Read storage + PT_WRITE_BLOCK = 19 // Write storage + PT_READ_GPRH = 20 // Read GPRH + PT_WRITE_GPRH = 21 // Write GPRH + PT_REGHSET = 22 // Read all GPRHs + PT_ATTACH = 30 // Attach to a process + PT_DETACH = 31 // Detach from a process + PT_REGSET = 32 // Read all GPRs + PT_REATTACH = 33 // Reattach to a process + PT_LDINFO = 34 // Read loader info + PT_MULTI = 35 // Multi process mode + PT_LD64INFO = 36 // RMODE64 Info Area + PT_BLOCKREQ = 40 // Block request + PT_THREAD_INFO = 60 // Read thread info + PT_THREAD_MODIFY = 61 + PT_THREAD_READ_FOCUS = 62 + PT_THREAD_WRITE_FOCUS = 63 + PT_THREAD_HOLD = 64 + PT_THREAD_SIGNAL = 65 + PT_EXPLAIN = 66 + PT_EVENTS = 67 + PT_THREAD_INFO_EXTENDED = 68 + PT_REATTACH2 = 71 + PT_CAPTURE = 72 + PT_UNCAPTURE = 73 + PT_GET_THREAD_TCB = 74 + PT_GET_ALET = 75 + PT_SWAPIN = 76 + PT_EXTENDED_EVENT = 98 + PT_RECOVER = 99 // Debug a program check + PT_GPR0 = 0 // General purpose register 0 + PT_GPR1 = 1 // General purpose register 1 + PT_GPR2 = 2 // General purpose register 2 + PT_GPR3 = 3 // General purpose register 3 + PT_GPR4 = 4 // General purpose register 4 + PT_GPR5 = 5 // General purpose register 5 + PT_GPR6 = 6 // General purpose register 6 + PT_GPR7 = 7 // General purpose register 7 + PT_GPR8 = 8 // General purpose register 8 + PT_GPR9 = 9 // General purpose register 9 + PT_GPR10 = 10 // General purpose register 10 + PT_GPR11 = 11 // General purpose register 11 + PT_GPR12 = 12 // General purpose register 12 + PT_GPR13 = 13 // General 
purpose register 13 + PT_GPR14 = 14 // General purpose register 14 + PT_GPR15 = 15 // General purpose register 15 + PT_FPR0 = 16 // Floating point register 0 + PT_FPR1 = 17 // Floating point register 1 + PT_FPR2 = 18 // Floating point register 2 + PT_FPR3 = 19 // Floating point register 3 + PT_FPR4 = 20 // Floating point register 4 + PT_FPR5 = 21 // Floating point register 5 + PT_FPR6 = 22 // Floating point register 6 + PT_FPR7 = 23 // Floating point register 7 + PT_FPR8 = 24 // Floating point register 8 + PT_FPR9 = 25 // Floating point register 9 + PT_FPR10 = 26 // Floating point register 10 + PT_FPR11 = 27 // Floating point register 11 + PT_FPR12 = 28 // Floating point register 12 + PT_FPR13 = 29 // Floating point register 13 + PT_FPR14 = 30 // Floating point register 14 + PT_FPR15 = 31 // Floating point register 15 + PT_FPC = 32 // Floating point control register + PT_PSW = 40 // PSW + PT_PSW0 = 40 // Left half of the PSW + PT_PSW1 = 41 // Right half of the PSW + PT_CR0 = 42 // Control register 0 + PT_CR1 = 43 // Control register 1 + PT_CR2 = 44 // Control register 2 + PT_CR3 = 45 // Control register 3 + PT_CR4 = 46 // Control register 4 + PT_CR5 = 47 // Control register 5 + PT_CR6 = 48 // Control register 6 + PT_CR7 = 49 // Control register 7 + PT_CR8 = 50 // Control register 8 + PT_CR9 = 51 // Control register 9 + PT_CR10 = 52 // Control register 10 + PT_CR11 = 53 // Control register 11 + PT_CR12 = 54 // Control register 12 + PT_CR13 = 55 // Control register 13 + PT_CR14 = 56 // Control register 14 + PT_CR15 = 57 // Control register 15 + PT_GPRH0 = 58 // GP High register 0 + PT_GPRH1 = 59 // GP High register 1 + PT_GPRH2 = 60 // GP High register 2 + PT_GPRH3 = 61 // GP High register 3 + PT_GPRH4 = 62 // GP High register 4 + PT_GPRH5 = 63 // GP High register 5 + PT_GPRH6 = 64 // GP High register 6 + PT_GPRH7 = 65 // GP High register 7 + PT_GPRH8 = 66 // GP High register 8 + PT_GPRH9 = 67 // GP High register 9 + PT_GPRH10 = 68 // GP High register 10 + PT_GPRH11 = 69 // GP High register 11 + PT_GPRH12 = 70 // GP High register 12 + PT_GPRH13 = 71 // GP High register 13 + PT_GPRH14 = 72 // GP High register 14 + PT_GPRH15 = 73 // GP High register 15 + PT_VR0 = 74 // Vector register 0 + PT_VR1 = 75 // Vector register 1 + PT_VR2 = 76 // Vector register 2 + PT_VR3 = 77 // Vector register 3 + PT_VR4 = 78 // Vector register 4 + PT_VR5 = 79 // Vector register 5 + PT_VR6 = 80 // Vector register 6 + PT_VR7 = 81 // Vector register 7 + PT_VR8 = 82 // Vector register 8 + PT_VR9 = 83 // Vector register 9 + PT_VR10 = 84 // Vector register 10 + PT_VR11 = 85 // Vector register 11 + PT_VR12 = 86 // Vector register 12 + PT_VR13 = 87 // Vector register 13 + PT_VR14 = 88 // Vector register 14 + PT_VR15 = 89 // Vector register 15 + PT_VR16 = 90 // Vector register 16 + PT_VR17 = 91 // Vector register 17 + PT_VR18 = 92 // Vector register 18 + PT_VR19 = 93 // Vector register 19 + PT_VR20 = 94 // Vector register 20 + PT_VR21 = 95 // Vector register 21 + PT_VR22 = 96 // Vector register 22 + PT_VR23 = 97 // Vector register 23 + PT_VR24 = 98 // Vector register 24 + PT_VR25 = 99 // Vector register 25 + PT_VR26 = 100 // Vector register 26 + PT_VR27 = 101 // Vector register 27 + PT_VR28 = 102 // Vector register 28 + PT_VR29 = 103 // Vector register 29 + PT_VR30 = 104 // Vector register 30 + PT_VR31 = 105 // Vector register 31 + PT_PSWG = 106 // PSWG + PT_PSWG0 = 106 // Bytes 0-3 + PT_PSWG1 = 107 // Bytes 4-7 + PT_PSWG2 = 108 // Bytes 8-11 (IA high word) + PT_PSWG3 = 109 // Bytes 12-15 (IA low word) +) + +func 
Bpx4ptr(request int32, pid int32, addr unsafe.Pointer, data unsafe.Pointer, buffer unsafe.Pointer) (rv int32, rc int32, rn int32) { + var parms [8]unsafe.Pointer + parms[0] = unsafe.Pointer(&request) + parms[1] = unsafe.Pointer(&pid) + parms[2] = unsafe.Pointer(&addr) + parms[3] = unsafe.Pointer(&data) + parms[4] = unsafe.Pointer(&buffer) + parms[5] = unsafe.Pointer(&rv) + parms[6] = unsafe.Pointer(&rc) + parms[7] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4PTR) + return rv, rc, rn +} + +func copyU8(val uint8, dest []uint8) int { + if len(dest) < 1 { + return 0 + } + dest[0] = val + return 1 +} + +func copyU8Arr(src, dest []uint8) int { + if len(dest) < len(src) { + return 0 + } + for i, v := range src { + dest[i] = v + } + return len(src) +} + +func copyU16(val uint16, dest []uint16) int { + if len(dest) < 1 { + return 0 + } + dest[0] = val + return 1 +} + +func copyU32(val uint32, dest []uint32) int { + if len(dest) < 1 { + return 0 + } + dest[0] = val + return 1 +} + +func copyU32Arr(src, dest []uint32) int { + if len(dest) < len(src) { + return 0 + } + for i, v := range src { + dest[i] = v + } + return len(src) +} + +func copyU64(val uint64, dest []uint64) int { + if len(dest) < 1 { + return 0 + } + dest[0] = val + return 1 +} diff --git a/vendor/golang.org/x/sys/unix/bpxsvc_zos.s b/vendor/golang.org/x/sys/unix/bpxsvc_zos.s new file mode 100644 index 00000000..4bd4a179 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/bpxsvc_zos.s @@ -0,0 +1,192 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "go_asm.h" +#include "textflag.h" + +// function to call USS assembly language services +// +// doc: https://www.ibm.com/support/knowledgecenter/en/SSLTBW_3.1.0/com.ibm.zos.v3r1.bpxb100/bit64env.htm +// +// arg1 unsafe.Pointer array that ressembles an OS PLIST +// +// arg2 function offset as in +// doc: https://www.ibm.com/support/knowledgecenter/en/SSLTBW_3.1.0/com.ibm.zos.v3r1.bpxb100/bpx2cr_List_of_offsets.htm +// +// func bpxcall(plist []unsafe.Pointer, bpx_offset int64) + +TEXT ·bpxcall(SB), NOSPLIT|NOFRAME, $0 + MOVD plist_base+0(FP), R1 // r1 points to plist + MOVD bpx_offset+24(FP), R2 // r2 offset to BPX vector table + MOVD R14, R7 // save r14 + MOVD R15, R8 // save r15 + MOVWZ 16(R0), R9 + MOVWZ 544(R9), R9 + MOVWZ 24(R9), R9 // call vector in r9 + ADD R2, R9 // add offset to vector table + MOVWZ (R9), R9 // r9 points to entry point + BYTE $0x0D // BL R14,R9 --> basr r14,r9 + BYTE $0xE9 // clobbers 0,1,14,15 + MOVD R8, R15 // restore 15 + JMP R7 // return via saved return address + +// func A2e(arr [] byte) +// code page conversion from 819 to 1047 +TEXT ·A2e(SB), NOSPLIT|NOFRAME, $0 + MOVD arg_base+0(FP), R2 // pointer to arry of characters + MOVD arg_len+8(FP), R3 // count + XOR R0, R0 + XOR R1, R1 + BYTE $0xA7; BYTE $0x15; BYTE $0x00; BYTE $0x82 // BRAS 1,(2+(256/2)) + + // ASCII -> EBCDIC conversion table: + BYTE $0x00; BYTE $0x01; BYTE $0x02; BYTE $0x03 + BYTE $0x37; BYTE $0x2d; BYTE $0x2e; BYTE $0x2f + BYTE $0x16; BYTE $0x05; BYTE $0x15; BYTE $0x0b + BYTE $0x0c; BYTE $0x0d; BYTE $0x0e; BYTE $0x0f + BYTE $0x10; BYTE $0x11; BYTE $0x12; BYTE $0x13 + BYTE $0x3c; BYTE $0x3d; BYTE $0x32; BYTE $0x26 + BYTE $0x18; BYTE $0x19; BYTE $0x3f; BYTE $0x27 + BYTE $0x1c; BYTE $0x1d; BYTE $0x1e; BYTE $0x1f + BYTE $0x40; BYTE $0x5a; BYTE $0x7f; BYTE $0x7b + BYTE $0x5b; BYTE $0x6c; BYTE $0x50; BYTE $0x7d + BYTE $0x4d; BYTE $0x5d; BYTE $0x5c; BYTE $0x4e + BYTE $0x6b; BYTE 
$0x60; BYTE $0x4b; BYTE $0x61 + BYTE $0xf0; BYTE $0xf1; BYTE $0xf2; BYTE $0xf3 + BYTE $0xf4; BYTE $0xf5; BYTE $0xf6; BYTE $0xf7 + BYTE $0xf8; BYTE $0xf9; BYTE $0x7a; BYTE $0x5e + BYTE $0x4c; BYTE $0x7e; BYTE $0x6e; BYTE $0x6f + BYTE $0x7c; BYTE $0xc1; BYTE $0xc2; BYTE $0xc3 + BYTE $0xc4; BYTE $0xc5; BYTE $0xc6; BYTE $0xc7 + BYTE $0xc8; BYTE $0xc9; BYTE $0xd1; BYTE $0xd2 + BYTE $0xd3; BYTE $0xd4; BYTE $0xd5; BYTE $0xd6 + BYTE $0xd7; BYTE $0xd8; BYTE $0xd9; BYTE $0xe2 + BYTE $0xe3; BYTE $0xe4; BYTE $0xe5; BYTE $0xe6 + BYTE $0xe7; BYTE $0xe8; BYTE $0xe9; BYTE $0xad + BYTE $0xe0; BYTE $0xbd; BYTE $0x5f; BYTE $0x6d + BYTE $0x79; BYTE $0x81; BYTE $0x82; BYTE $0x83 + BYTE $0x84; BYTE $0x85; BYTE $0x86; BYTE $0x87 + BYTE $0x88; BYTE $0x89; BYTE $0x91; BYTE $0x92 + BYTE $0x93; BYTE $0x94; BYTE $0x95; BYTE $0x96 + BYTE $0x97; BYTE $0x98; BYTE $0x99; BYTE $0xa2 + BYTE $0xa3; BYTE $0xa4; BYTE $0xa5; BYTE $0xa6 + BYTE $0xa7; BYTE $0xa8; BYTE $0xa9; BYTE $0xc0 + BYTE $0x4f; BYTE $0xd0; BYTE $0xa1; BYTE $0x07 + BYTE $0x20; BYTE $0x21; BYTE $0x22; BYTE $0x23 + BYTE $0x24; BYTE $0x25; BYTE $0x06; BYTE $0x17 + BYTE $0x28; BYTE $0x29; BYTE $0x2a; BYTE $0x2b + BYTE $0x2c; BYTE $0x09; BYTE $0x0a; BYTE $0x1b + BYTE $0x30; BYTE $0x31; BYTE $0x1a; BYTE $0x33 + BYTE $0x34; BYTE $0x35; BYTE $0x36; BYTE $0x08 + BYTE $0x38; BYTE $0x39; BYTE $0x3a; BYTE $0x3b + BYTE $0x04; BYTE $0x14; BYTE $0x3e; BYTE $0xff + BYTE $0x41; BYTE $0xaa; BYTE $0x4a; BYTE $0xb1 + BYTE $0x9f; BYTE $0xb2; BYTE $0x6a; BYTE $0xb5 + BYTE $0xbb; BYTE $0xb4; BYTE $0x9a; BYTE $0x8a + BYTE $0xb0; BYTE $0xca; BYTE $0xaf; BYTE $0xbc + BYTE $0x90; BYTE $0x8f; BYTE $0xea; BYTE $0xfa + BYTE $0xbe; BYTE $0xa0; BYTE $0xb6; BYTE $0xb3 + BYTE $0x9d; BYTE $0xda; BYTE $0x9b; BYTE $0x8b + BYTE $0xb7; BYTE $0xb8; BYTE $0xb9; BYTE $0xab + BYTE $0x64; BYTE $0x65; BYTE $0x62; BYTE $0x66 + BYTE $0x63; BYTE $0x67; BYTE $0x9e; BYTE $0x68 + BYTE $0x74; BYTE $0x71; BYTE $0x72; BYTE $0x73 + BYTE $0x78; BYTE $0x75; BYTE $0x76; BYTE $0x77 + BYTE $0xac; BYTE $0x69; BYTE $0xed; BYTE $0xee + BYTE $0xeb; BYTE $0xef; BYTE $0xec; BYTE $0xbf + BYTE $0x80; BYTE $0xfd; BYTE $0xfe; BYTE $0xfb + BYTE $0xfc; BYTE $0xba; BYTE $0xae; BYTE $0x59 + BYTE $0x44; BYTE $0x45; BYTE $0x42; BYTE $0x46 + BYTE $0x43; BYTE $0x47; BYTE $0x9c; BYTE $0x48 + BYTE $0x54; BYTE $0x51; BYTE $0x52; BYTE $0x53 + BYTE $0x58; BYTE $0x55; BYTE $0x56; BYTE $0x57 + BYTE $0x8c; BYTE $0x49; BYTE $0xcd; BYTE $0xce + BYTE $0xcb; BYTE $0xcf; BYTE $0xcc; BYTE $0xe1 + BYTE $0x70; BYTE $0xdd; BYTE $0xde; BYTE $0xdb + BYTE $0xdc; BYTE $0x8d; BYTE $0x8e; BYTE $0xdf + +retry: + WORD $0xB9931022 // TROO 2,2,b'0001' + BVS retry + RET + +// func e2a(arr [] byte) +// code page conversion from 1047 to 819 +TEXT ·E2a(SB), NOSPLIT|NOFRAME, $0 + MOVD arg_base+0(FP), R2 // pointer to arry of characters + MOVD arg_len+8(FP), R3 // count + XOR R0, R0 + XOR R1, R1 + BYTE $0xA7; BYTE $0x15; BYTE $0x00; BYTE $0x82 // BRAS 1,(2+(256/2)) + + // EBCDIC -> ASCII conversion table: + BYTE $0x00; BYTE $0x01; BYTE $0x02; BYTE $0x03 + BYTE $0x9c; BYTE $0x09; BYTE $0x86; BYTE $0x7f + BYTE $0x97; BYTE $0x8d; BYTE $0x8e; BYTE $0x0b + BYTE $0x0c; BYTE $0x0d; BYTE $0x0e; BYTE $0x0f + BYTE $0x10; BYTE $0x11; BYTE $0x12; BYTE $0x13 + BYTE $0x9d; BYTE $0x0a; BYTE $0x08; BYTE $0x87 + BYTE $0x18; BYTE $0x19; BYTE $0x92; BYTE $0x8f + BYTE $0x1c; BYTE $0x1d; BYTE $0x1e; BYTE $0x1f + BYTE $0x80; BYTE $0x81; BYTE $0x82; BYTE $0x83 + BYTE $0x84; BYTE $0x85; BYTE $0x17; BYTE $0x1b + BYTE $0x88; BYTE $0x89; BYTE $0x8a; BYTE $0x8b + BYTE $0x8c; BYTE $0x05; BYTE 
$0x06; BYTE $0x07 + BYTE $0x90; BYTE $0x91; BYTE $0x16; BYTE $0x93 + BYTE $0x94; BYTE $0x95; BYTE $0x96; BYTE $0x04 + BYTE $0x98; BYTE $0x99; BYTE $0x9a; BYTE $0x9b + BYTE $0x14; BYTE $0x15; BYTE $0x9e; BYTE $0x1a + BYTE $0x20; BYTE $0xa0; BYTE $0xe2; BYTE $0xe4 + BYTE $0xe0; BYTE $0xe1; BYTE $0xe3; BYTE $0xe5 + BYTE $0xe7; BYTE $0xf1; BYTE $0xa2; BYTE $0x2e + BYTE $0x3c; BYTE $0x28; BYTE $0x2b; BYTE $0x7c + BYTE $0x26; BYTE $0xe9; BYTE $0xea; BYTE $0xeb + BYTE $0xe8; BYTE $0xed; BYTE $0xee; BYTE $0xef + BYTE $0xec; BYTE $0xdf; BYTE $0x21; BYTE $0x24 + BYTE $0x2a; BYTE $0x29; BYTE $0x3b; BYTE $0x5e + BYTE $0x2d; BYTE $0x2f; BYTE $0xc2; BYTE $0xc4 + BYTE $0xc0; BYTE $0xc1; BYTE $0xc3; BYTE $0xc5 + BYTE $0xc7; BYTE $0xd1; BYTE $0xa6; BYTE $0x2c + BYTE $0x25; BYTE $0x5f; BYTE $0x3e; BYTE $0x3f + BYTE $0xf8; BYTE $0xc9; BYTE $0xca; BYTE $0xcb + BYTE $0xc8; BYTE $0xcd; BYTE $0xce; BYTE $0xcf + BYTE $0xcc; BYTE $0x60; BYTE $0x3a; BYTE $0x23 + BYTE $0x40; BYTE $0x27; BYTE $0x3d; BYTE $0x22 + BYTE $0xd8; BYTE $0x61; BYTE $0x62; BYTE $0x63 + BYTE $0x64; BYTE $0x65; BYTE $0x66; BYTE $0x67 + BYTE $0x68; BYTE $0x69; BYTE $0xab; BYTE $0xbb + BYTE $0xf0; BYTE $0xfd; BYTE $0xfe; BYTE $0xb1 + BYTE $0xb0; BYTE $0x6a; BYTE $0x6b; BYTE $0x6c + BYTE $0x6d; BYTE $0x6e; BYTE $0x6f; BYTE $0x70 + BYTE $0x71; BYTE $0x72; BYTE $0xaa; BYTE $0xba + BYTE $0xe6; BYTE $0xb8; BYTE $0xc6; BYTE $0xa4 + BYTE $0xb5; BYTE $0x7e; BYTE $0x73; BYTE $0x74 + BYTE $0x75; BYTE $0x76; BYTE $0x77; BYTE $0x78 + BYTE $0x79; BYTE $0x7a; BYTE $0xa1; BYTE $0xbf + BYTE $0xd0; BYTE $0x5b; BYTE $0xde; BYTE $0xae + BYTE $0xac; BYTE $0xa3; BYTE $0xa5; BYTE $0xb7 + BYTE $0xa9; BYTE $0xa7; BYTE $0xb6; BYTE $0xbc + BYTE $0xbd; BYTE $0xbe; BYTE $0xdd; BYTE $0xa8 + BYTE $0xaf; BYTE $0x5d; BYTE $0xb4; BYTE $0xd7 + BYTE $0x7b; BYTE $0x41; BYTE $0x42; BYTE $0x43 + BYTE $0x44; BYTE $0x45; BYTE $0x46; BYTE $0x47 + BYTE $0x48; BYTE $0x49; BYTE $0xad; BYTE $0xf4 + BYTE $0xf6; BYTE $0xf2; BYTE $0xf3; BYTE $0xf5 + BYTE $0x7d; BYTE $0x4a; BYTE $0x4b; BYTE $0x4c + BYTE $0x4d; BYTE $0x4e; BYTE $0x4f; BYTE $0x50 + BYTE $0x51; BYTE $0x52; BYTE $0xb9; BYTE $0xfb + BYTE $0xfc; BYTE $0xf9; BYTE $0xfa; BYTE $0xff + BYTE $0x5c; BYTE $0xf7; BYTE $0x53; BYTE $0x54 + BYTE $0x55; BYTE $0x56; BYTE $0x57; BYTE $0x58 + BYTE $0x59; BYTE $0x5a; BYTE $0xb2; BYTE $0xd4 + BYTE $0xd6; BYTE $0xd2; BYTE $0xd3; BYTE $0xd5 + BYTE $0x30; BYTE $0x31; BYTE $0x32; BYTE $0x33 + BYTE $0x34; BYTE $0x35; BYTE $0x36; BYTE $0x37 + BYTE $0x38; BYTE $0x39; BYTE $0xb3; BYTE $0xdb + BYTE $0xdc; BYTE $0xd9; BYTE $0xda; BYTE $0x9f + +retry: + WORD $0xB9931022 // TROO 2,2,b'0001' + BVS retry + RET diff --git a/vendor/golang.org/x/sys/unix/cap_freebsd.go b/vendor/golang.org/x/sys/unix/cap_freebsd.go index 0b7c6adb..a0865789 100644 --- a/vendor/golang.org/x/sys/unix/cap_freebsd.go +++ b/vendor/golang.org/x/sys/unix/cap_freebsd.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build freebsd -// +build freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/constants.go b/vendor/golang.org/x/sys/unix/constants.go index 394a3965..6fb7cb77 100644 --- a/vendor/golang.org/x/sys/unix/constants.go +++ b/vendor/golang.org/x/sys/unix/constants.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
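A minimal usage sketch of the z/OS helpers above (illustrative only, not part of the vendored code; it assumes the zos/s390x build of golang.org/x/sys/unix, where ZosJobname, A2e and E2a are exported):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// ZosJobname drives BPX4GTH via BpxGetthent and converts the EBCDIC
	// job name back to ASCII with E2a before returning it.
	if name, err := unix.ZosJobname(); err == nil {
		fmt.Println("running under job:", name)
	}

	// A2e/E2a convert byte slices in place between code pages 819 and 1047.
	buf := []byte("HELLO")
	unix.A2e(buf)            // ASCII -> EBCDIC
	unix.E2a(buf)            // EBCDIC -> ASCII
	fmt.Println(string(buf)) // "HELLO" again
}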
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package unix diff --git a/vendor/golang.org/x/sys/unix/dev_aix_ppc.go b/vendor/golang.org/x/sys/unix/dev_aix_ppc.go index 65a99850..d7851346 100644 --- a/vendor/golang.org/x/sys/unix/dev_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/dev_aix_ppc.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix && ppc -// +build aix,ppc // Functions to access/create device major and minor numbers matching the // encoding used by AIX. diff --git a/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go b/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go index 8fc08ad0..623a5e69 100644 --- a/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix && ppc64 -// +build aix,ppc64 // Functions to access/create device major and minor numbers matching the // encoding used AIX. diff --git a/vendor/golang.org/x/sys/unix/dev_zos.go b/vendor/golang.org/x/sys/unix/dev_zos.go index a388e59a..bb6a64fe 100644 --- a/vendor/golang.org/x/sys/unix/dev_zos.go +++ b/vendor/golang.org/x/sys/unix/dev_zos.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build zos && s390x -// +build zos,s390x // Functions to access/create device major and minor numbers matching the // encoding used by z/OS. diff --git a/vendor/golang.org/x/sys/unix/dirent.go b/vendor/golang.org/x/sys/unix/dirent.go index 2499f977..1ebf1178 100644 --- a/vendor/golang.org/x/sys/unix/dirent.go +++ b/vendor/golang.org/x/sys/unix/dirent.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package unix diff --git a/vendor/golang.org/x/sys/unix/endian_big.go b/vendor/golang.org/x/sys/unix/endian_big.go index a5202655..1095fd31 100644 --- a/vendor/golang.org/x/sys/unix/endian_big.go +++ b/vendor/golang.org/x/sys/unix/endian_big.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. // //go:build armbe || arm64be || m68k || mips || mips64 || mips64p32 || ppc || ppc64 || s390 || s390x || shbe || sparc || sparc64 -// +build armbe arm64be m68k mips mips64 mips64p32 ppc ppc64 s390 s390x shbe sparc sparc64 package unix diff --git a/vendor/golang.org/x/sys/unix/endian_little.go b/vendor/golang.org/x/sys/unix/endian_little.go index b0f2bc4a..b9f0e277 100644 --- a/vendor/golang.org/x/sys/unix/endian_little.go +++ b/vendor/golang.org/x/sys/unix/endian_little.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. // //go:build 386 || amd64 || amd64p32 || alpha || arm || arm64 || loong64 || mipsle || mips64le || mips64p32le || nios2 || ppc64le || riscv || riscv64 || sh -// +build 386 amd64 amd64p32 alpha arm arm64 loong64 mipsle mips64le mips64p32le nios2 ppc64le riscv riscv64 sh package unix diff --git a/vendor/golang.org/x/sys/unix/env_unix.go b/vendor/golang.org/x/sys/unix/env_unix.go index 29ccc4d1..a96da71f 100644 --- a/vendor/golang.org/x/sys/unix/env_unix.go +++ b/vendor/golang.org/x/sys/unix/env_unix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
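The dev_*.go files touched above expose the usual Mkdev/Major/Minor helpers; a quick sketch of how they fit together (illustrative, not part of the patch):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Pack a device number using the platform-specific encoding implemented
	// by the dev_*.go files, then split it back into its parts.
	dev := unix.Mkdev(8, 1)
	fmt.Println(unix.Major(dev), unix.Minor(dev)) // 8 1
}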
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos // Unix environment variables. diff --git a/vendor/golang.org/x/sys/unix/epoll_zos.go b/vendor/golang.org/x/sys/unix/epoll_zos.go deleted file mode 100644 index cedaf7e0..00000000 --- a/vendor/golang.org/x/sys/unix/epoll_zos.go +++ /dev/null @@ -1,221 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build zos && s390x -// +build zos,s390x - -package unix - -import ( - "sync" -) - -// This file simulates epoll on z/OS using poll. - -// Analogous to epoll_event on Linux. -// TODO(neeilan): Pad is because the Linux kernel expects a 96-bit struct. We never pass this to the kernel; remove? -type EpollEvent struct { - Events uint32 - Fd int32 - Pad int32 -} - -const ( - EPOLLERR = 0x8 - EPOLLHUP = 0x10 - EPOLLIN = 0x1 - EPOLLMSG = 0x400 - EPOLLOUT = 0x4 - EPOLLPRI = 0x2 - EPOLLRDBAND = 0x80 - EPOLLRDNORM = 0x40 - EPOLLWRBAND = 0x200 - EPOLLWRNORM = 0x100 - EPOLL_CTL_ADD = 0x1 - EPOLL_CTL_DEL = 0x2 - EPOLL_CTL_MOD = 0x3 - // The following constants are part of the epoll API, but represent - // currently unsupported functionality on z/OS. - // EPOLL_CLOEXEC = 0x80000 - // EPOLLET = 0x80000000 - // EPOLLONESHOT = 0x40000000 - // EPOLLRDHUP = 0x2000 // Typically used with edge-triggered notis - // EPOLLEXCLUSIVE = 0x10000000 // Exclusive wake-up mode - // EPOLLWAKEUP = 0x20000000 // Relies on Linux's BLOCK_SUSPEND capability -) - -// TODO(neeilan): We can eliminate these epToPoll / pToEpoll calls by using identical mask values for POLL/EPOLL -// constants where possible The lower 16 bits of epoll events (uint32) can fit any system poll event (int16). - -// epToPollEvt converts epoll event field to poll equivalent. -// In epoll, Events is a 32-bit field, while poll uses 16 bits. -func epToPollEvt(events uint32) int16 { - var ep2p = map[uint32]int16{ - EPOLLIN: POLLIN, - EPOLLOUT: POLLOUT, - EPOLLHUP: POLLHUP, - EPOLLPRI: POLLPRI, - EPOLLERR: POLLERR, - } - - var pollEvts int16 = 0 - for epEvt, pEvt := range ep2p { - if (events & epEvt) != 0 { - pollEvts |= pEvt - } - } - - return pollEvts -} - -// pToEpollEvt converts 16 bit poll event bitfields to 32-bit epoll event fields. -func pToEpollEvt(revents int16) uint32 { - var p2ep = map[int16]uint32{ - POLLIN: EPOLLIN, - POLLOUT: EPOLLOUT, - POLLHUP: EPOLLHUP, - POLLPRI: EPOLLPRI, - POLLERR: EPOLLERR, - } - - var epollEvts uint32 = 0 - for pEvt, epEvt := range p2ep { - if (revents & pEvt) != 0 { - epollEvts |= epEvt - } - } - - return epollEvts -} - -// Per-process epoll implementation. -type epollImpl struct { - mu sync.Mutex - epfd2ep map[int]*eventPoll - nextEpfd int -} - -// eventPoll holds a set of file descriptors being watched by the process. A process can have multiple epoll instances. -// On Linux, this is an in-kernel data structure accessed through a fd. -type eventPoll struct { - mu sync.Mutex - fds map[int]*EpollEvent -} - -// epoll impl for this process. 
-var impl epollImpl = epollImpl{ - epfd2ep: make(map[int]*eventPoll), - nextEpfd: 0, -} - -func (e *epollImpl) epollcreate(size int) (epfd int, err error) { - e.mu.Lock() - defer e.mu.Unlock() - epfd = e.nextEpfd - e.nextEpfd++ - - e.epfd2ep[epfd] = &eventPoll{ - fds: make(map[int]*EpollEvent), - } - return epfd, nil -} - -func (e *epollImpl) epollcreate1(flag int) (fd int, err error) { - return e.epollcreate(4) -} - -func (e *epollImpl) epollctl(epfd int, op int, fd int, event *EpollEvent) (err error) { - e.mu.Lock() - defer e.mu.Unlock() - - ep, ok := e.epfd2ep[epfd] - if !ok { - - return EBADF - } - - switch op { - case EPOLL_CTL_ADD: - // TODO(neeilan): When we make epfds and fds disjoint, detect epoll - // loops here (instances watching each other) and return ELOOP. - if _, ok := ep.fds[fd]; ok { - return EEXIST - } - ep.fds[fd] = event - case EPOLL_CTL_MOD: - if _, ok := ep.fds[fd]; !ok { - return ENOENT - } - ep.fds[fd] = event - case EPOLL_CTL_DEL: - if _, ok := ep.fds[fd]; !ok { - return ENOENT - } - delete(ep.fds, fd) - - } - return nil -} - -// Must be called while holding ep.mu -func (ep *eventPoll) getFds() []int { - fds := make([]int, len(ep.fds)) - for fd := range ep.fds { - fds = append(fds, fd) - } - return fds -} - -func (e *epollImpl) epollwait(epfd int, events []EpollEvent, msec int) (n int, err error) { - e.mu.Lock() // in [rare] case of concurrent epollcreate + epollwait - ep, ok := e.epfd2ep[epfd] - - if !ok { - e.mu.Unlock() - return 0, EBADF - } - - pollfds := make([]PollFd, 4) - for fd, epollevt := range ep.fds { - pollfds = append(pollfds, PollFd{Fd: int32(fd), Events: epToPollEvt(epollevt.Events)}) - } - e.mu.Unlock() - - n, err = Poll(pollfds, msec) - if err != nil { - return n, err - } - - i := 0 - for _, pFd := range pollfds { - if pFd.Revents != 0 { - events[i] = EpollEvent{Fd: pFd.Fd, Events: pToEpollEvt(pFd.Revents)} - i++ - } - - if i == n { - break - } - } - - return n, nil -} - -func EpollCreate(size int) (fd int, err error) { - return impl.epollcreate(size) -} - -func EpollCreate1(flag int) (fd int, err error) { - return impl.epollcreate1(flag) -} - -func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { - return impl.epollctl(epfd, op, fd, event) -} - -// Because EpollWait mutates events, the caller is expected to coordinate -// concurrent access if calling with the same epfd from multiple goroutines. -func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { - return impl.epollwait(epfd, events, msec) -} diff --git a/vendor/golang.org/x/sys/unix/fcntl.go b/vendor/golang.org/x/sys/unix/fcntl.go index e9b99125..6200876f 100644 --- a/vendor/golang.org/x/sys/unix/fcntl.go +++ b/vendor/golang.org/x/sys/unix/fcntl.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build dragonfly || freebsd || linux || netbsd || openbsd -// +build dragonfly freebsd linux netbsd openbsd +//go:build dragonfly || freebsd || linux || netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go b/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go index 29d44808..13b4acd5 100644 --- a/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go +++ b/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build (linux && 386) || (linux && arm) || (linux && mips) || (linux && mipsle) || (linux && ppc) -// +build linux,386 linux,arm linux,mips linux,mipsle linux,ppc package unix diff --git a/vendor/golang.org/x/sys/unix/fdset.go b/vendor/golang.org/x/sys/unix/fdset.go index a8068f94..9e83d18c 100644 --- a/vendor/golang.org/x/sys/unix/fdset.go +++ b/vendor/golang.org/x/sys/unix/fdset.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package unix diff --git a/vendor/golang.org/x/sys/unix/fstatfs_zos.go b/vendor/golang.org/x/sys/unix/fstatfs_zos.go deleted file mode 100644 index e377cc9f..00000000 --- a/vendor/golang.org/x/sys/unix/fstatfs_zos.go +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build zos && s390x -// +build zos,s390x - -package unix - -import ( - "unsafe" -) - -// This file simulates fstatfs on z/OS using fstatvfs and w_getmntent. - -func Fstatfs(fd int, stat *Statfs_t) (err error) { - var stat_v Statvfs_t - err = Fstatvfs(fd, &stat_v) - if err == nil { - // populate stat - stat.Type = 0 - stat.Bsize = stat_v.Bsize - stat.Blocks = stat_v.Blocks - stat.Bfree = stat_v.Bfree - stat.Bavail = stat_v.Bavail - stat.Files = stat_v.Files - stat.Ffree = stat_v.Ffree - stat.Fsid = stat_v.Fsid - stat.Namelen = stat_v.Namemax - stat.Frsize = stat_v.Frsize - stat.Flags = stat_v.Flag - for passn := 0; passn < 5; passn++ { - switch passn { - case 0: - err = tryGetmntent64(stat) - break - case 1: - err = tryGetmntent128(stat) - break - case 2: - err = tryGetmntent256(stat) - break - case 3: - err = tryGetmntent512(stat) - break - case 4: - err = tryGetmntent1024(stat) - break - default: - break - } - //proceed to return if: err is nil (found), err is nonnil but not ERANGE (another error occurred) - if err == nil || err != nil && err != ERANGE { - break - } - } - } - return err -} - -func tryGetmntent64(stat *Statfs_t) (err error) { - var mnt_ent_buffer struct { - header W_Mnth - filesys_info [64]W_Mntent - } - var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer)) - fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size) - if err != nil { - return err - } - err = ERANGE //return ERANGE if no match is found in this batch - for i := 0; i < fs_count; i++ { - if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) { - stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0]) - err = nil - break - } - } - return err -} - -func tryGetmntent128(stat *Statfs_t) (err error) { - var mnt_ent_buffer struct { - header W_Mnth - filesys_info [128]W_Mntent - } - var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer)) - fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size) - if err != nil { - return err - } - err = ERANGE //return ERANGE if no match is found in this batch - for i := 0; i < fs_count; i++ { - if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) { - stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0]) - err = nil - break - } - } - return err -} - -func tryGetmntent256(stat *Statfs_t) (err error) { - var mnt_ent_buffer struct { - header W_Mnth - filesys_info [256]W_Mntent - } - var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer)) - fs_count, err := 
W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size) - if err != nil { - return err - } - err = ERANGE //return ERANGE if no match is found in this batch - for i := 0; i < fs_count; i++ { - if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) { - stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0]) - err = nil - break - } - } - return err -} - -func tryGetmntent512(stat *Statfs_t) (err error) { - var mnt_ent_buffer struct { - header W_Mnth - filesys_info [512]W_Mntent - } - var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer)) - fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size) - if err != nil { - return err - } - err = ERANGE //return ERANGE if no match is found in this batch - for i := 0; i < fs_count; i++ { - if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) { - stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0]) - err = nil - break - } - } - return err -} - -func tryGetmntent1024(stat *Statfs_t) (err error) { - var mnt_ent_buffer struct { - header W_Mnth - filesys_info [1024]W_Mntent - } - var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer)) - fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size) - if err != nil { - return err - } - err = ERANGE //return ERANGE if no match is found in this batch - for i := 0; i < fs_count; i++ { - if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) { - stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0]) - err = nil - break - } - } - return err -} diff --git a/vendor/golang.org/x/sys/unix/gccgo.go b/vendor/golang.org/x/sys/unix/gccgo.go index b06f52d7..aca5721d 100644 --- a/vendor/golang.org/x/sys/unix/gccgo.go +++ b/vendor/golang.org/x/sys/unix/gccgo.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gccgo && !aix && !hurd -// +build gccgo,!aix,!hurd package unix diff --git a/vendor/golang.org/x/sys/unix/gccgo_c.c b/vendor/golang.org/x/sys/unix/gccgo_c.c index f98a1c54..d468b7b4 100644 --- a/vendor/golang.org/x/sys/unix/gccgo_c.c +++ b/vendor/golang.org/x/sys/unix/gccgo_c.c @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gccgo && !aix && !hurd -// +build gccgo,!aix,!hurd #include #include diff --git a/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go b/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go index e60e49a3..972d61bd 100644 --- a/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gccgo && linux && amd64 -// +build gccgo,linux,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/ifreq_linux.go b/vendor/golang.org/x/sys/unix/ifreq_linux.go index 15721a51..848840ae 100644 --- a/vendor/golang.org/x/sys/unix/ifreq_linux.go +++ b/vendor/golang.org/x/sys/unix/ifreq_linux.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build linux -// +build linux package unix diff --git a/vendor/golang.org/x/sys/unix/ioctl_linux.go b/vendor/golang.org/x/sys/unix/ioctl_linux.go index 0d12c085..dbe680ea 100644 --- a/vendor/golang.org/x/sys/unix/ioctl_linux.go +++ b/vendor/golang.org/x/sys/unix/ioctl_linux.go @@ -231,3 +231,8 @@ func IoctlLoopGetStatus64(fd int) (*LoopInfo64, error) { func IoctlLoopSetStatus64(fd int, value *LoopInfo64) error { return ioctlPtr(fd, LOOP_SET_STATUS64, unsafe.Pointer(value)) } + +// IoctlLoopConfigure configures all loop device parameters in a single step +func IoctlLoopConfigure(fd int, value *LoopConfig) error { + return ioctlPtr(fd, LOOP_CONFIGURE, unsafe.Pointer(value)) +} diff --git a/vendor/golang.org/x/sys/unix/ioctl_signed.go b/vendor/golang.org/x/sys/unix/ioctl_signed.go index 7def9580..5b0759bd 100644 --- a/vendor/golang.org/x/sys/unix/ioctl_signed.go +++ b/vendor/golang.org/x/sys/unix/ioctl_signed.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || solaris -// +build aix solaris package unix diff --git a/vendor/golang.org/x/sys/unix/ioctl_unsigned.go b/vendor/golang.org/x/sys/unix/ioctl_unsigned.go index 649913d1..20f470b9 100644 --- a/vendor/golang.org/x/sys/unix/ioctl_unsigned.go +++ b/vendor/golang.org/x/sys/unix/ioctl_unsigned.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin || dragonfly || freebsd || hurd || linux || netbsd || openbsd -// +build darwin dragonfly freebsd hurd linux netbsd openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ioctl_zos.go b/vendor/golang.org/x/sys/unix/ioctl_zos.go index cdc21bf7..c8b2a750 100644 --- a/vendor/golang.org/x/sys/unix/ioctl_zos.go +++ b/vendor/golang.org/x/sys/unix/ioctl_zos.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build zos && s390x -// +build zos,s390x package unix diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 47fa6a7e..fdcaa974 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -248,6 +248,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -283,10 +284,6 @@ struct ltchars { #include #endif -#ifndef MSG_FASTOPEN -#define MSG_FASTOPEN 0x20000000 -#endif - #ifndef PTRACE_GETREGS #define PTRACE_GETREGS 0xc #endif @@ -295,14 +292,6 @@ struct ltchars { #define PTRACE_SETREGS 0xd #endif -#ifndef SOL_NETLINK -#define SOL_NETLINK 270 -#endif - -#ifndef SOL_SMC -#define SOL_SMC 286 -#endif - #ifdef SOL_BLUETOOTH // SPARC includes this in /usr/include/sparc64-linux-gnu/bits/socket.h // but it is already in bluetooth_linux.go @@ -319,10 +308,23 @@ struct ltchars { #undef TIPC_WAIT_FOREVER #define TIPC_WAIT_FOREVER 0xffffffff -// Copied from linux/l2tp.h -// Including linux/l2tp.h here causes conflicts between linux/in.h -// and netinet/in.h included via net/route.h above. -#define IPPROTO_L2TP 115 +// Copied from linux/netfilter/nf_nat.h +// Including linux/netfilter/nf_nat.h here causes conflicts between linux/in.h +// and netinet/in.h. 
+#define NF_NAT_RANGE_MAP_IPS (1 << 0) +#define NF_NAT_RANGE_PROTO_SPECIFIED (1 << 1) +#define NF_NAT_RANGE_PROTO_RANDOM (1 << 2) +#define NF_NAT_RANGE_PERSISTENT (1 << 3) +#define NF_NAT_RANGE_PROTO_RANDOM_FULLY (1 << 4) +#define NF_NAT_RANGE_PROTO_OFFSET (1 << 5) +#define NF_NAT_RANGE_NETMAP (1 << 6) +#define NF_NAT_RANGE_PROTO_RANDOM_ALL \ + (NF_NAT_RANGE_PROTO_RANDOM | NF_NAT_RANGE_PROTO_RANDOM_FULLY) +#define NF_NAT_RANGE_MASK \ + (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED | \ + NF_NAT_RANGE_PROTO_RANDOM | NF_NAT_RANGE_PERSISTENT | \ + NF_NAT_RANGE_PROTO_RANDOM_FULLY | NF_NAT_RANGE_PROTO_OFFSET | \ + NF_NAT_RANGE_NETMAP) // Copied from linux/hid.h. // Keep in sync with the size of the referenced fields. @@ -519,6 +521,7 @@ ccflags="$@" $2 ~ /^LOCK_(SH|EX|NB|UN)$/ || $2 ~ /^LO_(KEY|NAME)_SIZE$/ || $2 ~ /^LOOP_(CLR|CTL|GET|SET)_/ || + $2 == "LOOP_CONFIGURE" || $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|TCP|MCAST|EVFILT|NOTE|SHUT|PROT|MAP|MREMAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR|LOCAL|TCPOPT|UDP)_/ || $2 ~ /^NFC_(GENL|PROTO|COMM|RF|SE|DIRECTION|LLCP|SOCKPROTO)_/ || $2 ~ /^NFC_.*_(MAX)?SIZE$/ || @@ -560,7 +563,7 @@ ccflags="$@" $2 ~ /^RLIMIT_(AS|CORE|CPU|DATA|FSIZE|LOCKS|MEMLOCK|MSGQUEUE|NICE|NOFILE|NPROC|RSS|RTPRIO|RTTIME|SIGPENDING|STACK)|RLIM_INFINITY/ || $2 ~ /^PRIO_(PROCESS|PGRP|USER)/ || $2 ~ /^CLONE_[A-Z_]+/ || - $2 !~ /^(BPF_TIMEVAL|BPF_FIB_LOOKUP_[A-Z]+)$/ && + $2 !~ /^(BPF_TIMEVAL|BPF_FIB_LOOKUP_[A-Z]+|BPF_F_LINK)$/ && $2 ~ /^(BPF|DLT)_/ || $2 ~ /^AUDIT_/ || $2 ~ /^(CLOCK|TIMER)_/ || @@ -581,7 +584,7 @@ ccflags="$@" $2 ~ /^KEY_(SPEC|REQKEY_DEFL)_/ || $2 ~ /^KEYCTL_/ || $2 ~ /^PERF_/ || - $2 ~ /^SECCOMP_MODE_/ || + $2 ~ /^SECCOMP_/ || $2 ~ /^SEEK_/ || $2 ~ /^SCHED_/ || $2 ~ /^SPLICE_/ || @@ -602,6 +605,9 @@ ccflags="$@" $2 ~ /^FSOPT_/ || $2 ~ /^WDIO[CFS]_/ || $2 ~ /^NFN/ || + $2 !~ /^NFT_META_IIFTYPE/ && + $2 ~ /^NFT_/ || + $2 ~ /^NF_NAT_/ || $2 ~ /^XDP_/ || $2 ~ /^RWF_/ || $2 ~ /^(HDIO|WIN|SMART)_/ || @@ -663,7 +669,6 @@ echo '// mkerrors.sh' "$@" echo '// Code generated by the command above; see README.md. DO NOT EDIT.' echo echo "//go:build ${GOARCH} && ${GOOS}" -echo "// +build ${GOARCH},${GOOS}" echo go tool cgo -godefs -- "$@" _const.go >_error.out cat _error.out | grep -vf _error.grep | grep -vf _signal.grep diff --git a/vendor/golang.org/x/sys/unix/mmap_nomremap.go b/vendor/golang.org/x/sys/unix/mmap_nomremap.go index ca051363..7f602ffd 100644 --- a/vendor/golang.org/x/sys/unix/mmap_nomremap.go +++ b/vendor/golang.org/x/sys/unix/mmap_nomremap.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build aix || darwin || dragonfly || freebsd || openbsd || solaris -// +build aix darwin dragonfly freebsd openbsd solaris +//go:build aix || darwin || dragonfly || freebsd || openbsd || solaris || zos package unix diff --git a/vendor/golang.org/x/sys/unix/mremap.go b/vendor/golang.org/x/sys/unix/mremap.go index fa93d0aa..fd45fe52 100644 --- a/vendor/golang.org/x/sys/unix/mremap.go +++ b/vendor/golang.org/x/sys/unix/mremap.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
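The IoctlLoopConfigure wrapper added earlier in this hunk set collapses the usual LOOP_SET_FD + LOOP_SET_STATUS64 two-step into a single ioctl. A hedged sketch of one way it might be used on Linux (the backing-file path is a placeholder, and only the LoopConfig.Fd field is assumed here; check the generated types for the rest of the layout):

package main

import (
	"fmt"
	"log"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	backing, err := os.Open("/tmp/disk.img") // placeholder backing file
	if err != nil {
		log.Fatal(err)
	}
	defer backing.Close()

	ctl, err := os.OpenFile("/dev/loop-control", os.O_RDWR, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer ctl.Close()

	// Ask the kernel for a free loop device number.
	n, err := unix.IoctlRetInt(int(ctl.Fd()), unix.LOOP_CTL_GET_FREE)
	if err != nil {
		log.Fatal(err)
	}
	loop, err := os.OpenFile(fmt.Sprintf("/dev/loop%d", n), os.O_RDWR, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer loop.Close()

	// Attach the backing file and configure the device in one ioctl.
	var cfg unix.LoopConfig
	cfg.Fd = uint32(backing.Fd())
	if err := unix.IoctlLoopConfigure(int(loop.Fd()), &cfg); err != nil {
		log.Fatal(err)
	}
}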
//go:build linux || netbsd -// +build linux netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/pagesize_unix.go b/vendor/golang.org/x/sys/unix/pagesize_unix.go index 53f1b4c5..0482408d 100644 --- a/vendor/golang.org/x/sys/unix/pagesize_unix.go +++ b/vendor/golang.org/x/sys/unix/pagesize_unix.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos // For Unix, get the pagesize from the runtime. diff --git a/vendor/golang.org/x/sys/unix/pledge_openbsd.go b/vendor/golang.org/x/sys/unix/pledge_openbsd.go index eb48294b..6a09af53 100644 --- a/vendor/golang.org/x/sys/unix/pledge_openbsd.go +++ b/vendor/golang.org/x/sys/unix/pledge_openbsd.go @@ -8,54 +8,31 @@ import ( "errors" "fmt" "strconv" - "syscall" - "unsafe" ) // Pledge implements the pledge syscall. // -// The pledge syscall does not accept execpromises on OpenBSD releases -// before 6.3. -// -// execpromises must be empty when Pledge is called on OpenBSD -// releases predating 6.3, otherwise an error will be returned. +// This changes both the promises and execpromises; use PledgePromises or +// PledgeExecpromises to only change the promises or execpromises +// respectively. // // For more information see pledge(2). func Pledge(promises, execpromises string) error { - maj, min, err := majmin() - if err != nil { + if err := pledgeAvailable(); err != nil { return err } - err = pledgeAvailable(maj, min, execpromises) + pptr, err := BytePtrFromString(promises) if err != nil { return err } - pptr, err := syscall.BytePtrFromString(promises) + exptr, err := BytePtrFromString(execpromises) if err != nil { return err } - // This variable will hold either a nil unsafe.Pointer or - // an unsafe.Pointer to a string (execpromises). - var expr unsafe.Pointer - - // If we're running on OpenBSD > 6.2, pass execpromises to the syscall. - if maj > 6 || (maj == 6 && min > 2) { - exptr, err := syscall.BytePtrFromString(execpromises) - if err != nil { - return err - } - expr = unsafe.Pointer(exptr) - } - - _, _, e := syscall.Syscall(SYS_PLEDGE, uintptr(unsafe.Pointer(pptr)), uintptr(expr), 0) - if e != 0 { - return e - } - - return nil + return pledge(pptr, exptr) } // PledgePromises implements the pledge syscall. @@ -64,30 +41,16 @@ func Pledge(promises, execpromises string) error { // // For more information see pledge(2). func PledgePromises(promises string) error { - maj, min, err := majmin() - if err != nil { - return err - } - - err = pledgeAvailable(maj, min, "") - if err != nil { + if err := pledgeAvailable(); err != nil { return err } - // This variable holds the execpromises and is always nil. - var expr unsafe.Pointer - - pptr, err := syscall.BytePtrFromString(promises) + pptr, err := BytePtrFromString(promises) if err != nil { return err } - _, _, e := syscall.Syscall(SYS_PLEDGE, uintptr(unsafe.Pointer(pptr)), uintptr(expr), 0) - if e != 0 { - return e - } - - return nil + return pledge(pptr, nil) } // PledgeExecpromises implements the pledge syscall. @@ -96,30 +59,16 @@ func PledgePromises(promises string) error { // // For more information see pledge(2). 
func PledgeExecpromises(execpromises string) error { - maj, min, err := majmin() - if err != nil { + if err := pledgeAvailable(); err != nil { return err } - err = pledgeAvailable(maj, min, execpromises) + exptr, err := BytePtrFromString(execpromises) if err != nil { return err } - // This variable holds the promises and is always nil. - var pptr unsafe.Pointer - - exptr, err := syscall.BytePtrFromString(execpromises) - if err != nil { - return err - } - - _, _, e := syscall.Syscall(SYS_PLEDGE, uintptr(pptr), uintptr(unsafe.Pointer(exptr)), 0) - if e != 0 { - return e - } - - return nil + return pledge(nil, exptr) } // majmin returns major and minor version number for an OpenBSD system. @@ -147,16 +96,15 @@ func majmin() (major int, minor int, err error) { // pledgeAvailable checks for availability of the pledge(2) syscall // based on the running OpenBSD version. -func pledgeAvailable(maj, min int, execpromises string) error { - // If OpenBSD <= 5.9, pledge is not available. - if (maj == 5 && min != 9) || maj < 5 { - return fmt.Errorf("pledge syscall is not available on OpenBSD %d.%d", maj, min) +func pledgeAvailable() error { + maj, min, err := majmin() + if err != nil { + return err } - // If OpenBSD <= 6.2 and execpromises is not empty, - // return an error - execpromises is not available before 6.3 - if (maj < 6 || (maj == 6 && min <= 2)) && execpromises != "" { - return fmt.Errorf("cannot use execpromises on OpenBSD %d.%d", maj, min) + // Require OpenBSD 6.4 as a minimum. + if maj < 6 || (maj == 6 && min <= 3) { + return fmt.Errorf("cannot call Pledge on OpenBSD %d.%d", maj, min) } return nil diff --git a/vendor/golang.org/x/sys/unix/ptrace_darwin.go b/vendor/golang.org/x/sys/unix/ptrace_darwin.go index 463c3eff..3f0975f3 100644 --- a/vendor/golang.org/x/sys/unix/ptrace_darwin.go +++ b/vendor/golang.org/x/sys/unix/ptrace_darwin.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin && !ios -// +build darwin,!ios package unix diff --git a/vendor/golang.org/x/sys/unix/ptrace_ios.go b/vendor/golang.org/x/sys/unix/ptrace_ios.go index ed0509a0..a4d35db5 100644 --- a/vendor/golang.org/x/sys/unix/ptrace_ios.go +++ b/vendor/golang.org/x/sys/unix/ptrace_ios.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build ios -// +build ios package unix diff --git a/vendor/golang.org/x/sys/unix/race.go b/vendor/golang.org/x/sys/unix/race.go index 6f6c5fec..714d2aae 100644 --- a/vendor/golang.org/x/sys/unix/race.go +++ b/vendor/golang.org/x/sys/unix/race.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build (darwin && race) || (linux && race) || (freebsd && race) -// +build darwin,race linux,race freebsd,race package unix diff --git a/vendor/golang.org/x/sys/unix/race0.go b/vendor/golang.org/x/sys/unix/race0.go index 706e1322..4a9f6634 100644 --- a/vendor/golang.org/x/sys/unix/race0.go +++ b/vendor/golang.org/x/sys/unix/race0.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
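The reworked pledge wrappers above keep the same exported API; a minimal OpenBSD sketch (requires 6.4 or newer per the new pledgeAvailable check, illustrative only):

package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	// Drop everything except stdio and read-only filesystem access, and
	// restrict any exec'd children to stdio only.
	if err := unix.Pledge("stdio rpath exec", "stdio"); err != nil {
		log.Fatal(err)
	}

	// Promises can only be narrowed later, e.g. once all files are open.
	if err := unix.PledgePromises("stdio"); err != nil {
		log.Fatal(err)
	}
}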
//go:build aix || (darwin && !race) || (linux && !race) || (freebsd && !race) || netbsd || openbsd || solaris || dragonfly || zos -// +build aix darwin,!race linux,!race freebsd,!race netbsd openbsd solaris dragonfly zos package unix diff --git a/vendor/golang.org/x/sys/unix/readdirent_getdents.go b/vendor/golang.org/x/sys/unix/readdirent_getdents.go index 4d625756..dbd2b6cc 100644 --- a/vendor/golang.org/x/sys/unix/readdirent_getdents.go +++ b/vendor/golang.org/x/sys/unix/readdirent_getdents.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || dragonfly || freebsd || linux || netbsd || openbsd -// +build aix dragonfly freebsd linux netbsd openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go b/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go index 2a4ba47c..b903c006 100644 --- a/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go +++ b/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build darwin -// +build darwin +//go:build darwin || zos package unix diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_unix.go b/vendor/golang.org/x/sys/unix/sockcmsg_unix.go index 3865943f..c3a62dbb 100644 --- a/vendor/golang.org/x/sys/unix/sockcmsg_unix.go +++ b/vendor/golang.org/x/sys/unix/sockcmsg_unix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos // Socket control messages diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go b/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go index 0840fe4a..4a1eab37 100644 --- a/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go +++ b/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin freebsd linux netbsd openbsd solaris zos package unix diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_zos.go b/vendor/golang.org/x/sys/unix/sockcmsg_zos.go new file mode 100644 index 00000000..3e53dbc0 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/sockcmsg_zos.go @@ -0,0 +1,58 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Socket control messages + +package unix + +import "unsafe" + +// UnixCredentials encodes credentials into a socket control message +// for sending to another process. This can be used for +// authentication. +func UnixCredentials(ucred *Ucred) []byte { + b := make([]byte, CmsgSpace(SizeofUcred)) + h := (*Cmsghdr)(unsafe.Pointer(&b[0])) + h.Level = SOL_SOCKET + h.Type = SCM_CREDENTIALS + h.SetLen(CmsgLen(SizeofUcred)) + *(*Ucred)(h.data(0)) = *ucred + return b +} + +// ParseUnixCredentials decodes a socket control message that contains +// credentials in a Ucred structure. To receive such a message, the +// SO_PASSCRED option must be enabled on the socket. 
+func ParseUnixCredentials(m *SocketControlMessage) (*Ucred, error) { + if m.Header.Level != SOL_SOCKET { + return nil, EINVAL + } + if m.Header.Type != SCM_CREDENTIALS { + return nil, EINVAL + } + ucred := *(*Ucred)(unsafe.Pointer(&m.Data[0])) + return &ucred, nil +} + +// PktInfo4 encodes Inet4Pktinfo into a socket control message of type IP_PKTINFO. +func PktInfo4(info *Inet4Pktinfo) []byte { + b := make([]byte, CmsgSpace(SizeofInet4Pktinfo)) + h := (*Cmsghdr)(unsafe.Pointer(&b[0])) + h.Level = SOL_IP + h.Type = IP_PKTINFO + h.SetLen(CmsgLen(SizeofInet4Pktinfo)) + *(*Inet4Pktinfo)(h.data(0)) = *info + return b +} + +// PktInfo6 encodes Inet6Pktinfo into a socket control message of type IPV6_PKTINFO. +func PktInfo6(info *Inet6Pktinfo) []byte { + b := make([]byte, CmsgSpace(SizeofInet6Pktinfo)) + h := (*Cmsghdr)(unsafe.Pointer(&b[0])) + h.Level = SOL_IPV6 + h.Type = IPV6_PKTINFO + h.SetLen(CmsgLen(SizeofInet6Pktinfo)) + *(*Inet6Pktinfo)(h.data(0)) = *info + return b +} diff --git a/vendor/golang.org/x/sys/unix/symaddr_zos_s390x.s b/vendor/golang.org/x/sys/unix/symaddr_zos_s390x.s new file mode 100644 index 00000000..3c4f33cb --- /dev/null +++ b/vendor/golang.org/x/sys/unix/symaddr_zos_s390x.s @@ -0,0 +1,75 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build zos && s390x && gc + +#include "textflag.h" + +// provide the address of function variable to be fixed up. + +TEXT ·getPipe2Addr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Pipe2(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_FlockAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Flock(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_GetxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Getxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_NanosleepAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Nanosleep(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_SetxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Setxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_Wait4Addr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Wait4(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_MountAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Mount(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_UnmountAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Unmount(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_UtimesNanoAtAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·UtimesNanoAt(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_UtimesNanoAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·UtimesNano(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_MkfifoatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Mkfifoat(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_ChtagAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Chtag(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_ReadlinkatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Readlinkat(SB), R8 + MOVD R8, ret+0(FP) + RET + diff --git a/vendor/golang.org/x/sys/unix/syscall.go b/vendor/golang.org/x/sys/unix/syscall.go index 63e8c838..5ea74da9 100644 --- a/vendor/golang.org/x/sys/unix/syscall.go +++ b/vendor/golang.org/x/sys/unix/syscall.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos // Package unix contains an interface to the low-level operating system // primitives. 
OS details vary depending on the underlying system, and diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go index e94e6cda..67ce6cef 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix -// +build aix // Aix system calls. // This file is compiled as ordinary Go code, @@ -107,7 +106,8 @@ func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) { if n > 0 { sl += _Socklen(n) + 1 } - if sa.raw.Path[0] == '@' { + if sa.raw.Path[0] == '@' || (sa.raw.Path[0] == 0 && sl > 3) { + // Check sl > 3 so we don't change unnamed socket behavior. sa.raw.Path[0] = 0 // Don't count trailing NUL for abstract address. sl-- diff --git a/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go index f2871fa9..1fdaa476 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix && ppc -// +build aix,ppc package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go index 75718ec0..c87f9a9f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix && ppc64 -// +build aix,ppc64 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go index 4217de51..a00c3e54 100644 --- a/vendor/golang.org/x/sys/unix/syscall_bsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin || dragonfly || freebsd || netbsd || openbsd -// +build darwin dragonfly freebsd netbsd openbsd // BSD system call wrappers shared by *BSD based systems // including OS X (Darwin) and FreeBSD. Like the other @@ -317,7 +316,7 @@ func GetsockoptString(fd, level, opt int) (string, error) { if err != nil { return "", err } - return string(buf[:vallen-1]), nil + return ByteSliceToString(buf[:vallen]), nil } //sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go index b37310ce..0eaecf5f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && darwin -// +build amd64,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go index d51ec996..f36c6707 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
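The sockcmsg_zos.go helpers added a little further up mirror the existing Linux versions; a small round-trip sketch (illustrative only — the Ucred field layout is assumed to match Linux, and in real use the receiving socket needs SO_PASSCRED enabled with the control message arriving via Recvmsg):

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	// Encode our credentials into an SCM_CREDENTIALS control message ...
	oob := unix.UnixCredentials(&unix.Ucred{
		Pid: int32(unix.Getpid()),
		Uid: uint32(unix.Getuid()),
		Gid: uint32(unix.Getgid()),
	})

	// ... and decode it again, as a receiver would after reading it.
	msgs, err := unix.ParseSocketControlMessage(oob)
	if err != nil {
		log.Fatal(err)
	}
	cred, err := unix.ParseUnixCredentials(&msgs[0])
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("pid=%d uid=%d gid=%d\n", cred.Pid, cred.Uid, cred.Gid)
}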
//go:build arm64 && darwin -// +build arm64,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go b/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go index 53c96641..2f0fa76e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build darwin && go1.12 -// +build darwin,go1.12 +//go:build darwin package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go index 4e2d3212..14bab6b2 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && dragonfly -// +build amd64,dragonfly package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go index 64d1bb4d..2b57e0f7 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -13,6 +13,7 @@ package unix import ( + "errors" "sync" "unsafe" ) @@ -169,25 +170,26 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { func Uname(uname *Utsname) error { mib := []_C_int{CTL_KERN, KERN_OSTYPE} n := unsafe.Sizeof(uname.Sysname) - if err := sysctl(mib, &uname.Sysname[0], &n, nil, 0); err != nil { + // Suppress ENOMEM errors to be compatible with the C library __xuname() implementation. + if err := sysctl(mib, &uname.Sysname[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } mib = []_C_int{CTL_KERN, KERN_HOSTNAME} n = unsafe.Sizeof(uname.Nodename) - if err := sysctl(mib, &uname.Nodename[0], &n, nil, 0); err != nil { + if err := sysctl(mib, &uname.Nodename[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } mib = []_C_int{CTL_KERN, KERN_OSRELEASE} n = unsafe.Sizeof(uname.Release) - if err := sysctl(mib, &uname.Release[0], &n, nil, 0); err != nil { + if err := sysctl(mib, &uname.Release[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } mib = []_C_int{CTL_KERN, KERN_VERSION} n = unsafe.Sizeof(uname.Version) - if err := sysctl(mib, &uname.Version[0], &n, nil, 0); err != nil { + if err := sysctl(mib, &uname.Version[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } @@ -205,7 +207,7 @@ func Uname(uname *Utsname) error { mib = []_C_int{CTL_HW, HW_MACHINE} n = unsafe.Sizeof(uname.Machine) - if err := sysctl(mib, &uname.Machine[0], &n, nil, 0); err != nil { + if err := sysctl(mib, &uname.Machine[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go index b8da5100..3967bca7 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build 386 && freebsd -// +build 386,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go index 47155c48..eff19ada 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
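For the Uname change above, a short call-site sketch; ByteSliceToString is the same helper the GetsockoptString fix switches to (illustrative, not part of the patch):

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	var uts unix.Utsname
	if err := unix.Uname(&uts); err != nil {
		log.Fatal(err)
	}
	// The Utsname fields are fixed-size byte arrays; ByteSliceToString
	// stops at the first NUL, matching the new GetsockoptString behaviour.
	fmt.Println(unix.ByteSliceToString(uts.Sysname[:]),
		unix.ByteSliceToString(uts.Release[:]))
}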
//go:build amd64 && freebsd -// +build amd64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go index 08932093..4f24b517 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm && freebsd -// +build arm,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go index d151a0d0..ac30759e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm64 && freebsd -// +build arm64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go index d5cd64b3..aab725ca 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build riscv64 && freebsd -// +build riscv64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_hurd.go b/vendor/golang.org/x/sys/unix/syscall_hurd.go index 381fd467..ba46651f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_hurd.go +++ b/vendor/golang.org/x/sys/unix/syscall_hurd.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build hurd -// +build hurd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_hurd_386.go b/vendor/golang.org/x/sys/unix/syscall_hurd_386.go index 7cf54a3e..df89f9e6 100644 --- a/vendor/golang.org/x/sys/unix/syscall_hurd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_hurd_386.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build 386 && hurd -// +build 386,hurd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_illumos.go b/vendor/golang.org/x/sys/unix/syscall_illumos.go index 87db5a6a..a863f705 100644 --- a/vendor/golang.org/x/sys/unix/syscall_illumos.go +++ b/vendor/golang.org/x/sys/unix/syscall_illumos.go @@ -5,7 +5,6 @@ // illumos system calls not present on Solaris. //go:build amd64 && illumos -// +build amd64,illumos package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index fb4e5022..5682e262 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -61,15 +61,23 @@ func FanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname string) ( } //sys fchmodat(dirfd int, path string, mode uint32) (err error) - -func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { - // Linux fchmodat doesn't support the flags parameter. Mimick glibc's behavior - // and check the flags. Otherwise the mode would be applied to the symlink - // destination which is not what the user expects. - if flags&^AT_SYMLINK_NOFOLLOW != 0 { - return EINVAL - } else if flags&AT_SYMLINK_NOFOLLOW != 0 { - return EOPNOTSUPP +//sys fchmodat2(dirfd int, path string, mode uint32, flags int) (err error) + +func Fchmodat(dirfd int, path string, mode uint32, flags int) error { + // Linux fchmodat doesn't support the flags parameter, but fchmodat2 does. + // Try fchmodat2 if flags are specified. 
+ if flags != 0 { + err := fchmodat2(dirfd, path, mode, flags) + if err == ENOSYS { + // fchmodat2 isn't available. If the flags are known to be valid, + // return EOPNOTSUPP to indicate that fchmodat doesn't support them. + if flags&^(AT_SYMLINK_NOFOLLOW|AT_EMPTY_PATH) != 0 { + return EINVAL + } else if flags&(AT_SYMLINK_NOFOLLOW|AT_EMPTY_PATH) != 0 { + return EOPNOTSUPP + } + } + return err } return fchmodat(dirfd, path, mode) } @@ -417,7 +425,8 @@ func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) { if n > 0 { sl += _Socklen(n) + 1 } - if sa.raw.Path[0] == '@' { + if sa.raw.Path[0] == '@' || (sa.raw.Path[0] == 0 && sl > 3) { + // Check sl > 3 so we don't change unnamed socket behavior. sa.raw.Path[0] = 0 // Don't count trailing NUL for abstract address. sl-- @@ -1301,7 +1310,7 @@ func GetsockoptString(fd, level, opt int) (string, error) { return "", err } } - return string(buf[:vallen-1]), nil + return ByteSliceToString(buf[:vallen]), nil } func GetsockoptTpacketStats(fd, level, opt int) (*TpacketStats, error) { @@ -1840,6 +1849,105 @@ func Dup2(oldfd, newfd int) error { //sys Fsmount(fd int, flags int, mountAttrs int) (fsfd int, err error) //sys Fsopen(fsName string, flags int) (fd int, err error) //sys Fspick(dirfd int, pathName string, flags int) (fd int, err error) + +//sys fsconfig(fd int, cmd uint, key *byte, value *byte, aux int) (err error) + +func fsconfigCommon(fd int, cmd uint, key string, value *byte, aux int) (err error) { + var keyp *byte + if keyp, err = BytePtrFromString(key); err != nil { + return + } + return fsconfig(fd, cmd, keyp, value, aux) +} + +// FsconfigSetFlag is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_FLAG. +// +// fd is the filesystem context to act upon. +// key the parameter key to set. +func FsconfigSetFlag(fd int, key string) (err error) { + return fsconfigCommon(fd, FSCONFIG_SET_FLAG, key, nil, 0) +} + +// FsconfigSetString is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_STRING. +// +// fd is the filesystem context to act upon. +// key the parameter key to set. +// value is the parameter value to set. +func FsconfigSetString(fd int, key string, value string) (err error) { + var valuep *byte + if valuep, err = BytePtrFromString(value); err != nil { + return + } + return fsconfigCommon(fd, FSCONFIG_SET_STRING, key, valuep, 0) +} + +// FsconfigSetBinary is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_BINARY. +// +// fd is the filesystem context to act upon. +// key the parameter key to set. +// value is the parameter value to set. +func FsconfigSetBinary(fd int, key string, value []byte) (err error) { + if len(value) == 0 { + return EINVAL + } + return fsconfigCommon(fd, FSCONFIG_SET_BINARY, key, &value[0], len(value)) +} + +// FsconfigSetPath is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_PATH. +// +// fd is the filesystem context to act upon. +// key the parameter key to set. +// path is a non-empty path for specified key. +// atfd is a file descriptor at which to start lookup from or AT_FDCWD. +func FsconfigSetPath(fd int, key string, path string, atfd int) (err error) { + var valuep *byte + if valuep, err = BytePtrFromString(path); err != nil { + return + } + return fsconfigCommon(fd, FSCONFIG_SET_PATH, key, valuep, atfd) +} + +// FsconfigSetPathEmpty is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_PATH_EMPTY. The same as +// FconfigSetPath but with AT_PATH_EMPTY implied. 
+func FsconfigSetPathEmpty(fd int, key string, path string, atfd int) (err error) { + var valuep *byte + if valuep, err = BytePtrFromString(path); err != nil { + return + } + return fsconfigCommon(fd, FSCONFIG_SET_PATH_EMPTY, key, valuep, atfd) +} + +// FsconfigSetFd is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_FD. +// +// fd is the filesystem context to act upon. +// key the parameter key to set. +// value is a file descriptor to be assigned to specified key. +func FsconfigSetFd(fd int, key string, value int) (err error) { + return fsconfigCommon(fd, FSCONFIG_SET_FD, key, nil, value) +} + +// FsconfigCreate is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_CMD_CREATE. +// +// fd is the filesystem context to act upon. +func FsconfigCreate(fd int) (err error) { + return fsconfig(fd, FSCONFIG_CMD_CREATE, nil, nil, 0) +} + +// FsconfigReconfigure is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_CMD_RECONFIGURE. +// +// fd is the filesystem context to act upon. +func FsconfigReconfigure(fd int) (err error) { + return fsconfig(fd, FSCONFIG_CMD_RECONFIGURE, nil, nil, 0) +} + //sys Getdents(fd int, buf []byte) (n int, err error) = SYS_GETDENTS64 //sysnb Getpgid(pid int) (pgid int, err error) @@ -2482,3 +2590,5 @@ func SchedGetAttr(pid int, flags uint) (*SchedAttr, error) { } return attr, nil } + +//sys Cachestat(fd uint, crange *CachestatRange, cstat *Cachestat_t, flags uint) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_386.go index c7d9945e..506dafa7 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_386.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build 386 && linux -// +build 386,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_alarm.go b/vendor/golang.org/x/sys/unix/syscall_linux_alarm.go index 08086ac6..38d55641 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_alarm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_alarm.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && (386 || amd64 || mips || mipsle || mips64 || mipsle || ppc64 || ppc64le || ppc || s390x || sparc64) -// +build linux -// +build 386 amd64 mips mipsle mips64 mipsle ppc64 ppc64le ppc s390x sparc64 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go index 70601ce3..d557cf8d 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && linux -// +build amd64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go index 8b0f0f3a..facdb83b 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && linux && gc -// +build amd64,linux,gc package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go index da298641..cd2dd797 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build arm && linux -// +build arm,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go index f5266689..cf2ee6c7 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm64 && linux -// +build arm64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gc.go b/vendor/golang.org/x/sys/unix/syscall_linux_gc.go index 2b1168d7..ffc4c2b6 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gc.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gc.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && gc -// +build linux,gc package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go index 9843fb48..9ebfdcf4 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && gc && 386 -// +build linux,gc,386 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go index a6008fcc..5f2b57c4 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm && gc && linux -// +build arm,gc,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go index 7740af24..d1a3ad82 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && gccgo && 386 -// +build linux,gccgo,386 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go index e16a1229..f2f67423 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && gccgo && arm -// +build linux,gccgo,arm package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go index f6ab02ec..3d0e9845 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build loong64 && linux -// +build loong64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go index 93fe59d2..70963a95 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build linux && (mips64 || mips64le) -// +build linux -// +build mips64 mips64le package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go index aae7f0ff..c218ebd2 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && (mips || mipsle) -// +build linux -// +build mips mipsle package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go index 66eff19a..e6c48500 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && ppc -// +build linux,ppc package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go index 806aa257..7286a9aa 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && (ppc64 || ppc64le) -// +build linux -// +build ppc64 ppc64le package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go index 5e6ceee1..6f5a2889 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build riscv64 && linux -// +build riscv64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go index 2f89e8f5..66f31210 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build s390x && linux -// +build s390x,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go index 7ca064ae..11d1f169 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build sparc64 && linux -// +build sparc64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go index 5199d282..7a5eb574 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build 386 && netbsd -// +build 386,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go index 70a9c52e..62d8957a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build amd64 && netbsd -// +build amd64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go index 3eb5942f..ce6a0688 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm && netbsd -// +build arm,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go index fc6ccfd8..d46d689d 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm64 && netbsd -// +build arm64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go index 6f34479b..b25343c7 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -137,18 +137,13 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e } func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { - var _p0 unsafe.Pointer + var bufptr *Statfs_t var bufsize uintptr if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) + bufptr = &buf[0] bufsize = unsafe.Sizeof(Statfs_t{}) * uintptr(len(buf)) } - r0, _, e1 := Syscall(SYS_GETFSSTAT, uintptr(_p0), bufsize, uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = e1 - } - return + return getfsstat(bufptr, bufsize, flags) } //sysnb getresuid(ruid *_C_int, euid *_C_int, suid *_C_int) @@ -171,6 +166,20 @@ func Getresgid() (rgid, egid, sgid int) { //sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL +//sys fcntl(fd int, cmd int, arg int) (n int, err error) +//sys fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) = SYS_FCNTL + +// FcntlInt performs a fcntl syscall on fd with the provided command and argument. +func FcntlInt(fd uintptr, cmd, arg int) (int, error) { + return fcntl(int(fd), cmd, arg) +} + +// FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW command. +func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error { + _, err := fcntlPtr(int(fd), cmd, unsafe.Pointer(lk)) + return err +} + //sys ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) func Ppoll(fds []PollFd, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { @@ -326,4 +335,7 @@ func Uname(uname *Utsname) error { //sys write(fd int, p []byte) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) //sys munmap(addr uintptr, length uintptr) (err error) +//sys getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) //sys utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) +//sys pledge(promises *byte, execpromises *byte) (err error) +//sys unveil(path *byte, flags *byte) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go index 6baabcdc..9ddc89f4 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build 386 && openbsd -// +build 386,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go index bab25360..70a3c96e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && openbsd -// +build amd64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go index 8eed3c4d..265caa87 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm && openbsd -// +build arm,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go index 483dde99..ac4fda17 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm64 && openbsd -// +build arm64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go index 04aa43f4..0a451e6d 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build openbsd -// +build openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_ppc64.go index c2796139..30a308cb 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_ppc64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build ppc64 && openbsd -// +build ppc64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_riscv64.go index 23199a7f..ea954330 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_riscv64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build riscv64 && openbsd -// +build riscv64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index b99cfa13..21974af0 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -128,7 +128,8 @@ func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) { if n > 0 { sl += _Socklen(n) + 1 } - if sa.raw.Path[0] == '@' { + if sa.raw.Path[0] == '@' || (sa.raw.Path[0] == 0 && sl > 3) { + // Check sl > 3 so we don't change unnamed socket behavior. sa.raw.Path[0] = 0 // Don't count trailing NUL for abstract address. 
sl-- @@ -157,7 +158,7 @@ func GetsockoptString(fd, level, opt int) (string, error) { if err != nil { return "", err } - return string(buf[:vallen-1]), nil + return ByteSliceToString(buf[:vallen]), nil } const ImplementsGetwd = true diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go index 0bd25ef8..e02d8cea 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && solaris -// +build amd64,solaris package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go index f6eda270..77081de8 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_unix_gc.go b/vendor/golang.org/x/sys/unix/syscall_unix_gc.go index b6919ca5..05c95bcc 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix_gc.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix_gc.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (darwin || dragonfly || freebsd || (linux && !ppc64 && !ppc64le) || netbsd || openbsd || solaris) && gc -// +build darwin dragonfly freebsd linux,!ppc64,!ppc64le netbsd openbsd solaris -// +build gc package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go index f6f707ac..23f39b7a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && (ppc64le || ppc64) && gc -// +build linux -// +build ppc64le ppc64 -// +build gc package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go index 4596d041..312ae6ac 100644 --- a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go @@ -3,13 +3,22 @@ // license that can be found in the LICENSE file. //go:build zos && s390x -// +build zos,s390x + +// Many of the following syscalls are not available on all versions of z/OS. +// Some missing calls have legacy implementations/simulations but others +// will be missing completely. To achieve consistent failing behaviour on +// legacy systems, we first test the function pointer via a safeloading +// mechanism to see if the function exists on a given system. Then execution +// is branched to either continue the function call, or return an error. 
package unix import ( "bytes" "fmt" + "os" + "reflect" + "regexp" "runtime" "sort" "strings" @@ -18,17 +27,205 @@ import ( "unsafe" ) +//go:noescape +func initZosLibVec() + +//go:noescape +func GetZosLibVec() uintptr + +func init() { + initZosLibVec() + r0, _, _ := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS_____GETENV_A<<4, uintptr(unsafe.Pointer(&([]byte("__ZOS_XSYSTRACE\x00"))[0]))) + if r0 != 0 { + n, _, _ := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS___ATOI_A<<4, r0) + ZosTraceLevel = int(n) + r0, _, _ := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS_____GETENV_A<<4, uintptr(unsafe.Pointer(&([]byte("__ZOS_XSYSTRACEFD\x00"))[0]))) + if r0 != 0 { + fd, _, _ := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS___ATOI_A<<4, r0) + f := os.NewFile(fd, "zostracefile") + if f != nil { + ZosTracefile = f + } + } + + } +} + +//go:noescape +func CallLeFuncWithErr(funcdesc uintptr, parms ...uintptr) (ret, errno2 uintptr, err Errno) + +//go:noescape +func CallLeFuncWithPtrReturn(funcdesc uintptr, parms ...uintptr) (ret, errno2 uintptr, err Errno) + +// ------------------------------- +// pointer validity test +// good pointer returns 0 +// bad pointer returns 1 +// +//go:nosplit +func ptrtest(uintptr) uint64 + +// Load memory at ptr location with error handling if the location is invalid +// +//go:noescape +func safeload(ptr uintptr) (value uintptr, error uintptr) + const ( - O_CLOEXEC = 0 // Dummy value (not supported). - AF_LOCAL = AF_UNIX // AF_LOCAL is an alias for AF_UNIX + entrypointLocationOffset = 8 // From function descriptor + + xplinkEyecatcher = 0x00c300c500c500f1 // ".C.E.E.1" + eyecatcherOffset = 16 // From function entrypoint (negative) + ppa1LocationOffset = 8 // From function entrypoint (negative) + + nameLenOffset = 0x14 // From PPA1 start + nameOffset = 0x16 // From PPA1 start ) -func syscall_syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) -func syscall_rawsyscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) -func syscall_syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) -func syscall_rawsyscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) -func syscall_syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err Errno) -func syscall_rawsyscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err Errno) +func getPpaOffset(funcptr uintptr) int64 { + entrypoint, err := safeload(funcptr + entrypointLocationOffset) + if err != 0 { + return -1 + } + + // XPLink functions have ".C.E.E.1" as the first 8 bytes (EBCDIC) + val, err := safeload(entrypoint - eyecatcherOffset) + if err != 0 { + return -1 + } + if val != xplinkEyecatcher { + return -1 + } + + ppaoff, err := safeload(entrypoint - ppa1LocationOffset) + if err != 0 { + return -1 + } + + ppaoff >>= 32 + return int64(ppaoff) +} + +//------------------------------- +// function descriptor pointer validity test +// good pointer returns 0 +// bad pointer returns 1 + +// TODO: currently mksyscall_zos_s390x.go generate empty string for funcName +// have correct funcName pass to the funcptrtest function +func funcptrtest(funcptr uintptr, funcName string) uint64 { + entrypoint, err := safeload(funcptr + entrypointLocationOffset) + if err != 0 { + return 1 + } + + ppaoff := getPpaOffset(funcptr) + if ppaoff == -1 { + return 1 + } + + // PPA1 offset value is from the start of the entire function block, not the entrypoint + ppa1 := (entrypoint - eyecatcherOffset) + uintptr(ppaoff) + + nameLen, err := safeload(ppa1 + nameLenOffset) + if err != 0 
{ + return 1 + } + + nameLen >>= 48 + if nameLen > 128 { + return 1 + } + + // no function name input to argument end here + if funcName == "" { + return 0 + } + + var funcname [128]byte + for i := 0; i < int(nameLen); i += 8 { + v, err := safeload(ppa1 + nameOffset + uintptr(i)) + if err != 0 { + return 1 + } + funcname[i] = byte(v >> 56) + funcname[i+1] = byte(v >> 48) + funcname[i+2] = byte(v >> 40) + funcname[i+3] = byte(v >> 32) + funcname[i+4] = byte(v >> 24) + funcname[i+5] = byte(v >> 16) + funcname[i+6] = byte(v >> 8) + funcname[i+7] = byte(v) + } + + runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___E2A_L<<4, // __e2a_l + []uintptr{uintptr(unsafe.Pointer(&funcname[0])), nameLen}) + + name := string(funcname[:nameLen]) + if name != funcName { + return 1 + } + + return 0 +} + +// For detection of capabilities on a system. +// Is function descriptor f a valid function? +func isValidLeFunc(f uintptr) error { + ret := funcptrtest(f, "") + if ret != 0 { + return fmt.Errorf("Bad pointer, not an LE function ") + } + return nil +} + +// Retrieve function name from descriptor +func getLeFuncName(f uintptr) (string, error) { + // assume it has been checked, only check ppa1 validity here + entry := ((*[2]uintptr)(unsafe.Pointer(f)))[1] + preamp := ((*[4]uint32)(unsafe.Pointer(entry - eyecatcherOffset))) + + offsetPpa1 := preamp[2] + if offsetPpa1 > 0x0ffff { + return "", fmt.Errorf("PPA1 offset seems too big 0x%x\n", offsetPpa1) + } + + ppa1 := uintptr(unsafe.Pointer(preamp)) + uintptr(offsetPpa1) + res := ptrtest(ppa1) + if res != 0 { + return "", fmt.Errorf("PPA1 address not valid") + } + + size := *(*uint16)(unsafe.Pointer(ppa1 + nameLenOffset)) + if size > 128 { + return "", fmt.Errorf("Function name seems too long, length=%d\n", size) + } + + var name [128]byte + funcname := (*[128]byte)(unsafe.Pointer(ppa1 + nameOffset)) + copy(name[0:size], funcname[0:size]) + + runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___E2A_L<<4, // __e2a_l + []uintptr{uintptr(unsafe.Pointer(&name[0])), uintptr(size)}) + + return string(name[:size]), nil +} + +// Check z/OS version +func zosLeVersion() (version, release uint32) { + p1 := (*(*uintptr)(unsafe.Pointer(uintptr(1208)))) >> 32 + p1 = *(*uintptr)(unsafe.Pointer(uintptr(p1 + 88))) + p1 = *(*uintptr)(unsafe.Pointer(uintptr(p1 + 8))) + p1 = *(*uintptr)(unsafe.Pointer(uintptr(p1 + 984))) + vrm := *(*uint32)(unsafe.Pointer(p1 + 80)) + version = (vrm & 0x00ff0000) >> 16 + release = (vrm & 0x0000ff00) >> 8 + return +} + +// returns a zos C FILE * for stdio fd 0, 1, 2 +func ZosStdioFilep(fd int32) uintptr { + return uintptr(*(*uint64)(unsafe.Pointer(uintptr(*(*uint64)(unsafe.Pointer(uintptr(*(*uint64)(unsafe.Pointer(uintptr(uint64(*(*uint32)(unsafe.Pointer(uintptr(1208)))) + 80))) + uint64((fd+2)<<3)))))))) +} func copyStat(stat *Stat_t, statLE *Stat_LE_t) { stat.Dev = uint64(statLE.Dev) @@ -66,6 +263,21 @@ func (d *Dirent) NameString() string { } } +func DecodeData(dest []byte, sz int, val uint64) { + for i := 0; i < sz; i++ { + dest[sz-1-i] = byte((val >> (uint64(i * 8))) & 0xff) + } +} + +func EncodeData(data []byte) uint64 { + var value uint64 + sz := len(data) + for i := 0; i < sz; i++ { + value |= uint64(data[i]) << uint64(((sz - i - 1) * 8)) + } + return value +} + func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) { if sa.Port < 0 || sa.Port > 0xFFFF { return nil, 0, EINVAL @@ -75,7 +287,9 @@ func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) { p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) p[0] = byte(sa.Port 
>> 8) p[1] = byte(sa.Port) - sa.raw.Addr = sa.Addr + for i := 0; i < len(sa.Addr); i++ { + sa.raw.Addr[i] = sa.Addr[i] + } return unsafe.Pointer(&sa.raw), _Socklen(sa.raw.Len), nil } @@ -89,7 +303,9 @@ func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, _Socklen, error) { p[0] = byte(sa.Port >> 8) p[1] = byte(sa.Port) sa.raw.Scope_id = sa.ZoneId - sa.raw.Addr = sa.Addr + for i := 0; i < len(sa.Addr); i++ { + sa.raw.Addr[i] = sa.Addr[i] + } return unsafe.Pointer(&sa.raw), _Socklen(sa.raw.Len), nil } @@ -147,7 +363,9 @@ func anyToSockaddr(_ int, rsa *RawSockaddrAny) (Sockaddr, error) { sa := new(SockaddrInet4) p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) - sa.Addr = pp.Addr + for i := 0; i < len(sa.Addr); i++ { + sa.Addr[i] = pp.Addr[i] + } return sa, nil case AF_INET6: @@ -156,7 +374,9 @@ func anyToSockaddr(_ int, rsa *RawSockaddrAny) (Sockaddr, error) { p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) sa.ZoneId = pp.Scope_id - sa.Addr = pp.Addr + for i := 0; i < len(sa.Addr); i++ { + sa.Addr[i] = pp.Addr[i] + } return sa, nil } return nil, EAFNOSUPPORT @@ -178,6 +398,43 @@ func Accept(fd int) (nfd int, sa Sockaddr, err error) { return } +func Accept4(fd int, flags int) (nfd int, sa Sockaddr, err error) { + var rsa RawSockaddrAny + var len _Socklen = SizeofSockaddrAny + nfd, err = accept4(fd, &rsa, &len, flags) + if err != nil { + return + } + if len > SizeofSockaddrAny { + panic("RawSockaddrAny too small") + } + // TODO(neeilan): Remove 0 in call + sa, err = anyToSockaddr(0, &rsa) + if err != nil { + Close(nfd) + nfd = 0 + } + return +} + +func Ctermid() (tty string, err error) { + var termdev [1025]byte + runtime.EnterSyscall() + r0, err2, err1 := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS___CTERMID_A<<4, uintptr(unsafe.Pointer(&termdev[0]))) + runtime.ExitSyscall() + if r0 == 0 { + return "", fmt.Errorf("%s (errno2=0x%x)\n", err1.Error(), err2) + } + s := string(termdev[:]) + idx := strings.Index(s, string(rune(0))) + if idx == -1 { + tty = s + } else { + tty = s[:idx] + } + return +} + func (iov *Iovec) SetLen(length int) { iov.Len = uint64(length) } @@ -191,10 +448,16 @@ func (cmsg *Cmsghdr) SetLen(length int) { } //sys fcntl(fd int, cmd int, arg int) (val int, err error) +//sys Flistxattr(fd int, dest []byte) (sz int, err error) = SYS___FLISTXATTR_A +//sys Fremovexattr(fd int, attr string) (err error) = SYS___FREMOVEXATTR_A //sys read(fd int, p []byte) (n int, err error) //sys write(fd int, p []byte) (n int, err error) +//sys Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) = SYS___FGETXATTR_A +//sys Fsetxattr(fd int, attr string, data []byte, flag int) (err error) = SYS___FSETXATTR_A + //sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) = SYS___ACCEPT_A +//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) = SYS___ACCEPT4_A //sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) = SYS___BIND_A //sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) = SYS___CONNECT_A //sysnb getgroups(n int, list *_Gid_t) (nn int, err error) @@ -205,6 +468,7 @@ func (cmsg *Cmsghdr) SetLen(length int) { //sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) //sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) = SYS___GETPEERNAME_A //sysnb getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) = SYS___GETSOCKNAME_A +//sys Removexattr(path string, attr string) (err 
error) = SYS___REMOVEXATTR_A //sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) = SYS___RECVFROM_A //sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) = SYS___SENDTO_A //sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) = SYS___RECVMSG_A @@ -213,6 +477,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { //sys munmap(addr uintptr, length uintptr) (err error) = SYS_MUNMAP //sys ioctl(fd int, req int, arg uintptr) (err error) = SYS_IOCTL //sys ioctlPtr(fd int, req int, arg unsafe.Pointer) (err error) = SYS_IOCTL +//sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error) = SYS_SHMAT +//sys shmctl(id int, cmd int, buf *SysvShmDesc) (result int, err error) = SYS_SHMCTL64 +//sys shmdt(addr uintptr) (err error) = SYS_SHMDT +//sys shmget(key int, size int, flag int) (id int, err error) = SYS_SHMGET //sys Access(path string, mode uint32) (err error) = SYS___ACCESS_A //sys Chdir(path string) (err error) = SYS___CHDIR_A @@ -221,14 +489,31 @@ func (cmsg *Cmsghdr) SetLen(length int) { //sys Creat(path string, mode uint32) (fd int, err error) = SYS___CREAT_A //sys Dup(oldfd int) (fd int, err error) //sys Dup2(oldfd int, newfd int) (err error) +//sys Dup3(oldfd int, newfd int, flags int) (err error) = SYS_DUP3 +//sys Dirfd(dirp uintptr) (fd int, err error) = SYS_DIRFD +//sys EpollCreate(size int) (fd int, err error) = SYS_EPOLL_CREATE +//sys EpollCreate1(flags int) (fd int, err error) = SYS_EPOLL_CREATE1 +//sys EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) = SYS_EPOLL_CTL +//sys EpollPwait(epfd int, events []EpollEvent, msec int, sigmask *int) (n int, err error) = SYS_EPOLL_PWAIT +//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) = SYS_EPOLL_WAIT //sys Errno2() (er2 int) = SYS___ERRNO2 -//sys Err2ad() (eadd *int) = SYS___ERR2AD +//sys Eventfd(initval uint, flags int) (fd int, err error) = SYS_EVENTFD //sys Exit(code int) +//sys Faccessat(dirfd int, path string, mode uint32, flags int) (err error) = SYS___FACCESSAT_A + +func Faccessat2(dirfd int, path string, mode uint32, flags int) (err error) { + return Faccessat(dirfd, path, mode, flags) +} + //sys Fchdir(fd int) (err error) //sys Fchmod(fd int, mode uint32) (err error) +//sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) = SYS___FCHMODAT_A //sys Fchown(fd int, uid int, gid int) (err error) +//sys Fchownat(fd int, path string, uid int, gid int, flags int) (err error) = SYS___FCHOWNAT_A //sys FcntlInt(fd uintptr, cmd int, arg int) (retval int, err error) = SYS_FCNTL +//sys Fdatasync(fd int) (err error) = SYS_FDATASYNC //sys fstat(fd int, stat *Stat_LE_t) (err error) +//sys fstatat(dirfd int, path string, stat *Stat_LE_t, flags int) (err error) = SYS___FSTATAT_A func Fstat(fd int, stat *Stat_t) (err error) { var statLE Stat_LE_t @@ -237,28 +522,208 @@ func Fstat(fd int, stat *Stat_t) (err error) { return } +func Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) { + var statLE Stat_LE_t + err = fstatat(dirfd, path, &statLE, flags) + copyStat(stat, &statLE) + return +} + +func impl_Getxattr(path string, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, e2, e1 := 
CallLeFuncWithErr(GetZosLibVec()+SYS___GETXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest))) + sz = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_GetxattrAddr() *(func(path string, attr string, dest []byte) (sz int, err error)) + +var Getxattr = enter_Getxattr + +func enter_Getxattr(path string, attr string, dest []byte) (sz int, err error) { + funcref := get_GetxattrAddr() + if validGetxattr() { + *funcref = impl_Getxattr + } else { + *funcref = error_Getxattr + } + return (*funcref)(path, attr, dest) +} + +func error_Getxattr(path string, attr string, dest []byte) (sz int, err error) { + return -1, ENOSYS +} + +func validGetxattr() bool { + if funcptrtest(GetZosLibVec()+SYS___GETXATTR_A<<4, "") == 0 { + if name, err := getLeFuncName(GetZosLibVec() + SYS___GETXATTR_A<<4); err == nil { + return name == "__getxattr_a" + } + } + return false +} + +//sys Lgetxattr(link string, attr string, dest []byte) (sz int, err error) = SYS___LGETXATTR_A +//sys Lsetxattr(path string, attr string, data []byte, flags int) (err error) = SYS___LSETXATTR_A + +func impl_Setxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___SETXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags)) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_SetxattrAddr() *(func(path string, attr string, data []byte, flags int) (err error)) + +var Setxattr = enter_Setxattr + +func enter_Setxattr(path string, attr string, data []byte, flags int) (err error) { + funcref := get_SetxattrAddr() + if validSetxattr() { + *funcref = impl_Setxattr + } else { + *funcref = error_Setxattr + } + return (*funcref)(path, attr, data, flags) +} + +func error_Setxattr(path string, attr string, data []byte, flags int) (err error) { + return ENOSYS +} + +func validSetxattr() bool { + if funcptrtest(GetZosLibVec()+SYS___SETXATTR_A<<4, "") == 0 { + if name, err := getLeFuncName(GetZosLibVec() + SYS___SETXATTR_A<<4); err == nil { + return name == "__setxattr_a" + } + } + return false +} + +//sys Fstatfs(fd int, buf *Statfs_t) (err error) = SYS_FSTATFS //sys Fstatvfs(fd int, stat *Statvfs_t) (err error) = SYS_FSTATVFS //sys Fsync(fd int) (err error) +//sys Futimes(fd int, tv []Timeval) (err error) = SYS_FUTIMES +//sys Futimesat(dirfd int, path string, tv []Timeval) (err error) = SYS___FUTIMESAT_A //sys Ftruncate(fd int, length int64) (err error) -//sys Getpagesize() (pgsize int) = SYS_GETPAGESIZE +//sys Getrandom(buf []byte, flags int) (n int, err error) = SYS_GETRANDOM +//sys InotifyInit() (fd int, err error) = SYS_INOTIFY_INIT +//sys InotifyInit1(flags int) (fd int, err error) = SYS_INOTIFY_INIT1 +//sys InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) = SYS___INOTIFY_ADD_WATCH_A +//sys InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) = SYS_INOTIFY_RM_WATCH +//sys Listxattr(path string, dest []byte) (sz int, err error) = SYS___LISTXATTR_A +//sys Llistxattr(path string, dest []byte) (sz int, err error) = SYS___LLISTXATTR_A +//sys 
Lremovexattr(path string, attr string) (err error) = SYS___LREMOVEXATTR_A +//sys Lutimes(path string, tv []Timeval) (err error) = SYS___LUTIMES_A //sys Mprotect(b []byte, prot int) (err error) = SYS_MPROTECT //sys Msync(b []byte, flags int) (err error) = SYS_MSYNC +//sys Console2(cmsg *ConsMsg2, modstr *byte, concmd *uint32) (err error) = SYS___CONSOLE2 + +// Pipe2 begin + +//go:nosplit +func getPipe2Addr() *(func([]int, int) error) + +var Pipe2 = pipe2Enter + +func pipe2Enter(p []int, flags int) (err error) { + if funcptrtest(GetZosLibVec()+SYS_PIPE2<<4, "") == 0 { + *getPipe2Addr() = pipe2Impl + } else { + *getPipe2Addr() = pipe2Error + } + return (*getPipe2Addr())(p, flags) +} + +func pipe2Impl(p []int, flags int) (err error) { + var pp [2]_C_int + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_PIPE2<<4, uintptr(unsafe.Pointer(&pp[0])), uintptr(flags)) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } else { + p[0] = int(pp[0]) + p[1] = int(pp[1]) + } + return +} +func pipe2Error(p []int, flags int) (err error) { + return fmt.Errorf("Pipe2 is not available on this system") +} + +// Pipe2 end + //sys Poll(fds []PollFd, timeout int) (n int, err error) = SYS_POLL + +func Readdir(dir uintptr) (dirent *Dirent, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___READDIR_A<<4, uintptr(dir)) + runtime.ExitSyscall() + dirent = (*Dirent)(unsafe.Pointer(r0)) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//sys Readdir_r(dirp uintptr, entry *direntLE, result **direntLE) (err error) = SYS___READDIR_R_A +//sys Statfs(path string, buf *Statfs_t) (err error) = SYS___STATFS_A +//sys Syncfs(fd int) (err error) = SYS_SYNCFS //sys Times(tms *Tms) (ticks uintptr, err error) = SYS_TIMES //sys W_Getmntent(buff *byte, size int) (lastsys int, err error) = SYS_W_GETMNTENT //sys W_Getmntent_A(buff *byte, size int) (lastsys int, err error) = SYS___W_GETMNTENT_A //sys mount_LE(path string, filesystem string, fstype string, mtm uint32, parmlen int32, parm string) (err error) = SYS___MOUNT_A -//sys unmount(filesystem string, mtm int) (err error) = SYS___UMOUNT_A +//sys unmount_LE(filesystem string, mtm int) (err error) = SYS___UMOUNT_A //sys Chroot(path string) (err error) = SYS___CHROOT_A //sys Select(nmsgsfds int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (ret int, err error) = SYS_SELECT -//sysnb Uname(buf *Utsname) (err error) = SYS___UNAME_A +//sysnb Uname(buf *Utsname) (err error) = SYS_____OSNAME_A +//sys Unshare(flags int) (err error) = SYS_UNSHARE func Ptsname(fd int) (name string, err error) { - r0, _, e1 := syscall_syscall(SYS___PTSNAME_A, uintptr(fd), 0, 0) - name = u2s(unsafe.Pointer(r0)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS___PTSNAME_A<<4, uintptr(fd)) + runtime.ExitSyscall() + if r0 == 0 { + err = errnoErr2(e1, e2) + } else { + name = u2s(unsafe.Pointer(r0)) } return } @@ -273,13 +738,19 @@ func u2s(cstr unsafe.Pointer) string { } func Close(fd int) (err error) { - _, _, e1 := syscall_syscall(SYS_CLOSE, uintptr(fd), 0, 0) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_CLOSE<<4, uintptr(fd)) + runtime.ExitSyscall() for i := 0; e1 == EAGAIN && i < 10; i++ { - _, _, _ = syscall_syscall(SYS_USLEEP, uintptr(10), 0, 0) - _, _, e1 = syscall_syscall(SYS_CLOSE, uintptr(fd), 0, 0) + runtime.EnterSyscall() + CallLeFuncWithErr(GetZosLibVec()+SYS_USLEEP<<4, uintptr(10)) + runtime.ExitSyscall() + runtime.EnterSyscall() + r0, e2, 
e1 = CallLeFuncWithErr(GetZosLibVec()+SYS_CLOSE<<4, uintptr(fd)) + runtime.ExitSyscall() } - if e1 != 0 { - err = errnoErr(e1) + if r0 != 0 { + err = errnoErr2(e1, e2) } return } @@ -289,9 +760,15 @@ func Madvise(b []byte, advice int) (err error) { return } +func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { + return mapper.Mmap(fd, offset, length, prot, flags) +} + +func Munmap(b []byte) (err error) { + return mapper.Munmap(b) +} + //sys Gethostname(buf []byte) (err error) = SYS___GETHOSTNAME_A -//sysnb Getegid() (egid int) -//sysnb Geteuid() (uid int) //sysnb Getgid() (gid int) //sysnb Getpid() (pid int) //sysnb Getpgid(pid int) (pgid int, err error) = SYS_GETPGID @@ -318,11 +795,14 @@ func Getrusage(who int, rusage *Rusage) (err error) { return } +//sys Getegid() (egid int) = SYS_GETEGID +//sys Geteuid() (euid int) = SYS_GETEUID //sysnb Getsid(pid int) (sid int, err error) = SYS_GETSID //sysnb Getuid() (uid int) //sysnb Kill(pid int, sig Signal) (err error) //sys Lchown(path string, uid int, gid int) (err error) = SYS___LCHOWN_A //sys Link(path string, link string) (err error) = SYS___LINK_A +//sys Linkat(oldDirFd int, oldPath string, newDirFd int, newPath string, flags int) (err error) = SYS___LINKAT_A //sys Listen(s int, n int) (err error) //sys lstat(path string, stat *Stat_LE_t) (err error) = SYS___LSTAT_A @@ -333,15 +813,150 @@ func Lstat(path string, stat *Stat_t) (err error) { return } +// for checking symlinks begins with $VERSION/ $SYSNAME/ $SYSSYMR/ $SYSSYMA/ +func isSpecialPath(path []byte) (v bool) { + var special = [4][8]byte{ + [8]byte{'V', 'E', 'R', 'S', 'I', 'O', 'N', '/'}, + [8]byte{'S', 'Y', 'S', 'N', 'A', 'M', 'E', '/'}, + [8]byte{'S', 'Y', 'S', 'S', 'Y', 'M', 'R', '/'}, + [8]byte{'S', 'Y', 'S', 'S', 'Y', 'M', 'A', '/'}} + + var i, j int + for i = 0; i < len(special); i++ { + for j = 0; j < len(special[i]); j++ { + if path[j] != special[i][j] { + break + } + } + if j == len(special[i]) { + return true + } + } + return false +} + +func realpath(srcpath string, abspath []byte) (pathlen int, errno int) { + var source [1024]byte + copy(source[:], srcpath) + source[len(srcpath)] = 0 + ret := runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___REALPATH_A<<4, //__realpath_a() + []uintptr{uintptr(unsafe.Pointer(&source[0])), + uintptr(unsafe.Pointer(&abspath[0]))}) + if ret != 0 { + index := bytes.IndexByte(abspath[:], byte(0)) + if index != -1 { + return index, 0 + } + } else { + errptr := (*int)(unsafe.Pointer(runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___ERRNO<<4, []uintptr{}))) //__errno() + return 0, *errptr + } + return 0, 245 // EBADDATA 245 +} + +func Readlink(path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + n = int(runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___READLINK_A<<4, + []uintptr{uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))})) + runtime.KeepAlive(unsafe.Pointer(_p0)) + if n == -1 { + value := *(*int32)(unsafe.Pointer(runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___ERRNO<<4, []uintptr{}))) + err = errnoErr(Errno(value)) + } else { + if buf[0] == '$' { + if isSpecialPath(buf[1:9]) { + cnt, err1 := realpath(path, buf) + if err1 == 0 { + n = cnt + } + } + } + } + return +} + +func impl_Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = 
BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___READLINKAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + runtime.ExitSyscall() + n = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + return n, err + } else { + if buf[0] == '$' { + if isSpecialPath(buf[1:9]) { + cnt, err1 := realpath(path, buf) + if err1 == 0 { + n = cnt + } + } + } + } + return +} + +//go:nosplit +func get_ReadlinkatAddr() *(func(dirfd int, path string, buf []byte) (n int, err error)) + +var Readlinkat = enter_Readlinkat + +func enter_Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + funcref := get_ReadlinkatAddr() + if funcptrtest(GetZosLibVec()+SYS___READLINKAT_A<<4, "") == 0 { + *funcref = impl_Readlinkat + } else { + *funcref = error_Readlinkat + } + return (*funcref)(dirfd, path, buf) +} + +func error_Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + n = -1 + err = ENOSYS + return +} + //sys Mkdir(path string, mode uint32) (err error) = SYS___MKDIR_A +//sys Mkdirat(dirfd int, path string, mode uint32) (err error) = SYS___MKDIRAT_A //sys Mkfifo(path string, mode uint32) (err error) = SYS___MKFIFO_A //sys Mknod(path string, mode uint32, dev int) (err error) = SYS___MKNOD_A +//sys Mknodat(dirfd int, path string, mode uint32, dev int) (err error) = SYS___MKNODAT_A +//sys PivotRoot(newroot string, oldroot string) (err error) = SYS___PIVOT_ROOT_A //sys Pread(fd int, p []byte, offset int64) (n int, err error) //sys Pwrite(fd int, p []byte, offset int64) (n int, err error) -//sys Readlink(path string, buf []byte) (n int, err error) = SYS___READLINK_A +//sys Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) = SYS___PRCTL_A +//sysnb Prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) = SYS_PRLIMIT //sys Rename(from string, to string) (err error) = SYS___RENAME_A +//sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) = SYS___RENAMEAT_A +//sys Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) = SYS___RENAMEAT2_A //sys Rmdir(path string) (err error) = SYS___RMDIR_A //sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK +//sys Setegid(egid int) (err error) = SYS_SETEGID +//sys Seteuid(euid int) (err error) = SYS_SETEUID +//sys Sethostname(p []byte) (err error) = SYS___SETHOSTNAME_A +//sys Setns(fd int, nstype int) (err error) = SYS_SETNS //sys Setpriority(which int, who int, prio int) (err error) //sysnb Setpgid(pid int, pgid int) (err error) = SYS_SETPGID //sysnb Setrlimit(resource int, lim *Rlimit) (err error) @@ -361,32 +976,57 @@ func Stat(path string, sta *Stat_t) (err error) { } //sys Symlink(path string, link string) (err error) = SYS___SYMLINK_A +//sys Symlinkat(oldPath string, dirfd int, newPath string) (err error) = SYS___SYMLINKAT_A //sys Sync() = SYS_SYNC //sys Truncate(path string, length int64) (err error) = SYS___TRUNCATE_A //sys Tcgetattr(fildes int, termptr *Termios) (err error) = SYS_TCGETATTR //sys Tcsetattr(fildes int, when int, termptr *Termios) (err error) = SYS_TCSETATTR //sys Umask(mask int) (oldmask int) //sys Unlink(path string) (err error) = SYS___UNLINK_A +//sys Unlinkat(dirfd int, path string, flags int) (err error) = SYS___UNLINKAT_A //sys Utime(path 
string, utim *Utimbuf) (err error) = SYS___UTIME_A //sys open(path string, mode int, perm uint32) (fd int, err error) = SYS___OPEN_A func Open(path string, mode int, perm uint32) (fd int, err error) { + if mode&O_ACCMODE == 0 { + mode |= O_RDONLY + } return open(path, mode, perm) } -func Mkfifoat(dirfd int, path string, mode uint32) (err error) { - wd, err := Getwd() - if err != nil { - return err +//sys openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) = SYS___OPENAT_A + +func Openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { + if flags&O_ACCMODE == 0 { + flags |= O_RDONLY } + return openat(dirfd, path, flags, mode) +} - if err := Fchdir(dirfd); err != nil { - return err +//sys openat2(dirfd int, path string, open_how *OpenHow, size int) (fd int, err error) = SYS___OPENAT2_A + +func Openat2(dirfd int, path string, how *OpenHow) (fd int, err error) { + if how.Flags&O_ACCMODE == 0 { + how.Flags |= O_RDONLY } - defer Chdir(wd) + return openat2(dirfd, path, how, SizeofOpenHow) +} - return Mkfifo(path, mode) +func ZosFdToPath(dirfd int) (path string, err error) { + var buffer [1024]byte + runtime.EnterSyscall() + ret, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_W_IOCTL<<4, uintptr(dirfd), 17, 1024, uintptr(unsafe.Pointer(&buffer[0]))) + runtime.ExitSyscall() + if ret == 0 { + zb := bytes.IndexByte(buffer[:], 0) + if zb == -1 { + zb = len(buffer) + } + CallLeFuncWithErr(GetZosLibVec()+SYS___E2A_L<<4, uintptr(unsafe.Pointer(&buffer[0])), uintptr(zb)) + return string(buffer[:zb]), nil + } + return "", errnoErr2(e1, e2) } //sys remove(path string) (err error) @@ -404,10 +1044,12 @@ func Getcwd(buf []byte) (n int, err error) { } else { p = unsafe.Pointer(&_zero) } - _, _, e := syscall_syscall(SYS___GETCWD_A, uintptr(p), uintptr(len(buf)), 0) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS___GETCWD_A<<4, uintptr(p), uintptr(len(buf))) + runtime.ExitSyscall() n = clen(buf) + 1 - if e != 0 { - err = errnoErr(e) + if r0 == 0 { + err = errnoErr2(e1, e2) } return } @@ -521,9 +1163,41 @@ func (w WaitStatus) StopSignal() Signal { func (w WaitStatus) TrapCause() int { return -1 } +//sys waitid(idType int, id int, info *Siginfo, options int) (err error) + +func Waitid(idType int, id int, info *Siginfo, options int, rusage *Rusage) (err error) { + return waitid(idType, id, info, options) +} + //sys waitpid(pid int, wstatus *_C_int, options int) (wpid int, err error) -func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) { +func impl_Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_WAIT4<<4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage))) + runtime.ExitSyscall() + wpid = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_Wait4Addr() *(func(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error)) + +var Wait4 = enter_Wait4 + +func enter_Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) { + funcref := get_Wait4Addr() + if funcptrtest(GetZosLibVec()+SYS_WAIT4<<4, "") == 0 { + *funcref = impl_Wait4 + } else { + *funcref = legacyWait4 + } + return (*funcref)(pid, wstatus, options, rusage) +} + +func legacyWait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) { // TODO(mundaym): z/OS 
doesn't have wait4. I don't think getrusage does what we want. // At the moment rusage will not be touched. var status _C_int @@ -572,23 +1246,62 @@ func Pipe(p []int) (err error) { } var pp [2]_C_int err = pipe(&pp) - if err == nil { - p[0] = int(pp[0]) - p[1] = int(pp[1]) - } + p[0] = int(pp[0]) + p[1] = int(pp[1]) return } //sys utimes(path string, timeval *[2]Timeval) (err error) = SYS___UTIMES_A func Utimes(path string, tv []Timeval) (err error) { + if tv == nil { + return utimes(path, nil) + } if len(tv) != 2 { return EINVAL } return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) } -func UtimesNano(path string, ts []Timespec) error { +//sys utimensat(dirfd int, path string, ts *[2]Timespec, flags int) (err error) = SYS___UTIMENSAT_A + +func validUtimensat() bool { + if funcptrtest(GetZosLibVec()+SYS___UTIMENSAT_A<<4, "") == 0 { + if name, err := getLeFuncName(GetZosLibVec() + SYS___UTIMENSAT_A<<4); err == nil { + return name == "__utimensat_a" + } + } + return false +} + +// Begin UtimesNano + +//go:nosplit +func get_UtimesNanoAddr() *(func(path string, ts []Timespec) (err error)) + +var UtimesNano = enter_UtimesNano + +func enter_UtimesNano(path string, ts []Timespec) (err error) { + funcref := get_UtimesNanoAddr() + if validUtimensat() { + *funcref = utimesNanoImpl + } else { + *funcref = legacyUtimesNano + } + return (*funcref)(path, ts) +} + +func utimesNanoImpl(path string, ts []Timespec) (err error) { + if ts == nil { + return utimensat(AT_FDCWD, path, nil, 0) + } + if len(ts) != 2 { + return EINVAL + } + return utimensat(AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0) +} + +func legacyUtimesNano(path string, ts []Timespec) (err error) { if len(ts) != 2 { return EINVAL } @@ -601,6 +1314,70 @@ func UtimesNano(path string, ts []Timespec) error { return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) } +// End UtimesNano + +// Begin UtimesNanoAt + +//go:nosplit +func get_UtimesNanoAtAddr() *(func(dirfd int, path string, ts []Timespec, flags int) (err error)) + +var UtimesNanoAt = enter_UtimesNanoAt + +func enter_UtimesNanoAt(dirfd int, path string, ts []Timespec, flags int) (err error) { + funcref := get_UtimesNanoAtAddr() + if validUtimensat() { + *funcref = utimesNanoAtImpl + } else { + *funcref = legacyUtimesNanoAt + } + return (*funcref)(dirfd, path, ts, flags) +} + +func utimesNanoAtImpl(dirfd int, path string, ts []Timespec, flags int) (err error) { + if ts == nil { + return utimensat(dirfd, path, nil, flags) + } + if len(ts) != 2 { + return EINVAL + } + return utimensat(dirfd, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), flags) +} + +func legacyUtimesNanoAt(dirfd int, path string, ts []Timespec, flags int) (err error) { + if path[0] != '/' { + dirPath, err := ZosFdToPath(dirfd) + if err != nil { + return err + } + path = dirPath + "/" + path + } + if flags == AT_SYMLINK_NOFOLLOW { + if len(ts) != 2 { + return EINVAL + } + + if ts[0].Nsec >= 5e8 { + ts[0].Sec++ + } + ts[0].Nsec = 0 + if ts[1].Nsec >= 5e8 { + ts[1].Sec++ + } + ts[1].Nsec = 0 + + // Not as efficient as it could be because Timespec and + // Timeval have different types in the different OSes + tv := []Timeval{ + NsecToTimeval(TimespecToNsec(ts[0])), + NsecToTimeval(TimespecToNsec(ts[1])), + } + return Lutimes(path, tv) + } + return UtimesNano(path, ts) +} + +// End UtimesNanoAt + func Getsockname(fd int) (sa Sockaddr, err error) { var rsa RawSockaddrAny var len _Socklen = SizeofSockaddrAny @@ -1105,7 +1882,7 @@ func GetsockoptString(fd, level, opt int) (string, error) { return "", err } - 
return string(buf[:vallen-1]), nil + return ByteSliceToString(buf[:vallen]), nil } func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) { @@ -1187,67 +1964,46 @@ func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) return n, nil } -func Opendir(name string) (uintptr, error) { - p, err := BytePtrFromString(name) - if err != nil { - return 0, err - } - dir, _, e := syscall_syscall(SYS___OPENDIR_A, uintptr(unsafe.Pointer(p)), 0, 0) - runtime.KeepAlive(unsafe.Pointer(p)) - if e != 0 { - err = errnoErr(e) - } - return dir, err -} - -// clearsyscall.Errno resets the errno value to 0. -func clearErrno() - -func Readdir(dir uintptr) (*Dirent, error) { - var ent Dirent - var res uintptr - // __readdir_r_a returns errno at the end of the directory stream, rather than 0. - // Therefore to avoid false positives we clear errno before calling it. - - // TODO(neeilan): Commented this out to get sys/unix compiling on z/OS. Uncomment and fix. Error: "undefined: clearsyscall" - //clearsyscall.Errno() // TODO(mundaym): check pre-emption rules. - - e, _, _ := syscall_syscall(SYS___READDIR_R_A, dir, uintptr(unsafe.Pointer(&ent)), uintptr(unsafe.Pointer(&res))) - var err error - if e != 0 { - err = errnoErr(Errno(e)) - } - if res == 0 { - return nil, err - } - return &ent, err -} - -func readdir_r(dirp uintptr, entry *direntLE, result **direntLE) (err error) { - r0, _, e1 := syscall_syscall(SYS___READDIR_R_A, dirp, uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result))) - if int64(r0) == -1 { - err = errnoErr(Errno(e1)) +func Opendir(name string) (uintptr, error) { + p, err := BytePtrFromString(name) + if err != nil { + return 0, err } - return + err = nil + runtime.EnterSyscall() + dir, e2, e1 := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS___OPENDIR_A<<4, uintptr(unsafe.Pointer(p))) + runtime.ExitSyscall() + runtime.KeepAlive(unsafe.Pointer(p)) + if dir == 0 { + err = errnoErr2(e1, e2) + } + return dir, err } +// clearsyscall.Errno resets the errno value to 0. 
+func clearErrno() + func Closedir(dir uintptr) error { - _, _, e := syscall_syscall(SYS_CLOSEDIR, dir, 0, 0) - if e != 0 { - return errnoErr(e) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_CLOSEDIR<<4, dir) + runtime.ExitSyscall() + if r0 != 0 { + return errnoErr2(e1, e2) } return nil } func Seekdir(dir uintptr, pos int) { - _, _, _ = syscall_syscall(SYS_SEEKDIR, dir, uintptr(pos), 0) + runtime.EnterSyscall() + CallLeFuncWithErr(GetZosLibVec()+SYS_SEEKDIR<<4, dir, uintptr(pos)) + runtime.ExitSyscall() } func Telldir(dir uintptr) (int, error) { - p, _, e := syscall_syscall(SYS_TELLDIR, dir, 0, 0) + p, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_TELLDIR<<4, dir) pos := int(p) - if pos == -1 { - return pos, errnoErr(e) + if int64(p) == -1 { + return pos, errnoErr2(e1, e2) } return pos, nil } @@ -1262,19 +2018,55 @@ func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error { *(*int64)(unsafe.Pointer(&flock[4])) = lk.Start *(*int64)(unsafe.Pointer(&flock[12])) = lk.Len *(*int32)(unsafe.Pointer(&flock[20])) = lk.Pid - _, _, errno := syscall_syscall(SYS_FCNTL, fd, uintptr(cmd), uintptr(unsafe.Pointer(&flock))) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCNTL<<4, fd, uintptr(cmd), uintptr(unsafe.Pointer(&flock))) + runtime.ExitSyscall() lk.Type = *(*int16)(unsafe.Pointer(&flock[0])) lk.Whence = *(*int16)(unsafe.Pointer(&flock[2])) lk.Start = *(*int64)(unsafe.Pointer(&flock[4])) lk.Len = *(*int64)(unsafe.Pointer(&flock[12])) lk.Pid = *(*int32)(unsafe.Pointer(&flock[20])) - if errno == 0 { + if r0 == 0 { return nil } - return errno + return errnoErr2(e1, e2) +} + +func impl_Flock(fd int, how int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FLOCK<<4, uintptr(fd), uintptr(how)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FlockAddr() *(func(fd int, how int) (err error)) + +var Flock = enter_Flock + +func validFlock(fp uintptr) bool { + if funcptrtest(GetZosLibVec()+SYS_FLOCK<<4, "") == 0 { + if name, err := getLeFuncName(GetZosLibVec() + SYS_FLOCK<<4); err == nil { + return name == "flock" + } + } + return false +} + +func enter_Flock(fd int, how int) (err error) { + funcref := get_FlockAddr() + if validFlock(GetZosLibVec() + SYS_FLOCK<<4) { + *funcref = impl_Flock + } else { + *funcref = legacyFlock + } + return (*funcref)(fd, how) } -func Flock(fd int, how int) error { +func legacyFlock(fd int, how int) error { var flock_type int16 var fcntl_cmd int @@ -1308,41 +2100,51 @@ func Flock(fd int, how int) error { } func Mlock(b []byte) (err error) { - _, _, e1 := syscall_syscall(SYS___MLOCKALL, _BPX_NONSWAP, 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MLOCKALL<<4, _BPX_NONSWAP) + runtime.ExitSyscall() + if r0 != 0 { + err = errnoErr2(e1, e2) } return } func Mlock2(b []byte, flags int) (err error) { - _, _, e1 := syscall_syscall(SYS___MLOCKALL, _BPX_NONSWAP, 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MLOCKALL<<4, _BPX_NONSWAP) + runtime.ExitSyscall() + if r0 != 0 { + err = errnoErr2(e1, e2) } return } func Mlockall(flags int) (err error) { - _, _, e1 := syscall_syscall(SYS___MLOCKALL, _BPX_NONSWAP, 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MLOCKALL<<4, _BPX_NONSWAP) + 
runtime.ExitSyscall() + if r0 != 0 { + err = errnoErr2(e1, e2) } return } func Munlock(b []byte) (err error) { - _, _, e1 := syscall_syscall(SYS___MLOCKALL, _BPX_SWAP, 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MLOCKALL<<4, _BPX_SWAP) + runtime.ExitSyscall() + if r0 != 0 { + err = errnoErr2(e1, e2) } return } func Munlockall() (err error) { - _, _, e1 := syscall_syscall(SYS___MLOCKALL, _BPX_SWAP, 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MLOCKALL<<4, _BPX_SWAP) + runtime.ExitSyscall() + if r0 != 0 { + err = errnoErr2(e1, e2) } return } @@ -1373,15 +2175,104 @@ func ClockGettime(clockid int32, ts *Timespec) error { return nil } -func Statfs(path string, stat *Statfs_t) (err error) { - fd, err := open(path, O_RDONLY, 0) - defer Close(fd) - if err != nil { - return err +// Chtag + +//go:nosplit +func get_ChtagAddr() *(func(path string, ccsid uint64, textbit uint64) error) + +var Chtag = enter_Chtag + +func enter_Chtag(path string, ccsid uint64, textbit uint64) error { + funcref := get_ChtagAddr() + if validSetxattr() { + *funcref = impl_Chtag + } else { + *funcref = legacy_Chtag + } + return (*funcref)(path, ccsid, textbit) +} + +func legacy_Chtag(path string, ccsid uint64, textbit uint64) error { + tag := ccsid<<16 | textbit<<15 + var tag_buff [8]byte + DecodeData(tag_buff[:], 8, tag) + return Setxattr(path, "filetag", tag_buff[:], XATTR_REPLACE) +} + +func impl_Chtag(path string, ccsid uint64, textbit uint64) error { + tag := ccsid<<16 | textbit<<15 + var tag_buff [4]byte + DecodeData(tag_buff[:], 4, tag) + return Setxattr(path, "system.filetag", tag_buff[:], XATTR_REPLACE) +} + +// End of Chtag + +// Nanosleep + +//go:nosplit +func get_NanosleepAddr() *(func(time *Timespec, leftover *Timespec) error) + +var Nanosleep = enter_Nanosleep + +func enter_Nanosleep(time *Timespec, leftover *Timespec) error { + funcref := get_NanosleepAddr() + if funcptrtest(GetZosLibVec()+SYS_NANOSLEEP<<4, "") == 0 { + *funcref = impl_Nanosleep + } else { + *funcref = legacyNanosleep + } + return (*funcref)(time, leftover) +} + +func impl_Nanosleep(time *Timespec, leftover *Timespec) error { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_NANOSLEEP<<4, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover))) + runtime.ExitSyscall() + if int64(r0) == -1 { + return errnoErr2(e1, e2) + } + return nil +} + +func legacyNanosleep(time *Timespec, leftover *Timespec) error { + t0 := runtime.Nanotime1() + var secrem uint32 + var nsecrem uint32 + total := time.Sec*1000000000 + time.Nsec + elapsed := runtime.Nanotime1() - t0 + var rv int32 + var rc int32 + var err error + // repeatedly sleep for 1 second until less than 1 second left + for total-elapsed > 1000000000 { + rv, rc, _ = BpxCondTimedWait(uint32(1), uint32(0), uint32(CW_CONDVAR), &secrem, &nsecrem) + if rv != 0 && rc != 112 { // 112 is EAGAIN + if leftover != nil && rc == 120 { // 120 is EINTR + leftover.Sec = int64(secrem) + leftover.Nsec = int64(nsecrem) + } + err = Errno(rc) + return err + } + elapsed = runtime.Nanotime1() - t0 } - return Fstatfs(fd, stat) + // sleep the remainder + if total > elapsed { + rv, rc, _ = BpxCondTimedWait(uint32(0), uint32(total-elapsed), uint32(CW_CONDVAR), &secrem, &nsecrem) + } + if leftover != nil && rc == 120 { + leftover.Sec = int64(secrem) + leftover.Nsec = int64(nsecrem) + } + if rv != 0 && rc != 112 { + err = Errno(rc) + } + 
return err } +// End of Nanosleep + var ( Stdin = 0 Stdout = 1 @@ -1396,6 +2287,9 @@ var ( errENOENT error = syscall.ENOENT ) +var ZosTraceLevel int +var ZosTracefile *os.File + var ( signalNameMapOnce sync.Once signalNameMap map[string]syscall.Signal @@ -1417,6 +2311,56 @@ func errnoErr(e Errno) error { return e } +var reg *regexp.Regexp + +// enhanced with zos specific errno2 +func errnoErr2(e Errno, e2 uintptr) error { + switch e { + case 0: + return nil + case EAGAIN: + return errEAGAIN + /* + Allow the retrieval of errno2 for EINVAL and ENOENT on zos + case EINVAL: + return errEINVAL + case ENOENT: + return errENOENT + */ + } + if ZosTraceLevel > 0 { + var name string + if reg == nil { + reg = regexp.MustCompile("(^unix\\.[^/]+$|.*\\/unix\\.[^/]+$)") + } + i := 1 + pc, file, line, ok := runtime.Caller(i) + if ok { + name = runtime.FuncForPC(pc).Name() + } + for ok && reg.MatchString(runtime.FuncForPC(pc).Name()) { + i += 1 + pc, file, line, ok = runtime.Caller(i) + } + if ok { + if ZosTracefile == nil { + ZosConsolePrintf("From %s:%d\n", file, line) + ZosConsolePrintf("%s: %s (errno2=0x%x)\n", name, e.Error(), e2) + } else { + fmt.Fprintf(ZosTracefile, "From %s:%d\n", file, line) + fmt.Fprintf(ZosTracefile, "%s: %s (errno2=0x%x)\n", name, e.Error(), e2) + } + } else { + if ZosTracefile == nil { + ZosConsolePrintf("%s (errno2=0x%x)\n", e.Error(), e2) + } else { + fmt.Fprintf(ZosTracefile, "%s (errno2=0x%x)\n", e.Error(), e2) + } + } + } + return e +} + // ErrnoName returns the error name for error number e. func ErrnoName(e Errno) string { i := sort.Search(len(errorList), func(i int) bool { @@ -1475,6 +2419,9 @@ func (m *mmapper) Mmap(fd int, offset int64, length int, prot int, flags int) (d return nil, EINVAL } + // Set __MAP_64 by default + flags |= __MAP_64 + // Map the requested memory. 
addr, errno := m.mmap(0, uintptr(length), prot, flags, fd, offset) if errno != nil { @@ -1779,83 +2726,170 @@ func Exec(argv0 string, argv []string, envv []string) error { return syscall.Exec(argv0, argv, envv) } -func Mount(source string, target string, fstype string, flags uintptr, data string) (err error) { +func Getag(path string) (ccsid uint16, flag uint16, err error) { + var val [8]byte + sz, err := Getxattr(path, "ccsid", val[:]) + if err != nil { + return + } + ccsid = uint16(EncodeData(val[0:sz])) + sz, err = Getxattr(path, "flags", val[:]) + if err != nil { + return + } + flag = uint16(EncodeData(val[0:sz]) >> 15) + return +} + +// Mount begin +func impl_Mount(source string, target string, fstype string, flags uintptr, data string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(source) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(target) + if err != nil { + return + } + var _p2 *byte + _p2, err = BytePtrFromString(fstype) + if err != nil { + return + } + var _p3 *byte + _p3, err = BytePtrFromString(data) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MOUNT1_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(_p3))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_MountAddr() *(func(source string, target string, fstype string, flags uintptr, data string) (err error)) + +var Mount = enter_Mount + +func enter_Mount(source string, target string, fstype string, flags uintptr, data string) (err error) { + funcref := get_MountAddr() + if validMount() { + *funcref = impl_Mount + } else { + *funcref = legacyMount + } + return (*funcref)(source, target, fstype, flags, data) +} + +func legacyMount(source string, target string, fstype string, flags uintptr, data string) (err error) { if needspace := 8 - len(fstype); needspace <= 0 { - fstype = fstype[:8] + fstype = fstype[0:8] } else { - fstype += " "[:needspace] + fstype += " "[0:needspace] } return mount_LE(target, source, fstype, uint32(flags), int32(len(data)), data) } -func Unmount(name string, mtm int) (err error) { +func validMount() bool { + if funcptrtest(GetZosLibVec()+SYS___MOUNT1_A<<4, "") == 0 { + if name, err := getLeFuncName(GetZosLibVec() + SYS___MOUNT1_A<<4); err == nil { + return name == "__mount1_a" + } + } + return false +} + +// Mount end + +// Unmount begin +func impl_Unmount(target string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(target) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___UMOUNT2_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_UnmountAddr() *(func(target string, flags int) (err error)) + +var Unmount = enter_Unmount + +func enter_Unmount(target string, flags int) (err error) { + funcref := get_UnmountAddr() + if funcptrtest(GetZosLibVec()+SYS___UMOUNT2_A<<4, "") == 0 { + *funcref = impl_Unmount + } else { + *funcref = legacyUnmount + } + return (*funcref)(target, flags) +} + +func legacyUnmount(name string, mtm int) (err error) { // mountpoint is always a full path and starts with a '/' // check if input string is not a mountpoint but a filesystem name if name[0] != '/' { - return unmount(name, mtm) + return unmount_LE(name, mtm) } // treat 
name as mountpoint b2s := func(arr []byte) string { - nulli := bytes.IndexByte(arr, 0) - if nulli == -1 { - return string(arr) - } else { - return string(arr[:nulli]) + var str string + for i := 0; i < len(arr); i++ { + if arr[i] == 0 { + str = string(arr[:i]) + break + } } + return str } var buffer struct { header W_Mnth fsinfo [64]W_Mntent } - fsCount, err := W_Getmntent_A((*byte)(unsafe.Pointer(&buffer)), int(unsafe.Sizeof(buffer))) - if err != nil { - return err - } - if fsCount == 0 { - return EINVAL - } - for i := 0; i < fsCount; i++ { - if b2s(buffer.fsinfo[i].Mountpoint[:]) == name { - err = unmount(b2s(buffer.fsinfo[i].Fsname[:]), mtm) - break + fs_count, err := W_Getmntent_A((*byte)(unsafe.Pointer(&buffer)), int(unsafe.Sizeof(buffer))) + if err == nil { + err = EINVAL + for i := 0; i < fs_count; i++ { + if b2s(buffer.fsinfo[i].Mountpoint[:]) == name { + err = unmount_LE(b2s(buffer.fsinfo[i].Fsname[:]), mtm) + break + } } + } else if fs_count == 0 { + err = EINVAL } return err } -func fdToPath(dirfd int) (path string, err error) { - var buffer [1024]byte - // w_ctrl() - ret := runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS_W_IOCTL<<4, - []uintptr{uintptr(dirfd), 17, 1024, uintptr(unsafe.Pointer(&buffer[0]))}) - if ret == 0 { - zb := bytes.IndexByte(buffer[:], 0) - if zb == -1 { - zb = len(buffer) - } - // __e2a_l() - runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___E2A_L<<4, - []uintptr{uintptr(unsafe.Pointer(&buffer[0])), uintptr(zb)}) - return string(buffer[:zb]), nil - } - // __errno() - errno := int(*(*int32)(unsafe.Pointer(runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___ERRNO<<4, - []uintptr{})))) - // __errno2() - errno2 := int(runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___ERRNO2<<4, - []uintptr{})) - // strerror_r() - ret = runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS_STRERROR_R<<4, - []uintptr{uintptr(errno), uintptr(unsafe.Pointer(&buffer[0])), 1024}) - if ret == 0 { - zb := bytes.IndexByte(buffer[:], 0) - if zb == -1 { - zb = len(buffer) - } - return "", fmt.Errorf("%s (errno2=0x%x)", buffer[:zb], errno2) - } else { - return "", fmt.Errorf("fdToPath errno %d (errno2=0x%x)", errno, errno2) +// Unmount end + +func direntIno(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(Dirent{}.Ino), unsafe.Sizeof(Dirent{}.Ino)) +} + +func direntReclen(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen)) +} + +func direntNamlen(buf []byte) (uint64, bool) { + reclen, ok := direntReclen(buf) + if !ok { + return 0, false } + return reclen - uint64(unsafe.Offsetof(Dirent{}.Name)), true } func direntLeToDirentUnix(dirent *direntLE, dir uintptr, path string) (Dirent, error) { @@ -1897,7 +2931,7 @@ func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { } // Get path from fd to avoid unavailable call (fdopendir) - path, err := fdToPath(fd) + path, err := ZosFdToPath(fd) if err != nil { return 0, err } @@ -1911,7 +2945,7 @@ func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { for { var entryLE direntLE var entrypLE *direntLE - e := readdir_r(d, &entryLE, &entrypLE) + e := Readdir_r(d, &entryLE, &entrypLE) if e != nil { return n, e } @@ -1957,23 +2991,127 @@ func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { return n, nil } -func ReadDirent(fd int, buf []byte) (n int, err error) { - var base = (*uintptr)(unsafe.Pointer(new(uint64))) - return Getdirentries(fd, buf, base) +func Err2ad() (eadd *int) { + r0, _, _ := 
CallLeFuncWithErr(GetZosLibVec() + SYS___ERR2AD<<4) + eadd = (*int)(unsafe.Pointer(r0)) + return } -func direntIno(buf []byte) (uint64, bool) { - return readInt(buf, unsafe.Offsetof(Dirent{}.Ino), unsafe.Sizeof(Dirent{}.Ino)) +func ZosConsolePrintf(format string, v ...interface{}) (int, error) { + type __cmsg struct { + _ uint16 + _ [2]uint8 + __msg_length uint32 + __msg uintptr + _ [4]uint8 + } + msg := fmt.Sprintf(format, v...) + strptr := unsafe.Pointer((*reflect.StringHeader)(unsafe.Pointer(&msg)).Data) + len := (*reflect.StringHeader)(unsafe.Pointer(&msg)).Len + cmsg := __cmsg{__msg_length: uint32(len), __msg: uintptr(strptr)} + cmd := uint32(0) + runtime.EnterSyscall() + rc, err2, err1 := CallLeFuncWithErr(GetZosLibVec()+SYS_____CONSOLE_A<<4, uintptr(unsafe.Pointer(&cmsg)), 0, uintptr(unsafe.Pointer(&cmd))) + runtime.ExitSyscall() + if rc != 0 { + return 0, fmt.Errorf("%s (errno2=0x%x)\n", err1.Error(), err2) + } + return 0, nil +} +func ZosStringToEbcdicBytes(str string, nullterm bool) (ebcdicBytes []byte) { + if nullterm { + ebcdicBytes = []byte(str + "\x00") + } else { + ebcdicBytes = []byte(str) + } + A2e(ebcdicBytes) + return +} +func ZosEbcdicBytesToString(b []byte, trimRight bool) (str string) { + res := make([]byte, len(b)) + copy(res, b) + E2a(res) + if trimRight { + str = string(bytes.TrimRight(res, " \x00")) + } else { + str = string(res) + } + return } -func direntReclen(buf []byte) (uint64, bool) { - return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen)) +func fdToPath(dirfd int) (path string, err error) { + var buffer [1024]byte + // w_ctrl() + ret := runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS_W_IOCTL<<4, + []uintptr{uintptr(dirfd), 17, 1024, uintptr(unsafe.Pointer(&buffer[0]))}) + if ret == 0 { + zb := bytes.IndexByte(buffer[:], 0) + if zb == -1 { + zb = len(buffer) + } + // __e2a_l() + runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___E2A_L<<4, + []uintptr{uintptr(unsafe.Pointer(&buffer[0])), uintptr(zb)}) + return string(buffer[:zb]), nil + } + // __errno() + errno := int(*(*int32)(unsafe.Pointer(runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___ERRNO<<4, + []uintptr{})))) + // __errno2() + errno2 := int(runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___ERRNO2<<4, + []uintptr{})) + // strerror_r() + ret = runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS_STRERROR_R<<4, + []uintptr{uintptr(errno), uintptr(unsafe.Pointer(&buffer[0])), 1024}) + if ret == 0 { + zb := bytes.IndexByte(buffer[:], 0) + if zb == -1 { + zb = len(buffer) + } + return "", fmt.Errorf("%s (errno2=0x%x)", buffer[:zb], errno2) + } else { + return "", fmt.Errorf("fdToPath errno %d (errno2=0x%x)", errno, errno2) + } } -func direntNamlen(buf []byte) (uint64, bool) { - reclen, ok := direntReclen(buf) - if !ok { - return 0, false +func impl_Mkfifoat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return } - return reclen - uint64(unsafe.Offsetof(Dirent{}.Name)), true + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MKFIFOAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_MkfifoatAddr() *(func(dirfd int, path string, mode uint32) (err error)) + +var Mkfifoat = enter_Mkfifoat + +func enter_Mkfifoat(dirfd int, path string, mode uint32) (err error) { + funcref := get_MkfifoatAddr() + if funcptrtest(GetZosLibVec()+SYS___MKFIFOAT_A<<4, 
"") == 0 { + *funcref = impl_Mkfifoat + } else { + *funcref = legacy_Mkfifoat + } + return (*funcref)(dirfd, path, mode) +} + +func legacy_Mkfifoat(dirfd int, path string, mode uint32) (err error) { + dirname, err := ZosFdToPath(dirfd) + if err != nil { + return err + } + return Mkfifo(dirname+"/"+path, mode) } + +//sys Posix_openpt(oflag int) (fd int, err error) = SYS_POSIX_OPENPT +//sys Grantpt(fildes int) (rc int, err error) = SYS_GRANTPT +//sys Unlockpt(fildes int) (rc int, err error) = SYS_UNLOCKPT diff --git a/vendor/golang.org/x/sys/unix/sysvshm_linux.go b/vendor/golang.org/x/sys/unix/sysvshm_linux.go index 2c3a4437..4fcd38de 100644 --- a/vendor/golang.org/x/sys/unix/sysvshm_linux.go +++ b/vendor/golang.org/x/sys/unix/sysvshm_linux.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux -// +build linux package unix diff --git a/vendor/golang.org/x/sys/unix/sysvshm_unix.go b/vendor/golang.org/x/sys/unix/sysvshm_unix.go index 5bb41d17..672d6b0a 100644 --- a/vendor/golang.org/x/sys/unix/sysvshm_unix.go +++ b/vendor/golang.org/x/sys/unix/sysvshm_unix.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build (darwin && !ios) || linux -// +build darwin,!ios linux +//go:build (darwin && !ios) || linux || zos package unix diff --git a/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go b/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go index 71bddefd..8b7977a2 100644 --- a/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go +++ b/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build darwin && !ios -// +build darwin,!ios +//go:build (darwin && !ios) || zos package unix diff --git a/vendor/golang.org/x/sys/unix/timestruct.go b/vendor/golang.org/x/sys/unix/timestruct.go index 616b1b28..7997b190 100644 --- a/vendor/golang.org/x/sys/unix/timestruct.go +++ b/vendor/golang.org/x/sys/unix/timestruct.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package unix diff --git a/vendor/golang.org/x/sys/unix/unveil_openbsd.go b/vendor/golang.org/x/sys/unix/unveil_openbsd.go index 168d5ae7..cb7e598c 100644 --- a/vendor/golang.org/x/sys/unix/unveil_openbsd.go +++ b/vendor/golang.org/x/sys/unix/unveil_openbsd.go @@ -4,39 +4,48 @@ package unix -import ( - "syscall" - "unsafe" -) +import "fmt" // Unveil implements the unveil syscall. // For more information see unveil(2). // Note that the special case of blocking further // unveil calls is handled by UnveilBlock. func Unveil(path string, flags string) error { - pathPtr, err := syscall.BytePtrFromString(path) - if err != nil { + if err := supportsUnveil(); err != nil { return err } - flagsPtr, err := syscall.BytePtrFromString(flags) + pathPtr, err := BytePtrFromString(path) if err != nil { return err } - _, _, e := syscall.Syscall(SYS_UNVEIL, uintptr(unsafe.Pointer(pathPtr)), uintptr(unsafe.Pointer(flagsPtr)), 0) - if e != 0 { - return e + flagsPtr, err := BytePtrFromString(flags) + if err != nil { + return err } - return nil + return unveil(pathPtr, flagsPtr) } // UnveilBlock blocks future unveil calls. // For more information see unveil(2). func UnveilBlock() error { - // Both pointers must be nil. 
- var pathUnsafe, flagsUnsafe unsafe.Pointer - _, _, e := syscall.Syscall(SYS_UNVEIL, uintptr(pathUnsafe), uintptr(flagsUnsafe), 0) - if e != 0 { - return e + if err := supportsUnveil(); err != nil { + return err } + return unveil(nil, nil) +} + +// supportsUnveil checks for availability of the unveil(2) system call based +// on the running OpenBSD version. +func supportsUnveil() error { + maj, min, err := majmin() + if err != nil { + return err + } + + // unveil is not available before 6.4 + if maj < 6 || (maj == 6 && min <= 3) { + return fmt.Errorf("cannot call Unveil on OpenBSD %d.%d", maj, min) + } + return nil } diff --git a/vendor/golang.org/x/sys/unix/xattr_bsd.go b/vendor/golang.org/x/sys/unix/xattr_bsd.go index f5f8e9f3..e1687939 100644 --- a/vendor/golang.org/x/sys/unix/xattr_bsd.go +++ b/vendor/golang.org/x/sys/unix/xattr_bsd.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build freebsd || netbsd -// +build freebsd netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go index ca9799b7..2fb219d7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc && aix -// +build ppc,aix // Created by cgo -godefs - DO NOT EDIT // cgo -godefs -- -maix32 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go index 200c8c26..b0e6f5c8 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && aix -// +build ppc64,aix // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -maix64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go index 14300762..e40fa852 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && darwin -// +build amd64,darwin // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go index ab044a74..bb02aa6c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && darwin -// +build arm64,darwin // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go index 17bba0e4..c0e0f869 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && dragonfly -// +build amd64,dragonfly // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go index f8c2c513..6c692390 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && freebsd -// +build 386,freebsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m32 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go index 96310c3b..dd9163f8 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && freebsd -// +build amd64,freebsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go index 777b69de..493a2a79 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && freebsd -// +build arm,freebsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go index c557ac2d..8b437b30 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && freebsd -// +build arm64,freebsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go index 341b4d96..67c02dd5 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && freebsd -// +build riscv64,freebsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index f9c7f479..93a38a97 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -1,7 +1,6 @@ // Code generated by mkmerge; DO NOT EDIT. 
//go:build linux -// +build linux package unix @@ -481,14 +480,18 @@ const ( BPF_FROM_BE = 0x8 BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_AFTER = 0x10 BPF_F_ALLOW_MULTI = 0x2 BPF_F_ALLOW_OVERRIDE = 0x1 BPF_F_ANY_ALIGNMENT = 0x2 - BPF_F_KPROBE_MULTI_RETURN = 0x1 + BPF_F_BEFORE = 0x8 + BPF_F_ID = 0x20 + BPF_F_NETFILTER_IP_DEFRAG = 0x1 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_REPLACE = 0x4 BPF_F_SLEEPABLE = 0x10 BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_TEST_REG_INVARIANTS = 0x80 BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TEST_RUN_ON_CPU = 0x1 BPF_F_TEST_STATE_FREQ = 0x8 @@ -521,6 +524,7 @@ const ( BPF_MAJOR_VERSION = 0x1 BPF_MAXINSNS = 0x1000 BPF_MEM = 0x60 + BPF_MEMSX = 0x80 BPF_MEMWORDS = 0x10 BPF_MINOR_VERSION = 0x1 BPF_MISC = 0x7 @@ -776,6 +780,8 @@ const ( DEVLINK_GENL_MCGRP_CONFIG_NAME = "config" DEVLINK_GENL_NAME = "devlink" DEVLINK_GENL_VERSION = 0x1 + DEVLINK_PORT_FN_CAP_IPSEC_CRYPTO = 0x4 + DEVLINK_PORT_FN_CAP_IPSEC_PACKET = 0x8 DEVLINK_PORT_FN_CAP_MIGRATABLE = 0x2 DEVLINK_PORT_FN_CAP_ROCE = 0x1 DEVLINK_SB_THRESHOLD_TO_ALPHA_MAX = 0x14 @@ -1692,12 +1698,14 @@ const ( KEXEC_ARCH_S390 = 0x160000 KEXEC_ARCH_SH = 0x2a0000 KEXEC_ARCH_X86_64 = 0x3e0000 + KEXEC_FILE_DEBUG = 0x8 KEXEC_FILE_NO_INITRAMFS = 0x4 KEXEC_FILE_ON_CRASH = 0x2 KEXEC_FILE_UNLOAD = 0x1 KEXEC_ON_CRASH = 0x1 KEXEC_PRESERVE_CONTEXT = 0x2 KEXEC_SEGMENT_MAX = 0x10 + KEXEC_UPDATE_ELFCOREHDR = 0x4 KEYCTL_ASSUME_AUTHORITY = 0x10 KEYCTL_CAPABILITIES = 0x1f KEYCTL_CAPS0_BIG_KEY = 0x10 @@ -1779,6 +1787,8 @@ const ( LANDLOCK_ACCESS_FS_REMOVE_FILE = 0x20 LANDLOCK_ACCESS_FS_TRUNCATE = 0x4000 LANDLOCK_ACCESS_FS_WRITE_FILE = 0x2 + LANDLOCK_ACCESS_NET_BIND_TCP = 0x1 + LANDLOCK_ACCESS_NET_CONNECT_TCP = 0x2 LANDLOCK_CREATE_RULESET_VERSION = 0x1 LINUX_REBOOT_CMD_CAD_OFF = 0x0 LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef @@ -1795,6 +1805,7 @@ const ( LOCK_SH = 0x1 LOCK_UN = 0x8 LOOP_CLR_FD = 0x4c01 + LOOP_CONFIGURE = 0x4c0a LOOP_CTL_ADD = 0x4c80 LOOP_CTL_GET_FREE = 0x4c82 LOOP_CTL_REMOVE = 0x4c81 @@ -1889,6 +1900,7 @@ const ( MNT_DETACH = 0x2 MNT_EXPIRE = 0x4 MNT_FORCE = 0x1 + MNT_ID_REQ_SIZE_VER0 = 0x18 MODULE_INIT_COMPRESSED_FILE = 0x4 MODULE_INIT_IGNORE_MODVERSIONS = 0x1 MODULE_INIT_IGNORE_VERMAGIC = 0x2 @@ -2120,6 +2132,60 @@ const ( NFNL_SUBSYS_QUEUE = 0x3 NFNL_SUBSYS_ULOG = 0x4 NFS_SUPER_MAGIC = 0x6969 + NFT_CHAIN_FLAGS = 0x7 + NFT_CHAIN_MAXNAMELEN = 0x100 + NFT_CT_MAX = 0x17 + NFT_DATA_RESERVED_MASK = 0xffffff00 + NFT_DATA_VALUE_MAXLEN = 0x40 + NFT_EXTHDR_OP_MAX = 0x4 + NFT_FIB_RESULT_MAX = 0x3 + NFT_INNER_MASK = 0xf + NFT_LOGLEVEL_MAX = 0x8 + NFT_NAME_MAXLEN = 0x100 + NFT_NG_MAX = 0x1 + NFT_OBJECT_CONNLIMIT = 0x5 + NFT_OBJECT_COUNTER = 0x1 + NFT_OBJECT_CT_EXPECT = 0x9 + NFT_OBJECT_CT_HELPER = 0x3 + NFT_OBJECT_CT_TIMEOUT = 0x7 + NFT_OBJECT_LIMIT = 0x4 + NFT_OBJECT_MAX = 0xa + NFT_OBJECT_QUOTA = 0x2 + NFT_OBJECT_SECMARK = 0x8 + NFT_OBJECT_SYNPROXY = 0xa + NFT_OBJECT_TUNNEL = 0x6 + NFT_OBJECT_UNSPEC = 0x0 + NFT_OBJ_MAXNAMELEN = 0x100 + NFT_OSF_MAXGENRELEN = 0x10 + NFT_QUEUE_FLAG_BYPASS = 0x1 + NFT_QUEUE_FLAG_CPU_FANOUT = 0x2 + NFT_QUEUE_FLAG_MASK = 0x3 + NFT_REG32_COUNT = 0x10 + NFT_REG32_SIZE = 0x4 + NFT_REG_MAX = 0x4 + NFT_REG_SIZE = 0x10 + NFT_REJECT_ICMPX_MAX = 0x3 + NFT_RT_MAX = 0x4 + NFT_SECMARK_CTX_MAXLEN = 0x100 + NFT_SET_MAXNAMELEN = 0x100 + NFT_SOCKET_MAX = 0x3 + NFT_TABLE_F_MASK = 0x3 + NFT_TABLE_MAXNAMELEN = 0x100 + NFT_TRACETYPE_MAX = 0x3 + NFT_TUNNEL_F_MASK = 0x7 + NFT_TUNNEL_MAX = 0x1 + NFT_TUNNEL_MODE_MAX = 0x2 + NFT_USERDATA_MAXLEN = 0x100 + NFT_XFRM_KEY_MAX = 0x6 + NF_NAT_RANGE_MAP_IPS = 0x1 + NF_NAT_RANGE_MASK = 0x7f + 
NF_NAT_RANGE_NETMAP = 0x40 + NF_NAT_RANGE_PERSISTENT = 0x8 + NF_NAT_RANGE_PROTO_OFFSET = 0x20 + NF_NAT_RANGE_PROTO_RANDOM = 0x4 + NF_NAT_RANGE_PROTO_RANDOM_ALL = 0x14 + NF_NAT_RANGE_PROTO_RANDOM_FULLY = 0x10 + NF_NAT_RANGE_PROTO_SPECIFIED = 0x2 NILFS_SUPER_MAGIC = 0x3434 NL0 = 0x0 NL1 = 0x100 @@ -2239,6 +2305,7 @@ const ( PERF_AUX_FLAG_PARTIAL = 0x4 PERF_AUX_FLAG_PMU_FORMAT_TYPE_MASK = 0xff00 PERF_AUX_FLAG_TRUNCATED = 0x1 + PERF_BRANCH_ENTRY_INFO_BITS_MAX = 0x21 PERF_BR_ARM64_DEBUG_DATA = 0x7 PERF_BR_ARM64_DEBUG_EXIT = 0x5 PERF_BR_ARM64_DEBUG_HALT = 0x4 @@ -2275,6 +2342,7 @@ const ( PERF_MEM_LVLNUM_PMEM = 0xe PERF_MEM_LVLNUM_RAM = 0xd PERF_MEM_LVLNUM_SHIFT = 0x21 + PERF_MEM_LVLNUM_UNC = 0x8 PERF_MEM_LVL_HIT = 0x2 PERF_MEM_LVL_IO = 0x1000 PERF_MEM_LVL_L1 = 0x8 @@ -2403,6 +2471,7 @@ const ( PR_MCE_KILL_GET = 0x22 PR_MCE_KILL_LATE = 0x0 PR_MCE_KILL_SET = 0x1 + PR_MDWE_NO_INHERIT = 0x2 PR_MDWE_REFUSE_EXEC_GAIN = 0x1 PR_MPX_DISABLE_MANAGEMENT = 0x2c PR_MPX_ENABLE_MANAGEMENT = 0x2b @@ -2607,8 +2676,9 @@ const ( RTAX_FEATURES = 0xc RTAX_FEATURE_ALLFRAG = 0x8 RTAX_FEATURE_ECN = 0x1 - RTAX_FEATURE_MASK = 0xf + RTAX_FEATURE_MASK = 0x1f RTAX_FEATURE_SACK = 0x2 + RTAX_FEATURE_TCP_USEC_TS = 0x10 RTAX_FEATURE_TIMESTAMP = 0x4 RTAX_HOPLIMIT = 0xa RTAX_INITCWND = 0xb @@ -2851,9 +2921,38 @@ const ( SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x1d SC_LOG_FLUSH = 0x100000 + SECCOMP_ADDFD_FLAG_SEND = 0x2 + SECCOMP_ADDFD_FLAG_SETFD = 0x1 + SECCOMP_FILTER_FLAG_LOG = 0x2 + SECCOMP_FILTER_FLAG_NEW_LISTENER = 0x8 + SECCOMP_FILTER_FLAG_SPEC_ALLOW = 0x4 + SECCOMP_FILTER_FLAG_TSYNC = 0x1 + SECCOMP_FILTER_FLAG_TSYNC_ESRCH = 0x10 + SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV = 0x20 + SECCOMP_GET_ACTION_AVAIL = 0x2 + SECCOMP_GET_NOTIF_SIZES = 0x3 + SECCOMP_IOCTL_NOTIF_RECV = 0xc0502100 + SECCOMP_IOCTL_NOTIF_SEND = 0xc0182101 + SECCOMP_IOC_MAGIC = '!' 
SECCOMP_MODE_DISABLED = 0x0 SECCOMP_MODE_FILTER = 0x2 SECCOMP_MODE_STRICT = 0x1 + SECCOMP_RET_ACTION = 0x7fff0000 + SECCOMP_RET_ACTION_FULL = 0xffff0000 + SECCOMP_RET_ALLOW = 0x7fff0000 + SECCOMP_RET_DATA = 0xffff + SECCOMP_RET_ERRNO = 0x50000 + SECCOMP_RET_KILL = 0x0 + SECCOMP_RET_KILL_PROCESS = 0x80000000 + SECCOMP_RET_KILL_THREAD = 0x0 + SECCOMP_RET_LOG = 0x7ffc0000 + SECCOMP_RET_TRACE = 0x7ff00000 + SECCOMP_RET_TRAP = 0x30000 + SECCOMP_RET_USER_NOTIF = 0x7fc00000 + SECCOMP_SET_MODE_FILTER = 0x1 + SECCOMP_SET_MODE_STRICT = 0x0 + SECCOMP_USER_NOTIF_FD_SYNC_WAKE_UP = 0x1 + SECCOMP_USER_NOTIF_FLAG_CONTINUE = 0x1 SECRETMEM_MAGIC = 0x5345434d SECURITYFS_MAGIC = 0x73636673 SEEK_CUR = 0x1 @@ -3013,6 +3112,7 @@ const ( SOL_TIPC = 0x10f SOL_TLS = 0x11a SOL_UDP = 0x11 + SOL_VSOCK = 0x11f SOL_X25 = 0x106 SOL_XDP = 0x11b SOMAXCONN = 0x1000 @@ -3072,6 +3172,7 @@ const ( STATX_GID = 0x10 STATX_INO = 0x100 STATX_MNT_ID = 0x1000 + STATX_MNT_ID_UNIQUE = 0x4000 STATX_MODE = 0x2 STATX_MTIME = 0x40 STATX_NLINK = 0x4 @@ -3461,18 +3562,24 @@ const ( XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 + XDP_PKT_CONTD = 0x1 XDP_RING_NEED_WAKEUP = 0x1 XDP_RX_RING = 0x2 XDP_SHARED_UMEM = 0x1 XDP_STATISTICS = 0x7 + XDP_TXMD_FLAGS_CHECKSUM = 0x2 + XDP_TXMD_FLAGS_TIMESTAMP = 0x1 + XDP_TX_METADATA = 0x2 XDP_TX_RING = 0x3 XDP_UMEM_COMPLETION_RING = 0x6 XDP_UMEM_FILL_RING = 0x5 XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000 XDP_UMEM_PGOFF_FILL_RING = 0x100000000 XDP_UMEM_REG = 0x4 + XDP_UMEM_TX_SW_CSUM = 0x2 XDP_UMEM_UNALIGNED_CHUNK_FLAG = 0x1 XDP_USE_NEED_WAKEUP = 0x8 + XDP_USE_SG = 0x10 XDP_ZEROCOPY = 0x4 XENFS_SUPER_MAGIC = 0xabba1974 XFS_SUPER_MAGIC = 0x58465342 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 30aee00a..42ff8c3c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && linux -// +build 386,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/386/include -m32 _const.go @@ -282,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 8ebfa512..dca43600 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && linux -// +build amd64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -Wall -Werror -static -I/tmp/amd64/include -m64 _const.go @@ -283,6 +282,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 271a21cd..5cca668a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && linux -// +build arm,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/arm/include _const.go @@ -289,6 +288,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 910c330a..d8cae6d1 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && linux -// +build arm64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/arm64/include -fsigned-char _const.go @@ -279,6 +278,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index a640798c..28e39afd 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build loong64 && linux -// +build loong64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/loong64/include _const.go @@ -119,6 +118,7 @@ const ( IXOFF = 0x1000 IXON = 0x400 LASX_CTX_MAGIC = 0x41535801 + LBT_CTX_MAGIC = 0x42540001 LSX_CTX_MAGIC = 0x53580001 MAP_ANON = 0x20 MAP_ANONYMOUS = 0x20 @@ -275,6 +275,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 0d5925d3..cd66e92c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips && linux -// +build mips,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -Wall -Werror -static -I/tmp/mips/include _const.go @@ -282,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x80 SIOCATMARK = 0x40047307 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index d72a00e0..c1595eba 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64 && linux -// +build mips64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/mips64/include _const.go @@ -282,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x80 SIOCATMARK = 0x40047307 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 02ba129f..ee9456b0 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64le && linux -// +build mips64le,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/mips64le/include _const.go @@ -282,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x80 SIOCATMARK = 0x40047307 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 8daa6dd9..8cfca81e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mipsle && linux -// +build mipsle,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/mipsle/include _const.go @@ -282,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x80 SIOCATMARK = 0x40047307 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 63c8fa2f..60b0deb3 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc && linux -// +build ppc,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -Wall -Werror -static -I/tmp/ppc/include _const.go @@ -337,6 +336,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 930799ec..f90aa728 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && linux -// +build ppc64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/ppc64/include _const.go @@ -341,6 +340,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 8605a7dd..ba9e0150 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64le && linux -// +build ppc64le,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/ppc64le/include _const.go @@ -341,6 +340,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 95a016f1..07cdfd6e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && linux -// +build riscv64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/riscv64/include _const.go @@ -228,6 +227,9 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTRACE_GETFDPIC = 0x21 + PTRACE_GETFDPIC_EXEC = 0x0 + PTRACE_GETFDPIC_INTERP = 0x1 RLIMIT_AS = 0x9 RLIMIT_MEMLOCK = 0x8 RLIMIT_NOFILE = 0x7 @@ -270,6 +272,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 1ae0108f..2f1dd214 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build s390x && linux -// +build s390x,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -Wall -Werror -static -I/tmp/s390x/include -fsigned-char _const.go @@ -345,6 +344,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 1bb7c633..f40519d9 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build sparc64 && linux -// +build sparc64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/sparc64/include _const.go @@ -336,6 +335,9 @@ const ( SCM_TIMESTAMPNS = 0x21 SCM_TXTIME = 0x3f SCM_WIFI_STATUS = 0x25 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x400000 SFD_NONBLOCK = 0x4000 SF_FP = 0x38 diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go index 72f7420d..130085df 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && netbsd -// +build 386,netbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m32 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go index 8d4eb0c0..84769a1a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && netbsd -// +build amd64,netbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go index 9eef9749..602ded00 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && netbsd -// +build arm,netbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -marm _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go index 3b62ba19..efc0406e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && netbsd -// +build arm64,netbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go index af20e474..5a6500f8 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build 386 && openbsd -// +build 386,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m32 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go index 6015fcb2..a5aeeb97 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && openbsd -// +build amd64,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go index 8d44955e..0e9748a7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && openbsd -// +build arm,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go index ae16fe75..4f4449ab 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && openbsd -// +build arm64,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go index 03d90fe3..76a363f0 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64 && openbsd -// +build mips64,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_ppc64.go index 8e2c51b1..43ca0cdf 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && openbsd -// +build ppc64,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_riscv64.go index 13d40303..b1b8bb20 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && openbsd -// +build riscv64,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go index 1afee6a0..d2ddd317 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build amd64 && solaris -// +build amd64,solaris // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go index fc7d0506..da08b2ab 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build zos && s390x -// +build zos,s390x // Hand edited based on zerrors_linux_s390x.go // TODO: auto-generate. @@ -11,41 +10,99 @@ package unix const ( - BRKINT = 0x0001 - CLOCK_MONOTONIC = 0x1 - CLOCK_PROCESS_CPUTIME_ID = 0x2 - CLOCK_REALTIME = 0x0 - CLOCK_THREAD_CPUTIME_ID = 0x3 - CS8 = 0x0030 - CSIZE = 0x0030 - ECHO = 0x00000008 - ECHONL = 0x00000001 - FD_CLOEXEC = 0x01 - FD_CLOFORK = 0x02 - FNDELAY = 0x04 - F_CLOSFD = 9 - F_CONTROL_CVT = 13 - F_DUPFD = 0 - F_DUPFD2 = 8 - F_GETFD = 1 - F_GETFL = 259 - F_GETLK = 5 - F_GETOWN = 10 - F_OK = 0x0 - F_RDLCK = 1 - F_SETFD = 2 - F_SETFL = 4 - F_SETLK = 6 - F_SETLKW = 7 - F_SETOWN = 11 - F_SETTAG = 12 - F_UNLCK = 3 - F_WRLCK = 2 - FSTYPE_ZFS = 0xe9 //"Z" - FSTYPE_HFS = 0xc8 //"H" - FSTYPE_NFS = 0xd5 //"N" - FSTYPE_TFS = 0xe3 //"T" - FSTYPE_AUTOMOUNT = 0xc1 //"A" + BRKINT = 0x0001 + CLOCAL = 0x1 + CLOCK_MONOTONIC = 0x1 + CLOCK_PROCESS_CPUTIME_ID = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_THREAD_CPUTIME_ID = 0x3 + CLONE_NEWIPC = 0x08000000 + CLONE_NEWNET = 0x40000000 + CLONE_NEWNS = 0x00020000 + CLONE_NEWPID = 0x20000000 + CLONE_NEWUTS = 0x04000000 + CLONE_PARENT = 0x00008000 + CS8 = 0x0030 + CSIZE = 0x0030 + ECHO = 0x00000008 + ECHONL = 0x00000001 + EFD_SEMAPHORE = 0x00002000 + EFD_CLOEXEC = 0x00001000 + EFD_NONBLOCK = 0x00000004 + EPOLL_CLOEXEC = 0x00001000 + EPOLL_CTL_ADD = 0 + EPOLL_CTL_MOD = 1 + EPOLL_CTL_DEL = 2 + EPOLLRDNORM = 0x0001 + EPOLLRDBAND = 0x0002 + EPOLLIN = 0x0003 + EPOLLOUT = 0x0004 + EPOLLWRBAND = 0x0008 + EPOLLPRI = 0x0010 + EPOLLERR = 0x0020 + EPOLLHUP = 0x0040 + EPOLLEXCLUSIVE = 0x20000000 + EPOLLONESHOT = 0x40000000 + FD_CLOEXEC = 0x01 + FD_CLOFORK = 0x02 + FD_SETSIZE = 0x800 + FNDELAY = 0x04 + F_CLOSFD = 9 + F_CONTROL_CVT = 13 + F_DUPFD = 0 + F_DUPFD2 = 8 + F_GETFD = 1 + F_GETFL = 259 + F_GETLK = 5 + F_GETOWN = 10 + F_OK = 0x0 + F_RDLCK = 1 + F_SETFD = 2 + F_SETFL = 4 + F_SETLK = 6 + F_SETLKW = 7 + F_SETOWN = 11 + F_SETTAG = 12 + F_UNLCK = 3 + F_WRLCK = 2 + FSTYPE_ZFS = 0xe9 //"Z" + FSTYPE_HFS = 0xc8 //"H" + FSTYPE_NFS = 0xd5 //"N" + FSTYPE_TFS = 0xe3 //"T" + FSTYPE_AUTOMOUNT = 0xc1 //"A" + GRND_NONBLOCK = 1 + GRND_RANDOM = 2 + HUPCL = 0x0100 // Hang up on last close + IN_CLOEXEC = 0x00001000 + IN_NONBLOCK = 0x00000004 + IN_ACCESS = 0x00000001 + IN_MODIFY = 0x00000002 + IN_ATTRIB = 0x00000004 + IN_CLOSE_WRITE = 0x00000008 + IN_CLOSE_NOWRITE = 0x00000010 + IN_OPEN = 0x00000020 + IN_MOVED_FROM = 0x00000040 + IN_MOVED_TO = 0x00000080 + IN_CREATE = 0x00000100 + IN_DELETE = 0x00000200 + IN_DELETE_SELF = 0x00000400 + IN_MOVE_SELF = 0x00000800 + IN_UNMOUNT = 0x00002000 + IN_Q_OVERFLOW = 0x00004000 + IN_IGNORED = 0x00008000 + IN_CLOSE = (IN_CLOSE_WRITE | IN_CLOSE_NOWRITE) + IN_MOVE = (IN_MOVED_FROM | IN_MOVED_TO) + IN_ALL_EVENTS = (IN_ACCESS | IN_MODIFY | IN_ATTRIB | + IN_CLOSE | IN_OPEN | IN_MOVE | + IN_CREATE | IN_DELETE | IN_DELETE_SELF | + IN_MOVE_SELF) + IN_ONLYDIR = 0x01000000 + IN_DONT_FOLLOW = 0x02000000 + IN_EXCL_UNLINK = 0x04000000 + IN_MASK_CREATE = 0x10000000 + IN_MASK_ADD = 0x20000000 + IN_ISDIR = 0x40000000 + IN_ONESHOT = 0x80000000 IP6F_MORE_FRAG = 0x0001 
IP6F_OFF_MASK = 0xfff8 IP6F_RESERVED_MASK = 0x0006 @@ -153,10 +210,18 @@ const ( IP_PKTINFO = 101 IP_RECVPKTINFO = 102 IP_TOS = 2 - IP_TTL = 3 + IP_TTL = 14 IP_UNBLOCK_SOURCE = 11 + ICMP6_FILTER = 1 + MCAST_INCLUDE = 0 + MCAST_EXCLUDE = 1 + MCAST_JOIN_GROUP = 40 + MCAST_LEAVE_GROUP = 41 + MCAST_JOIN_SOURCE_GROUP = 42 + MCAST_LEAVE_SOURCE_GROUP = 43 + MCAST_BLOCK_SOURCE = 44 + MCAST_UNBLOCK_SOURCE = 46 ICANON = 0x0010 - ICMP6_FILTER = 0x26 ICRNL = 0x0002 IEXTEN = 0x0020 IGNBRK = 0x0004 @@ -166,10 +231,10 @@ const ( ISTRIP = 0x0080 IXON = 0x0200 IXOFF = 0x0100 - LOCK_SH = 0x1 // Not exist on zOS - LOCK_EX = 0x2 // Not exist on zOS - LOCK_NB = 0x4 // Not exist on zOS - LOCK_UN = 0x8 // Not exist on zOS + LOCK_SH = 0x1 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_UN = 0x8 POLLIN = 0x0003 POLLOUT = 0x0004 POLLPRI = 0x0010 @@ -183,15 +248,29 @@ const ( MAP_PRIVATE = 0x1 // changes are private MAP_SHARED = 0x2 // changes are shared MAP_FIXED = 0x4 // place exactly - MCAST_JOIN_GROUP = 40 - MCAST_LEAVE_GROUP = 41 - MCAST_JOIN_SOURCE_GROUP = 42 - MCAST_LEAVE_SOURCE_GROUP = 43 - MCAST_BLOCK_SOURCE = 44 - MCAST_UNBLOCK_SOURCE = 45 + __MAP_MEGA = 0x8 + __MAP_64 = 0x10 + MAP_ANON = 0x20 + MAP_ANONYMOUS = 0x20 MS_SYNC = 0x1 // msync - synchronous writes MS_ASYNC = 0x2 // asynchronous writes MS_INVALIDATE = 0x4 // invalidate mappings + MS_BIND = 0x00001000 + MS_MOVE = 0x00002000 + MS_NOSUID = 0x00000002 + MS_PRIVATE = 0x00040000 + MS_REC = 0x00004000 + MS_REMOUNT = 0x00008000 + MS_RDONLY = 0x00000001 + MS_UNBINDABLE = 0x00020000 + MNT_DETACH = 0x00000004 + ZOSDSFS_SUPER_MAGIC = 0x44534653 // zOS DSFS + NFS_SUPER_MAGIC = 0x6969 // NFS + NSFS_MAGIC = 0x6e736673 // PROCNS + PROC_SUPER_MAGIC = 0x9fa0 // proc FS + ZOSTFS_SUPER_MAGIC = 0x544653 // zOS TFS + ZOSUFS_SUPER_MAGIC = 0x554653 // zOS UFS + ZOSZFS_SUPER_MAGIC = 0x5A4653 // zOS ZFS MTM_RDONLY = 0x80000000 MTM_RDWR = 0x40000000 MTM_UMOUNT = 0x10000000 @@ -206,13 +285,20 @@ const ( MTM_REMOUNT = 0x00000100 MTM_NOSECURITY = 0x00000080 NFDBITS = 0x20 + ONLRET = 0x0020 // NL performs CR function O_ACCMODE = 0x03 O_APPEND = 0x08 O_ASYNCSIG = 0x0200 O_CREAT = 0x80 + O_DIRECT = 0x00002000 + O_NOFOLLOW = 0x00004000 + O_DIRECTORY = 0x00008000 + O_PATH = 0x00080000 + O_CLOEXEC = 0x00001000 O_EXCL = 0x40 O_GETFL = 0x0F O_LARGEFILE = 0x0400 + O_NDELAY = 0x4 O_NONBLOCK = 0x04 O_RDONLY = 0x02 O_RDWR = 0x03 @@ -249,6 +335,7 @@ const ( AF_IUCV = 17 AF_LAT = 14 AF_LINK = 18 + AF_LOCAL = AF_UNIX // AF_LOCAL is an alias for AF_UNIX AF_MAX = 30 AF_NBS = 7 AF_NDD = 23 @@ -286,15 +373,33 @@ const ( RLIMIT_AS = 5 RLIMIT_NOFILE = 6 RLIMIT_MEMLIMIT = 7 + RLIMIT_MEMLOCK = 0x8 RLIM_INFINITY = 2147483647 + SCHED_FIFO = 0x2 + SCM_CREDENTIALS = 0x2 SCM_RIGHTS = 0x01 SF_CLOSE = 0x00000002 SF_REUSE = 0x00000001 + SHM_RND = 0x2 + SHM_RDONLY = 0x1 + SHMLBA = 0x1000 + IPC_STAT = 0x3 + IPC_SET = 0x2 + IPC_RMID = 0x1 + IPC_PRIVATE = 0x0 + IPC_CREAT = 0x1000000 + __IPC_MEGA = 0x4000000 + __IPC_SHAREAS = 0x20000000 + __IPC_BELOWBAR = 0x10000000 + IPC_EXCL = 0x2000000 + __IPC_GIGA = 0x8000000 SHUT_RD = 0 SHUT_RDWR = 2 SHUT_WR = 1 + SOCK_CLOEXEC = 0x00001000 SOCK_CONN_DGRAM = 6 SOCK_DGRAM = 2 + SOCK_NONBLOCK = 0x800 SOCK_RAW = 3 SOCK_RDM = 4 SOCK_SEQPACKET = 5 @@ -379,8 +484,6 @@ const ( S_IFMST = 0x00FF0000 TCP_KEEPALIVE = 0x8 TCP_NODELAY = 0x1 - TCP_INFO = 0xb - TCP_USER_TIMEOUT = 0x1 TIOCGWINSZ = 0x4008a368 TIOCSWINSZ = 0x8008a367 TIOCSBRK = 0x2000a77b @@ -428,7 +531,10 @@ const ( VSUSP = 9 VTIME = 10 WCONTINUED = 0x4 + WEXITED = 0x8 WNOHANG = 0x1 + WNOWAIT = 0x20 + WSTOPPED = 0x10 WUNTRACED = 
0x2 _BPX_SWAP = 1 _BPX_NONSWAP = 2 @@ -453,8 +559,28 @@ const ( MADV_FREE = 15 // for Linux compatibility -- no zos semantics MADV_WIPEONFORK = 16 // for Linux compatibility -- no zos semantics MADV_KEEPONFORK = 17 // for Linux compatibility -- no zos semantics - AT_SYMLINK_NOFOLLOW = 1 // for Unix compatibility -- no zos semantics - AT_FDCWD = 2 // for Unix compatibility -- no zos semantics + AT_SYMLINK_FOLLOW = 0x400 + AT_SYMLINK_NOFOLLOW = 0x100 + XATTR_CREATE = 0x1 + XATTR_REPLACE = 0x2 + P_PID = 0 + P_PGID = 1 + P_ALL = 2 + PR_SET_NAME = 15 + PR_GET_NAME = 16 + PR_SET_NO_NEW_PRIVS = 38 + PR_GET_NO_NEW_PRIVS = 39 + PR_SET_DUMPABLE = 4 + PR_GET_DUMPABLE = 3 + PR_SET_PDEATHSIG = 1 + PR_GET_PDEATHSIG = 2 + PR_SET_CHILD_SUBREAPER = 36 + PR_GET_CHILD_SUBREAPER = 37 + AT_FDCWD = -100 + AT_EACCESS = 0x200 + AT_EMPTY_PATH = 0x1000 + AT_REMOVEDIR = 0x200 + RENAME_NOREPLACE = 1 << 0 ) const ( @@ -477,6 +603,7 @@ const ( EMLINK = Errno(125) ENAMETOOLONG = Errno(126) ENFILE = Errno(127) + ENOATTR = Errno(265) ENODEV = Errno(128) ENOENT = Errno(129) ENOEXEC = Errno(130) @@ -701,7 +828,7 @@ var errorList = [...]struct { {145, "EDC5145I", "The parameter list is too long, or the message to receive was too large for the buffer."}, {146, "EDC5146I", "Too many levels of symbolic links."}, {147, "EDC5147I", "Illegal byte sequence."}, - {148, "", ""}, + {148, "EDC5148I", "The named attribute or data not available."}, {149, "EDC5149I", "Value Overflow Error."}, {150, "EDC5150I", "UNIX System Services is not active."}, {151, "EDC5151I", "Dynamic allocation error."}, @@ -744,6 +871,7 @@ var errorList = [...]struct { {259, "EDC5259I", "A CUN_RS_NO_CONVERSION error was issued by Unicode Services."}, {260, "EDC5260I", "A CUN_RS_TABLE_NOT_ALIGNED error was issued by Unicode Services."}, {262, "EDC5262I", "An iconv() function encountered an unexpected error while using Unicode Services."}, + {265, "EDC5265I", "The named attribute not available."}, {1000, "EDC8000I", "A bad socket-call constant was found in the IUCV header."}, {1001, "EDC8001I", "An error was found in the IUCV header."}, {1002, "EDC8002I", "A socket descriptor is out of range."}, diff --git a/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go b/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go index 97f20ca2..586317c7 100644 --- a/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go +++ b/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go @@ -1,8 +1,6 @@ // Code generated by linux/mkall.go generatePtracePair("arm", "arm64"). DO NOT EDIT. //go:build linux && (arm || arm64) -// +build linux -// +build arm arm64 package unix diff --git a/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go b/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go index 0b5f7943..d7c881be 100644 --- a/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go +++ b/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go @@ -1,8 +1,6 @@ // Code generated by linux/mkall.go generatePtracePair("mips", "mips64"). DO NOT EDIT. //go:build linux && (mips || mips64) -// +build linux -// +build mips mips64 package unix diff --git a/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go b/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go index 2807f7e6..2d2de5d2 100644 --- a/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go +++ b/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go @@ -1,8 +1,6 @@ // Code generated by linux/mkall.go generatePtracePair("mipsle", "mips64le"). DO NOT EDIT. 
//go:build linux && (mipsle || mips64le) -// +build linux -// +build mipsle mips64le package unix diff --git a/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go b/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go index 281ea64e..5adc79fb 100644 --- a/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go +++ b/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go @@ -1,8 +1,6 @@ // Code generated by linux/mkall.go generatePtracePair("386", "amd64"). DO NOT EDIT. //go:build linux && (386 || amd64) -// +build linux -// +build 386 amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsymaddr_zos_s390x.s b/vendor/golang.org/x/sys/unix/zsymaddr_zos_s390x.s new file mode 100644 index 00000000..b77ff5db --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsymaddr_zos_s390x.s @@ -0,0 +1,364 @@ +// go run mksyscall_zos_s390x.go -o_sysnum zsysnum_zos_s390x.go -o_syscall zsyscall_zos_s390x.go -i_syscall syscall_zos_s390x.go -o_asm zsymaddr_zos_s390x.s +// Code generated by the command above; see README.md. DO NOT EDIT. + +//go:build zos && s390x +#include "textflag.h" + +// provide the address of function variable to be fixed up. + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FlistxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Flistxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FremovexattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Fremovexattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FgetxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Fgetxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FsetxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Fsetxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_accept4Addr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·accept4(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_RemovexattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Removexattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_Dup3Addr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Dup3(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_DirfdAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Dirfd(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_EpollCreateAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·EpollCreate(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_EpollCreate1Addr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·EpollCreate1(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_EpollCtlAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·EpollCtl(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_EpollPwaitAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·EpollPwait(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_EpollWaitAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·EpollWait(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_EventfdAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD 
$·Eventfd(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FaccessatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Faccessat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FchmodatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Fchmodat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FchownatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Fchownat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FdatasyncAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Fdatasync(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_fstatatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·fstatat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_LgetxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Lgetxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_LsetxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Lsetxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FstatfsAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Fstatfs(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FutimesAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Futimes(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FutimesatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Futimesat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_GetrandomAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Getrandom(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_InotifyInitAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·InotifyInit(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_InotifyInit1Addr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·InotifyInit1(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_InotifyAddWatchAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·InotifyAddWatch(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_InotifyRmWatchAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·InotifyRmWatch(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_ListxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Listxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_LlistxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Llistxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_LremovexattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Lremovexattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_LutimesAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Lutimes(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_StatfsAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Statfs(SB), R8 + 
MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_SyncfsAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Syncfs(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_UnshareAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Unshare(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_LinkatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Linkat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_MkdiratAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Mkdirat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_MknodatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Mknodat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_PivotRootAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·PivotRoot(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_PrctlAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Prctl(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_PrlimitAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Prlimit(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_RenameatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Renameat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_Renameat2Addr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Renameat2(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_SethostnameAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Sethostname(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_SetnsAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Setns(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_SymlinkatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Symlinkat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_UnlinkatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Unlinkat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_openatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·openat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_openat2Addr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·openat2(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_utimensatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·utimensat(SB), R8 + MOVD R8, ret+0(FP) + RET diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go index d1d1d233..6ea64a3c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build aix && ppc -// +build aix,ppc package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go index f99a18ad..99ee4399 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build aix && ppc64 -// +build aix,ppc64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go index c4d50ae5..b68a7836 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build aix && ppc64 && gc -// +build aix,ppc64,gc package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go index 6903d3b0..0a87450b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build aix && ppc64 && gccgo -// +build aix,ppc64,gccgo package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index 1cad561e..ccb02f24 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build darwin && amd64 -// +build darwin,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index b18edbd0..1b40b997 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build darwin && arm64 -// +build darwin,arm64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go index 0c67df64..aad65fc7 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build dragonfly && amd64 -// +build dragonfly,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go index e6e05d14..c0096391 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build freebsd && 386 -// +build freebsd,386 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go index 7508acca..7664df74 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build freebsd && amd64 -// +build freebsd,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go index 7b56aead..ae099182 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build freebsd && arm -// +build freebsd,arm package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go index cc623dca..11fd5d45 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build freebsd && arm64 -// +build freebsd,arm64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go index 58184919..c3d2d653 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build freebsd && riscv64 -// +build freebsd,riscv64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go index 6be25cd1..c698cbc0 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build illumos && amd64 -// +build illumos,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 1ff3aec7..87d8612a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -1,7 +1,6 @@ // Code generated by mkmerge; DO NOT EDIT. 
//go:build linux -// +build linux package unix @@ -38,6 +37,21 @@ func fchmodat(dirfd int, path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fchmodat2(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHMODAT2, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctl(fd int, req uint, arg uintptr) (err error) { _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -892,6 +906,16 @@ func Fspick(dirfd int, pathName string, flags int) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fsconfig(fd int, cmd uint, key *byte, value *byte, aux int) (err error) { + _, _, e1 := Syscall6(SYS_FSCONFIG, uintptr(fd), uintptr(cmd), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(value)), uintptr(aux), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getdents(fd int, buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { @@ -2195,3 +2219,13 @@ func schedGetattr(pid int, attr *SchedAttr, size uint, flags uint) (err error) { } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Cachestat(fd uint, crange *CachestatRange, cstat *Cachestat_t, flags uint) (err error) { + _, _, e1 := Syscall6(SYS_CACHESTAT, uintptr(fd), uintptr(unsafe.Pointer(crange)), uintptr(unsafe.Pointer(cstat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go index 07b549cc..4def3e9f 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && 386 -// +build linux,386 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go index 5f481bf8..fef2bc8b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && amd64 -// +build linux,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go index 824cd52c..a9fd76a8 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && arm -// +build linux,arm package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go index e77aecfe..46006502 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build linux && arm64 -// +build linux,arm64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go index 806ffd1e..c8987d26 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && loong64 -// +build linux,loong64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go index 961a3afb..921f4306 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && mips -// +build linux,mips package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go index ed05005e..44f06782 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && mips64 -// +build linux,mips64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go index d365b718..e7fa0abf 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && mips64le -// +build linux,mips64le package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go index c3f1b8bb..8c512567 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && mipsle -// +build linux,mipsle package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go index a6574cf9..7392fd45 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && ppc -// +build linux,ppc package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go index f4099026..41180434 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && ppc64 -// +build linux,ppc64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go index 9dfcc299..40c6ce7a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build linux && ppc64le -// +build linux,ppc64le package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go index 0ab4f2ed..2cfe34ad 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && riscv64 -// +build linux,riscv64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go index 6cde3223..61e6f070 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && s390x -// +build linux,s390x package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go index 5253d65b..834b8420 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && sparc64 -// +build linux,sparc64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go index 2df3c5ba..e91ebc14 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build netbsd && 386 -// +build netbsd,386 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go index a60556ba..be28babb 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build netbsd && amd64 -// +build netbsd,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go index 9f788917..fb587e82 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build netbsd && arm -// +build netbsd,arm package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go index 82a4cb2d..d576438b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build netbsd && arm64 -// +build netbsd,arm64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index 66b3b645..9dc42410 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build openbsd && 386 -// +build openbsd,386 package unix @@ -585,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fcntl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) @@ -2213,6 +2238,21 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getfsstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -2229,3 +2269,31 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr //go:cgo_import_dynamic libc_utimensat utimensat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pledge(promises *byte, execpromises *byte) (err error) { + _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pledge_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pledge pledge "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unveil(path *byte, flags *byte) (err error) { + _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unveil_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unveil unveil "libc.so" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s index 3dcacd30..41b56173 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s @@ -178,6 +178,11 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $4 DATA ·libc_sysctl_trampoline_addr(SB)/4, $libc_sysctl_trampoline<>(SB) +TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fcntl(SB) +GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fcntl_trampoline_addr(SB)/4, $libc_fcntl_trampoline<>(SB) 
+ TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ppoll(SB) GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $4 @@ -668,7 +673,22 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $4 DATA ·libc_munmap_trampoline_addr(SB)/4, $libc_munmap_trampoline<>(SB) +TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getfsstat(SB) +GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getfsstat_trampoline_addr(SB)/4, $libc_getfsstat_trampoline<>(SB) + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $4 DATA ·libc_utimensat_trampoline_addr(SB)/4, $libc_utimensat_trampoline<>(SB) + +TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pledge(SB) +GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $4 +DATA ·libc_pledge_trampoline_addr(SB)/4, $libc_pledge_trampoline<>(SB) + +TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unveil(SB) +GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $4 +DATA ·libc_unveil_trampoline_addr(SB)/4, $libc_unveil_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index c5c4cc11..0d3a0751 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build openbsd && amd64 -// +build openbsd,amd64 package unix @@ -585,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fcntl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) @@ -2213,6 +2238,21 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getfsstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -2229,3 +2269,31 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr //go:cgo_import_dynamic libc_utimensat 
utimensat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pledge(promises *byte, execpromises *byte) (err error) { + _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pledge_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pledge pledge "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unveil(path *byte, flags *byte) (err error) { + _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unveil_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unveil unveil "libc.so" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s index 2763620b..4019a656 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s @@ -178,6 +178,11 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fcntl(SB) +GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB) + TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ppoll(SB) GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8 @@ -668,7 +673,22 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) +TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getfsstat(SB) +GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB) + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) + +TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pledge(SB) +GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pledge_trampoline_addr(SB)/8, $libc_pledge_trampoline<>(SB) + +TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unveil(SB) +GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unveil_trampoline_addr(SB)/8, $libc_unveil_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index 93bfbb32..c39f7776 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build openbsd && arm -// +build openbsd,arm package unix @@ -585,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fcntl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) @@ -2213,6 +2238,21 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getfsstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -2229,3 +2269,31 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr //go:cgo_import_dynamic libc_utimensat utimensat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pledge(promises *byte, execpromises *byte) (err error) { + _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pledge_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pledge pledge "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unveil(path *byte, flags *byte) (err error) { + _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unveil_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unveil unveil "libc.so" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s index c9223140..ac4af24f 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s @@ -178,6 +178,11 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $4 DATA ·libc_sysctl_trampoline_addr(SB)/4, $libc_sysctl_trampoline<>(SB) +TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fcntl(SB) +GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fcntl_trampoline_addr(SB)/4, $libc_fcntl_trampoline<>(SB) 
+ TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ppoll(SB) GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $4 @@ -668,7 +673,22 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $4 DATA ·libc_munmap_trampoline_addr(SB)/4, $libc_munmap_trampoline<>(SB) +TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getfsstat(SB) +GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getfsstat_trampoline_addr(SB)/4, $libc_getfsstat_trampoline<>(SB) + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $4 DATA ·libc_utimensat_trampoline_addr(SB)/4, $libc_utimensat_trampoline<>(SB) + +TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pledge(SB) +GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $4 +DATA ·libc_pledge_trampoline_addr(SB)/4, $libc_pledge_trampoline<>(SB) + +TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unveil(SB) +GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $4 +DATA ·libc_unveil_trampoline_addr(SB)/4, $libc_unveil_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go index a107b8fd..57571d07 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build openbsd && arm64 -// +build openbsd,arm64 package unix @@ -585,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fcntl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) @@ -2213,6 +2238,21 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getfsstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -2229,3 +2269,31 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr //go:cgo_import_dynamic libc_utimensat 
utimensat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pledge(promises *byte, execpromises *byte) (err error) { + _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pledge_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pledge pledge "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unveil(path *byte, flags *byte) (err error) { + _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unveil_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unveil unveil "libc.so" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s index a6bc32c9..f77d5321 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s @@ -178,6 +178,11 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fcntl(SB) +GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB) + TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ppoll(SB) GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8 @@ -668,7 +673,22 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) +TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getfsstat(SB) +GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB) + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) + +TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pledge(SB) +GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pledge_trampoline_addr(SB)/8, $libc_pledge_trampoline<>(SB) + +TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unveil(SB) +GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unveil_trampoline_addr(SB)/8, $libc_unveil_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go index c427de50..e62963e6 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build openbsd && mips64 -// +build openbsd,mips64 package unix @@ -585,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fcntl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) @@ -2213,6 +2238,21 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getfsstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -2229,3 +2269,31 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr //go:cgo_import_dynamic libc_utimensat utimensat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pledge(promises *byte, execpromises *byte) (err error) { + _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pledge_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pledge pledge "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unveil(path *byte, flags *byte) (err error) { + _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unveil_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unveil unveil "libc.so" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s index b4e7bcea..fae140b6 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s @@ -178,6 +178,11 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fcntl(SB) +GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fcntl_trampoline_addr(SB)/8, 
$libc_fcntl_trampoline<>(SB) + TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ppoll(SB) GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8 @@ -668,7 +673,22 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) +TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getfsstat(SB) +GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB) + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) + +TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pledge(SB) +GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pledge_trampoline_addr(SB)/8, $libc_pledge_trampoline<>(SB) + +TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unveil(SB) +GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unveil_trampoline_addr(SB)/8, $libc_unveil_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go index 60c1a99a..00831354 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build openbsd && ppc64 -// +build openbsd,ppc64 package unix @@ -585,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fcntl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) @@ -2213,6 +2238,21 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getfsstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -2229,3 +2269,31 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr 
//go:cgo_import_dynamic libc_utimensat utimensat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pledge(promises *byte, execpromises *byte) (err error) { + _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pledge_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pledge pledge "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unveil(path *byte, flags *byte) (err error) { + _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unveil_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unveil unveil "libc.so" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s index ca3f7660..9d1e0ff0 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s @@ -213,6 +213,12 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_fcntl(SB) + RET +GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB) + TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 CALL libc_ppoll(SB) RET @@ -801,8 +807,26 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) +TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getfsstat(SB) + RET +GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB) + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 CALL libc_utimensat(SB) RET GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) + +TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_pledge(SB) + RET +GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pledge_trampoline_addr(SB)/8, $libc_pledge_trampoline<>(SB) + +TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_unveil(SB) + RET +GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unveil_trampoline_addr(SB)/8, $libc_unveil_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go index 52eba360..79029ed5 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build openbsd && riscv64 -// +build openbsd,riscv64 package unix @@ -585,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fcntl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) @@ -2213,6 +2238,21 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getfsstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -2229,3 +2269,31 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr //go:cgo_import_dynamic libc_utimensat utimensat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pledge(promises *byte, execpromises *byte) (err error) { + _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pledge_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pledge pledge "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unveil(path *byte, flags *byte) (err error) { + _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unveil_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unveil unveil "libc.so" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s index 477a7d5b..da115f9a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s @@ -178,6 +178,11 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fcntl(SB) +GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fcntl_trampoline_addr(SB)/8, 
$libc_fcntl_trampoline<>(SB) + TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ppoll(SB) GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8 @@ -668,7 +673,22 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) +TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getfsstat(SB) +GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB) + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) + +TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pledge(SB) +GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pledge_trampoline_addr(SB)/8, $libc_pledge_trampoline<>(SB) + +TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unveil(SB) +GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unveil_trampoline_addr(SB)/8, $libc_unveil_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index b4018946..829b87fe 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build solaris && amd64 -// +build solaris,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go index 1d8fe1d4..7ccf66b7 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go @@ -1,23 +1,105 @@ -// go run mksyscall.go -tags zos,s390x syscall_zos_s390x.go +// go run mksyscall_zos_s390x.go -o_sysnum zsysnum_zos_s390x.go -o_syscall zsyscall_zos_s390x.go -i_syscall syscall_zos_s390x.go -o_asm zsymaddr_zos_s390x.s // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build zos && s390x -// +build zos,s390x package unix import ( + "runtime" + "syscall" "unsafe" ) +var _ syscall.Errno + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := syscall_syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCNTL<<4, uintptr(fd), uintptr(cmd), uintptr(arg)) + runtime.ExitSyscall() val = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Flistxattr(fd int, dest []byte) (sz int, err error) { + var _p0 unsafe.Pointer + if len(dest) > 0 { + _p0 = unsafe.Pointer(&dest[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FLISTXATTR_A<<4, uintptr(fd), uintptr(_p0), uintptr(len(dest))) + runtime.ExitSyscall() + sz = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FlistxattrAddr() *(func(fd int, dest []byte) (sz int, err error)) + +var Flistxattr = enter_Flistxattr + +func enter_Flistxattr(fd int, dest []byte) (sz int, err error) { + funcref := get_FlistxattrAddr() + if funcptrtest(GetZosLibVec()+SYS___FLISTXATTR_A<<4, "") == 0 { + *funcref = impl_Flistxattr + } else { + *funcref = error_Flistxattr + } + return (*funcref)(fd, dest) +} + +func error_Flistxattr(fd int, dest []byte) (sz int, err error) { + sz = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Fremovexattr(fd int, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FREMOVEXATTR_A<<4, uintptr(fd), uintptr(unsafe.Pointer(_p0))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FremovexattrAddr() *(func(fd int, attr string) (err error)) + +var Fremovexattr = enter_Fremovexattr + +func enter_Fremovexattr(fd int, attr string) (err error) { + funcref := get_FremovexattrAddr() + if funcptrtest(GetZosLibVec()+SYS___FREMOVEXATTR_A<<4, "") == 0 { + *funcref = impl_Fremovexattr + } else { + *funcref = error_Fremovexattr } + return (*funcref)(fd, attr) +} + +func error_Fremovexattr(fd int, attr string) (err error) { + err = ENOSYS return } @@ -30,10 +112,12 @@ func read(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_READ<<4, uintptr(fd), uintptr(_p0), uintptr(len(p))) + runtime.ExitSyscall() n = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -47,31 +131,159 @@ func write(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_WRITE<<4, uintptr(fd), uintptr(_p0), uintptr(len(p))) + runtime.ExitSyscall() n = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func 
impl_Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FGETXATTR_A<<4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + runtime.ExitSyscall() + sz = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FgetxattrAddr() *(func(fd int, attr string, dest []byte) (sz int, err error)) + +var Fgetxattr = enter_Fgetxattr + +func enter_Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { + funcref := get_FgetxattrAddr() + if funcptrtest(GetZosLibVec()+SYS___FGETXATTR_A<<4, "") == 0 { + *funcref = impl_Fgetxattr + } else { + *funcref = error_Fgetxattr + } + return (*funcref)(fd, attr, dest) +} + +func error_Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { + sz = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Fsetxattr(fd int, attr string, data []byte, flag int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(data) > 0 { + _p1 = unsafe.Pointer(&data[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FSETXATTR_A<<4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(data)), uintptr(flag)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FsetxattrAddr() *(func(fd int, attr string, data []byte, flag int) (err error)) + +var Fsetxattr = enter_Fsetxattr + +func enter_Fsetxattr(fd int, attr string, data []byte, flag int) (err error) { + funcref := get_FsetxattrAddr() + if funcptrtest(GetZosLibVec()+SYS___FSETXATTR_A<<4, "") == 0 { + *funcref = impl_Fsetxattr + } else { + *funcref = error_Fsetxattr } + return (*funcref)(fd, attr, data, flag) +} + +func error_Fsetxattr(fd int, attr string, data []byte, flag int) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := syscall_syscall(SYS___ACCEPT_A, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___ACCEPT_A<<4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___ACCEPT4_A<<4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags)) + runtime.ExitSyscall() fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_accept4Addr() *(func(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error)) + +var accept4 = enter_accept4 + +func enter_accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd 
int, err error) { + funcref := get_accept4Addr() + if funcptrtest(GetZosLibVec()+SYS___ACCEPT4_A<<4, "") == 0 { + *funcref = impl_accept4 + } else { + *funcref = error_accept4 } + return (*funcref)(s, rsa, addrlen, flags) +} + +func error_accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { + fd = -1 + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := syscall_syscall(SYS___BIND_A, uintptr(s), uintptr(addr), uintptr(addrlen)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___BIND_A<<4, uintptr(s), uintptr(addr), uintptr(addrlen)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -79,9 +291,11 @@ func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := syscall_syscall(SYS___CONNECT_A, uintptr(s), uintptr(addr), uintptr(addrlen)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___CONNECT_A<<4, uintptr(s), uintptr(addr), uintptr(addrlen)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -89,10 +303,10 @@ func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getgroups(n int, list *_Gid_t) (nn int, err error) { - r0, _, e1 := syscall_rawsyscall(SYS_GETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETGROUPS<<4, uintptr(n), uintptr(unsafe.Pointer(list))) nn = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -100,9 +314,9 @@ func getgroups(n int, list *_Gid_t) (nn int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setgroups(n int, list *_Gid_t) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_SETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETGROUPS<<4, uintptr(n), uintptr(unsafe.Pointer(list))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -110,9 +324,11 @@ func setgroups(n int, list *_Gid_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { - _, _, e1 := syscall_syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETSOCKOPT<<4, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -120,9 +336,11 @@ func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { - _, _, e1 := syscall_syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) - if e1 != 0 { - err = errnoErr(e1) + 
runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETSOCKOPT<<4, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -130,10 +348,10 @@ func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socket(domain int, typ int, proto int) (fd int, err error) { - r0, _, e1 := syscall_rawsyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SOCKET<<4, uintptr(domain), uintptr(typ), uintptr(proto)) fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -141,9 +359,9 @@ func socket(domain int, typ int, proto int) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { - _, _, e1 := syscall_rawsyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SOCKETPAIR<<4, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -151,9 +369,9 @@ func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := syscall_rawsyscall(SYS___GETPEERNAME_A, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___GETPEERNAME_A<<4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -161,10 +379,52 @@ func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := syscall_rawsyscall(SYS___GETSOCKNAME_A, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___GETSOCKNAME_A<<4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Removexattr(path string, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___REMOVEXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_RemovexattrAddr() *(func(path string, attr string) (err error)) + +var Removexattr = enter_Removexattr + +func enter_Removexattr(path string, attr string) (err error) { + funcref := get_RemovexattrAddr() + if funcptrtest(GetZosLibVec()+SYS___REMOVEXATTR_A<<4, "") == 0 { + *funcref = impl_Removexattr + } else { + *funcref = error_Removexattr 
} + return (*funcref)(path, attr) +} + +func error_Removexattr(path string, attr string) (err error) { + err = ENOSYS return } @@ -177,10 +437,12 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall6(SYS___RECVFROM_A, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___RECVFROM_A<<4, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + runtime.ExitSyscall() n = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -194,9 +456,11 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall6(SYS___SENDTO_A, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___SENDTO_A<<4, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -204,10 +468,12 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := syscall_syscall(SYS___RECVMSG_A, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___RECVMSG_A<<4, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + runtime.ExitSyscall() n = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -215,10 +481,12 @@ func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := syscall_syscall(SYS___SENDMSG_A, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___SENDMSG_A<<4, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + runtime.ExitSyscall() n = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -226,10 +494,12 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { - r0, _, e1 := syscall_syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_MMAP<<4, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) + runtime.ExitSyscall() ret = uintptr(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -237,9 +507,11 @@ func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) ( // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := 
syscall_syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_MUNMAP<<4, uintptr(addr), uintptr(length)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -247,9 +519,11 @@ func munmap(addr uintptr, length uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ioctl(fd int, req int, arg uintptr) (err error) { - _, _, e1 := syscall_syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_IOCTL<<4, uintptr(fd), uintptr(req), uintptr(arg)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -257,9 +531,62 @@ func ioctl(fd int, req int, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ioctlPtr(fd int, req int, arg unsafe.Pointer) (err error) { - _, _, e1 := syscall_syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_IOCTL<<4, uintptr(fd), uintptr(req), uintptr(arg)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func shmat(id int, addr uintptr, flag int) (ret uintptr, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SHMAT<<4, uintptr(id), uintptr(addr), uintptr(flag)) + runtime.ExitSyscall() + ret = uintptr(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func shmctl(id int, cmd int, buf *SysvShmDesc) (result int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SHMCTL64<<4, uintptr(id), uintptr(cmd), uintptr(unsafe.Pointer(buf))) + runtime.ExitSyscall() + result = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func shmdt(addr uintptr) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SHMDT<<4, uintptr(addr)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func shmget(key int, size int, flag int) (id int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SHMGET<<4, uintptr(key), uintptr(size), uintptr(flag)) + runtime.ExitSyscall() + id = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -272,9 +599,11 @@ func Access(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___ACCESS_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___ACCESS_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -287,9 +616,11 @@ func Chdir(path string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___CHDIR_A, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := 
CallLeFuncWithErr(GetZosLibVec()+SYS___CHDIR_A<<4, uintptr(unsafe.Pointer(_p0))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -302,9 +633,11 @@ func Chown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___CHOWN_A, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___CHOWN_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -317,9 +650,11 @@ func Chmod(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___CHMOD_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___CHMOD_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -332,10 +667,12 @@ func Creat(path string, mode uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := syscall_syscall(SYS___CREAT_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___CREAT_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + runtime.ExitSyscall() fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -343,10 +680,12 @@ func Creat(path string, mode uint32) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup(oldfd int) (fd int, err error) { - r0, _, e1 := syscall_syscall(SYS_DUP, uintptr(oldfd), 0, 0) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_DUP<<4, uintptr(oldfd)) + runtime.ExitSyscall() fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -354,617 +693,2216 @@ func Dup(oldfd int) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup2(oldfd int, newfd int) (err error) { - _, _, e1 := syscall_syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_DUP2<<4, uintptr(oldfd), uintptr(newfd)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Errno2() (er2 int) { - uer2, _, _ := syscall_syscall(SYS___ERRNO2, 0, 0, 0) - er2 = int(uer2) +func impl_Dup3(oldfd int, newfd int, flags int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_DUP3<<4, uintptr(oldfd), uintptr(newfd), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_Dup3Addr() *(func(oldfd int, newfd int, flags int) (err error)) -func Err2ad() (eadd *int) { - ueadd, _, _ := syscall_syscall(SYS___ERR2AD, 0, 0, 0) - eadd = (*int)(unsafe.Pointer(ueadd)) - return -} +var Dup3 = enter_Dup3 -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func enter_Dup3(oldfd int, newfd int, flags int) (err error) { + funcref := get_Dup3Addr() + if funcptrtest(GetZosLibVec()+SYS_DUP3<<4, "") == 0 { + *funcref = impl_Dup3 + } else { + *funcref 
= error_Dup3 + } + return (*funcref)(oldfd, newfd, flags) +} -func Exit(code int) { - syscall_syscall(SYS_EXIT, uintptr(code), 0, 0) +func error_Dup3(oldfd int, newfd int, flags int) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fchdir(fd int) (err error) { - _, _, e1 := syscall_syscall(SYS_FCHDIR, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) +func impl_Dirfd(dirp uintptr) (fd int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_DIRFD<<4, uintptr(dirp)) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_DirfdAddr() *(func(dirp uintptr) (fd int, err error)) -func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := syscall_syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) +var Dirfd = enter_Dirfd + +func enter_Dirfd(dirp uintptr) (fd int, err error) { + funcref := get_DirfdAddr() + if funcptrtest(GetZosLibVec()+SYS_DIRFD<<4, "") == 0 { + *funcref = impl_Dirfd + } else { + *funcref = error_Dirfd } + return (*funcref)(dirp) +} + +func error_Dirfd(dirp uintptr) (fd int, err error) { + fd = -1 + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fchown(fd int, uid int, gid int) (err error) { - _, _, e1 := syscall_syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) - if e1 != 0 { - err = errnoErr(e1) +func impl_EpollCreate(size int) (fd int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_EPOLL_CREATE<<4, uintptr(size)) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_EpollCreateAddr() *(func(size int) (fd int, err error)) -func FcntlInt(fd uintptr, cmd int, arg int) (retval int, err error) { - r0, _, e1 := syscall_syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - retval = int(r0) - if e1 != 0 { - err = errnoErr(e1) +var EpollCreate = enter_EpollCreate + +func enter_EpollCreate(size int) (fd int, err error) { + funcref := get_EpollCreateAddr() + if funcptrtest(GetZosLibVec()+SYS_EPOLL_CREATE<<4, "") == 0 { + *funcref = impl_EpollCreate + } else { + *funcref = error_EpollCreate } + return (*funcref)(size) +} + +func error_EpollCreate(size int) (fd int, err error) { + fd = -1 + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstat(fd int, stat *Stat_LE_t) (err error) { - _, _, e1 := syscall_syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) +func impl_EpollCreate1(flags int) (fd int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_EPOLL_CREATE1<<4, uintptr(flags)) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_EpollCreate1Addr() *(func(flags int) (fd int, err error)) -func Fstatvfs(fd int, stat *Statvfs_t) (err error) { - _, _, e1 := syscall_syscall(SYS_FSTATVFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) +var EpollCreate1 = enter_EpollCreate1 + +func enter_EpollCreate1(flags int) (fd int, err error) { + funcref := get_EpollCreate1Addr() + if 
funcptrtest(GetZosLibVec()+SYS_EPOLL_CREATE1<<4, "") == 0 { + *funcref = impl_EpollCreate1 + } else { + *funcref = error_EpollCreate1 } + return (*funcref)(flags) +} + +func error_EpollCreate1(flags int) (fd int, err error) { + fd = -1 + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fsync(fd int) (err error) { - _, _, e1 := syscall_syscall(SYS_FSYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) +func impl_EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_EPOLL_CTL<<4, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_EpollCtlAddr() *(func(epfd int, op int, fd int, event *EpollEvent) (err error)) -func Ftruncate(fd int, length int64) (err error) { - _, _, e1 := syscall_syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0) - if e1 != 0 { - err = errnoErr(e1) +var EpollCtl = enter_EpollCtl + +func enter_EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { + funcref := get_EpollCtlAddr() + if funcptrtest(GetZosLibVec()+SYS_EPOLL_CTL<<4, "") == 0 { + *funcref = impl_EpollCtl + } else { + *funcref = error_EpollCtl } - return + return (*funcref)(epfd, op, fd, event) } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpagesize() (pgsize int) { - r0, _, _ := syscall_syscall(SYS_GETPAGESIZE, 0, 0, 0) - pgsize = int(r0) +func error_EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mprotect(b []byte, prot int) (err error) { +func impl_EpollPwait(epfd int, events []EpollEvent, msec int, sigmask *int) (n int, err error) { var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_EPOLL_PWAIT<<4, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), uintptr(unsafe.Pointer(sigmask))) + runtime.ExitSyscall() + n = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_EpollPwaitAddr() *(func(epfd int, events []EpollEvent, msec int, sigmask *int) (n int, err error)) -func Msync(b []byte, flags int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) +var EpollPwait = enter_EpollPwait + +func enter_EpollPwait(epfd int, events []EpollEvent, msec int, sigmask *int) (n int, err error) { + funcref := get_EpollPwaitAddr() + if funcptrtest(GetZosLibVec()+SYS_EPOLL_PWAIT<<4, "") == 0 { + *funcref = impl_EpollPwait } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := syscall_syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) + *funcref = error_EpollPwait } + return (*funcref)(epfd, events, msec, sigmask) +} + +func error_EpollPwait(epfd int, events []EpollEvent, msec int, sigmask *int) (n int, err error) { + n = -1 + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func 
Poll(fds []PollFd, timeout int) (n int, err error) { +func impl_EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { var _p0 unsafe.Pointer - if len(fds) > 0 { - _p0 = unsafe.Pointer(&fds[0]) + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall(SYS_POLL, uintptr(_p0), uintptr(len(fds)), uintptr(timeout)) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_EPOLL_WAIT<<4, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec)) + runtime.ExitSyscall() n = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_EpollWaitAddr() *(func(epfd int, events []EpollEvent, msec int) (n int, err error)) -func Times(tms *Tms) (ticks uintptr, err error) { - r0, _, e1 := syscall_syscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0) - ticks = uintptr(r0) - if e1 != 0 { - err = errnoErr(e1) +var EpollWait = enter_EpollWait + +func enter_EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + funcref := get_EpollWaitAddr() + if funcptrtest(GetZosLibVec()+SYS_EPOLL_WAIT<<4, "") == 0 { + *funcref = impl_EpollWait + } else { + *funcref = error_EpollWait } + return (*funcref)(epfd, events, msec) +} + +func error_EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + n = -1 + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func W_Getmntent(buff *byte, size int) (lastsys int, err error) { - r0, _, e1 := syscall_syscall(SYS_W_GETMNTENT, uintptr(unsafe.Pointer(buff)), uintptr(size), 0) - lastsys = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } +func Errno2() (er2 int) { + runtime.EnterSyscall() + r0, _, _ := CallLeFuncWithErr(GetZosLibVec() + SYS___ERRNO2<<4) + runtime.ExitSyscall() + er2 = int(r0) return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func W_Getmntent_A(buff *byte, size int) (lastsys int, err error) { - r0, _, e1 := syscall_syscall(SYS___W_GETMNTENT_A, uintptr(unsafe.Pointer(buff)), uintptr(size), 0) - lastsys = int(r0) - if e1 != 0 { - err = errnoErr(e1) +func impl_Eventfd(initval uint, flags int) (fd int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_EVENTFD<<4, uintptr(initval), uintptr(flags)) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } +//go:nosplit +func get_EventfdAddr() *(func(initval uint, flags int) (fd int, err error)) + +var Eventfd = enter_Eventfd + +func enter_Eventfd(initval uint, flags int) (fd int, err error) { + funcref := get_EventfdAddr() + if funcptrtest(GetZosLibVec()+SYS_EVENTFD<<4, "") == 0 { + *funcref = impl_Eventfd + } else { + *funcref = error_Eventfd + } + return (*funcref)(initval, flags) +} + +func error_Eventfd(initval uint, flags int) (fd int, err error) { + fd = -1 + err = ENOSYS + return +} + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func mount_LE(path string, filesystem string, fstype string, mtm uint32, parmlen int32, parm string) (err error) { +func Exit(code int) { + runtime.EnterSyscall() + CallLeFuncWithErr(GetZosLibVec()+SYS_EXIT<<4, uintptr(code)) + runtime.ExitSyscall() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { var _p0 *byte _p0, err = 
BytePtrFromString(path) if err != nil { return } - var _p1 *byte - _p1, err = BytePtrFromString(filesystem) - if err != nil { - return - } - var _p2 *byte - _p2, err = BytePtrFromString(fstype) - if err != nil { - return - } - var _p3 *byte - _p3, err = BytePtrFromString(parm) - if err != nil { - return - } - _, _, e1 := syscall_syscall6(SYS___MOUNT_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(mtm), uintptr(parmlen), uintptr(unsafe.Pointer(_p3))) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FACCESSAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_FaccessatAddr() *(func(dirfd int, path string, mode uint32, flags int) (err error)) -func unmount(filesystem string, mtm int) (err error) { +var Faccessat = enter_Faccessat + +func enter_Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + funcref := get_FaccessatAddr() + if funcptrtest(GetZosLibVec()+SYS___FACCESSAT_A<<4, "") == 0 { + *funcref = impl_Faccessat + } else { + *funcref = error_Faccessat + } + return (*funcref)(dirfd, path, mode, flags) +} + +func error_Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCHDIR<<4, uintptr(fd)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCHMOD<<4, uintptr(fd), uintptr(mode)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { var _p0 *byte - _p0, err = BytePtrFromString(filesystem) + _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := syscall_syscall(SYS___UMOUNT_A, uintptr(unsafe.Pointer(_p0)), uintptr(mtm), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FCHMODAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FchmodatAddr() *(func(dirfd int, path string, mode uint32, flags int) (err error)) + +var Fchmodat = enter_Fchmodat + +func enter_Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + funcref := get_FchmodatAddr() + if funcptrtest(GetZosLibVec()+SYS___FCHMODAT_A<<4, "") == 0 { + *funcref = impl_Fchmodat + } else { + *funcref = error_Fchmodat + } + return (*funcref)(dirfd, path, mode, flags) +} + +func error_Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCHOWN<<4, uintptr(fd), uintptr(uid), 
uintptr(gid)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Chroot(path string) (err error) { +func impl_Fchownat(fd int, path string, uid int, gid int, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := syscall_syscall(SYS___CHROOT_A, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FCHOWNAT_A<<4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FchownatAddr() *(func(fd int, path string, uid int, gid int, flags int) (err error)) + +var Fchownat = enter_Fchownat + +func enter_Fchownat(fd int, path string, uid int, gid int, flags int) (err error) { + funcref := get_FchownatAddr() + if funcptrtest(GetZosLibVec()+SYS___FCHOWNAT_A<<4, "") == 0 { + *funcref = impl_Fchownat + } else { + *funcref = error_Fchownat } + return (*funcref)(fd, path, uid, gid, flags) +} + +func error_Fchownat(fd int, path string, uid int, gid int, flags int) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Uname(buf *Utsname) (err error) { - _, _, e1 := syscall_rawsyscall(SYS___UNAME_A, uintptr(unsafe.Pointer(buf)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) +func FcntlInt(fd uintptr, cmd int, arg int) (retval int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCNTL<<4, uintptr(fd), uintptr(cmd), uintptr(arg)) + runtime.ExitSyscall() + retval = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Gethostname(buf []byte) (err error) { +func impl_Fdatasync(fd int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FDATASYNC<<4, uintptr(fd)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FdatasyncAddr() *(func(fd int) (err error)) + +var Fdatasync = enter_Fdatasync + +func enter_Fdatasync(fd int) (err error) { + funcref := get_FdatasyncAddr() + if funcptrtest(GetZosLibVec()+SYS_FDATASYNC<<4, "") == 0 { + *funcref = impl_Fdatasync + } else { + *funcref = error_Fdatasync + } + return (*funcref)(fd) +} + +func error_Fdatasync(fd int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fstat(fd int, stat *Stat_LE_t) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FSTAT<<4, uintptr(fd), uintptr(unsafe.Pointer(stat))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_fstatat(dirfd int, path string, stat *Stat_LE_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FSTATAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_fstatatAddr() *(func(dirfd int, path string, stat *Stat_LE_t, flags int) 
(err error)) + +var fstatat = enter_fstatat + +func enter_fstatat(dirfd int, path string, stat *Stat_LE_t, flags int) (err error) { + funcref := get_fstatatAddr() + if funcptrtest(GetZosLibVec()+SYS___FSTATAT_A<<4, "") == 0 { + *funcref = impl_fstatat + } else { + *funcref = error_fstatat + } + return (*funcref)(dirfd, path, stat, flags) +} + +func error_fstatat(dirfd int, path string, stat *Stat_LE_t, flags int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Lgetxattr(link string, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LGETXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest))) + runtime.ExitSyscall() + sz = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_LgetxattrAddr() *(func(link string, attr string, dest []byte) (sz int, err error)) + +var Lgetxattr = enter_Lgetxattr + +func enter_Lgetxattr(link string, attr string, dest []byte) (sz int, err error) { + funcref := get_LgetxattrAddr() + if funcptrtest(GetZosLibVec()+SYS___LGETXATTR_A<<4, "") == 0 { + *funcref = impl_Lgetxattr + } else { + *funcref = error_Lgetxattr + } + return (*funcref)(link, attr, dest) +} + +func error_Lgetxattr(link string, attr string, dest []byte) (sz int, err error) { + sz = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Lsetxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LSETXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_LsetxattrAddr() *(func(path string, attr string, data []byte, flags int) (err error)) + +var Lsetxattr = enter_Lsetxattr + +func enter_Lsetxattr(path string, attr string, data []byte, flags int) (err error) { + funcref := get_LsetxattrAddr() + if funcptrtest(GetZosLibVec()+SYS___LSETXATTR_A<<4, "") == 0 { + *funcref = impl_Lsetxattr + } else { + *funcref = error_Lsetxattr + } + return (*funcref)(path, attr, data, flags) +} + +func error_Lsetxattr(path string, attr string, data []byte, flags int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Fstatfs(fd int, buf *Statfs_t) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FSTATFS<<4, uintptr(fd), uintptr(unsafe.Pointer(buf))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FstatfsAddr() *(func(fd int, buf *Statfs_t) (err error)) + +var Fstatfs = enter_Fstatfs + +func 
enter_Fstatfs(fd int, buf *Statfs_t) (err error) { + funcref := get_FstatfsAddr() + if funcptrtest(GetZosLibVec()+SYS_FSTATFS<<4, "") == 0 { + *funcref = impl_Fstatfs + } else { + *funcref = error_Fstatfs + } + return (*funcref)(fd, buf) +} + +func error_Fstatfs(fd int, buf *Statfs_t) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatvfs(fd int, stat *Statvfs_t) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FSTATVFS<<4, uintptr(fd), uintptr(unsafe.Pointer(stat))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FSYNC<<4, uintptr(fd)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Futimes(fd int, tv []Timeval) (err error) { var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) + if len(tv) > 0 { + _p0 = unsafe.Pointer(&tv[0]) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall(SYS___GETHOSTNAME_A, uintptr(_p0), uintptr(len(buf)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FUTIMES<<4, uintptr(fd), uintptr(_p0), uintptr(len(tv))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_FutimesAddr() *(func(fd int, tv []Timeval) (err error)) -func Getegid() (egid int) { - r0, _, _ := syscall_rawsyscall(SYS_GETEGID, 0, 0, 0) - egid = int(r0) +var Futimes = enter_Futimes + +func enter_Futimes(fd int, tv []Timeval) (err error) { + funcref := get_FutimesAddr() + if funcptrtest(GetZosLibVec()+SYS_FUTIMES<<4, "") == 0 { + *funcref = impl_Futimes + } else { + *funcref = error_Futimes + } + return (*funcref)(fd, tv) +} + +func error_Futimes(fd int, tv []Timeval) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Geteuid() (uid int) { - r0, _, _ := syscall_rawsyscall(SYS_GETEUID, 0, 0, 0) - uid = int(r0) +func impl_Futimesat(dirfd int, path string, tv []Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(tv) > 0 { + _p1 = unsafe.Pointer(&tv[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FUTIMESAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(tv))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FutimesatAddr() *(func(dirfd int, path string, tv []Timeval) (err error)) + +var Futimesat = enter_Futimesat + +func enter_Futimesat(dirfd int, path string, tv []Timeval) (err error) { + funcref := get_FutimesatAddr() + if funcptrtest(GetZosLibVec()+SYS___FUTIMESAT_A<<4, "") == 0 { + *funcref = impl_Futimesat + } else { + *funcref = error_Futimesat + } + return (*funcref)(dirfd, path, tv) +} + +func error_Futimesat(dirfd int, path string, tv []Timeval) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getgid() (gid int) { - r0, _, _ := 
syscall_rawsyscall(SYS_GETGID, 0, 0, 0) - gid = int(r0) +func Ftruncate(fd int, length int64) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FTRUNCATE<<4, uintptr(fd), uintptr(length)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getpid() (pid int) { - r0, _, _ := syscall_rawsyscall(SYS_GETPID, 0, 0, 0) - pid = int(r0) +func impl_Getrandom(buf []byte, flags int) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETRANDOM<<4, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) + runtime.ExitSyscall() + n = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_GetrandomAddr() *(func(buf []byte, flags int) (n int, err error)) + +var Getrandom = enter_Getrandom + +func enter_Getrandom(buf []byte, flags int) (n int, err error) { + funcref := get_GetrandomAddr() + if funcptrtest(GetZosLibVec()+SYS_GETRANDOM<<4, "") == 0 { + *funcref = impl_Getrandom + } else { + *funcref = error_Getrandom + } + return (*funcref)(buf, flags) +} + +func error_Getrandom(buf []byte, flags int) (n int, err error) { + n = -1 + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := syscall_rawsyscall(SYS_GETPGID, uintptr(pid), 0, 0) - pgid = int(r0) - if e1 != 0 { - err = errnoErr(e1) +func impl_InotifyInit() (fd int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec() + SYS_INOTIFY_INIT<<4) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_InotifyInitAddr() *(func() (fd int, err error)) + +var InotifyInit = enter_InotifyInit + +func enter_InotifyInit() (fd int, err error) { + funcref := get_InotifyInitAddr() + if funcptrtest(GetZosLibVec()+SYS_INOTIFY_INIT<<4, "") == 0 { + *funcref = impl_InotifyInit + } else { + *funcref = error_InotifyInit } + return (*funcref)() +} + +func error_InotifyInit() (fd int, err error) { + fd = -1 + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getppid() (pid int) { - r0, _, _ := syscall_rawsyscall(SYS_GETPPID, 0, 0, 0) - pid = int(r0) +func impl_InotifyInit1(flags int) (fd int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_INOTIFY_INIT1<<4, uintptr(flags)) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_InotifyInit1Addr() *(func(flags int) (fd int, err error)) + +var InotifyInit1 = enter_InotifyInit1 + +func enter_InotifyInit1(flags int) (fd int, err error) { + funcref := get_InotifyInit1Addr() + if funcptrtest(GetZosLibVec()+SYS_INOTIFY_INIT1<<4, "") == 0 { + *funcref = impl_InotifyInit1 + } else { + *funcref = error_InotifyInit1 + } + return (*funcref)(flags) +} + +func error_InotifyInit1(flags int) (fd int, err error) { + fd = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathname) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := 
CallLeFuncWithErr(GetZosLibVec()+SYS___INOTIFY_ADD_WATCH_A<<4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask)) + runtime.ExitSyscall() + watchdesc = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_InotifyAddWatchAddr() *(func(fd int, pathname string, mask uint32) (watchdesc int, err error)) + +var InotifyAddWatch = enter_InotifyAddWatch + +func enter_InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { + funcref := get_InotifyAddWatchAddr() + if funcptrtest(GetZosLibVec()+SYS___INOTIFY_ADD_WATCH_A<<4, "") == 0 { + *funcref = impl_InotifyAddWatch + } else { + *funcref = error_InotifyAddWatch + } + return (*funcref)(fd, pathname, mask) +} + +func error_InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { + watchdesc = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_INOTIFY_RM_WATCH<<4, uintptr(fd), uintptr(watchdesc)) + runtime.ExitSyscall() + success = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_InotifyRmWatchAddr() *(func(fd int, watchdesc uint32) (success int, err error)) + +var InotifyRmWatch = enter_InotifyRmWatch + +func enter_InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { + funcref := get_InotifyRmWatchAddr() + if funcptrtest(GetZosLibVec()+SYS_INOTIFY_RM_WATCH<<4, "") == 0 { + *funcref = impl_InotifyRmWatch + } else { + *funcref = error_InotifyRmWatch + } + return (*funcref)(fd, watchdesc) +} + +func error_InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { + success = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Listxattr(path string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LISTXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + runtime.ExitSyscall() + sz = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_ListxattrAddr() *(func(path string, dest []byte) (sz int, err error)) + +var Listxattr = enter_Listxattr + +func enter_Listxattr(path string, dest []byte) (sz int, err error) { + funcref := get_ListxattrAddr() + if funcptrtest(GetZosLibVec()+SYS___LISTXATTR_A<<4, "") == 0 { + *funcref = impl_Listxattr + } else { + *funcref = error_Listxattr + } + return (*funcref)(path, dest) +} + +func error_Listxattr(path string, dest []byte) (sz int, err error) { + sz = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Llistxattr(path string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LLISTXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + runtime.ExitSyscall() + sz = int(r0) + if int64(r0) == -1 { + 
err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_LlistxattrAddr() *(func(path string, dest []byte) (sz int, err error)) + +var Llistxattr = enter_Llistxattr + +func enter_Llistxattr(path string, dest []byte) (sz int, err error) { + funcref := get_LlistxattrAddr() + if funcptrtest(GetZosLibVec()+SYS___LLISTXATTR_A<<4, "") == 0 { + *funcref = impl_Llistxattr + } else { + *funcref = error_Llistxattr + } + return (*funcref)(path, dest) +} + +func error_Llistxattr(path string, dest []byte) (sz int, err error) { + sz = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Lremovexattr(path string, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LREMOVEXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_LremovexattrAddr() *(func(path string, attr string) (err error)) + +var Lremovexattr = enter_Lremovexattr + +func enter_Lremovexattr(path string, attr string) (err error) { + funcref := get_LremovexattrAddr() + if funcptrtest(GetZosLibVec()+SYS___LREMOVEXATTR_A<<4, "") == 0 { + *funcref = impl_Lremovexattr + } else { + *funcref = error_Lremovexattr + } + return (*funcref)(path, attr) +} + +func error_Lremovexattr(path string, attr string) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Lutimes(path string, tv []Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(tv) > 0 { + _p1 = unsafe.Pointer(&tv[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LUTIMES_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(tv))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_LutimesAddr() *(func(path string, tv []Timeval) (err error)) + +var Lutimes = enter_Lutimes + +func enter_Lutimes(path string, tv []Timeval) (err error) { + funcref := get_LutimesAddr() + if funcptrtest(GetZosLibVec()+SYS___LUTIMES_A<<4, "") == 0 { + *funcref = impl_Lutimes + } else { + *funcref = error_Lutimes + } + return (*funcref)(path, tv) +} + +func error_Lutimes(path string, tv []Timeval) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_MPROTECT<<4, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_MSYNC<<4, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { 
+ err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Console2(cmsg *ConsMsg2, modstr *byte, concmd *uint32) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___CONSOLE2<<4, uintptr(unsafe.Pointer(cmsg)), uintptr(unsafe.Pointer(modstr)), uintptr(unsafe.Pointer(concmd))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Poll(fds []PollFd, timeout int) (n int, err error) { + var _p0 unsafe.Pointer + if len(fds) > 0 { + _p0 = unsafe.Pointer(&fds[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_POLL<<4, uintptr(_p0), uintptr(len(fds)), uintptr(timeout)) + runtime.ExitSyscall() + n = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readdir_r(dirp uintptr, entry *direntLE, result **direntLE) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___READDIR_R_A<<4, uintptr(dirp), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Statfs(path string, buf *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___STATFS_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_StatfsAddr() *(func(path string, buf *Statfs_t) (err error)) + +var Statfs = enter_Statfs + +func enter_Statfs(path string, buf *Statfs_t) (err error) { + funcref := get_StatfsAddr() + if funcptrtest(GetZosLibVec()+SYS___STATFS_A<<4, "") == 0 { + *funcref = impl_Statfs + } else { + *funcref = error_Statfs + } + return (*funcref)(path, buf) +} + +func error_Statfs(path string, buf *Statfs_t) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Syncfs(fd int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SYNCFS<<4, uintptr(fd)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_SyncfsAddr() *(func(fd int) (err error)) + +var Syncfs = enter_Syncfs + +func enter_Syncfs(fd int) (err error) { + funcref := get_SyncfsAddr() + if funcptrtest(GetZosLibVec()+SYS_SYNCFS<<4, "") == 0 { + *funcref = impl_Syncfs + } else { + *funcref = error_Syncfs + } + return (*funcref)(fd) +} + +func error_Syncfs(fd int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Times(tms *Tms) (ticks uintptr, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_TIMES<<4, uintptr(unsafe.Pointer(tms))) + runtime.ExitSyscall() + ticks = uintptr(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func W_Getmntent(buff *byte, size int) (lastsys int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := 
CallLeFuncWithErr(GetZosLibVec()+SYS_W_GETMNTENT<<4, uintptr(unsafe.Pointer(buff)), uintptr(size)) + runtime.ExitSyscall() + lastsys = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func W_Getmntent_A(buff *byte, size int) (lastsys int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___W_GETMNTENT_A<<4, uintptr(unsafe.Pointer(buff)), uintptr(size)) + runtime.ExitSyscall() + lastsys = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mount_LE(path string, filesystem string, fstype string, mtm uint32, parmlen int32, parm string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(filesystem) + if err != nil { + return + } + var _p2 *byte + _p2, err = BytePtrFromString(fstype) + if err != nil { + return + } + var _p3 *byte + _p3, err = BytePtrFromString(parm) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MOUNT_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(mtm), uintptr(parmlen), uintptr(unsafe.Pointer(_p3))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unmount_LE(filesystem string, mtm int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(filesystem) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___UMOUNT_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(mtm)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___CHROOT_A<<4, uintptr(unsafe.Pointer(_p0))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(nmsgsfds int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (ret int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SELECT<<4, uintptr(nmsgsfds), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout))) + runtime.ExitSyscall() + ret = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Uname(buf *Utsname) (err error) { + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_____OSNAME_A<<4, uintptr(unsafe.Pointer(buf))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Unshare(flags int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_UNSHARE<<4, uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_UnshareAddr() *(func(flags int) (err error)) + +var Unshare = enter_Unshare + +func enter_Unshare(flags int) (err error) { + 
funcref := get_UnshareAddr() + if funcptrtest(GetZosLibVec()+SYS_UNSHARE<<4, "") == 0 { + *funcref = impl_Unshare + } else { + *funcref = error_Unshare + } + return (*funcref)(flags) +} + +func error_Unshare(flags int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gethostname(buf []byte) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___GETHOSTNAME_A<<4, uintptr(_p0), uintptr(len(buf))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _, _ := CallLeFuncWithErr(GetZosLibVec() + SYS_GETGID<<4) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _, _ := CallLeFuncWithErr(GetZosLibVec() + SYS_GETPID<<4) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETPGID<<4, uintptr(pid)) + pgid = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (pid int) { + r0, _, _ := CallLeFuncWithErr(GetZosLibVec() + SYS_GETPPID<<4) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETPRIORITY<<4, uintptr(which), uintptr(who)) + runtime.ExitSyscall() + prio = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(resource int, rlim *Rlimit) (err error) { + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETRLIMIT<<4, uintptr(resource), uintptr(unsafe.Pointer(rlim))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getrusage(who int, rusage *rusage_zos) (err error) { + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETRUSAGE<<4, uintptr(who), uintptr(unsafe.Pointer(rusage))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + runtime.EnterSyscall() + r0, _, _ := CallLeFuncWithErr(GetZosLibVec() + SYS_GETEGID<<4) + runtime.ExitSyscall() + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (euid int) { + runtime.EnterSyscall() + r0, _, _ := CallLeFuncWithErr(GetZosLibVec() + SYS_GETEUID<<4) + runtime.ExitSyscall() + euid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETSID<<4, uintptr(pid)) + sid = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _, _ := CallLeFuncWithErr(GetZosLibVec() + SYS_GETUID<<4) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT 
EDIT + +func Kill(pid int, sig Signal) (err error) { + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_KILL<<4, uintptr(pid), uintptr(sig)) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LCHOWN_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Link(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LINK_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Linkat(oldDirFd int, oldPath string, newDirFd int, newPath string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldPath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newPath) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LINKAT_A<<4, uintptr(oldDirFd), uintptr(unsafe.Pointer(_p0)), uintptr(newDirFd), uintptr(unsafe.Pointer(_p1)), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_LinkatAddr() *(func(oldDirFd int, oldPath string, newDirFd int, newPath string, flags int) (err error)) + +var Linkat = enter_Linkat + +func enter_Linkat(oldDirFd int, oldPath string, newDirFd int, newPath string, flags int) (err error) { + funcref := get_LinkatAddr() + if funcptrtest(GetZosLibVec()+SYS___LINKAT_A<<4, "") == 0 { + *funcref = impl_Linkat + } else { + *funcref = error_Linkat + } + return (*funcref)(oldDirFd, oldPath, newDirFd, newPath, flags) +} + +func error_Linkat(oldDirFd int, oldPath string, newDirFd int, newPath string, flags int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, n int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_LISTEN<<4, uintptr(s), uintptr(n)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func lstat(path string, stat *Stat_LE_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LSTAT_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdir(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MKDIR_A<<4, uintptr(unsafe.Pointer(_p0)), 
uintptr(mode)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MKDIRAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_MkdiratAddr() *(func(dirfd int, path string, mode uint32) (err error)) + +var Mkdirat = enter_Mkdirat + +func enter_Mkdirat(dirfd int, path string, mode uint32) (err error) { + funcref := get_MkdiratAddr() + if funcptrtest(GetZosLibVec()+SYS___MKDIRAT_A<<4, "") == 0 { + *funcref = impl_Mkdirat + } else { + *funcref = error_Mkdirat + } + return (*funcref)(dirfd, path, mode) +} + +func error_Mkdirat(dirfd int, path string, mode uint32) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkfifo(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MKFIFO_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknod(path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MKNOD_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MKNODAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_MknodatAddr() *(func(dirfd int, path string, mode uint32, dev int) (err error)) + +var Mknodat = enter_Mknodat + +func enter_Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + funcref := get_MknodatAddr() + if funcptrtest(GetZosLibVec()+SYS___MKNODAT_A<<4, "") == 0 { + *funcref = impl_Mknodat + } else { + *funcref = error_Mknodat + } + return (*funcref)(dirfd, path, mode, dev) +} + +func error_Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_PivotRoot(newroot string, oldroot string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(newroot) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(oldroot) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___PIVOT_ROOT_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + 
+//go:nosplit +func get_PivotRootAddr() *(func(newroot string, oldroot string) (err error)) + +var PivotRoot = enter_PivotRoot + +func enter_PivotRoot(newroot string, oldroot string) (err error) { + funcref := get_PivotRootAddr() + if funcptrtest(GetZosLibVec()+SYS___PIVOT_ROOT_A<<4, "") == 0 { + *funcref = impl_PivotRoot + } else { + *funcref = error_PivotRoot + } + return (*funcref)(newroot, oldroot) +} + +func error_PivotRoot(newroot string, oldroot string) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_PREAD<<4, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset)) + runtime.ExitSyscall() + n = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := syscall_syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) - prio = int(r0) - if e1 != 0 { - err = errnoErr(e1) +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_PWRITE<<4, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset)) + runtime.ExitSyscall() + n = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_GETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) +func impl_Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___PRCTL_A<<4, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_PrctlAddr() *(func(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error)) -func getrusage(who int, rusage *rusage_zos) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) - if e1 != 0 { - err = errnoErr(e1) +var Prctl = enter_Prctl + +func enter_Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { + funcref := get_PrctlAddr() + if funcptrtest(GetZosLibVec()+SYS___PRCTL_A<<4, "") == 0 { + *funcref = impl_Prctl + } else { + *funcref = error_Prctl } - return + return (*funcref)(option, arg2, arg3, arg4, arg5) } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getsid(pid int) (sid int, err error) { - r0, _, e1 := syscall_rawsyscall(SYS_GETSID, uintptr(pid), 0, 0) - sid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } +func error_Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getuid() (uid int) { - r0, _, _ := syscall_rawsyscall(SYS_GETUID, 0, 0, 0) - uid = 
int(r0) +func impl_Prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_PRLIMIT<<4, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_PrlimitAddr() *(func(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error)) -func Kill(pid int, sig Signal) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0) - if e1 != 0 { - err = errnoErr(e1) +var Prlimit = enter_Prlimit + +func enter_Prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { + funcref := get_PrlimitAddr() + if funcptrtest(GetZosLibVec()+SYS_PRLIMIT<<4, "") == 0 { + *funcref = impl_Prlimit + } else { + *funcref = error_Prlimit } + return (*funcref)(pid, resource, newlimit, old) +} + +func error_Prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Lchown(path string, uid int, gid int) (err error) { +func Rename(from string, to string) (err error) { var _p0 *byte - _p0, err = BytePtrFromString(path) + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) if err != nil { return } - _, _, e1 := syscall_syscall(SYS___LCHOWN_A, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___RENAME_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Link(path string, link string) (err error) { +func impl_Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { var _p0 *byte - _p0, err = BytePtrFromString(path) + _p0, err = BytePtrFromString(oldpath) if err != nil { return } var _p1 *byte - _p1, err = BytePtrFromString(link) + _p1, err = BytePtrFromString(newpath) if err != nil { return } - _, _, e1 := syscall_syscall(SYS___LINK_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___RENAMEAT_A<<4, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_RenameatAddr() *(func(olddirfd int, oldpath string, newdirfd int, newpath string) (err error)) -func Listen(s int, n int) (err error) { - _, _, e1 := syscall_syscall(SYS_LISTEN, uintptr(s), uintptr(n), 0) - if e1 != 0 { - err = errnoErr(e1) +var Renameat = enter_Renameat + +func enter_Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { + funcref := get_RenameatAddr() + if funcptrtest(GetZosLibVec()+SYS___RENAMEAT_A<<4, "") == 0 { + *funcref = impl_Renameat + } else { + *funcref = error_Renameat } + return (*funcref)(olddirfd, oldpath, newdirfd, newpath) +} + +func error_Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT 
THE TOP; DO NOT EDIT -func lstat(path string, stat *Stat_LE_t) (err error) { +func impl_Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { var _p0 *byte - _p0, err = BytePtrFromString(path) + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) if err != nil { return } - _, _, e1 := syscall_syscall(SYS___LSTAT_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___RENAMEAT2_A<<4, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_Renameat2Addr() *(func(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error)) -func Mkdir(path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(SYS___MKDIR_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) +var Renameat2 = enter_Renameat2 + +func enter_Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { + funcref := get_Renameat2Addr() + if funcptrtest(GetZosLibVec()+SYS___RENAMEAT2_A<<4, "") == 0 { + *funcref = impl_Renameat2 + } else { + *funcref = error_Renameat2 } + return (*funcref)(olddirfd, oldpath, newdirfd, newpath, flags) +} + +func error_Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mkfifo(path string, mode uint32) (err error) { +func Rmdir(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := syscall_syscall(SYS___MKFIFO_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___RMDIR_A<<4, uintptr(unsafe.Pointer(_p0))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mknod(path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(SYS___MKNOD_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) - if e1 != 0 { - err = errnoErr(e1) +func Seek(fd int, offset int64, whence int) (off int64, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_LSEEK<<4, uintptr(fd), uintptr(offset), uintptr(whence)) + runtime.ExitSyscall() + off = int64(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) +func Setegid(egid int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETEGID<<4, uintptr(egid)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } - r0, _, e1 := syscall_syscall6(SYS_PREAD, uintptr(fd), 
uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seteuid(euid int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETEUID<<4, uintptr(euid)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func impl_Sethostname(p []byte) (err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___SETHOSTNAME_A<<4, uintptr(_p0), uintptr(len(p))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_SethostnameAddr() *(func(p []byte) (err error)) -func Readlink(path string, buf []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(buf) > 0 { - _p1 = unsafe.Pointer(&buf[0]) +var Sethostname = enter_Sethostname + +func enter_Sethostname(p []byte) (err error) { + funcref := get_SethostnameAddr() + if funcptrtest(GetZosLibVec()+SYS___SETHOSTNAME_A<<4, "") == 0 { + *funcref = impl_Sethostname } else { - _p1 = unsafe.Pointer(&_zero) + *funcref = error_Sethostname } - r0, _, e1 := syscall_syscall(SYS___READLINK_A, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return + return (*funcref)(p) } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Rename(from string, to string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(from) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(to) - if err != nil { - return - } - _, _, e1 := syscall_syscall(SYS___RENAME_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } +func error_Sethostname(p []byte) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Rmdir(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(SYS___RMDIR_A, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) +func impl_Setns(fd int, nstype int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETNS<<4, uintptr(fd), uintptr(nstype)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_SetnsAddr() *(func(fd int, nstype int) (err error)) -func Seek(fd int, offset int64, whence int) (off int64, err error) { - r0, _, e1 := syscall_syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence)) - off = int64(r0) - if e1 != 0 { - err = errnoErr(e1) +var Setns = enter_Setns + +func enter_Setns(fd int, nstype int) (err error) { + funcref := get_SetnsAddr() + if funcptrtest(GetZosLibVec()+SYS_SETNS<<4, "") == 0 { + *funcref = impl_Setns + } else { + *funcref 
= error_Setns } + return (*funcref)(fd, nstype) +} + +func error_Setns(fd int, nstype int) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := syscall_syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETPRIORITY<<4, uintptr(which), uintptr(who), uintptr(prio)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -972,9 +2910,9 @@ func Setpriority(which int, who int, prio int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETPGID<<4, uintptr(pid), uintptr(pgid)) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -982,9 +2920,9 @@ func Setpgid(pid int, pgid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setrlimit(resource int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETRLIMIT<<4, uintptr(resource), uintptr(unsafe.Pointer(lim))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -992,9 +2930,9 @@ func Setrlimit(resource int, lim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setregid(rgid int, egid int) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETREGID<<4, uintptr(rgid), uintptr(egid)) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1002,9 +2940,9 @@ func Setregid(rgid int, egid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETREUID<<4, uintptr(ruid), uintptr(euid)) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1012,10 +2950,10 @@ func Setreuid(ruid int, euid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setsid() (pid int, err error) { - r0, _, e1 := syscall_rawsyscall(SYS_SETSID, 0, 0, 0) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec() + SYS_SETSID<<4) pid = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1023,9 +2961,11 @@ func Setsid() (pid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setuid(uid int) (err error) { - _, _, e1 := syscall_syscall(SYS_SETUID, uintptr(uid), 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETUID<<4, uintptr(uid)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1033,9 +2973,11 @@ func Setuid(uid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setgid(uid int) (err error) { - _, _, e1 := syscall_syscall(SYS_SETGID, uintptr(uid), 0, 0) 
- if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETGID<<4, uintptr(uid)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1043,9 +2985,11 @@ func Setgid(uid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Shutdown(fd int, how int) (err error) { - _, _, e1 := syscall_syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SHUTDOWN<<4, uintptr(fd), uintptr(how)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1058,9 +3002,11 @@ func stat(path string, statLE *Stat_LE_t) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___STAT_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(statLE)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___STAT_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(statLE))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1078,17 +3024,63 @@ func Symlink(path string, link string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___SYMLINK_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___SYMLINK_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Symlinkat(oldPath string, dirfd int, newPath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldPath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newPath) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___SYMLINKAT_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(dirfd), uintptr(unsafe.Pointer(_p1))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } +//go:nosplit +func get_SymlinkatAddr() *(func(oldPath string, dirfd int, newPath string) (err error)) + +var Symlinkat = enter_Symlinkat + +func enter_Symlinkat(oldPath string, dirfd int, newPath string) (err error) { + funcref := get_SymlinkatAddr() + if funcptrtest(GetZosLibVec()+SYS___SYMLINKAT_A<<4, "") == 0 { + *funcref = impl_Symlinkat + } else { + *funcref = error_Symlinkat + } + return (*funcref)(oldPath, dirfd, newPath) +} + +func error_Symlinkat(oldPath string, dirfd int, newPath string) (err error) { + err = ENOSYS + return +} + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Sync() { - syscall_syscall(SYS_SYNC, 0, 0, 0) + runtime.EnterSyscall() + CallLeFuncWithErr(GetZosLibVec() + SYS_SYNC<<4) + runtime.ExitSyscall() return } @@ -1100,9 +3092,11 @@ func Truncate(path string, length int64) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___TRUNCATE_A, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___TRUNCATE_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(length)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1110,9 +3104,11 @@ func Truncate(path string, length 
int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Tcgetattr(fildes int, termptr *Termios) (err error) { - _, _, e1 := syscall_syscall(SYS_TCGETATTR, uintptr(fildes), uintptr(unsafe.Pointer(termptr)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_TCGETATTR<<4, uintptr(fildes), uintptr(unsafe.Pointer(termptr))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1120,9 +3116,11 @@ func Tcgetattr(fildes int, termptr *Termios) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Tcsetattr(fildes int, when int, termptr *Termios) (err error) { - _, _, e1 := syscall_syscall(SYS_TCSETATTR, uintptr(fildes), uintptr(when), uintptr(unsafe.Pointer(termptr))) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_TCSETATTR<<4, uintptr(fildes), uintptr(when), uintptr(unsafe.Pointer(termptr))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1130,7 +3128,9 @@ func Tcsetattr(fildes int, when int, termptr *Termios) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Umask(mask int) (oldmask int) { - r0, _, _ := syscall_syscall(SYS_UMASK, uintptr(mask), 0, 0) + runtime.EnterSyscall() + r0, _, _ := CallLeFuncWithErr(GetZosLibVec()+SYS_UMASK<<4, uintptr(mask)) + runtime.ExitSyscall() oldmask = int(r0) return } @@ -1143,10 +3143,49 @@ func Unlink(path string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___UNLINK_A, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___UNLINK_A<<4, uintptr(unsafe.Pointer(_p0))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___UNLINKAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_UnlinkatAddr() *(func(dirfd int, path string, flags int) (err error)) + +var Unlinkat = enter_Unlinkat + +func enter_Unlinkat(dirfd int, path string, flags int) (err error) { + funcref := get_UnlinkatAddr() + if funcptrtest(GetZosLibVec()+SYS___UNLINKAT_A<<4, "") == 0 { + *funcref = impl_Unlinkat + } else { + *funcref = error_Unlinkat } + return (*funcref)(dirfd, path, flags) +} + +func error_Unlinkat(dirfd int, path string, flags int) (err error) { + err = ENOSYS return } @@ -1158,9 +3197,11 @@ func Utime(path string, utim *Utimbuf) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___UTIME_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(utim)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___UTIME_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(utim))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1173,11 +3214,91 @@ func open(path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := syscall_syscall(SYS___OPEN_A, 
uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___OPEN_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___OPENAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode)) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_openatAddr() *(func(dirfd int, path string, flags int, mode uint32) (fd int, err error)) + +var openat = enter_openat + +func enter_openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { + funcref := get_openatAddr() + if funcptrtest(GetZosLibVec()+SYS___OPENAT_A<<4, "") == 0 { + *funcref = impl_openat + } else { + *funcref = error_openat + } + return (*funcref)(dirfd, path, flags, mode) +} + +func error_openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { + fd = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_openat2(dirfd int, path string, open_how *OpenHow, size int) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___OPENAT2_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(open_how)), uintptr(size)) + runtime.ExitSyscall() fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_openat2Addr() *(func(dirfd int, path string, open_how *OpenHow, size int) (fd int, err error)) + +var openat2 = enter_openat2 + +func enter_openat2(dirfd int, path string, open_how *OpenHow, size int) (fd int, err error) { + funcref := get_openat2Addr() + if funcptrtest(GetZosLibVec()+SYS___OPENAT2_A<<4, "") == 0 { + *funcref = impl_openat2 + } else { + *funcref = error_openat2 } + return (*funcref)(dirfd, path, open_how, size) +} + +func error_openat2(dirfd int, path string, open_how *OpenHow, size int) (fd int, err error) { + fd = -1 + err = ENOSYS return } @@ -1189,9 +3310,23 @@ func remove(path string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS_REMOVE, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_REMOVE<<4, uintptr(unsafe.Pointer(_p0))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func waitid(idType int, id int, info *Siginfo, options int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_WAITID<<4, uintptr(idType), uintptr(id), uintptr(unsafe.Pointer(info)), uintptr(options)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1199,10 +3334,12 @@ func remove(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func waitpid(pid int, wstatus *_C_int, options int) 
(wpid int, err error) { - r0, _, e1 := syscall_syscall(SYS_WAITPID, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options)) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_WAITPID<<4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options)) + runtime.ExitSyscall() wpid = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1210,9 +3347,9 @@ func waitpid(pid int, wstatus *_C_int, options int) (wpid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func gettimeofday(tv *timeval_zos) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETTIMEOFDAY<<4, uintptr(unsafe.Pointer(tv))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1220,9 +3357,9 @@ func gettimeofday(tv *timeval_zos) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pipe(p *[2]_C_int) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_PIPE<<4, uintptr(unsafe.Pointer(p))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1235,20 +3372,87 @@ func utimes(path string, timeval *[2]Timeval) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___UTIMES_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___UTIMES_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(nmsgsfds int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (ret int, err error) { - r0, _, e1 := syscall_syscall6(SYS_SELECT, uintptr(nmsgsfds), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) +func impl_utimensat(dirfd int, path string, ts *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___UTIMENSAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(ts)), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_utimensatAddr() *(func(dirfd int, path string, ts *[2]Timespec, flags int) (err error)) + +var utimensat = enter_utimensat + +func enter_utimensat(dirfd int, path string, ts *[2]Timespec, flags int) (err error) { + funcref := get_utimensatAddr() + if funcptrtest(GetZosLibVec()+SYS___UTIMENSAT_A<<4, "") == 0 { + *funcref = impl_utimensat + } else { + *funcref = error_utimensat + } + return (*funcref)(dirfd, path, ts, flags) +} + +func error_utimensat(dirfd int, path string, ts *[2]Timespec, flags int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Posix_openpt(oflag int) (fd int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_POSIX_OPENPT<<4, uintptr(oflag)) + runtime.ExitSyscall() + fd = int(r0) + if 
int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Grantpt(fildes int) (rc int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GRANTPT<<4, uintptr(fildes)) + runtime.ExitSyscall() + rc = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlockpt(fildes int) (rc int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_UNLOCKPT<<4, uintptr(fildes)) + runtime.ExitSyscall() + rc = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go index 55e04847..3a58ae81 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. //go:build 386 && openbsd -// +build 386,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go index d2243cf8..dcb7a0eb 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. //go:build amd64 && openbsd -// +build amd64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go index 82dc51bd..db5a7bf1 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. //go:build arm && openbsd -// +build arm,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go index cbdda1a4..7be575a7 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. //go:build arm64 && openbsd -// +build arm64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go index f55eae1a..d6e3174c 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. //go:build mips64 && openbsd -// +build mips64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_ppc64.go index e4405447..ee97157d 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. //go:build ppc64 && openbsd -// +build ppc64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_riscv64.go index a0db82fc..35c3b91d 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. 
//go:build riscv64 && openbsd -// +build riscv64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go index f8298ff9..5edda768 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && darwin -// +build amd64,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go index 5eb433bb..0dc9e8b4 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && darwin -// +build arm64,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go index 703675c0..308ddf3a 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && dragonfly -// +build amd64,dragonfly package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go index 4e0d9610..418664e3 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && freebsd -// +build 386,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go index 01636b83..34d0b86d 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && freebsd -// +build amd64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go index ad99bc10..b71cf45e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && freebsd -// +build arm,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go index 89dcc427..e32df1c1 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && freebsd -// +build arm64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go index ee37aaa0..15ad6111 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build riscv64 && freebsd -// +build riscv64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index 9862853d..53aef5dc 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && linux -// +build 386,linux package unix @@ -448,4 +447,14 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index 8901f0f4..71d52476 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && linux -// +build amd64,linux package unix @@ -370,4 +369,14 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 6902c37e..c7477061 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && linux -// +build arm,linux package unix @@ -412,4 +411,14 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index a6d3dff8..f96e214f 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && linux -// +build arm64,linux package unix @@ -315,4 +314,14 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index b18f3f71..28425346 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build loong64 && linux -// +build loong64,linux package unix @@ -309,4 +308,14 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index 0302e5e3..d0953018 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips && linux -// +build mips,linux package unix @@ -432,4 +431,14 @@ const ( SYS_FUTEX_WAITV = 4449 SYS_SET_MEMPOLICY_HOME_NODE = 4450 SYS_CACHESTAT = 4451 + SYS_FCHMODAT2 = 4452 + SYS_MAP_SHADOW_STACK = 4453 + SYS_FUTEX_WAKE = 4454 + SYS_FUTEX_WAIT = 4455 + SYS_FUTEX_REQUEUE = 4456 + SYS_STATMOUNT = 4457 + SYS_LISTMOUNT = 4458 + SYS_LSM_GET_SELF_ATTR = 4459 + SYS_LSM_SET_SELF_ATTR = 4460 + SYS_LSM_LIST_MODULES = 4461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 6693ba4a..295c7f4b 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64 && linux -// +build mips64,linux package unix @@ -362,4 +361,14 @@ const ( SYS_FUTEX_WAITV = 5449 SYS_SET_MEMPOLICY_HOME_NODE = 5450 SYS_CACHESTAT = 5451 + SYS_FCHMODAT2 = 5452 + SYS_MAP_SHADOW_STACK = 5453 + SYS_FUTEX_WAKE = 5454 + SYS_FUTEX_WAIT = 5455 + SYS_FUTEX_REQUEUE = 5456 + SYS_STATMOUNT = 5457 + SYS_LISTMOUNT = 5458 + SYS_LSM_GET_SELF_ATTR = 5459 + SYS_LSM_SET_SELF_ATTR = 5460 + SYS_LSM_LIST_MODULES = 5461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index fd93f498..d1a9eaca 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64le && linux -// +build mips64le,linux package unix @@ -362,4 +361,14 @@ const ( SYS_FUTEX_WAITV = 5449 SYS_SET_MEMPOLICY_HOME_NODE = 5450 SYS_CACHESTAT = 5451 + SYS_FCHMODAT2 = 5452 + SYS_MAP_SHADOW_STACK = 5453 + SYS_FUTEX_WAKE = 5454 + SYS_FUTEX_WAIT = 5455 + SYS_FUTEX_REQUEUE = 5456 + SYS_STATMOUNT = 5457 + SYS_LISTMOUNT = 5458 + SYS_LSM_GET_SELF_ATTR = 5459 + SYS_LSM_SET_SELF_ATTR = 5460 + SYS_LSM_LIST_MODULES = 5461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index 760ddcad..bec157c3 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build mipsle && linux -// +build mipsle,linux package unix @@ -432,4 +431,14 @@ const ( SYS_FUTEX_WAITV = 4449 SYS_SET_MEMPOLICY_HOME_NODE = 4450 SYS_CACHESTAT = 4451 + SYS_FCHMODAT2 = 4452 + SYS_MAP_SHADOW_STACK = 4453 + SYS_FUTEX_WAKE = 4454 + SYS_FUTEX_WAIT = 4455 + SYS_FUTEX_REQUEUE = 4456 + SYS_STATMOUNT = 4457 + SYS_LISTMOUNT = 4458 + SYS_LSM_GET_SELF_ATTR = 4459 + SYS_LSM_SET_SELF_ATTR = 4460 + SYS_LSM_LIST_MODULES = 4461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index cff2b255..7ee7bdc4 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc && linux -// +build ppc,linux package unix @@ -439,4 +438,14 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index a4b2405d..fad1f25b 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && linux -// +build ppc64,linux package unix @@ -411,4 +410,14 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index aca54b4e..7d3e1635 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64le && linux -// +build ppc64le,linux package unix @@ -411,4 +410,14 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 9d1738d6..0ed53ad9 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build riscv64 && linux -// +build riscv64,linux package unix @@ -316,4 +315,14 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index 022878dc..2fba04ad 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build s390x && linux -// +build s390x,linux package unix @@ -377,4 +376,14 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 4100a761..621d00d7 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build sparc64 && linux -// +build sparc64,linux package unix @@ -390,4 +389,14 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go index 3a6699eb..b2aa8cd4 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && netbsd -// +build 386,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go index 5677cd4f..524a1b1c 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && netbsd -// +build amd64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go index e784cb6d..d59b943a 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build arm && netbsd -// +build arm,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go index bd4952ef..31e771d5 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. //go:build arm64 && netbsd -// +build arm64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go index 59773381..9fd77c6c 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && openbsd -// +build 386,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go index 16af2918..af10af28 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && openbsd -// +build amd64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go index f59b18a9..cc2028af 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && openbsd -// +build arm,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go index 721ef591..c06dd441 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && openbsd -// +build arm64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go index 01c43a01..9ddbf3e0 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64 && openbsd -// +build mips64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_ppc64.go index f258cfa2..19a6ee41 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && openbsd -// +build ppc64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_riscv64.go index 07919e0e..05192a78 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build riscv64 && openbsd -// +build riscv64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go index 073daad4..5e8c263c 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go @@ -1,2670 +1,2852 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. +// go run mksyscall_zos_s390x.go -o_sysnum zsysnum_zos_s390x.go -o_syscall zsyscall_zos_s390x.go -i_syscall syscall_zos_s390x.go -o_asm zsymaddr_zos_s390x.s +// Code generated by the command above; see README.md. DO NOT EDIT. //go:build zos && s390x -// +build zos,s390x package unix -// TODO: auto-generate. - const ( - SYS_ACOSD128 = 0xB80 - SYS_ACOSD32 = 0xB7E - SYS_ACOSD64 = 0xB7F - SYS_ACOSHD128 = 0xB83 - SYS_ACOSHD32 = 0xB81 - SYS_ACOSHD64 = 0xB82 - SYS_AIO_FSYNC = 0xC69 - SYS_ASCTIME = 0x0AE - SYS_ASCTIME64 = 0xCD7 - SYS_ASCTIME64_R = 0xCD8 - SYS_ASIND128 = 0xB86 - SYS_ASIND32 = 0xB84 - SYS_ASIND64 = 0xB85 - SYS_ASINHD128 = 0xB89 - SYS_ASINHD32 = 0xB87 - SYS_ASINHD64 = 0xB88 - SYS_ATAN2D128 = 0xB8F - SYS_ATAN2D32 = 0xB8D - SYS_ATAN2D64 = 0xB8E - SYS_ATAND128 = 0xB8C - SYS_ATAND32 = 0xB8A - SYS_ATAND64 = 0xB8B - SYS_ATANHD128 = 0xB92 - SYS_ATANHD32 = 0xB90 - SYS_ATANHD64 = 0xB91 - SYS_BIND2ADDRSEL = 0xD59 - SYS_C16RTOMB = 0xD40 - SYS_C32RTOMB = 0xD41 - SYS_CBRTD128 = 0xB95 - SYS_CBRTD32 = 0xB93 - SYS_CBRTD64 = 0xB94 - SYS_CEILD128 = 0xB98 - SYS_CEILD32 = 0xB96 - SYS_CEILD64 = 0xB97 - SYS_CLEARENV = 0x0C9 - SYS_CLEARERR_UNLOCKED = 0xCA1 - SYS_CLOCK = 0x0AA - SYS_CLOGL = 0xA00 - SYS_CLRMEMF = 0x0BD - SYS_CONJ = 0xA03 - SYS_CONJF = 0xA06 - SYS_CONJL = 0xA09 - SYS_COPYSIGND128 = 0xB9E - SYS_COPYSIGND32 = 0xB9C - SYS_COPYSIGND64 = 0xB9D - SYS_COSD128 = 0xBA1 - SYS_COSD32 = 0xB9F - SYS_COSD64 = 0xBA0 - SYS_COSHD128 = 0xBA4 - SYS_COSHD32 = 0xBA2 - SYS_COSHD64 = 0xBA3 - SYS_CPOW = 0xA0C - SYS_CPOWF = 0xA0F - SYS_CPOWL = 0xA12 - SYS_CPROJ = 0xA15 - SYS_CPROJF = 0xA18 - SYS_CPROJL = 0xA1B - SYS_CREAL = 0xA1E - SYS_CREALF = 0xA21 - SYS_CREALL = 0xA24 - SYS_CSIN = 0xA27 - SYS_CSINF = 0xA2A - SYS_CSINH = 0xA30 - SYS_CSINHF = 0xA33 - SYS_CSINHL = 0xA36 - SYS_CSINL = 0xA2D - SYS_CSNAP = 0x0C5 - SYS_CSQRT = 0xA39 - SYS_CSQRTF = 0xA3C - SYS_CSQRTL = 0xA3F - SYS_CTAN = 0xA42 - SYS_CTANF = 0xA45 - SYS_CTANH = 0xA4B - SYS_CTANHF = 0xA4E - SYS_CTANHL = 0xA51 - SYS_CTANL = 0xA48 - SYS_CTIME = 0x0AB - SYS_CTIME64 = 0xCD9 - SYS_CTIME64_R = 0xCDA - SYS_CTRACE = 0x0C6 - SYS_DIFFTIME = 0x0A7 - SYS_DIFFTIME64 = 0xCDB - SYS_DLADDR = 0xC82 - SYS_DYNALLOC = 0x0C3 - SYS_DYNFREE = 0x0C2 - SYS_ERFCD128 = 0xBAA - SYS_ERFCD32 = 0xBA8 - SYS_ERFCD64 = 0xBA9 - SYS_ERFD128 = 0xBA7 - SYS_ERFD32 = 0xBA5 - SYS_ERFD64 = 0xBA6 - SYS_EXP2D128 = 0xBB0 - SYS_EXP2D32 = 0xBAE - SYS_EXP2D64 = 0xBAF - SYS_EXPD128 = 0xBAD - SYS_EXPD32 = 0xBAB - SYS_EXPD64 = 0xBAC - SYS_EXPM1D128 = 0xBB3 - SYS_EXPM1D32 = 0xBB1 - SYS_EXPM1D64 = 0xBB2 - SYS_FABSD128 = 0xBB6 - SYS_FABSD32 = 0xBB4 - SYS_FABSD64 = 0xBB5 - SYS_FDELREC_UNLOCKED = 0xCA2 - SYS_FDIMD128 = 0xBB9 - SYS_FDIMD32 = 0xBB7 - SYS_FDIMD64 = 0xBB8 - SYS_FDOPEN_UNLOCKED = 0xCFC - SYS_FECLEAREXCEPT = 0xAEA - SYS_FEGETENV = 0xAEB - SYS_FEGETEXCEPTFLAG = 0xAEC - SYS_FEGETROUND = 0xAED - SYS_FEHOLDEXCEPT = 0xAEE - SYS_FEOF_UNLOCKED = 0xCA3 - SYS_FERAISEEXCEPT = 0xAEF - SYS_FERROR_UNLOCKED = 0xCA4 - SYS_FESETENV = 0xAF0 - SYS_FESETEXCEPTFLAG = 0xAF1 - SYS_FESETROUND = 0xAF2 - SYS_FETCHEP = 
0x0BF - SYS_FETESTEXCEPT = 0xAF3 - SYS_FEUPDATEENV = 0xAF4 - SYS_FE_DEC_GETROUND = 0xBBA - SYS_FE_DEC_SETROUND = 0xBBB - SYS_FFLUSH_UNLOCKED = 0xCA5 - SYS_FGETC_UNLOCKED = 0xC80 - SYS_FGETPOS64 = 0xCEE - SYS_FGETPOS64_UNLOCKED = 0xCF4 - SYS_FGETPOS_UNLOCKED = 0xCA6 - SYS_FGETS_UNLOCKED = 0xC7C - SYS_FGETWC_UNLOCKED = 0xCA7 - SYS_FGETWS_UNLOCKED = 0xCA8 - SYS_FILENO_UNLOCKED = 0xCA9 - SYS_FLDATA = 0x0C1 - SYS_FLDATA_UNLOCKED = 0xCAA - SYS_FLOCATE_UNLOCKED = 0xCAB - SYS_FLOORD128 = 0xBBE - SYS_FLOORD32 = 0xBBC - SYS_FLOORD64 = 0xBBD - SYS_FMA = 0xA63 - SYS_FMAD128 = 0xBC1 - SYS_FMAD32 = 0xBBF - SYS_FMAD64 = 0xBC0 - SYS_FMAF = 0xA66 - SYS_FMAL = 0xA69 - SYS_FMAX = 0xA6C - SYS_FMAXD128 = 0xBC4 - SYS_FMAXD32 = 0xBC2 - SYS_FMAXD64 = 0xBC3 - SYS_FMAXF = 0xA6F - SYS_FMAXL = 0xA72 - SYS_FMIN = 0xA75 - SYS_FMIND128 = 0xBC7 - SYS_FMIND32 = 0xBC5 - SYS_FMIND64 = 0xBC6 - SYS_FMINF = 0xA78 - SYS_FMINL = 0xA7B - SYS_FMODD128 = 0xBCA - SYS_FMODD32 = 0xBC8 - SYS_FMODD64 = 0xBC9 - SYS_FOPEN64 = 0xD49 - SYS_FOPEN64_UNLOCKED = 0xD4A - SYS_FOPEN_UNLOCKED = 0xCFA - SYS_FPRINTF_UNLOCKED = 0xCAC - SYS_FPUTC_UNLOCKED = 0xC81 - SYS_FPUTS_UNLOCKED = 0xC7E - SYS_FPUTWC_UNLOCKED = 0xCAD - SYS_FPUTWS_UNLOCKED = 0xCAE - SYS_FREAD_NOUPDATE = 0xCEC - SYS_FREAD_NOUPDATE_UNLOCKED = 0xCED - SYS_FREAD_UNLOCKED = 0xC7B - SYS_FREEIFADDRS = 0xCE6 - SYS_FREOPEN64 = 0xD4B - SYS_FREOPEN64_UNLOCKED = 0xD4C - SYS_FREOPEN_UNLOCKED = 0xCFB - SYS_FREXPD128 = 0xBCE - SYS_FREXPD32 = 0xBCC - SYS_FREXPD64 = 0xBCD - SYS_FSCANF_UNLOCKED = 0xCAF - SYS_FSEEK64 = 0xCEF - SYS_FSEEK64_UNLOCKED = 0xCF5 - SYS_FSEEKO64 = 0xCF0 - SYS_FSEEKO64_UNLOCKED = 0xCF6 - SYS_FSEEKO_UNLOCKED = 0xCB1 - SYS_FSEEK_UNLOCKED = 0xCB0 - SYS_FSETPOS64 = 0xCF1 - SYS_FSETPOS64_UNLOCKED = 0xCF7 - SYS_FSETPOS_UNLOCKED = 0xCB3 - SYS_FTELL64 = 0xCF2 - SYS_FTELL64_UNLOCKED = 0xCF8 - SYS_FTELLO64 = 0xCF3 - SYS_FTELLO64_UNLOCKED = 0xCF9 - SYS_FTELLO_UNLOCKED = 0xCB5 - SYS_FTELL_UNLOCKED = 0xCB4 - SYS_FUPDATE = 0x0B5 - SYS_FUPDATE_UNLOCKED = 0xCB7 - SYS_FWIDE_UNLOCKED = 0xCB8 - SYS_FWPRINTF_UNLOCKED = 0xCB9 - SYS_FWRITE_UNLOCKED = 0xC7A - SYS_FWSCANF_UNLOCKED = 0xCBA - SYS_GETDATE64 = 0xD4F - SYS_GETIFADDRS = 0xCE7 - SYS_GETIPV4SOURCEFILTER = 0xC77 - SYS_GETSOURCEFILTER = 0xC79 - SYS_GETSYNTX = 0x0FD - SYS_GETS_UNLOCKED = 0xC7D - SYS_GETTIMEOFDAY64 = 0xD50 - SYS_GETWCHAR_UNLOCKED = 0xCBC - SYS_GETWC_UNLOCKED = 0xCBB - SYS_GMTIME = 0x0B0 - SYS_GMTIME64 = 0xCDC - SYS_GMTIME64_R = 0xCDD - SYS_HYPOTD128 = 0xBD1 - SYS_HYPOTD32 = 0xBCF - SYS_HYPOTD64 = 0xBD0 - SYS_ILOGBD128 = 0xBD4 - SYS_ILOGBD32 = 0xBD2 - SYS_ILOGBD64 = 0xBD3 - SYS_ILOGBF = 0xA7E - SYS_ILOGBL = 0xA81 - SYS_INET6_IS_SRCADDR = 0xD5A - SYS_ISBLANK = 0x0FE - SYS_ISWALNUM = 0x0FF - SYS_LDEXPD128 = 0xBD7 - SYS_LDEXPD32 = 0xBD5 - SYS_LDEXPD64 = 0xBD6 - SYS_LGAMMAD128 = 0xBDA - SYS_LGAMMAD32 = 0xBD8 - SYS_LGAMMAD64 = 0xBD9 - SYS_LIO_LISTIO = 0xC6A - SYS_LLRINT = 0xA84 - SYS_LLRINTD128 = 0xBDD - SYS_LLRINTD32 = 0xBDB - SYS_LLRINTD64 = 0xBDC - SYS_LLRINTF = 0xA87 - SYS_LLRINTL = 0xA8A - SYS_LLROUND = 0xA8D - SYS_LLROUNDD128 = 0xBE0 - SYS_LLROUNDD32 = 0xBDE - SYS_LLROUNDD64 = 0xBDF - SYS_LLROUNDF = 0xA90 - SYS_LLROUNDL = 0xA93 - SYS_LOCALTIM = 0x0B1 - SYS_LOCALTIME = 0x0B1 - SYS_LOCALTIME64 = 0xCDE - SYS_LOCALTIME64_R = 0xCDF - SYS_LOG10D128 = 0xBE6 - SYS_LOG10D32 = 0xBE4 - SYS_LOG10D64 = 0xBE5 - SYS_LOG1PD128 = 0xBE9 - SYS_LOG1PD32 = 0xBE7 - SYS_LOG1PD64 = 0xBE8 - SYS_LOG2D128 = 0xBEC - SYS_LOG2D32 = 0xBEA - SYS_LOG2D64 = 0xBEB - SYS_LOGBD128 = 0xBEF - SYS_LOGBD32 = 0xBED - SYS_LOGBD64 = 0xBEE - SYS_LOGBF = 0xA96 - SYS_LOGBL = 
0xA99 - SYS_LOGD128 = 0xBE3 - SYS_LOGD32 = 0xBE1 - SYS_LOGD64 = 0xBE2 - SYS_LRINT = 0xA9C - SYS_LRINTD128 = 0xBF2 - SYS_LRINTD32 = 0xBF0 - SYS_LRINTD64 = 0xBF1 - SYS_LRINTF = 0xA9F - SYS_LRINTL = 0xAA2 - SYS_LROUNDD128 = 0xBF5 - SYS_LROUNDD32 = 0xBF3 - SYS_LROUNDD64 = 0xBF4 - SYS_LROUNDL = 0xAA5 - SYS_MBLEN = 0x0AF - SYS_MBRTOC16 = 0xD42 - SYS_MBRTOC32 = 0xD43 - SYS_MEMSET = 0x0A3 - SYS_MKTIME = 0x0AC - SYS_MKTIME64 = 0xCE0 - SYS_MODFD128 = 0xBF8 - SYS_MODFD32 = 0xBF6 - SYS_MODFD64 = 0xBF7 - SYS_NAN = 0xAA8 - SYS_NAND128 = 0xBFB - SYS_NAND32 = 0xBF9 - SYS_NAND64 = 0xBFA - SYS_NANF = 0xAAA - SYS_NANL = 0xAAC - SYS_NEARBYINT = 0xAAE - SYS_NEARBYINTD128 = 0xBFE - SYS_NEARBYINTD32 = 0xBFC - SYS_NEARBYINTD64 = 0xBFD - SYS_NEARBYINTF = 0xAB1 - SYS_NEARBYINTL = 0xAB4 - SYS_NEXTAFTERD128 = 0xC01 - SYS_NEXTAFTERD32 = 0xBFF - SYS_NEXTAFTERD64 = 0xC00 - SYS_NEXTAFTERF = 0xAB7 - SYS_NEXTAFTERL = 0xABA - SYS_NEXTTOWARD = 0xABD - SYS_NEXTTOWARDD128 = 0xC04 - SYS_NEXTTOWARDD32 = 0xC02 - SYS_NEXTTOWARDD64 = 0xC03 - SYS_NEXTTOWARDF = 0xAC0 - SYS_NEXTTOWARDL = 0xAC3 - SYS_NL_LANGINFO = 0x0FC - SYS_PERROR_UNLOCKED = 0xCBD - SYS_POSIX_FALLOCATE = 0xCE8 - SYS_POSIX_MEMALIGN = 0xCE9 - SYS_POSIX_OPENPT = 0xC66 - SYS_POWD128 = 0xC07 - SYS_POWD32 = 0xC05 - SYS_POWD64 = 0xC06 - SYS_PRINTF_UNLOCKED = 0xCBE - SYS_PSELECT = 0xC67 - SYS_PTHREAD_ATTR_GETSTACK = 0xB3E - SYS_PTHREAD_ATTR_SETSTACK = 0xB3F - SYS_PTHREAD_SECURITY_APPLID_NP = 0xCE4 - SYS_PUTS_UNLOCKED = 0xC7F - SYS_PUTWCHAR_UNLOCKED = 0xCC0 - SYS_PUTWC_UNLOCKED = 0xCBF - SYS_QUANTEXPD128 = 0xD46 - SYS_QUANTEXPD32 = 0xD44 - SYS_QUANTEXPD64 = 0xD45 - SYS_QUANTIZED128 = 0xC0A - SYS_QUANTIZED32 = 0xC08 - SYS_QUANTIZED64 = 0xC09 - SYS_REMAINDERD128 = 0xC0D - SYS_REMAINDERD32 = 0xC0B - SYS_REMAINDERD64 = 0xC0C - SYS_RESIZE_ALLOC = 0xCEB - SYS_REWIND_UNLOCKED = 0xCC1 - SYS_RINTD128 = 0xC13 - SYS_RINTD32 = 0xC11 - SYS_RINTD64 = 0xC12 - SYS_RINTF = 0xACB - SYS_RINTL = 0xACD - SYS_ROUND = 0xACF - SYS_ROUNDD128 = 0xC16 - SYS_ROUNDD32 = 0xC14 - SYS_ROUNDD64 = 0xC15 - SYS_ROUNDF = 0xAD2 - SYS_ROUNDL = 0xAD5 - SYS_SAMEQUANTUMD128 = 0xC19 - SYS_SAMEQUANTUMD32 = 0xC17 - SYS_SAMEQUANTUMD64 = 0xC18 - SYS_SCALBLN = 0xAD8 - SYS_SCALBLND128 = 0xC1C - SYS_SCALBLND32 = 0xC1A - SYS_SCALBLND64 = 0xC1B - SYS_SCALBLNF = 0xADB - SYS_SCALBLNL = 0xADE - SYS_SCALBND128 = 0xC1F - SYS_SCALBND32 = 0xC1D - SYS_SCALBND64 = 0xC1E - SYS_SCALBNF = 0xAE3 - SYS_SCALBNL = 0xAE6 - SYS_SCANF_UNLOCKED = 0xCC2 - SYS_SCHED_YIELD = 0xB32 - SYS_SETENV = 0x0C8 - SYS_SETIPV4SOURCEFILTER = 0xC76 - SYS_SETSOURCEFILTER = 0xC78 - SYS_SHM_OPEN = 0xC8C - SYS_SHM_UNLINK = 0xC8D - SYS_SIND128 = 0xC22 - SYS_SIND32 = 0xC20 - SYS_SIND64 = 0xC21 - SYS_SINHD128 = 0xC25 - SYS_SINHD32 = 0xC23 - SYS_SINHD64 = 0xC24 - SYS_SIZEOF_ALLOC = 0xCEA - SYS_SOCKATMARK = 0xC68 - SYS_SQRTD128 = 0xC28 - SYS_SQRTD32 = 0xC26 - SYS_SQRTD64 = 0xC27 - SYS_STRCHR = 0x0A0 - SYS_STRCSPN = 0x0A1 - SYS_STRERROR = 0x0A8 - SYS_STRERROR_R = 0xB33 - SYS_STRFTIME = 0x0B2 - SYS_STRLEN = 0x0A9 - SYS_STRPBRK = 0x0A2 - SYS_STRSPN = 0x0A4 - SYS_STRSTR = 0x0A5 - SYS_STRTOD128 = 0xC2B - SYS_STRTOD32 = 0xC29 - SYS_STRTOD64 = 0xC2A - SYS_STRTOK = 0x0A6 - SYS_TAND128 = 0xC2E - SYS_TAND32 = 0xC2C - SYS_TAND64 = 0xC2D - SYS_TANHD128 = 0xC31 - SYS_TANHD32 = 0xC2F - SYS_TANHD64 = 0xC30 - SYS_TGAMMAD128 = 0xC34 - SYS_TGAMMAD32 = 0xC32 - SYS_TGAMMAD64 = 0xC33 - SYS_TIME = 0x0AD - SYS_TIME64 = 0xCE1 - SYS_TMPFILE64 = 0xD4D - SYS_TMPFILE64_UNLOCKED = 0xD4E - SYS_TMPFILE_UNLOCKED = 0xCFD - SYS_TRUNCD128 = 0xC40 - SYS_TRUNCD32 = 0xC3E - SYS_TRUNCD64 = 0xC3F - 
SYS_UNGETC_UNLOCKED = 0xCC3 - SYS_UNGETWC_UNLOCKED = 0xCC4 - SYS_UNSETENV = 0xB34 - SYS_VFPRINTF_UNLOCKED = 0xCC5 - SYS_VFSCANF_UNLOCKED = 0xCC7 - SYS_VFWPRINTF_UNLOCKED = 0xCC9 - SYS_VFWSCANF_UNLOCKED = 0xCCB - SYS_VPRINTF_UNLOCKED = 0xCCD - SYS_VSCANF_UNLOCKED = 0xCCF - SYS_VWPRINTF_UNLOCKED = 0xCD1 - SYS_VWSCANF_UNLOCKED = 0xCD3 - SYS_WCSTOD128 = 0xC43 - SYS_WCSTOD32 = 0xC41 - SYS_WCSTOD64 = 0xC42 - SYS_WPRINTF_UNLOCKED = 0xCD5 - SYS_WSCANF_UNLOCKED = 0xCD6 - SYS__FLUSHLBF = 0xD68 - SYS__FLUSHLBF_UNLOCKED = 0xD6F - SYS___ACOSHF_H = 0xA54 - SYS___ACOSHL_H = 0xA55 - SYS___ASINHF_H = 0xA56 - SYS___ASINHL_H = 0xA57 - SYS___ATANPID128 = 0xC6D - SYS___ATANPID32 = 0xC6B - SYS___ATANPID64 = 0xC6C - SYS___CBRTF_H = 0xA58 - SYS___CBRTL_H = 0xA59 - SYS___CDUMP = 0x0C4 - SYS___CLASS = 0xAFA - SYS___CLASS2 = 0xB99 - SYS___CLASS2D128 = 0xC99 - SYS___CLASS2D32 = 0xC97 - SYS___CLASS2D64 = 0xC98 - SYS___CLASS2F = 0xC91 - SYS___CLASS2F_B = 0xC93 - SYS___CLASS2F_H = 0xC94 - SYS___CLASS2L = 0xC92 - SYS___CLASS2L_B = 0xC95 - SYS___CLASS2L_H = 0xC96 - SYS___CLASS2_B = 0xB9A - SYS___CLASS2_H = 0xB9B - SYS___CLASS_B = 0xAFB - SYS___CLASS_H = 0xAFC - SYS___CLOGL_B = 0xA01 - SYS___CLOGL_H = 0xA02 - SYS___CLRENV = 0x0C9 - SYS___CLRMF = 0x0BD - SYS___CODEPAGE_INFO = 0xC64 - SYS___CONJF_B = 0xA07 - SYS___CONJF_H = 0xA08 - SYS___CONJL_B = 0xA0A - SYS___CONJL_H = 0xA0B - SYS___CONJ_B = 0xA04 - SYS___CONJ_H = 0xA05 - SYS___COPYSIGN_B = 0xA5A - SYS___COPYSIGN_H = 0xAF5 - SYS___COSPID128 = 0xC70 - SYS___COSPID32 = 0xC6E - SYS___COSPID64 = 0xC6F - SYS___CPOWF_B = 0xA10 - SYS___CPOWF_H = 0xA11 - SYS___CPOWL_B = 0xA13 - SYS___CPOWL_H = 0xA14 - SYS___CPOW_B = 0xA0D - SYS___CPOW_H = 0xA0E - SYS___CPROJF_B = 0xA19 - SYS___CPROJF_H = 0xA1A - SYS___CPROJL_B = 0xA1C - SYS___CPROJL_H = 0xA1D - SYS___CPROJ_B = 0xA16 - SYS___CPROJ_H = 0xA17 - SYS___CREALF_B = 0xA22 - SYS___CREALF_H = 0xA23 - SYS___CREALL_B = 0xA25 - SYS___CREALL_H = 0xA26 - SYS___CREAL_B = 0xA1F - SYS___CREAL_H = 0xA20 - SYS___CSINF_B = 0xA2B - SYS___CSINF_H = 0xA2C - SYS___CSINHF_B = 0xA34 - SYS___CSINHF_H = 0xA35 - SYS___CSINHL_B = 0xA37 - SYS___CSINHL_H = 0xA38 - SYS___CSINH_B = 0xA31 - SYS___CSINH_H = 0xA32 - SYS___CSINL_B = 0xA2E - SYS___CSINL_H = 0xA2F - SYS___CSIN_B = 0xA28 - SYS___CSIN_H = 0xA29 - SYS___CSNAP = 0x0C5 - SYS___CSQRTF_B = 0xA3D - SYS___CSQRTF_H = 0xA3E - SYS___CSQRTL_B = 0xA40 - SYS___CSQRTL_H = 0xA41 - SYS___CSQRT_B = 0xA3A - SYS___CSQRT_H = 0xA3B - SYS___CTANF_B = 0xA46 - SYS___CTANF_H = 0xA47 - SYS___CTANHF_B = 0xA4F - SYS___CTANHF_H = 0xA50 - SYS___CTANHL_B = 0xA52 - SYS___CTANHL_H = 0xA53 - SYS___CTANH_B = 0xA4C - SYS___CTANH_H = 0xA4D - SYS___CTANL_B = 0xA49 - SYS___CTANL_H = 0xA4A - SYS___CTAN_B = 0xA43 - SYS___CTAN_H = 0xA44 - SYS___CTEST = 0x0C7 - SYS___CTRACE = 0x0C6 - SYS___D1TOP = 0xC9B - SYS___D2TOP = 0xC9C - SYS___D4TOP = 0xC9D - SYS___DYNALL = 0x0C3 - SYS___DYNFRE = 0x0C2 - SYS___EXP2F_H = 0xA5E - SYS___EXP2L_H = 0xA5F - SYS___EXP2_H = 0xA5D - SYS___EXPM1F_H = 0xA5B - SYS___EXPM1L_H = 0xA5C - SYS___FBUFSIZE = 0xD60 - SYS___FLBF = 0xD62 - SYS___FLDATA = 0x0C1 - SYS___FMAF_B = 0xA67 - SYS___FMAF_H = 0xA68 - SYS___FMAL_B = 0xA6A - SYS___FMAL_H = 0xA6B - SYS___FMAXF_B = 0xA70 - SYS___FMAXF_H = 0xA71 - SYS___FMAXL_B = 0xA73 - SYS___FMAXL_H = 0xA74 - SYS___FMAX_B = 0xA6D - SYS___FMAX_H = 0xA6E - SYS___FMA_B = 0xA64 - SYS___FMA_H = 0xA65 - SYS___FMINF_B = 0xA79 - SYS___FMINF_H = 0xA7A - SYS___FMINL_B = 0xA7C - SYS___FMINL_H = 0xA7D - SYS___FMIN_B = 0xA76 - SYS___FMIN_H = 0xA77 - SYS___FPENDING = 0xD61 - SYS___FPENDING_UNLOCKED = 0xD6C 
- SYS___FPURGE = 0xD69 - SYS___FPURGE_UNLOCKED = 0xD70 - SYS___FP_CAST_D = 0xBCB - SYS___FREADABLE = 0xD63 - SYS___FREADAHEAD = 0xD6A - SYS___FREADAHEAD_UNLOCKED = 0xD71 - SYS___FREADING = 0xD65 - SYS___FREADING_UNLOCKED = 0xD6D - SYS___FSEEK2 = 0xB3C - SYS___FSETERR = 0xD6B - SYS___FSETLOCKING = 0xD67 - SYS___FTCHEP = 0x0BF - SYS___FTELL2 = 0xB3B - SYS___FUPDT = 0x0B5 - SYS___FWRITABLE = 0xD64 - SYS___FWRITING = 0xD66 - SYS___FWRITING_UNLOCKED = 0xD6E - SYS___GETCB = 0x0B4 - SYS___GETGRGID1 = 0xD5B - SYS___GETGRNAM1 = 0xD5C - SYS___GETTHENT = 0xCE5 - SYS___GETTOD = 0xD3E - SYS___HYPOTF_H = 0xAF6 - SYS___HYPOTL_H = 0xAF7 - SYS___ILOGBF_B = 0xA7F - SYS___ILOGBF_H = 0xA80 - SYS___ILOGBL_B = 0xA82 - SYS___ILOGBL_H = 0xA83 - SYS___ISBLANK_A = 0xB2E - SYS___ISBLNK = 0x0FE - SYS___ISWBLANK_A = 0xB2F - SYS___LE_CEEGTJS = 0xD72 - SYS___LE_TRACEBACK = 0xB7A - SYS___LGAMMAL_H = 0xA62 - SYS___LGAMMA_B_C99 = 0xB39 - SYS___LGAMMA_H_C99 = 0xB38 - SYS___LGAMMA_R_C99 = 0xB3A - SYS___LLRINTF_B = 0xA88 - SYS___LLRINTF_H = 0xA89 - SYS___LLRINTL_B = 0xA8B - SYS___LLRINTL_H = 0xA8C - SYS___LLRINT_B = 0xA85 - SYS___LLRINT_H = 0xA86 - SYS___LLROUNDF_B = 0xA91 - SYS___LLROUNDF_H = 0xA92 - SYS___LLROUNDL_B = 0xA94 - SYS___LLROUNDL_H = 0xA95 - SYS___LLROUND_B = 0xA8E - SYS___LLROUND_H = 0xA8F - SYS___LOCALE_CTL = 0xD47 - SYS___LOG1PF_H = 0xA60 - SYS___LOG1PL_H = 0xA61 - SYS___LOGBF_B = 0xA97 - SYS___LOGBF_H = 0xA98 - SYS___LOGBL_B = 0xA9A - SYS___LOGBL_H = 0xA9B - SYS___LOGIN_APPLID = 0xCE2 - SYS___LRINTF_B = 0xAA0 - SYS___LRINTF_H = 0xAA1 - SYS___LRINTL_B = 0xAA3 - SYS___LRINTL_H = 0xAA4 - SYS___LRINT_B = 0xA9D - SYS___LRINT_H = 0xA9E - SYS___LROUNDF_FIXUP = 0xB31 - SYS___LROUNDL_B = 0xAA6 - SYS___LROUNDL_H = 0xAA7 - SYS___LROUND_FIXUP = 0xB30 - SYS___MOSERVICES = 0xD3D - SYS___MUST_STAY_CLEAN = 0xB7C - SYS___NANF_B = 0xAAB - SYS___NANL_B = 0xAAD - SYS___NAN_B = 0xAA9 - SYS___NEARBYINTF_B = 0xAB2 - SYS___NEARBYINTF_H = 0xAB3 - SYS___NEARBYINTL_B = 0xAB5 - SYS___NEARBYINTL_H = 0xAB6 - SYS___NEARBYINT_B = 0xAAF - SYS___NEARBYINT_H = 0xAB0 - SYS___NEXTAFTERF_B = 0xAB8 - SYS___NEXTAFTERF_H = 0xAB9 - SYS___NEXTAFTERL_B = 0xABB - SYS___NEXTAFTERL_H = 0xABC - SYS___NEXTTOWARDF_B = 0xAC1 - SYS___NEXTTOWARDF_H = 0xAC2 - SYS___NEXTTOWARDL_B = 0xAC4 - SYS___NEXTTOWARDL_H = 0xAC5 - SYS___NEXTTOWARD_B = 0xABE - SYS___NEXTTOWARD_H = 0xABF - SYS___O_ENV = 0xB7D - SYS___PASSWD_APPLID = 0xCE3 - SYS___PTOD1 = 0xC9E - SYS___PTOD2 = 0xC9F - SYS___PTOD4 = 0xCA0 - SYS___REGCOMP_STD = 0x0EA - SYS___REMAINDERF_H = 0xAC6 - SYS___REMAINDERL_H = 0xAC7 - SYS___REMQUOD128 = 0xC10 - SYS___REMQUOD32 = 0xC0E - SYS___REMQUOD64 = 0xC0F - SYS___REMQUOF_H = 0xAC9 - SYS___REMQUOL_H = 0xACA - SYS___REMQUO_H = 0xAC8 - SYS___RINTF_B = 0xACC - SYS___RINTL_B = 0xACE - SYS___ROUNDF_B = 0xAD3 - SYS___ROUNDF_H = 0xAD4 - SYS___ROUNDL_B = 0xAD6 - SYS___ROUNDL_H = 0xAD7 - SYS___ROUND_B = 0xAD0 - SYS___ROUND_H = 0xAD1 - SYS___SCALBLNF_B = 0xADC - SYS___SCALBLNF_H = 0xADD - SYS___SCALBLNL_B = 0xADF - SYS___SCALBLNL_H = 0xAE0 - SYS___SCALBLN_B = 0xAD9 - SYS___SCALBLN_H = 0xADA - SYS___SCALBNF_B = 0xAE4 - SYS___SCALBNF_H = 0xAE5 - SYS___SCALBNL_B = 0xAE7 - SYS___SCALBNL_H = 0xAE8 - SYS___SCALBN_B = 0xAE1 - SYS___SCALBN_H = 0xAE2 - SYS___SETENV = 0x0C8 - SYS___SINPID128 = 0xC73 - SYS___SINPID32 = 0xC71 - SYS___SINPID64 = 0xC72 - SYS___SMF_RECORD2 = 0xD48 - SYS___STATIC_REINIT = 0xB3D - SYS___TGAMMAF_H_C99 = 0xB79 - SYS___TGAMMAL_H = 0xAE9 - SYS___TGAMMA_H_C99 = 0xB78 - SYS___TOCSNAME2 = 0xC9A - SYS_CEIL = 0x01F - SYS_CHAUDIT = 0x1E0 - SYS_EXP = 0x01A - SYS_FCHAUDIT 
= 0x1E1 - SYS_FREXP = 0x01D - SYS_GETGROUPSBYNAME = 0x1E2 - SYS_GETPWUID = 0x1A0 - SYS_GETUID = 0x1A1 - SYS_ISATTY = 0x1A3 - SYS_KILL = 0x1A4 - SYS_LDEXP = 0x01E - SYS_LINK = 0x1A5 - SYS_LOG10 = 0x01C - SYS_LSEEK = 0x1A6 - SYS_LSTAT = 0x1A7 - SYS_MKDIR = 0x1A8 - SYS_MKFIFO = 0x1A9 - SYS_MKNOD = 0x1AA - SYS_MODF = 0x01B - SYS_MOUNT = 0x1AB - SYS_OPEN = 0x1AC - SYS_OPENDIR = 0x1AD - SYS_PATHCONF = 0x1AE - SYS_PAUSE = 0x1AF - SYS_PIPE = 0x1B0 - SYS_PTHREAD_ATTR_DESTROY = 0x1E7 - SYS_PTHREAD_ATTR_GETDETACHSTATE = 0x1EB - SYS_PTHREAD_ATTR_GETSTACKSIZE = 0x1E9 - SYS_PTHREAD_ATTR_GETWEIGHT_NP = 0x1ED - SYS_PTHREAD_ATTR_INIT = 0x1E6 - SYS_PTHREAD_ATTR_SETDETACHSTATE = 0x1EA - SYS_PTHREAD_ATTR_SETSTACKSIZE = 0x1E8 - SYS_PTHREAD_ATTR_SETWEIGHT_NP = 0x1EC - SYS_PTHREAD_CANCEL = 0x1EE - SYS_PTHREAD_CLEANUP_POP = 0x1F0 - SYS_PTHREAD_CLEANUP_PUSH = 0x1EF - SYS_PTHREAD_CONDATTR_DESTROY = 0x1F2 - SYS_PTHREAD_CONDATTR_INIT = 0x1F1 - SYS_PTHREAD_COND_BROADCAST = 0x1F6 - SYS_PTHREAD_COND_DESTROY = 0x1F4 - SYS_PTHREAD_COND_INIT = 0x1F3 - SYS_PTHREAD_COND_SIGNAL = 0x1F5 - SYS_PTHREAD_COND_TIMEDWAIT = 0x1F8 - SYS_PTHREAD_COND_WAIT = 0x1F7 - SYS_PTHREAD_CREATE = 0x1F9 - SYS_PTHREAD_DETACH = 0x1FA - SYS_PTHREAD_EQUAL = 0x1FB - SYS_PTHREAD_EXIT = 0x1E4 - SYS_PTHREAD_GETSPECIFIC = 0x1FC - SYS_PTHREAD_JOIN = 0x1FD - SYS_PTHREAD_KEY_CREATE = 0x1FE - SYS_PTHREAD_KILL = 0x1E5 - SYS_PTHREAD_MUTEXATTR_INIT = 0x1FF - SYS_READ = 0x1B2 - SYS_READDIR = 0x1B3 - SYS_READLINK = 0x1B4 - SYS_REWINDDIR = 0x1B5 - SYS_RMDIR = 0x1B6 - SYS_SETEGID = 0x1B7 - SYS_SETEUID = 0x1B8 - SYS_SETGID = 0x1B9 - SYS_SETPGID = 0x1BA - SYS_SETSID = 0x1BB - SYS_SETUID = 0x1BC - SYS_SIGACTION = 0x1BD - SYS_SIGADDSET = 0x1BE - SYS_SIGDELSET = 0x1BF - SYS_SIGEMPTYSET = 0x1C0 - SYS_SIGFILLSET = 0x1C1 - SYS_SIGISMEMBER = 0x1C2 - SYS_SIGLONGJMP = 0x1C3 - SYS_SIGPENDING = 0x1C4 - SYS_SIGPROCMASK = 0x1C5 - SYS_SIGSETJMP = 0x1C6 - SYS_SIGSUSPEND = 0x1C7 - SYS_SIGWAIT = 0x1E3 - SYS_SLEEP = 0x1C8 - SYS_STAT = 0x1C9 - SYS_SYMLINK = 0x1CB - SYS_SYSCONF = 0x1CC - SYS_TCDRAIN = 0x1CD - SYS_TCFLOW = 0x1CE - SYS_TCFLUSH = 0x1CF - SYS_TCGETATTR = 0x1D0 - SYS_TCGETPGRP = 0x1D1 - SYS_TCSENDBREAK = 0x1D2 - SYS_TCSETATTR = 0x1D3 - SYS_TCSETPGRP = 0x1D4 - SYS_TIMES = 0x1D5 - SYS_TTYNAME = 0x1D6 - SYS_TZSET = 0x1D7 - SYS_UMASK = 0x1D8 - SYS_UMOUNT = 0x1D9 - SYS_UNAME = 0x1DA - SYS_UNLINK = 0x1DB - SYS_UTIME = 0x1DC - SYS_WAIT = 0x1DD - SYS_WAITPID = 0x1DE - SYS_WRITE = 0x1DF - SYS_W_GETPSENT = 0x1B1 - SYS_W_IOCTL = 0x1A2 - SYS_W_STATFS = 0x1CA - SYS_A64L = 0x2EF - SYS_BCMP = 0x2B9 - SYS_BCOPY = 0x2BA - SYS_BZERO = 0x2BB - SYS_CATCLOSE = 0x2B6 - SYS_CATGETS = 0x2B7 - SYS_CATOPEN = 0x2B8 - SYS_CRYPT = 0x2AC - SYS_DBM_CLEARERR = 0x2F7 - SYS_DBM_CLOSE = 0x2F8 - SYS_DBM_DELETE = 0x2F9 - SYS_DBM_ERROR = 0x2FA - SYS_DBM_FETCH = 0x2FB - SYS_DBM_FIRSTKEY = 0x2FC - SYS_DBM_NEXTKEY = 0x2FD - SYS_DBM_OPEN = 0x2FE - SYS_DBM_STORE = 0x2FF - SYS_DRAND48 = 0x2B2 - SYS_ENCRYPT = 0x2AD - SYS_ENDUTXENT = 0x2E1 - SYS_ERAND48 = 0x2B3 - SYS_ERF = 0x02C - SYS_ERFC = 0x02D - SYS_FCHDIR = 0x2D9 - SYS_FFS = 0x2BC - SYS_FMTMSG = 0x2E5 - SYS_FSTATVFS = 0x2B4 - SYS_FTIME = 0x2F5 - SYS_GAMMA = 0x02E - SYS_GETDATE = 0x2A6 - SYS_GETPAGESIZE = 0x2D8 - SYS_GETTIMEOFDAY = 0x2F6 - SYS_GETUTXENT = 0x2E0 - SYS_GETUTXID = 0x2E2 - SYS_GETUTXLINE = 0x2E3 - SYS_HCREATE = 0x2C6 - SYS_HDESTROY = 0x2C7 - SYS_HSEARCH = 0x2C8 - SYS_HYPOT = 0x02B - SYS_INDEX = 0x2BD - SYS_INITSTATE = 0x2C2 - SYS_INSQUE = 0x2CF - SYS_ISASCII = 0x2ED - SYS_JRAND48 = 0x2E6 - SYS_L64A = 0x2F0 - SYS_LCONG48 = 0x2EA - SYS_LFIND = 0x2C9 - 
SYS_LRAND48 = 0x2E7 - SYS_LSEARCH = 0x2CA - SYS_MEMCCPY = 0x2D4 - SYS_MRAND48 = 0x2E8 - SYS_NRAND48 = 0x2E9 - SYS_PCLOSE = 0x2D2 - SYS_POPEN = 0x2D1 - SYS_PUTUTXLINE = 0x2E4 - SYS_RANDOM = 0x2C4 - SYS_REMQUE = 0x2D0 - SYS_RINDEX = 0x2BE - SYS_SEED48 = 0x2EC - SYS_SETKEY = 0x2AE - SYS_SETSTATE = 0x2C3 - SYS_SETUTXENT = 0x2DF - SYS_SRAND48 = 0x2EB - SYS_SRANDOM = 0x2C5 - SYS_STATVFS = 0x2B5 - SYS_STRCASECMP = 0x2BF - SYS_STRDUP = 0x2C0 - SYS_STRNCASECMP = 0x2C1 - SYS_SWAB = 0x2D3 - SYS_TDELETE = 0x2CB - SYS_TFIND = 0x2CC - SYS_TOASCII = 0x2EE - SYS_TSEARCH = 0x2CD - SYS_TWALK = 0x2CE - SYS_UALARM = 0x2F1 - SYS_USLEEP = 0x2F2 - SYS_WAIT3 = 0x2A7 - SYS_WAITID = 0x2A8 - SYS_Y1 = 0x02A - SYS___ATOE = 0x2DB - SYS___ATOE_L = 0x2DC - SYS___CATTRM = 0x2A9 - SYS___CNVBLK = 0x2AF - SYS___CRYTRM = 0x2B0 - SYS___DLGHT = 0x2A1 - SYS___ECRTRM = 0x2B1 - SYS___ETOA = 0x2DD - SYS___ETOA_L = 0x2DE - SYS___GDTRM = 0x2AA - SYS___OCLCK = 0x2DA - SYS___OPARGF = 0x2A2 - SYS___OPERRF = 0x2A5 - SYS___OPINDF = 0x2A4 - SYS___OPOPTF = 0x2A3 - SYS___RNDTRM = 0x2AB - SYS___SRCTRM = 0x2F4 - SYS___TZONE = 0x2A0 - SYS___UTXTRM = 0x2F3 - SYS_ASIN = 0x03E - SYS_ISXDIGIT = 0x03B - SYS_SETLOCAL = 0x03A - SYS_SETLOCALE = 0x03A - SYS_SIN = 0x03F - SYS_TOLOWER = 0x03C - SYS_TOUPPER = 0x03D - SYS_ACCEPT_AND_RECV = 0x4F7 - SYS_ATOL = 0x04E - SYS_CHECKSCH = 0x4BC - SYS_CHECKSCHENV = 0x4BC - SYS_CLEARERR = 0x04C - SYS_CONNECTS = 0x4B5 - SYS_CONNECTSERVER = 0x4B5 - SYS_CONNECTW = 0x4B4 - SYS_CONNECTWORKMGR = 0x4B4 - SYS_CONTINUE = 0x4B3 - SYS_CONTINUEWORKUNIT = 0x4B3 - SYS_COPYSIGN = 0x4C2 - SYS_CREATEWO = 0x4B2 - SYS_CREATEWORKUNIT = 0x4B2 - SYS_DELETEWO = 0x4B9 - SYS_DELETEWORKUNIT = 0x4B9 - SYS_DISCONNE = 0x4B6 - SYS_DISCONNECTSERVER = 0x4B6 - SYS_FEOF = 0x04D - SYS_FERROR = 0x04A - SYS_FINITE = 0x4C8 - SYS_GAMMA_R = 0x4E2 - SYS_JOINWORK = 0x4B7 - SYS_JOINWORKUNIT = 0x4B7 - SYS_LEAVEWOR = 0x4B8 - SYS_LEAVEWORKUNIT = 0x4B8 - SYS_LGAMMA_R = 0x4EB - SYS_MATHERR = 0x4D0 - SYS_PERROR = 0x04F - SYS_QUERYMET = 0x4BA - SYS_QUERYMETRICS = 0x4BA - SYS_QUERYSCH = 0x4BB - SYS_QUERYSCHENV = 0x4BB - SYS_REWIND = 0x04B - SYS_SCALBN = 0x4D4 - SYS_SIGNIFIC = 0x4D5 - SYS_SIGNIFICAND = 0x4D5 - SYS___ACOSH_B = 0x4DA - SYS___ACOS_B = 0x4D9 - SYS___ASINH_B = 0x4BE - SYS___ASIN_B = 0x4DB - SYS___ATAN2_B = 0x4DC - SYS___ATANH_B = 0x4DD - SYS___ATAN_B = 0x4BF - SYS___CBRT_B = 0x4C0 - SYS___CEIL_B = 0x4C1 - SYS___COSH_B = 0x4DE - SYS___COS_B = 0x4C3 - SYS___DGHT = 0x4A8 - SYS___ENVN = 0x4B0 - SYS___ERFC_B = 0x4C5 - SYS___ERF_B = 0x4C4 - SYS___EXPM1_B = 0x4C6 - SYS___EXP_B = 0x4DF - SYS___FABS_B = 0x4C7 - SYS___FLOOR_B = 0x4C9 - SYS___FMOD_B = 0x4E0 - SYS___FP_SETMODE = 0x4F8 - SYS___FREXP_B = 0x4CA - SYS___GAMMA_B = 0x4E1 - SYS___GDRR = 0x4A1 - SYS___HRRNO = 0x4A2 - SYS___HYPOT_B = 0x4E3 - SYS___ILOGB_B = 0x4CB - SYS___ISNAN_B = 0x4CC - SYS___J0_B = 0x4E4 - SYS___J1_B = 0x4E6 - SYS___JN_B = 0x4E8 - SYS___LDEXP_B = 0x4CD - SYS___LGAMMA_B = 0x4EA - SYS___LOG10_B = 0x4ED - SYS___LOG1P_B = 0x4CE - SYS___LOGB_B = 0x4CF - SYS___LOGIN = 0x4F5 - SYS___LOG_B = 0x4EC - SYS___MLOCKALL = 0x4B1 - SYS___MODF_B = 0x4D1 - SYS___NEXTAFTER_B = 0x4D2 - SYS___OPENDIR2 = 0x4F3 - SYS___OPEN_STAT = 0x4F6 - SYS___OPND = 0x4A5 - SYS___OPPT = 0x4A6 - SYS___OPRG = 0x4A3 - SYS___OPRR = 0x4A4 - SYS___PID_AFFINITY = 0x4BD - SYS___POW_B = 0x4EE - SYS___READDIR2 = 0x4F4 - SYS___REMAINDER_B = 0x4EF - SYS___RINT_B = 0x4D3 - SYS___SCALB_B = 0x4F0 - SYS___SIGACTIONSET = 0x4FB - SYS___SIGGM = 0x4A7 - SYS___SINH_B = 0x4F1 - SYS___SIN_B = 0x4D6 - SYS___SQRT_B = 0x4F2 - SYS___TANH_B = 0x4D8 - 
SYS___TAN_B = 0x4D7 - SYS___TRRNO = 0x4AF - SYS___TZNE = 0x4A9 - SYS___TZZN = 0x4AA - SYS___UCREATE = 0x4FC - SYS___UFREE = 0x4FE - SYS___UHEAPREPORT = 0x4FF - SYS___UMALLOC = 0x4FD - SYS___Y0_B = 0x4E5 - SYS___Y1_B = 0x4E7 - SYS___YN_B = 0x4E9 - SYS_ABORT = 0x05C - SYS_ASCTIME_R = 0x5E0 - SYS_ATEXIT = 0x05D - SYS_CONNECTE = 0x5AE - SYS_CONNECTEXPORTIMPORT = 0x5AE - SYS_CTIME_R = 0x5E1 - SYS_DN_COMP = 0x5DF - SYS_DN_EXPAND = 0x5DD - SYS_DN_SKIPNAME = 0x5DE - SYS_EXIT = 0x05A - SYS_EXPORTWO = 0x5A1 - SYS_EXPORTWORKUNIT = 0x5A1 - SYS_EXTRACTW = 0x5A5 - SYS_EXTRACTWORKUNIT = 0x5A5 - SYS_FSEEKO = 0x5C9 - SYS_FTELLO = 0x5C8 - SYS_GETGRGID_R = 0x5E7 - SYS_GETGRNAM_R = 0x5E8 - SYS_GETLOGIN_R = 0x5E9 - SYS_GETPWNAM_R = 0x5EA - SYS_GETPWUID_R = 0x5EB - SYS_GMTIME_R = 0x5E2 - SYS_IMPORTWO = 0x5A3 - SYS_IMPORTWORKUNIT = 0x5A3 - SYS_INET_NTOP = 0x5D3 - SYS_INET_PTON = 0x5D4 - SYS_LLABS = 0x5CE - SYS_LLDIV = 0x5CB - SYS_LOCALTIME_R = 0x5E3 - SYS_PTHREAD_ATFORK = 0x5ED - SYS_PTHREAD_ATTR_GETDETACHSTATE_U98 = 0x5FB - SYS_PTHREAD_ATTR_GETGUARDSIZE = 0x5EE - SYS_PTHREAD_ATTR_GETSCHEDPARAM = 0x5F9 - SYS_PTHREAD_ATTR_GETSTACKADDR = 0x5EF - SYS_PTHREAD_ATTR_SETDETACHSTATE_U98 = 0x5FC - SYS_PTHREAD_ATTR_SETGUARDSIZE = 0x5F0 - SYS_PTHREAD_ATTR_SETSCHEDPARAM = 0x5FA - SYS_PTHREAD_ATTR_SETSTACKADDR = 0x5F1 - SYS_PTHREAD_CONDATTR_GETPSHARED = 0x5F2 - SYS_PTHREAD_CONDATTR_SETPSHARED = 0x5F3 - SYS_PTHREAD_DETACH_U98 = 0x5FD - SYS_PTHREAD_GETCONCURRENCY = 0x5F4 - SYS_PTHREAD_GETSPECIFIC_U98 = 0x5FE - SYS_PTHREAD_KEY_DELETE = 0x5F5 - SYS_PTHREAD_SETCANCELSTATE = 0x5FF - SYS_PTHREAD_SETCONCURRENCY = 0x5F6 - SYS_PTHREAD_SIGMASK = 0x5F7 - SYS_QUERYENC = 0x5AD - SYS_QUERYWORKUNITCLASSIFICATION = 0x5AD - SYS_RAISE = 0x05E - SYS_RAND_R = 0x5E4 - SYS_READDIR_R = 0x5E6 - SYS_REALLOC = 0x05B - SYS_RES_INIT = 0x5D8 - SYS_RES_MKQUERY = 0x5D7 - SYS_RES_QUERY = 0x5D9 - SYS_RES_QUERYDOMAIN = 0x5DC - SYS_RES_SEARCH = 0x5DA - SYS_RES_SEND = 0x5DB - SYS_SETJMP = 0x05F - SYS_SIGQUEUE = 0x5A9 - SYS_STRTOK_R = 0x5E5 - SYS_STRTOLL = 0x5B0 - SYS_STRTOULL = 0x5B1 - SYS_TTYNAME_R = 0x5EC - SYS_UNDOEXPO = 0x5A2 - SYS_UNDOEXPORTWORKUNIT = 0x5A2 - SYS_UNDOIMPO = 0x5A4 - SYS_UNDOIMPORTWORKUNIT = 0x5A4 - SYS_WCSTOLL = 0x5CC - SYS_WCSTOULL = 0x5CD - SYS___ABORT = 0x05C - SYS___CONSOLE2 = 0x5D2 - SYS___CPL = 0x5A6 - SYS___DISCARDDATA = 0x5F8 - SYS___DSA_PREV = 0x5B2 - SYS___EP_FIND = 0x5B3 - SYS___FP_SWAPMODE = 0x5AF - SYS___GETUSERID = 0x5AB - SYS___GET_CPUID = 0x5B9 - SYS___GET_SYSTEM_SETTINGS = 0x5BA - SYS___IPDOMAINNAME = 0x5AC - SYS___MAP_INIT = 0x5A7 - SYS___MAP_SERVICE = 0x5A8 - SYS___MOUNT = 0x5AA - SYS___MSGRCV_TIMED = 0x5B7 - SYS___RES = 0x5D6 - SYS___SEMOP_TIMED = 0x5B8 - SYS___SERVER_THREADS_QUERY = 0x5B4 - SYS_FPRINTF = 0x06D - SYS_FSCANF = 0x06A - SYS_PRINTF = 0x06F - SYS_SETBUF = 0x06B - SYS_SETVBUF = 0x06C - SYS_SSCANF = 0x06E - SYS___CATGETS_A = 0x6C0 - SYS___CHAUDIT_A = 0x6F4 - SYS___CHMOD_A = 0x6E8 - SYS___COLLATE_INIT_A = 0x6AC - SYS___CREAT_A = 0x6F6 - SYS___CTYPE_INIT_A = 0x6AF - SYS___DLLLOAD_A = 0x6DF - SYS___DLLQUERYFN_A = 0x6E0 - SYS___DLLQUERYVAR_A = 0x6E1 - SYS___E2A_L = 0x6E3 - SYS___EXECLE_A = 0x6A0 - SYS___EXECLP_A = 0x6A4 - SYS___EXECVE_A = 0x6C1 - SYS___EXECVP_A = 0x6C2 - SYS___EXECV_A = 0x6B1 - SYS___FPRINTF_A = 0x6FA - SYS___GETADDRINFO_A = 0x6BF - SYS___GETNAMEINFO_A = 0x6C4 - SYS___GET_WCTYPE_STD_A = 0x6AE - SYS___ICONV_OPEN_A = 0x6DE - SYS___IF_INDEXTONAME_A = 0x6DC - SYS___IF_NAMETOINDEX_A = 0x6DB - SYS___ISWCTYPE_A = 0x6B0 - SYS___IS_WCTYPE_STD_A = 0x6B2 - SYS___LOCALECONV_A = 0x6B8 - SYS___LOCALECONV_STD_A = 
0x6B9
[vendored z/OS syscall-number table (machine-generated); the diff lines of this hunk were flattened into run-on text. This span removes the old constant block from SYS___LOCALE_INIT_A = 0x6B7 through SYS___CACOS_H = 0x999 (hex values only) and re-adds the table in ascending numeric order with the decimal value in a trailing comment, one constant per line in the source file, from SYS_LOG = 0x17 // 23 through SYS___MBSTOWCS_STD_A = 0x674 // 1652.]
SYS___MBTOWC_A = 0x675 // 1653 + SYS___MBTOWC_ISO1 = 0x676 // 1654 + SYS___MBTOWC_SBCS = 0x677 // 1655 + SYS___MBTOWC_MBCS = 0x678 // 1656 + SYS___MBTOWC_UTF = 0x679 // 1657 + SYS___WCSTOMBS_A = 0x67A // 1658 + SYS___WCSTOMBS_STD_A = 0x67B // 1659 + SYS___WCSWIDTH_A = 0x67C // 1660 + SYS___GETGRGID_R_A = 0x67D // 1661 + SYS___WCSWIDTH_STD_A = 0x67E // 1662 + SYS___WCSWIDTH_ASIA = 0x67F // 1663 + SYS___CSID_A = 0x680 // 1664 + SYS___CSID_STD_A = 0x681 // 1665 + SYS___WCSID_A = 0x682 // 1666 + SYS___WCSID_STD_A = 0x683 // 1667 + SYS___WCTOMB_A = 0x684 // 1668 + SYS___WCTOMB_ISO1 = 0x685 // 1669 + SYS___WCTOMB_STD_A = 0x686 // 1670 + SYS___WCTOMB_UTF = 0x687 // 1671 + SYS___WCWIDTH_A = 0x688 // 1672 + SYS___GETGRNAM_R_A = 0x689 // 1673 + SYS___WCWIDTH_STD_A = 0x68A // 1674 + SYS___WCWIDTH_ASIA = 0x68B // 1675 + SYS___GETPWNAM_R_A = 0x68C // 1676 + SYS___GETPWUID_R_A = 0x68D // 1677 + SYS___GETLOGIN_R_A = 0x68E // 1678 + SYS___TTYNAME_R_A = 0x68F // 1679 + SYS___READDIR_R_A = 0x690 // 1680 + SYS___E2A_S = 0x691 // 1681 + SYS___FNMATCH_A = 0x692 // 1682 + SYS___FNMATCH_C_A = 0x693 // 1683 + SYS___EXECL_A = 0x694 // 1684 + SYS___FNMATCH_STD_A = 0x695 // 1685 + SYS___REGCOMP_A = 0x696 // 1686 + SYS___REGCOMP_STD_A = 0x697 // 1687 + SYS___REGERROR_A = 0x698 // 1688 + SYS___REGERROR_STD_A = 0x699 // 1689 + SYS___REGEXEC_A = 0x69A // 1690 + SYS___REGEXEC_STD_A = 0x69B // 1691 + SYS___REGFREE_A = 0x69C // 1692 + SYS___REGFREE_STD_A = 0x69D // 1693 + SYS___STRCOLL_A = 0x69E // 1694 + SYS___STRCOLL_C_A = 0x69F // 1695 + SYS___EXECLE_A = 0x6A0 // 1696 + SYS___STRCOLL_STD_A = 0x6A1 // 1697 + SYS___STRXFRM_A = 0x6A2 // 1698 + SYS___STRXFRM_C_A = 0x6A3 // 1699 + SYS___EXECLP_A = 0x6A4 // 1700 + SYS___STRXFRM_STD_A = 0x6A5 // 1701 + SYS___WCSCOLL_A = 0x6A6 // 1702 + SYS___WCSCOLL_C_A = 0x6A7 // 1703 + SYS___WCSCOLL_STD_A = 0x6A8 // 1704 + SYS___WCSXFRM_A = 0x6A9 // 1705 + SYS___WCSXFRM_C_A = 0x6AA // 1706 + SYS___WCSXFRM_STD_A = 0x6AB // 1707 + SYS___COLLATE_INIT_A = 0x6AC // 1708 + SYS___WCTYPE_A = 0x6AD // 1709 + SYS___GET_WCTYPE_STD_A = 0x6AE // 1710 + SYS___CTYPE_INIT_A = 0x6AF // 1711 + SYS___ISWCTYPE_A = 0x6B0 // 1712 + SYS___EXECV_A = 0x6B1 // 1713 + SYS___IS_WCTYPE_STD_A = 0x6B2 // 1714 + SYS___TOWLOWER_A = 0x6B3 // 1715 + SYS___TOWLOWER_STD_A = 0x6B4 // 1716 + SYS___TOWUPPER_A = 0x6B5 // 1717 + SYS___TOWUPPER_STD_A = 0x6B6 // 1718 + SYS___LOCALE_INIT_A = 0x6B7 // 1719 + SYS___LOCALECONV_A = 0x6B8 // 1720 + SYS___LOCALECONV_STD_A = 0x6B9 // 1721 + SYS___NL_LANGINFO_A = 0x6BA // 1722 + SYS___NL_LNAGINFO_STD_A = 0x6BB // 1723 + SYS___MONETARY_INIT_A = 0x6BC // 1724 + SYS___STRFMON_A = 0x6BD // 1725 + SYS___STRFMON_STD_A = 0x6BE // 1726 + SYS___GETADDRINFO_A = 0x6BF // 1727 + SYS___CATGETS_A = 0x6C0 // 1728 + SYS___EXECVE_A = 0x6C1 // 1729 + SYS___EXECVP_A = 0x6C2 // 1730 + SYS___SPAWN_A = 0x6C3 // 1731 + SYS___GETNAMEINFO_A = 0x6C4 // 1732 + SYS___SPAWNP_A = 0x6C5 // 1733 + SYS___NUMERIC_INIT_A = 0x6C6 // 1734 + SYS___RESP_INIT_A = 0x6C7 // 1735 + SYS___RPMATCH_A = 0x6C8 // 1736 + SYS___RPMATCH_C_A = 0x6C9 // 1737 + SYS___RPMATCH_STD_A = 0x6CA // 1738 + SYS___TIME_INIT_A = 0x6CB // 1739 + SYS___STRFTIME_A = 0x6CC // 1740 + SYS___STRFTIME_STD_A = 0x6CD // 1741 + SYS___STRPTIME_A = 0x6CE // 1742 + SYS___STRPTIME_STD_A = 0x6CF // 1743 + SYS___WCSFTIME_A = 0x6D0 // 1744 + SYS___WCSFTIME_STD_A = 0x6D1 // 1745 + SYS_____SPAWN2_A = 0x6D2 // 1746 + SYS_____SPAWNP2_A = 0x6D3 // 1747 + SYS___SYNTAX_INIT_A = 0x6D4 // 1748 + SYS___TOD_INIT_A = 0x6D5 // 1749 + SYS___NL_CSINFO_A = 0x6D6 // 1750 + SYS___NL_MONINFO_A 
= 0x6D7 // 1751 + SYS___NL_NUMINFO_A = 0x6D8 // 1752 + SYS___NL_RESPINFO_A = 0x6D9 // 1753 + SYS___NL_TIMINFO_A = 0x6DA // 1754 + SYS___IF_NAMETOINDEX_A = 0x6DB // 1755 + SYS___IF_INDEXTONAME_A = 0x6DC // 1756 + SYS___PRINTF_A = 0x6DD // 1757 + SYS___ICONV_OPEN_A = 0x6DE // 1758 + SYS___DLLLOAD_A = 0x6DF // 1759 + SYS___DLLQUERYFN_A = 0x6E0 // 1760 + SYS___DLLQUERYVAR_A = 0x6E1 // 1761 + SYS_____CHATTR_A = 0x6E2 // 1762 + SYS___E2A_L = 0x6E3 // 1763 + SYS_____TOCCSID_A = 0x6E4 // 1764 + SYS_____TOCSNAME_A = 0x6E5 // 1765 + SYS_____CCSIDTYPE_A = 0x6E6 // 1766 + SYS_____CSNAMETYPE_A = 0x6E7 // 1767 + SYS___CHMOD_A = 0x6E8 // 1768 + SYS___MKDIR_A = 0x6E9 // 1769 + SYS___STAT_A = 0x6EA // 1770 + SYS___STAT_O_A = 0x6EB // 1771 + SYS___MKFIFO_A = 0x6EC // 1772 + SYS_____OPEN_STAT_A = 0x6ED // 1773 + SYS___LSTAT_A = 0x6EE // 1774 + SYS___LSTAT_O_A = 0x6EF // 1775 + SYS___MKNOD_A = 0x6F0 // 1776 + SYS___MOUNT_A = 0x6F1 // 1777 + SYS___UMOUNT_A = 0x6F2 // 1778 + SYS___CHAUDIT_A = 0x6F4 // 1780 + SYS___W_GETMNTENT_A = 0x6F5 // 1781 + SYS___CREAT_A = 0x6F6 // 1782 + SYS___OPEN_A = 0x6F7 // 1783 + SYS___SETLOCALE_A = 0x6F9 // 1785 + SYS___FPRINTF_A = 0x6FA // 1786 + SYS___SPRINTF_A = 0x6FB // 1787 + SYS___VFPRINTF_A = 0x6FC // 1788 + SYS___VPRINTF_A = 0x6FD // 1789 + SYS___VSPRINTF_A = 0x6FE // 1790 + SYS___VSWPRINTF_A = 0x6FF // 1791 + SYS___SWPRINTF_A = 0x700 // 1792 + SYS___FSCANF_A = 0x701 // 1793 + SYS___SCANF_A = 0x702 // 1794 + SYS___SSCANF_A = 0x703 // 1795 + SYS___SWSCANF_A = 0x704 // 1796 + SYS___ATOF_A = 0x705 // 1797 + SYS___ATOI_A = 0x706 // 1798 + SYS___ATOL_A = 0x707 // 1799 + SYS___STRTOD_A = 0x708 // 1800 + SYS___STRTOL_A = 0x709 // 1801 + SYS___STRTOUL_A = 0x70A // 1802 + SYS_____AE_CORRESTBL_QUERY_A = 0x70B // 1803 + SYS___A64L_A = 0x70C // 1804 + SYS___ECVT_A = 0x70D // 1805 + SYS___FCVT_A = 0x70E // 1806 + SYS___GCVT_A = 0x70F // 1807 + SYS___L64A_A = 0x710 // 1808 + SYS___STRERROR_A = 0x711 // 1809 + SYS___PERROR_A = 0x712 // 1810 + SYS___FETCH_A = 0x713 // 1811 + SYS___GETENV_A = 0x714 // 1812 + SYS___MKSTEMP_A = 0x717 // 1815 + SYS___PTSNAME_A = 0x718 // 1816 + SYS___PUTENV_A = 0x719 // 1817 + SYS___REALPATH_A = 0x71A // 1818 + SYS___SETENV_A = 0x71B // 1819 + SYS___SYSTEM_A = 0x71C // 1820 + SYS___GETOPT_A = 0x71D // 1821 + SYS___CATOPEN_A = 0x71E // 1822 + SYS___ACCESS_A = 0x71F // 1823 + SYS___CHDIR_A = 0x720 // 1824 + SYS___CHOWN_A = 0x721 // 1825 + SYS___CHROOT_A = 0x722 // 1826 + SYS___GETCWD_A = 0x723 // 1827 + SYS___GETWD_A = 0x724 // 1828 + SYS___LCHOWN_A = 0x725 // 1829 + SYS___LINK_A = 0x726 // 1830 + SYS___PATHCONF_A = 0x727 // 1831 + SYS___IF_NAMEINDEX_A = 0x728 // 1832 + SYS___READLINK_A = 0x729 // 1833 + SYS___RMDIR_A = 0x72A // 1834 + SYS___STATVFS_A = 0x72B // 1835 + SYS___SYMLINK_A = 0x72C // 1836 + SYS___TRUNCATE_A = 0x72D // 1837 + SYS___UNLINK_A = 0x72E // 1838 + SYS___GAI_STRERROR_A = 0x72F // 1839 + SYS___EXTLINK_NP_A = 0x730 // 1840 + SYS___ISALNUM_A = 0x731 // 1841 + SYS___ISALPHA_A = 0x732 // 1842 + SYS___A2E_S = 0x733 // 1843 + SYS___ISCNTRL_A = 0x734 // 1844 + SYS___ISDIGIT_A = 0x735 // 1845 + SYS___ISGRAPH_A = 0x736 // 1846 + SYS___ISLOWER_A = 0x737 // 1847 + SYS___ISPRINT_A = 0x738 // 1848 + SYS___ISPUNCT_A = 0x739 // 1849 + SYS___ISSPACE_A = 0x73A // 1850 + SYS___ISUPPER_A = 0x73B // 1851 + SYS___ISXDIGIT_A = 0x73C // 1852 + SYS___TOLOWER_A = 0x73D // 1853 + SYS___TOUPPER_A = 0x73E // 1854 + SYS___ISWALNUM_A = 0x73F // 1855 + SYS___ISWALPHA_A = 0x740 // 1856 + SYS___A2E_L = 0x741 // 1857 + SYS___ISWCNTRL_A = 0x742 // 1858 + SYS___ISWDIGIT_A = 
0x743 // 1859 + SYS___ISWGRAPH_A = 0x744 // 1860 + SYS___ISWLOWER_A = 0x745 // 1861 + SYS___ISWPRINT_A = 0x746 // 1862 + SYS___ISWPUNCT_A = 0x747 // 1863 + SYS___ISWSPACE_A = 0x748 // 1864 + SYS___ISWUPPER_A = 0x749 // 1865 + SYS___ISWXDIGIT_A = 0x74A // 1866 + SYS___CONFSTR_A = 0x74B // 1867 + SYS___FTOK_A = 0x74C // 1868 + SYS___MKTEMP_A = 0x74D // 1869 + SYS___FDOPEN_A = 0x74E // 1870 + SYS___FLDATA_A = 0x74F // 1871 + SYS___REMOVE_A = 0x750 // 1872 + SYS___RENAME_A = 0x751 // 1873 + SYS___TMPNAM_A = 0x752 // 1874 + SYS___FOPEN_A = 0x753 // 1875 + SYS___FREOPEN_A = 0x754 // 1876 + SYS___CUSERID_A = 0x755 // 1877 + SYS___POPEN_A = 0x756 // 1878 + SYS___TEMPNAM_A = 0x757 // 1879 + SYS___FTW_A = 0x758 // 1880 + SYS___GETGRENT_A = 0x759 // 1881 + SYS___GETGRGID_A = 0x75A // 1882 + SYS___GETGRNAM_A = 0x75B // 1883 + SYS___GETGROUPSBYNAME_A = 0x75C // 1884 + SYS___GETHOSTENT_A = 0x75D // 1885 + SYS___GETHOSTNAME_A = 0x75E // 1886 + SYS___GETLOGIN_A = 0x75F // 1887 + SYS___INET_NTOP_A = 0x760 // 1888 + SYS___GETPASS_A = 0x761 // 1889 + SYS___GETPWENT_A = 0x762 // 1890 + SYS___GETPWNAM_A = 0x763 // 1891 + SYS___GETPWUID_A = 0x764 // 1892 + SYS_____CHECK_RESOURCE_AUTH_NP_A = 0x765 // 1893 + SYS___CHECKSCHENV_A = 0x766 // 1894 + SYS___CONNECTSERVER_A = 0x767 // 1895 + SYS___CONNECTWORKMGR_A = 0x768 // 1896 + SYS_____CONSOLE_A = 0x769 // 1897 + SYS___CREATEWORKUNIT_A = 0x76A // 1898 + SYS___CTERMID_A = 0x76B // 1899 + SYS___FMTMSG_A = 0x76C // 1900 + SYS___INITGROUPS_A = 0x76D // 1901 + SYS_____LOGIN_A = 0x76E // 1902 + SYS___MSGRCV_A = 0x76F // 1903 + SYS___MSGSND_A = 0x770 // 1904 + SYS___MSGXRCV_A = 0x771 // 1905 + SYS___NFTW_A = 0x772 // 1906 + SYS_____PASSWD_A = 0x773 // 1907 + SYS___PTHREAD_SECURITY_NP_A = 0x774 // 1908 + SYS___QUERYMETRICS_A = 0x775 // 1909 + SYS___QUERYSCHENV = 0x776 // 1910 + SYS___READV_A = 0x777 // 1911 + SYS_____SERVER_CLASSIFY_A = 0x778 // 1912 + SYS_____SERVER_INIT_A = 0x779 // 1913 + SYS_____SERVER_PWU_A = 0x77A // 1914 + SYS___STRCASECMP_A = 0x77B // 1915 + SYS___STRNCASECMP_A = 0x77C // 1916 + SYS___TTYNAME_A = 0x77D // 1917 + SYS___UNAME_A = 0x77E // 1918 + SYS___UTIMES_A = 0x77F // 1919 + SYS___W_GETPSENT_A = 0x780 // 1920 + SYS___WRITEV_A = 0x781 // 1921 + SYS___W_STATFS_A = 0x782 // 1922 + SYS___W_STATVFS_A = 0x783 // 1923 + SYS___FPUTC_A = 0x784 // 1924 + SYS___PUTCHAR_A = 0x785 // 1925 + SYS___PUTS_A = 0x786 // 1926 + SYS___FGETS_A = 0x787 // 1927 + SYS___GETS_A = 0x788 // 1928 + SYS___FPUTS_A = 0x789 // 1929 + SYS___FREAD_A = 0x78A // 1930 + SYS___FWRITE_A = 0x78B // 1931 + SYS___OPEN_O_A = 0x78C // 1932 + SYS___ISASCII = 0x78D // 1933 + SYS___CREAT_O_A = 0x78E // 1934 + SYS___ENVNA = 0x78F // 1935 + SYS___PUTC_A = 0x790 // 1936 + SYS___AE_THREAD_SETMODE = 0x791 // 1937 + SYS___AE_THREAD_SWAPMODE = 0x792 // 1938 + SYS___GETNETBYADDR_A = 0x793 // 1939 + SYS___GETNETBYNAME_A = 0x794 // 1940 + SYS___GETNETENT_A = 0x795 // 1941 + SYS___GETPROTOBYNAME_A = 0x796 // 1942 + SYS___GETPROTOBYNUMBER_A = 0x797 // 1943 + SYS___GETPROTOENT_A = 0x798 // 1944 + SYS___GETSERVBYNAME_A = 0x799 // 1945 + SYS___GETSERVBYPORT_A = 0x79A // 1946 + SYS___GETSERVENT_A = 0x79B // 1947 + SYS___ASCTIME_A = 0x79C // 1948 + SYS___CTIME_A = 0x79D // 1949 + SYS___GETDATE_A = 0x79E // 1950 + SYS___TZSET_A = 0x79F // 1951 + SYS___UTIME_A = 0x7A0 // 1952 + SYS___ASCTIME_R_A = 0x7A1 // 1953 + SYS___CTIME_R_A = 0x7A2 // 1954 + SYS___STRTOLL_A = 0x7A3 // 1955 + SYS___STRTOULL_A = 0x7A4 // 1956 + SYS___FPUTWC_A = 0x7A5 // 1957 + SYS___PUTWC_A = 0x7A6 // 1958 + SYS___PUTWCHAR_A = 0x7A7 // 1959 + 
SYS___FPUTWS_A = 0x7A8 // 1960 + SYS___UNGETWC_A = 0x7A9 // 1961 + SYS___FGETWC_A = 0x7AA // 1962 + SYS___GETWC_A = 0x7AB // 1963 + SYS___GETWCHAR_A = 0x7AC // 1964 + SYS___FGETWS_A = 0x7AD // 1965 + SYS___GETTIMEOFDAY_A = 0x7AE // 1966 + SYS___GMTIME_A = 0x7AF // 1967 + SYS___GMTIME_R_A = 0x7B0 // 1968 + SYS___LOCALTIME_A = 0x7B1 // 1969 + SYS___LOCALTIME_R_A = 0x7B2 // 1970 + SYS___MKTIME_A = 0x7B3 // 1971 + SYS___TZZNA = 0x7B4 // 1972 + SYS_UNATEXIT = 0x7B5 // 1973 + SYS___CEE3DMP_A = 0x7B6 // 1974 + SYS___CDUMP_A = 0x7B7 // 1975 + SYS___CSNAP_A = 0x7B8 // 1976 + SYS___CTEST_A = 0x7B9 // 1977 + SYS___CTRACE_A = 0x7BA // 1978 + SYS___VSWPRNTF2_A = 0x7BB // 1979 + SYS___INET_PTON_A = 0x7BC // 1980 + SYS___SYSLOG_A = 0x7BD // 1981 + SYS___CRYPT_A = 0x7BE // 1982 + SYS_____OPENDIR2_A = 0x7BF // 1983 + SYS_____READDIR2_A = 0x7C0 // 1984 + SYS___OPENDIR_A = 0x7C2 // 1986 + SYS___READDIR_A = 0x7C3 // 1987 + SYS_PREAD = 0x7C7 // 1991 + SYS_PWRITE = 0x7C8 // 1992 + SYS_M_CREATE_LAYOUT = 0x7C9 // 1993 + SYS_M_DESTROY_LAYOUT = 0x7CA // 1994 + SYS_M_GETVALUES_LAYOUT = 0x7CB // 1995 + SYS_M_SETVALUES_LAYOUT = 0x7CC // 1996 + SYS_M_TRANSFORM_LAYOUT = 0x7CD // 1997 + SYS_M_WTRANSFORM_LAYOUT = 0x7CE // 1998 + SYS_FWPRINTF = 0x7D1 // 2001 + SYS_WPRINTF = 0x7D2 // 2002 + SYS_VFWPRINT = 0x7D3 // 2003 + SYS_VFWPRINTF = 0x7D3 // 2003 + SYS_VWPRINTF = 0x7D4 // 2004 + SYS_FWSCANF = 0x7D5 // 2005 + SYS_WSCANF = 0x7D6 // 2006 + SYS_WCTRANS = 0x7D7 // 2007 + SYS_TOWCTRAN = 0x7D8 // 2008 + SYS_TOWCTRANS = 0x7D8 // 2008 + SYS___WCSTOD_A = 0x7D9 // 2009 + SYS___WCSTOL_A = 0x7DA // 2010 + SYS___WCSTOUL_A = 0x7DB // 2011 + SYS___BASENAME_A = 0x7DC // 2012 + SYS___DIRNAME_A = 0x7DD // 2013 + SYS___GLOB_A = 0x7DE // 2014 + SYS_FWIDE = 0x7DF // 2015 + SYS___OSNAME = 0x7E0 // 2016 + SYS_____OSNAME_A = 0x7E1 // 2017 + SYS___BTOWC_A = 0x7E4 // 2020 + SYS___WCTOB_A = 0x7E5 // 2021 + SYS___DBM_OPEN_A = 0x7E6 // 2022 + SYS___VFPRINTF2_A = 0x7E7 // 2023 + SYS___VPRINTF2_A = 0x7E8 // 2024 + SYS___VSPRINTF2_A = 0x7E9 // 2025 + SYS___CEIL_H = 0x7EA // 2026 + SYS___FLOOR_H = 0x7EB // 2027 + SYS___MODF_H = 0x7EC // 2028 + SYS___FABS_H = 0x7ED // 2029 + SYS___J0_H = 0x7EE // 2030 + SYS___J1_H = 0x7EF // 2031 + SYS___JN_H = 0x7F0 // 2032 + SYS___Y0_H = 0x7F1 // 2033 + SYS___Y1_H = 0x7F2 // 2034 + SYS___YN_H = 0x7F3 // 2035 + SYS___CEILF_H = 0x7F4 // 2036 + SYS___CEILL_H = 0x7F5 // 2037 + SYS___FLOORF_H = 0x7F6 // 2038 + SYS___FLOORL_H = 0x7F7 // 2039 + SYS___MODFF_H = 0x7F8 // 2040 + SYS___MODFL_H = 0x7F9 // 2041 + SYS___FABSF_H = 0x7FA // 2042 + SYS___FABSL_H = 0x7FB // 2043 + SYS___MALLOC24 = 0x7FC // 2044 + SYS___MALLOC31 = 0x7FD // 2045 + SYS_ACL_INIT = 0x7FE // 2046 + SYS_ACL_FREE = 0x7FF // 2047 + SYS_ACL_FIRST_ENTRY = 0x800 // 2048 + SYS_ACL_GET_ENTRY = 0x801 // 2049 + SYS_ACL_VALID = 0x802 // 2050 + SYS_ACL_CREATE_ENTRY = 0x803 // 2051 + SYS_ACL_DELETE_ENTRY = 0x804 // 2052 + SYS_ACL_UPDATE_ENTRY = 0x805 // 2053 + SYS_ACL_DELETE_FD = 0x806 // 2054 + SYS_ACL_DELETE_FILE = 0x807 // 2055 + SYS_ACL_GET_FD = 0x808 // 2056 + SYS_ACL_GET_FILE = 0x809 // 2057 + SYS_ACL_SET_FD = 0x80A // 2058 + SYS_ACL_SET_FILE = 0x80B // 2059 + SYS_ACL_FROM_TEXT = 0x80C // 2060 + SYS_ACL_TO_TEXT = 0x80D // 2061 + SYS_ACL_SORT = 0x80E // 2062 + SYS___SHUTDOWN_REGISTRATION = 0x80F // 2063 + SYS___ERFL_B = 0x810 // 2064 + SYS___ERFCL_B = 0x811 // 2065 + SYS___LGAMMAL_B = 0x812 // 2066 + SYS___SETHOOKEVENTS = 0x813 // 2067 + SYS_IF_NAMETOINDEX = 0x814 // 2068 + SYS_IF_INDEXTONAME = 0x815 // 2069 + SYS_IF_NAMEINDEX = 0x816 // 2070 + SYS_IF_FREENAMEINDEX = 
0x817 // 2071 + SYS_GETADDRINFO = 0x818 // 2072 + SYS_GETNAMEINFO = 0x819 // 2073 + SYS_FREEADDRINFO = 0x81A // 2074 + SYS_GAI_STRERROR = 0x81B // 2075 + SYS_REXEC_AF = 0x81C // 2076 + SYS___POE = 0x81D // 2077 + SYS___DYNALLOC_A = 0x81F // 2079 + SYS___DYNFREE_A = 0x820 // 2080 + SYS___RES_QUERY_A = 0x821 // 2081 + SYS___RES_SEARCH_A = 0x822 // 2082 + SYS___RES_QUERYDOMAIN_A = 0x823 // 2083 + SYS___RES_MKQUERY_A = 0x824 // 2084 + SYS___RES_SEND_A = 0x825 // 2085 + SYS___DN_EXPAND_A = 0x826 // 2086 + SYS___DN_SKIPNAME_A = 0x827 // 2087 + SYS___DN_COMP_A = 0x828 // 2088 + SYS___DN_FIND_A = 0x829 // 2089 + SYS___NLIST_A = 0x82A // 2090 + SYS_____TCGETCP_A = 0x82B // 2091 + SYS_____TCSETCP_A = 0x82C // 2092 + SYS_____W_PIOCTL_A = 0x82E // 2094 + SYS___INET_ADDR_A = 0x82F // 2095 + SYS___INET_NTOA_A = 0x830 // 2096 + SYS___INET_NETWORK_A = 0x831 // 2097 + SYS___ACCEPT_A = 0x832 // 2098 + SYS___ACCEPT_AND_RECV_A = 0x833 // 2099 + SYS___BIND_A = 0x834 // 2100 + SYS___CONNECT_A = 0x835 // 2101 + SYS___GETPEERNAME_A = 0x836 // 2102 + SYS___GETSOCKNAME_A = 0x837 // 2103 + SYS___RECVFROM_A = 0x838 // 2104 + SYS___SENDTO_A = 0x839 // 2105 + SYS___SENDMSG_A = 0x83A // 2106 + SYS___RECVMSG_A = 0x83B // 2107 + SYS_____LCHATTR_A = 0x83C // 2108 + SYS___CABEND = 0x83D // 2109 + SYS___LE_CIB_GET = 0x83E // 2110 + SYS___SET_LAA_FOR_JIT = 0x83F // 2111 + SYS___LCHATTR = 0x840 // 2112 + SYS___WRITEDOWN = 0x841 // 2113 + SYS_PTHREAD_MUTEX_INIT2 = 0x842 // 2114 + SYS___ACOSHF_B = 0x843 // 2115 + SYS___ACOSHL_B = 0x844 // 2116 + SYS___ASINHF_B = 0x845 // 2117 + SYS___ASINHL_B = 0x846 // 2118 + SYS___ATANHF_B = 0x847 // 2119 + SYS___ATANHL_B = 0x848 // 2120 + SYS___CBRTF_B = 0x849 // 2121 + SYS___CBRTL_B = 0x84A // 2122 + SYS___COPYSIGNF_B = 0x84B // 2123 + SYS___COPYSIGNL_B = 0x84C // 2124 + SYS___COTANF_B = 0x84D // 2125 + SYS___COTAN_B = 0x84E // 2126 + SYS___COTANL_B = 0x84F // 2127 + SYS___EXP2F_B = 0x850 // 2128 + SYS___EXP2L_B = 0x851 // 2129 + SYS___EXPM1F_B = 0x852 // 2130 + SYS___EXPM1L_B = 0x853 // 2131 + SYS___FDIMF_B = 0x854 // 2132 + SYS___FDIM_B = 0x855 // 2133 + SYS___FDIML_B = 0x856 // 2134 + SYS___HYPOTF_B = 0x857 // 2135 + SYS___HYPOTL_B = 0x858 // 2136 + SYS___LOG1PF_B = 0x859 // 2137 + SYS___LOG1PL_B = 0x85A // 2138 + SYS___LOG2F_B = 0x85B // 2139 + SYS___LOG2_B = 0x85C // 2140 + SYS___LOG2L_B = 0x85D // 2141 + SYS___REMAINDERF_B = 0x85E // 2142 + SYS___REMAINDERL_B = 0x85F // 2143 + SYS___REMQUOF_B = 0x860 // 2144 + SYS___REMQUO_B = 0x861 // 2145 + SYS___REMQUOL_B = 0x862 // 2146 + SYS___TGAMMAF_B = 0x863 // 2147 + SYS___TGAMMA_B = 0x864 // 2148 + SYS___TGAMMAL_B = 0x865 // 2149 + SYS___TRUNCF_B = 0x866 // 2150 + SYS___TRUNC_B = 0x867 // 2151 + SYS___TRUNCL_B = 0x868 // 2152 + SYS___LGAMMAF_B = 0x869 // 2153 + SYS___LROUNDF_B = 0x86A // 2154 + SYS___LROUND_B = 0x86B // 2155 + SYS___ERFF_B = 0x86C // 2156 + SYS___ERFCF_B = 0x86D // 2157 + SYS_ACOSHF = 0x86E // 2158 + SYS_ACOSHL = 0x86F // 2159 + SYS_ASINHF = 0x870 // 2160 + SYS_ASINHL = 0x871 // 2161 + SYS_ATANHF = 0x872 // 2162 + SYS_ATANHL = 0x873 // 2163 + SYS_CBRTF = 0x874 // 2164 + SYS_CBRTL = 0x875 // 2165 + SYS_COPYSIGNF = 0x876 // 2166 + SYS_CPYSIGNF = 0x876 // 2166 + SYS_COPYSIGNL = 0x877 // 2167 + SYS_CPYSIGNL = 0x877 // 2167 + SYS_COTANF = 0x878 // 2168 + SYS___COTANF = 0x878 // 2168 + SYS_COTAN = 0x879 // 2169 + SYS___COTAN = 0x879 // 2169 + SYS_COTANL = 0x87A // 2170 + SYS___COTANL = 0x87A // 2170 + SYS_EXP2F = 0x87B // 2171 + SYS_EXP2L = 0x87C // 2172 + SYS_EXPM1F = 0x87D // 2173 + SYS_EXPM1L = 0x87E // 2174 + SYS_FDIMF = 0x87F 
// 2175 + SYS_FDIM = 0x881 // 2177 + SYS_FDIML = 0x882 // 2178 + SYS_HYPOTF = 0x883 // 2179 + SYS_HYPOTL = 0x884 // 2180 + SYS_LOG1PF = 0x885 // 2181 + SYS_LOG1PL = 0x886 // 2182 + SYS_LOG2F = 0x887 // 2183 + SYS_LOG2 = 0x888 // 2184 + SYS_LOG2L = 0x889 // 2185 + SYS_REMAINDERF = 0x88A // 2186 + SYS_REMAINDF = 0x88A // 2186 + SYS_REMAINDERL = 0x88B // 2187 + SYS_REMAINDL = 0x88B // 2187 + SYS_REMQUOF = 0x88C // 2188 + SYS_REMQUO = 0x88D // 2189 + SYS_REMQUOL = 0x88E // 2190 + SYS_TGAMMAF = 0x88F // 2191 + SYS_TGAMMA = 0x890 // 2192 + SYS_TGAMMAL = 0x891 // 2193 + SYS_TRUNCF = 0x892 // 2194 + SYS_TRUNC = 0x893 // 2195 + SYS_TRUNCL = 0x894 // 2196 + SYS_LGAMMAF = 0x895 // 2197 + SYS_LGAMMAL = 0x896 // 2198 + SYS_LROUNDF = 0x897 // 2199 + SYS_LROUND = 0x898 // 2200 + SYS_ERFF = 0x899 // 2201 + SYS_ERFL = 0x89A // 2202 + SYS_ERFCF = 0x89B // 2203 + SYS_ERFCL = 0x89C // 2204 + SYS___EXP2_B = 0x89D // 2205 + SYS_EXP2 = 0x89E // 2206 + SYS___FAR_JUMP = 0x89F // 2207 + SYS___TCGETATTR_A = 0x8A1 // 2209 + SYS___TCSETATTR_A = 0x8A2 // 2210 + SYS___SUPERKILL = 0x8A4 // 2212 + SYS___LE_CONDITION_TOKEN_BUILD = 0x8A5 // 2213 + SYS___LE_MSG_ADD_INSERT = 0x8A6 // 2214 + SYS___LE_MSG_GET = 0x8A7 // 2215 + SYS___LE_MSG_GET_AND_WRITE = 0x8A8 // 2216 + SYS___LE_MSG_WRITE = 0x8A9 // 2217 + SYS___ITOA = 0x8AA // 2218 + SYS___UTOA = 0x8AB // 2219 + SYS___LTOA = 0x8AC // 2220 + SYS___ULTOA = 0x8AD // 2221 + SYS___LLTOA = 0x8AE // 2222 + SYS___ULLTOA = 0x8AF // 2223 + SYS___ITOA_A = 0x8B0 // 2224 + SYS___UTOA_A = 0x8B1 // 2225 + SYS___LTOA_A = 0x8B2 // 2226 + SYS___ULTOA_A = 0x8B3 // 2227 + SYS___LLTOA_A = 0x8B4 // 2228 + SYS___ULLTOA_A = 0x8B5 // 2229 + SYS_____GETENV_A = 0x8C3 // 2243 + SYS___REXEC_A = 0x8C4 // 2244 + SYS___REXEC_AF_A = 0x8C5 // 2245 + SYS___GETUTXENT_A = 0x8C6 // 2246 + SYS___GETUTXID_A = 0x8C7 // 2247 + SYS___GETUTXLINE_A = 0x8C8 // 2248 + SYS___PUTUTXLINE_A = 0x8C9 // 2249 + SYS_____UTMPXNAME_A = 0x8CA // 2250 + SYS___PUTC_UNLOCKED_A = 0x8CB // 2251 + SYS___PUTCHAR_UNLOCKED_A = 0x8CC // 2252 + SYS___SNPRINTF_A = 0x8CD // 2253 + SYS___VSNPRINTF_A = 0x8CE // 2254 + SYS___DLOPEN_A = 0x8D0 // 2256 + SYS___DLSYM_A = 0x8D1 // 2257 + SYS___DLERROR_A = 0x8D2 // 2258 + SYS_FLOCKFILE = 0x8D3 // 2259 + SYS_FTRYLOCKFILE = 0x8D4 // 2260 + SYS_FUNLOCKFILE = 0x8D5 // 2261 + SYS_GETC_UNLOCKED = 0x8D6 // 2262 + SYS_GETCHAR_UNLOCKED = 0x8D7 // 2263 + SYS_PUTC_UNLOCKED = 0x8D8 // 2264 + SYS_PUTCHAR_UNLOCKED = 0x8D9 // 2265 + SYS_SNPRINTF = 0x8DA // 2266 + SYS_VSNPRINTF = 0x8DB // 2267 + SYS_DLOPEN = 0x8DD // 2269 + SYS_DLSYM = 0x8DE // 2270 + SYS_DLCLOSE = 0x8DF // 2271 + SYS_DLERROR = 0x8E0 // 2272 + SYS___SET_EXCEPTION_HANDLER = 0x8E2 // 2274 + SYS___RESET_EXCEPTION_HANDLER = 0x8E3 // 2275 + SYS___VHM_EVENT = 0x8E4 // 2276 + SYS___ABS_H = 0x8E6 // 2278 + SYS___ABSF_H = 0x8E7 // 2279 + SYS___ABSL_H = 0x8E8 // 2280 + SYS___ACOS_H = 0x8E9 // 2281 + SYS___ACOSF_H = 0x8EA // 2282 + SYS___ACOSL_H = 0x8EB // 2283 + SYS___ACOSH_H = 0x8EC // 2284 + SYS___ASIN_H = 0x8ED // 2285 + SYS___ASINF_H = 0x8EE // 2286 + SYS___ASINL_H = 0x8EF // 2287 + SYS___ASINH_H = 0x8F0 // 2288 + SYS___ATAN_H = 0x8F1 // 2289 + SYS___ATANF_H = 0x8F2 // 2290 + SYS___ATANL_H = 0x8F3 // 2291 + SYS___ATANH_H = 0x8F4 // 2292 + SYS___ATANHF_H = 0x8F5 // 2293 + SYS___ATANHL_H = 0x8F6 // 2294 + SYS___ATAN2_H = 0x8F7 // 2295 + SYS___ATAN2F_H = 0x8F8 // 2296 + SYS___ATAN2L_H = 0x8F9 // 2297 + SYS___CBRT_H = 0x8FA // 2298 + SYS___COPYSIGNF_H = 0x8FB // 2299 + SYS___COPYSIGNL_H = 0x8FC // 2300 + SYS___COS_H = 0x8FD // 2301 + SYS___COSF_H = 0x8FE // 2302 
+ SYS___COSL_H = 0x8FF // 2303 + SYS___COSHF_H = 0x900 // 2304 + SYS___COSHL_H = 0x901 // 2305 + SYS___COTAN_H = 0x902 // 2306 + SYS___COTANF_H = 0x903 // 2307 + SYS___COTANL_H = 0x904 // 2308 + SYS___ERF_H = 0x905 // 2309 + SYS___ERFF_H = 0x906 // 2310 + SYS___ERFL_H = 0x907 // 2311 + SYS___ERFC_H = 0x908 // 2312 + SYS___ERFCF_H = 0x909 // 2313 + SYS___ERFCL_H = 0x90A // 2314 + SYS___EXP_H = 0x90B // 2315 + SYS___EXPF_H = 0x90C // 2316 + SYS___EXPL_H = 0x90D // 2317 + SYS___EXPM1_H = 0x90E // 2318 + SYS___FDIM_H = 0x90F // 2319 + SYS___FDIMF_H = 0x910 // 2320 + SYS___FDIML_H = 0x911 // 2321 + SYS___FMOD_H = 0x912 // 2322 + SYS___FMODF_H = 0x913 // 2323 + SYS___FMODL_H = 0x914 // 2324 + SYS___GAMMA_H = 0x915 // 2325 + SYS___HYPOT_H = 0x916 // 2326 + SYS___ILOGB_H = 0x917 // 2327 + SYS___LGAMMA_H = 0x918 // 2328 + SYS___LGAMMAF_H = 0x919 // 2329 + SYS___LOG_H = 0x91A // 2330 + SYS___LOGF_H = 0x91B // 2331 + SYS___LOGL_H = 0x91C // 2332 + SYS___LOGB_H = 0x91D // 2333 + SYS___LOG2_H = 0x91E // 2334 + SYS___LOG2F_H = 0x91F // 2335 + SYS___LOG2L_H = 0x920 // 2336 + SYS___LOG1P_H = 0x921 // 2337 + SYS___LOG10_H = 0x922 // 2338 + SYS___LOG10F_H = 0x923 // 2339 + SYS___LOG10L_H = 0x924 // 2340 + SYS___LROUND_H = 0x925 // 2341 + SYS___LROUNDF_H = 0x926 // 2342 + SYS___NEXTAFTER_H = 0x927 // 2343 + SYS___POW_H = 0x928 // 2344 + SYS___POWF_H = 0x929 // 2345 + SYS___POWL_H = 0x92A // 2346 + SYS___REMAINDER_H = 0x92B // 2347 + SYS___RINT_H = 0x92C // 2348 + SYS___SCALB_H = 0x92D // 2349 + SYS___SIN_H = 0x92E // 2350 + SYS___SINF_H = 0x92F // 2351 + SYS___SINL_H = 0x930 // 2352 + SYS___SINH_H = 0x931 // 2353 + SYS___SINHF_H = 0x932 // 2354 + SYS___SINHL_H = 0x933 // 2355 + SYS___SQRT_H = 0x934 // 2356 + SYS___SQRTF_H = 0x935 // 2357 + SYS___SQRTL_H = 0x936 // 2358 + SYS___TAN_H = 0x937 // 2359 + SYS___TANF_H = 0x938 // 2360 + SYS___TANL_H = 0x939 // 2361 + SYS___TANH_H = 0x93A // 2362 + SYS___TANHF_H = 0x93B // 2363 + SYS___TANHL_H = 0x93C // 2364 + SYS___TGAMMA_H = 0x93D // 2365 + SYS___TGAMMAF_H = 0x93E // 2366 + SYS___TRUNC_H = 0x93F // 2367 + SYS___TRUNCF_H = 0x940 // 2368 + SYS___TRUNCL_H = 0x941 // 2369 + SYS___COSH_H = 0x942 // 2370 + SYS___LE_DEBUG_SET_RESUME_MCH = 0x943 // 2371 + SYS_VFSCANF = 0x944 // 2372 + SYS_VSCANF = 0x946 // 2374 + SYS_VSSCANF = 0x948 // 2376 + SYS_VFWSCANF = 0x94A // 2378 + SYS_VWSCANF = 0x94C // 2380 + SYS_VSWSCANF = 0x94E // 2382 + SYS_IMAXABS = 0x950 // 2384 + SYS_IMAXDIV = 0x951 // 2385 + SYS_STRTOIMAX = 0x952 // 2386 + SYS_STRTOUMAX = 0x953 // 2387 + SYS_WCSTOIMAX = 0x954 // 2388 + SYS_WCSTOUMAX = 0x955 // 2389 + SYS_ATOLL = 0x956 // 2390 + SYS_STRTOF = 0x957 // 2391 + SYS_STRTOLD = 0x958 // 2392 + SYS_WCSTOF = 0x959 // 2393 + SYS_WCSTOLD = 0x95A // 2394 + SYS_INET6_RTH_SPACE = 0x95B // 2395 + SYS_INET6_RTH_INIT = 0x95C // 2396 + SYS_INET6_RTH_ADD = 0x95D // 2397 + SYS_INET6_RTH_REVERSE = 0x95E // 2398 + SYS_INET6_RTH_SEGMENTS = 0x95F // 2399 + SYS_INET6_RTH_GETADDR = 0x960 // 2400 + SYS_INET6_OPT_INIT = 0x961 // 2401 + SYS_INET6_OPT_APPEND = 0x962 // 2402 + SYS_INET6_OPT_FINISH = 0x963 // 2403 + SYS_INET6_OPT_SET_VAL = 0x964 // 2404 + SYS_INET6_OPT_NEXT = 0x965 // 2405 + SYS_INET6_OPT_FIND = 0x966 // 2406 + SYS_INET6_OPT_GET_VAL = 0x967 // 2407 + SYS___POW_I = 0x987 // 2439 + SYS___POW_I_B = 0x988 // 2440 + SYS___POW_I_H = 0x989 // 2441 + SYS___POW_II = 0x98A // 2442 + SYS___POW_II_B = 0x98B // 2443 + SYS___POW_II_H = 0x98C // 2444 + SYS_CABS = 0x98E // 2446 + SYS___CABS_B = 0x98F // 2447 + SYS___CABS_H = 0x990 // 2448 + SYS_CABSF = 0x991 // 2449 + SYS___CABSF_B = 
0x992 // 2450 + SYS___CABSF_H = 0x993 // 2451 + SYS_CABSL = 0x994 // 2452 + SYS___CABSL_B = 0x995 // 2453 + SYS___CABSL_H = 0x996 // 2454 + SYS_CACOS = 0x997 // 2455 + SYS___CACOS_B = 0x998 // 2456 + SYS___CACOS_H = 0x999 // 2457 + SYS_CACOSF = 0x99A // 2458 + SYS___CACOSF_B = 0x99B // 2459 + SYS___CACOSF_H = 0x99C // 2460 + SYS_CACOSL = 0x99D // 2461 + SYS___CACOSL_B = 0x99E // 2462 + SYS___CACOSL_H = 0x99F // 2463 + SYS_CACOSH = 0x9A0 // 2464 + SYS___CACOSH_B = 0x9A1 // 2465 + SYS___CACOSH_H = 0x9A2 // 2466 + SYS_CACOSHF = 0x9A3 // 2467 + SYS___CACOSHF_B = 0x9A4 // 2468 + SYS___CACOSHF_H = 0x9A5 // 2469 + SYS_CACOSHL = 0x9A6 // 2470 + SYS___CACOSHL_B = 0x9A7 // 2471 + SYS___CACOSHL_H = 0x9A8 // 2472 + SYS_CARG = 0x9A9 // 2473 + SYS___CARG_B = 0x9AA // 2474 + SYS___CARG_H = 0x9AB // 2475 + SYS_CARGF = 0x9AC // 2476 + SYS___CARGF_B = 0x9AD // 2477 + SYS___CARGF_H = 0x9AE // 2478 + SYS_CARGL = 0x9AF // 2479 + SYS___CARGL_B = 0x9B0 // 2480 + SYS___CARGL_H = 0x9B1 // 2481 + SYS_CASIN = 0x9B2 // 2482 + SYS___CASIN_B = 0x9B3 // 2483 + SYS___CASIN_H = 0x9B4 // 2484 + SYS_CASINF = 0x9B5 // 2485 + SYS___CASINF_B = 0x9B6 // 2486 + SYS___CASINF_H = 0x9B7 // 2487 + SYS_CASINL = 0x9B8 // 2488 + SYS___CASINL_B = 0x9B9 // 2489 + SYS___CASINL_H = 0x9BA // 2490 + SYS_CASINH = 0x9BB // 2491 + SYS___CASINH_B = 0x9BC // 2492 + SYS___CASINH_H = 0x9BD // 2493 + SYS_CASINHF = 0x9BE // 2494 + SYS___CASINHF_B = 0x9BF // 2495 + SYS___CASINHF_H = 0x9C0 // 2496 + SYS_CASINHL = 0x9C1 // 2497 + SYS___CASINHL_B = 0x9C2 // 2498 + SYS___CASINHL_H = 0x9C3 // 2499 + SYS_CATAN = 0x9C4 // 2500 + SYS___CATAN_B = 0x9C5 // 2501 + SYS___CATAN_H = 0x9C6 // 2502 + SYS_CATANF = 0x9C7 // 2503 + SYS___CATANF_B = 0x9C8 // 2504 + SYS___CATANF_H = 0x9C9 // 2505 + SYS_CATANL = 0x9CA // 2506 + SYS___CATANL_B = 0x9CB // 2507 + SYS___CATANL_H = 0x9CC // 2508 + SYS_CATANH = 0x9CD // 2509 + SYS___CATANH_B = 0x9CE // 2510 + SYS___CATANH_H = 0x9CF // 2511 + SYS_CATANHF = 0x9D0 // 2512 + SYS___CATANHF_B = 0x9D1 // 2513 + SYS___CATANHF_H = 0x9D2 // 2514 + SYS_CATANHL = 0x9D3 // 2515 + SYS___CATANHL_B = 0x9D4 // 2516 + SYS___CATANHL_H = 0x9D5 // 2517 + SYS_CCOS = 0x9D6 // 2518 + SYS___CCOS_B = 0x9D7 // 2519 + SYS___CCOS_H = 0x9D8 // 2520 + SYS_CCOSF = 0x9D9 // 2521 + SYS___CCOSF_B = 0x9DA // 2522 + SYS___CCOSF_H = 0x9DB // 2523 + SYS_CCOSL = 0x9DC // 2524 + SYS___CCOSL_B = 0x9DD // 2525 + SYS___CCOSL_H = 0x9DE // 2526 + SYS_CCOSH = 0x9DF // 2527 + SYS___CCOSH_B = 0x9E0 // 2528 + SYS___CCOSH_H = 0x9E1 // 2529 + SYS_CCOSHF = 0x9E2 // 2530 + SYS___CCOSHF_B = 0x9E3 // 2531 + SYS___CCOSHF_H = 0x9E4 // 2532 + SYS_CCOSHL = 0x9E5 // 2533 + SYS___CCOSHL_B = 0x9E6 // 2534 + SYS___CCOSHL_H = 0x9E7 // 2535 + SYS_CEXP = 0x9E8 // 2536 + SYS___CEXP_B = 0x9E9 // 2537 + SYS___CEXP_H = 0x9EA // 2538 + SYS_CEXPF = 0x9EB // 2539 + SYS___CEXPF_B = 0x9EC // 2540 + SYS___CEXPF_H = 0x9ED // 2541 + SYS_CEXPL = 0x9EE // 2542 + SYS___CEXPL_B = 0x9EF // 2543 + SYS___CEXPL_H = 0x9F0 // 2544 + SYS_CIMAG = 0x9F1 // 2545 + SYS___CIMAG_B = 0x9F2 // 2546 + SYS___CIMAG_H = 0x9F3 // 2547 + SYS_CIMAGF = 0x9F4 // 2548 + SYS___CIMAGF_B = 0x9F5 // 2549 + SYS___CIMAGF_H = 0x9F6 // 2550 + SYS_CIMAGL = 0x9F7 // 2551 + SYS___CIMAGL_B = 0x9F8 // 2552 + SYS___CIMAGL_H = 0x9F9 // 2553 + SYS___CLOG = 0x9FA // 2554 + SYS___CLOG_B = 0x9FB // 2555 + SYS___CLOG_H = 0x9FC // 2556 + SYS_CLOGF = 0x9FD // 2557 + SYS___CLOGF_B = 0x9FE // 2558 + SYS___CLOGF_H = 0x9FF // 2559 + SYS_CLOGL = 0xA00 // 2560 + SYS___CLOGL_B = 0xA01 // 2561 + SYS___CLOGL_H = 0xA02 // 2562 + SYS_CONJ = 0xA03 // 2563 + 
SYS___CONJ_B = 0xA04 // 2564 + SYS___CONJ_H = 0xA05 // 2565 + SYS_CONJF = 0xA06 // 2566 + SYS___CONJF_B = 0xA07 // 2567 + SYS___CONJF_H = 0xA08 // 2568 + SYS_CONJL = 0xA09 // 2569 + SYS___CONJL_B = 0xA0A // 2570 + SYS___CONJL_H = 0xA0B // 2571 + SYS_CPOW = 0xA0C // 2572 + SYS___CPOW_B = 0xA0D // 2573 + SYS___CPOW_H = 0xA0E // 2574 + SYS_CPOWF = 0xA0F // 2575 + SYS___CPOWF_B = 0xA10 // 2576 + SYS___CPOWF_H = 0xA11 // 2577 + SYS_CPOWL = 0xA12 // 2578 + SYS___CPOWL_B = 0xA13 // 2579 + SYS___CPOWL_H = 0xA14 // 2580 + SYS_CPROJ = 0xA15 // 2581 + SYS___CPROJ_B = 0xA16 // 2582 + SYS___CPROJ_H = 0xA17 // 2583 + SYS_CPROJF = 0xA18 // 2584 + SYS___CPROJF_B = 0xA19 // 2585 + SYS___CPROJF_H = 0xA1A // 2586 + SYS_CPROJL = 0xA1B // 2587 + SYS___CPROJL_B = 0xA1C // 2588 + SYS___CPROJL_H = 0xA1D // 2589 + SYS_CREAL = 0xA1E // 2590 + SYS___CREAL_B = 0xA1F // 2591 + SYS___CREAL_H = 0xA20 // 2592 + SYS_CREALF = 0xA21 // 2593 + SYS___CREALF_B = 0xA22 // 2594 + SYS___CREALF_H = 0xA23 // 2595 + SYS_CREALL = 0xA24 // 2596 + SYS___CREALL_B = 0xA25 // 2597 + SYS___CREALL_H = 0xA26 // 2598 + SYS_CSIN = 0xA27 // 2599 + SYS___CSIN_B = 0xA28 // 2600 + SYS___CSIN_H = 0xA29 // 2601 + SYS_CSINF = 0xA2A // 2602 + SYS___CSINF_B = 0xA2B // 2603 + SYS___CSINF_H = 0xA2C // 2604 + SYS_CSINL = 0xA2D // 2605 + SYS___CSINL_B = 0xA2E // 2606 + SYS___CSINL_H = 0xA2F // 2607 + SYS_CSINH = 0xA30 // 2608 + SYS___CSINH_B = 0xA31 // 2609 + SYS___CSINH_H = 0xA32 // 2610 + SYS_CSINHF = 0xA33 // 2611 + SYS___CSINHF_B = 0xA34 // 2612 + SYS___CSINHF_H = 0xA35 // 2613 + SYS_CSINHL = 0xA36 // 2614 + SYS___CSINHL_B = 0xA37 // 2615 + SYS___CSINHL_H = 0xA38 // 2616 + SYS_CSQRT = 0xA39 // 2617 + SYS___CSQRT_B = 0xA3A // 2618 + SYS___CSQRT_H = 0xA3B // 2619 + SYS_CSQRTF = 0xA3C // 2620 + SYS___CSQRTF_B = 0xA3D // 2621 + SYS___CSQRTF_H = 0xA3E // 2622 + SYS_CSQRTL = 0xA3F // 2623 + SYS___CSQRTL_B = 0xA40 // 2624 + SYS___CSQRTL_H = 0xA41 // 2625 + SYS_CTAN = 0xA42 // 2626 + SYS___CTAN_B = 0xA43 // 2627 + SYS___CTAN_H = 0xA44 // 2628 + SYS_CTANF = 0xA45 // 2629 + SYS___CTANF_B = 0xA46 // 2630 + SYS___CTANF_H = 0xA47 // 2631 + SYS_CTANL = 0xA48 // 2632 + SYS___CTANL_B = 0xA49 // 2633 + SYS___CTANL_H = 0xA4A // 2634 + SYS_CTANH = 0xA4B // 2635 + SYS___CTANH_B = 0xA4C // 2636 + SYS___CTANH_H = 0xA4D // 2637 + SYS_CTANHF = 0xA4E // 2638 + SYS___CTANHF_B = 0xA4F // 2639 + SYS___CTANHF_H = 0xA50 // 2640 + SYS_CTANHL = 0xA51 // 2641 + SYS___CTANHL_B = 0xA52 // 2642 + SYS___CTANHL_H = 0xA53 // 2643 + SYS___ACOSHF_H = 0xA54 // 2644 + SYS___ACOSHL_H = 0xA55 // 2645 + SYS___ASINHF_H = 0xA56 // 2646 + SYS___ASINHL_H = 0xA57 // 2647 + SYS___CBRTF_H = 0xA58 // 2648 + SYS___CBRTL_H = 0xA59 // 2649 + SYS___COPYSIGN_B = 0xA5A // 2650 + SYS___EXPM1F_H = 0xA5B // 2651 + SYS___EXPM1L_H = 0xA5C // 2652 + SYS___EXP2_H = 0xA5D // 2653 + SYS___EXP2F_H = 0xA5E // 2654 + SYS___EXP2L_H = 0xA5F // 2655 + SYS___LOG1PF_H = 0xA60 // 2656 + SYS___LOG1PL_H = 0xA61 // 2657 + SYS___LGAMMAL_H = 0xA62 // 2658 + SYS_FMA = 0xA63 // 2659 + SYS___FMA_B = 0xA64 // 2660 + SYS___FMA_H = 0xA65 // 2661 + SYS_FMAF = 0xA66 // 2662 + SYS___FMAF_B = 0xA67 // 2663 + SYS___FMAF_H = 0xA68 // 2664 + SYS_FMAL = 0xA69 // 2665 + SYS___FMAL_B = 0xA6A // 2666 + SYS___FMAL_H = 0xA6B // 2667 + SYS_FMAX = 0xA6C // 2668 + SYS___FMAX_B = 0xA6D // 2669 + SYS___FMAX_H = 0xA6E // 2670 + SYS_FMAXF = 0xA6F // 2671 + SYS___FMAXF_B = 0xA70 // 2672 + SYS___FMAXF_H = 0xA71 // 2673 + SYS_FMAXL = 0xA72 // 2674 + SYS___FMAXL_B = 0xA73 // 2675 + SYS___FMAXL_H = 0xA74 // 2676 + SYS_FMIN = 0xA75 // 2677 + SYS___FMIN_B = 0xA76 // 
2678 + SYS___FMIN_H = 0xA77 // 2679 + SYS_FMINF = 0xA78 // 2680 + SYS___FMINF_B = 0xA79 // 2681 + SYS___FMINF_H = 0xA7A // 2682 + SYS_FMINL = 0xA7B // 2683 + SYS___FMINL_B = 0xA7C // 2684 + SYS___FMINL_H = 0xA7D // 2685 + SYS_ILOGBF = 0xA7E // 2686 + SYS___ILOGBF_B = 0xA7F // 2687 + SYS___ILOGBF_H = 0xA80 // 2688 + SYS_ILOGBL = 0xA81 // 2689 + SYS___ILOGBL_B = 0xA82 // 2690 + SYS___ILOGBL_H = 0xA83 // 2691 + SYS_LLRINT = 0xA84 // 2692 + SYS___LLRINT_B = 0xA85 // 2693 + SYS___LLRINT_H = 0xA86 // 2694 + SYS_LLRINTF = 0xA87 // 2695 + SYS___LLRINTF_B = 0xA88 // 2696 + SYS___LLRINTF_H = 0xA89 // 2697 + SYS_LLRINTL = 0xA8A // 2698 + SYS___LLRINTL_B = 0xA8B // 2699 + SYS___LLRINTL_H = 0xA8C // 2700 + SYS_LLROUND = 0xA8D // 2701 + SYS___LLROUND_B = 0xA8E // 2702 + SYS___LLROUND_H = 0xA8F // 2703 + SYS_LLROUNDF = 0xA90 // 2704 + SYS___LLROUNDF_B = 0xA91 // 2705 + SYS___LLROUNDF_H = 0xA92 // 2706 + SYS_LLROUNDL = 0xA93 // 2707 + SYS___LLROUNDL_B = 0xA94 // 2708 + SYS___LLROUNDL_H = 0xA95 // 2709 + SYS_LOGBF = 0xA96 // 2710 + SYS___LOGBF_B = 0xA97 // 2711 + SYS___LOGBF_H = 0xA98 // 2712 + SYS_LOGBL = 0xA99 // 2713 + SYS___LOGBL_B = 0xA9A // 2714 + SYS___LOGBL_H = 0xA9B // 2715 + SYS_LRINT = 0xA9C // 2716 + SYS___LRINT_B = 0xA9D // 2717 + SYS___LRINT_H = 0xA9E // 2718 + SYS_LRINTF = 0xA9F // 2719 + SYS___LRINTF_B = 0xAA0 // 2720 + SYS___LRINTF_H = 0xAA1 // 2721 + SYS_LRINTL = 0xAA2 // 2722 + SYS___LRINTL_B = 0xAA3 // 2723 + SYS___LRINTL_H = 0xAA4 // 2724 + SYS_LROUNDL = 0xAA5 // 2725 + SYS___LROUNDL_B = 0xAA6 // 2726 + SYS___LROUNDL_H = 0xAA7 // 2727 + SYS_NAN = 0xAA8 // 2728 + SYS___NAN_B = 0xAA9 // 2729 + SYS_NANF = 0xAAA // 2730 + SYS___NANF_B = 0xAAB // 2731 + SYS_NANL = 0xAAC // 2732 + SYS___NANL_B = 0xAAD // 2733 + SYS_NEARBYINT = 0xAAE // 2734 + SYS___NEARBYINT_B = 0xAAF // 2735 + SYS___NEARBYINT_H = 0xAB0 // 2736 + SYS_NEARBYINTF = 0xAB1 // 2737 + SYS___NEARBYINTF_B = 0xAB2 // 2738 + SYS___NEARBYINTF_H = 0xAB3 // 2739 + SYS_NEARBYINTL = 0xAB4 // 2740 + SYS___NEARBYINTL_B = 0xAB5 // 2741 + SYS___NEARBYINTL_H = 0xAB6 // 2742 + SYS_NEXTAFTERF = 0xAB7 // 2743 + SYS___NEXTAFTERF_B = 0xAB8 // 2744 + SYS___NEXTAFTERF_H = 0xAB9 // 2745 + SYS_NEXTAFTERL = 0xABA // 2746 + SYS___NEXTAFTERL_B = 0xABB // 2747 + SYS___NEXTAFTERL_H = 0xABC // 2748 + SYS_NEXTTOWARD = 0xABD // 2749 + SYS___NEXTTOWARD_B = 0xABE // 2750 + SYS___NEXTTOWARD_H = 0xABF // 2751 + SYS_NEXTTOWARDF = 0xAC0 // 2752 + SYS___NEXTTOWARDF_B = 0xAC1 // 2753 + SYS___NEXTTOWARDF_H = 0xAC2 // 2754 + SYS_NEXTTOWARDL = 0xAC3 // 2755 + SYS___NEXTTOWARDL_B = 0xAC4 // 2756 + SYS___NEXTTOWARDL_H = 0xAC5 // 2757 + SYS___REMAINDERF_H = 0xAC6 // 2758 + SYS___REMAINDERL_H = 0xAC7 // 2759 + SYS___REMQUO_H = 0xAC8 // 2760 + SYS___REMQUOF_H = 0xAC9 // 2761 + SYS___REMQUOL_H = 0xACA // 2762 + SYS_RINTF = 0xACB // 2763 + SYS___RINTF_B = 0xACC // 2764 + SYS_RINTL = 0xACD // 2765 + SYS___RINTL_B = 0xACE // 2766 + SYS_ROUND = 0xACF // 2767 + SYS___ROUND_B = 0xAD0 // 2768 + SYS___ROUND_H = 0xAD1 // 2769 + SYS_ROUNDF = 0xAD2 // 2770 + SYS___ROUNDF_B = 0xAD3 // 2771 + SYS___ROUNDF_H = 0xAD4 // 2772 + SYS_ROUNDL = 0xAD5 // 2773 + SYS___ROUNDL_B = 0xAD6 // 2774 + SYS___ROUNDL_H = 0xAD7 // 2775 + SYS_SCALBLN = 0xAD8 // 2776 + SYS___SCALBLN_B = 0xAD9 // 2777 + SYS___SCALBLN_H = 0xADA // 2778 + SYS_SCALBLNF = 0xADB // 2779 + SYS___SCALBLNF_B = 0xADC // 2780 + SYS___SCALBLNF_H = 0xADD // 2781 + SYS_SCALBLNL = 0xADE // 2782 + SYS___SCALBLNL_B = 0xADF // 2783 + SYS___SCALBLNL_H = 0xAE0 // 2784 + SYS___SCALBN_B = 0xAE1 // 2785 + SYS___SCALBN_H = 0xAE2 // 2786 + SYS_SCALBNF = 
0xAE3 // 2787 + SYS___SCALBNF_B = 0xAE4 // 2788 + SYS___SCALBNF_H = 0xAE5 // 2789 + SYS_SCALBNL = 0xAE6 // 2790 + SYS___SCALBNL_B = 0xAE7 // 2791 + SYS___SCALBNL_H = 0xAE8 // 2792 + SYS___TGAMMAL_H = 0xAE9 // 2793 + SYS_FECLEAREXCEPT = 0xAEA // 2794 + SYS_FEGETENV = 0xAEB // 2795 + SYS_FEGETEXCEPTFLAG = 0xAEC // 2796 + SYS_FEGETROUND = 0xAED // 2797 + SYS_FEHOLDEXCEPT = 0xAEE // 2798 + SYS_FERAISEEXCEPT = 0xAEF // 2799 + SYS_FESETENV = 0xAF0 // 2800 + SYS_FESETEXCEPTFLAG = 0xAF1 // 2801 + SYS_FESETROUND = 0xAF2 // 2802 + SYS_FETESTEXCEPT = 0xAF3 // 2803 + SYS_FEUPDATEENV = 0xAF4 // 2804 + SYS___COPYSIGN_H = 0xAF5 // 2805 + SYS___HYPOTF_H = 0xAF6 // 2806 + SYS___HYPOTL_H = 0xAF7 // 2807 + SYS___CLASS = 0xAFA // 2810 + SYS___CLASS_B = 0xAFB // 2811 + SYS___CLASS_H = 0xAFC // 2812 + SYS___ISBLANK_A = 0xB2E // 2862 + SYS___ISWBLANK_A = 0xB2F // 2863 + SYS___LROUND_FIXUP = 0xB30 // 2864 + SYS___LROUNDF_FIXUP = 0xB31 // 2865 + SYS_SCHED_YIELD = 0xB32 // 2866 + SYS_STRERROR_R = 0xB33 // 2867 + SYS_UNSETENV = 0xB34 // 2868 + SYS___LGAMMA_H_C99 = 0xB38 // 2872 + SYS___LGAMMA_B_C99 = 0xB39 // 2873 + SYS___LGAMMA_R_C99 = 0xB3A // 2874 + SYS___FTELL2 = 0xB3B // 2875 + SYS___FSEEK2 = 0xB3C // 2876 + SYS___STATIC_REINIT = 0xB3D // 2877 + SYS_PTHREAD_ATTR_GETSTACK = 0xB3E // 2878 + SYS_PTHREAD_ATTR_SETSTACK = 0xB3F // 2879 + SYS___TGAMMA_H_C99 = 0xB78 // 2936 + SYS___TGAMMAF_H_C99 = 0xB79 // 2937 + SYS___LE_TRACEBACK = 0xB7A // 2938 + SYS___MUST_STAY_CLEAN = 0xB7C // 2940 + SYS___O_ENV = 0xB7D // 2941 + SYS_ACOSD32 = 0xB7E // 2942 + SYS_ACOSD64 = 0xB7F // 2943 + SYS_ACOSD128 = 0xB80 // 2944 + SYS_ACOSHD32 = 0xB81 // 2945 + SYS_ACOSHD64 = 0xB82 // 2946 + SYS_ACOSHD128 = 0xB83 // 2947 + SYS_ASIND32 = 0xB84 // 2948 + SYS_ASIND64 = 0xB85 // 2949 + SYS_ASIND128 = 0xB86 // 2950 + SYS_ASINHD32 = 0xB87 // 2951 + SYS_ASINHD64 = 0xB88 // 2952 + SYS_ASINHD128 = 0xB89 // 2953 + SYS_ATAND32 = 0xB8A // 2954 + SYS_ATAND64 = 0xB8B // 2955 + SYS_ATAND128 = 0xB8C // 2956 + SYS_ATAN2D32 = 0xB8D // 2957 + SYS_ATAN2D64 = 0xB8E // 2958 + SYS_ATAN2D128 = 0xB8F // 2959 + SYS_ATANHD32 = 0xB90 // 2960 + SYS_ATANHD64 = 0xB91 // 2961 + SYS_ATANHD128 = 0xB92 // 2962 + SYS_CBRTD32 = 0xB93 // 2963 + SYS_CBRTD64 = 0xB94 // 2964 + SYS_CBRTD128 = 0xB95 // 2965 + SYS_CEILD32 = 0xB96 // 2966 + SYS_CEILD64 = 0xB97 // 2967 + SYS_CEILD128 = 0xB98 // 2968 + SYS___CLASS2 = 0xB99 // 2969 + SYS___CLASS2_B = 0xB9A // 2970 + SYS___CLASS2_H = 0xB9B // 2971 + SYS_COPYSIGND32 = 0xB9C // 2972 + SYS_COPYSIGND64 = 0xB9D // 2973 + SYS_COPYSIGND128 = 0xB9E // 2974 + SYS_COSD32 = 0xB9F // 2975 + SYS_COSD64 = 0xBA0 // 2976 + SYS_COSD128 = 0xBA1 // 2977 + SYS_COSHD32 = 0xBA2 // 2978 + SYS_COSHD64 = 0xBA3 // 2979 + SYS_COSHD128 = 0xBA4 // 2980 + SYS_ERFD32 = 0xBA5 // 2981 + SYS_ERFD64 = 0xBA6 // 2982 + SYS_ERFD128 = 0xBA7 // 2983 + SYS_ERFCD32 = 0xBA8 // 2984 + SYS_ERFCD64 = 0xBA9 // 2985 + SYS_ERFCD128 = 0xBAA // 2986 + SYS_EXPD32 = 0xBAB // 2987 + SYS_EXPD64 = 0xBAC // 2988 + SYS_EXPD128 = 0xBAD // 2989 + SYS_EXP2D32 = 0xBAE // 2990 + SYS_EXP2D64 = 0xBAF // 2991 + SYS_EXP2D128 = 0xBB0 // 2992 + SYS_EXPM1D32 = 0xBB1 // 2993 + SYS_EXPM1D64 = 0xBB2 // 2994 + SYS_EXPM1D128 = 0xBB3 // 2995 + SYS_FABSD32 = 0xBB4 // 2996 + SYS_FABSD64 = 0xBB5 // 2997 + SYS_FABSD128 = 0xBB6 // 2998 + SYS_FDIMD32 = 0xBB7 // 2999 + SYS_FDIMD64 = 0xBB8 // 3000 + SYS_FDIMD128 = 0xBB9 // 3001 + SYS_FE_DEC_GETROUND = 0xBBA // 3002 + SYS_FE_DEC_SETROUND = 0xBBB // 3003 + SYS_FLOORD32 = 0xBBC // 3004 + SYS_FLOORD64 = 0xBBD // 3005 + SYS_FLOORD128 = 0xBBE // 3006 + SYS_FMAD32 = 0xBBF // 3007 
+ SYS_FMAD64 = 0xBC0 // 3008 + SYS_FMAD128 = 0xBC1 // 3009 + SYS_FMAXD32 = 0xBC2 // 3010 + SYS_FMAXD64 = 0xBC3 // 3011 + SYS_FMAXD128 = 0xBC4 // 3012 + SYS_FMIND32 = 0xBC5 // 3013 + SYS_FMIND64 = 0xBC6 // 3014 + SYS_FMIND128 = 0xBC7 // 3015 + SYS_FMODD32 = 0xBC8 // 3016 + SYS_FMODD64 = 0xBC9 // 3017 + SYS_FMODD128 = 0xBCA // 3018 + SYS___FP_CAST_D = 0xBCB // 3019 + SYS_FREXPD32 = 0xBCC // 3020 + SYS_FREXPD64 = 0xBCD // 3021 + SYS_FREXPD128 = 0xBCE // 3022 + SYS_HYPOTD32 = 0xBCF // 3023 + SYS_HYPOTD64 = 0xBD0 // 3024 + SYS_HYPOTD128 = 0xBD1 // 3025 + SYS_ILOGBD32 = 0xBD2 // 3026 + SYS_ILOGBD64 = 0xBD3 // 3027 + SYS_ILOGBD128 = 0xBD4 // 3028 + SYS_LDEXPD32 = 0xBD5 // 3029 + SYS_LDEXPD64 = 0xBD6 // 3030 + SYS_LDEXPD128 = 0xBD7 // 3031 + SYS_LGAMMAD32 = 0xBD8 // 3032 + SYS_LGAMMAD64 = 0xBD9 // 3033 + SYS_LGAMMAD128 = 0xBDA // 3034 + SYS_LLRINTD32 = 0xBDB // 3035 + SYS_LLRINTD64 = 0xBDC // 3036 + SYS_LLRINTD128 = 0xBDD // 3037 + SYS_LLROUNDD32 = 0xBDE // 3038 + SYS_LLROUNDD64 = 0xBDF // 3039 + SYS_LLROUNDD128 = 0xBE0 // 3040 + SYS_LOGD32 = 0xBE1 // 3041 + SYS_LOGD64 = 0xBE2 // 3042 + SYS_LOGD128 = 0xBE3 // 3043 + SYS_LOG10D32 = 0xBE4 // 3044 + SYS_LOG10D64 = 0xBE5 // 3045 + SYS_LOG10D128 = 0xBE6 // 3046 + SYS_LOG1PD32 = 0xBE7 // 3047 + SYS_LOG1PD64 = 0xBE8 // 3048 + SYS_LOG1PD128 = 0xBE9 // 3049 + SYS_LOG2D32 = 0xBEA // 3050 + SYS_LOG2D64 = 0xBEB // 3051 + SYS_LOG2D128 = 0xBEC // 3052 + SYS_LOGBD32 = 0xBED // 3053 + SYS_LOGBD64 = 0xBEE // 3054 + SYS_LOGBD128 = 0xBEF // 3055 + SYS_LRINTD32 = 0xBF0 // 3056 + SYS_LRINTD64 = 0xBF1 // 3057 + SYS_LRINTD128 = 0xBF2 // 3058 + SYS_LROUNDD32 = 0xBF3 // 3059 + SYS_LROUNDD64 = 0xBF4 // 3060 + SYS_LROUNDD128 = 0xBF5 // 3061 + SYS_MODFD32 = 0xBF6 // 3062 + SYS_MODFD64 = 0xBF7 // 3063 + SYS_MODFD128 = 0xBF8 // 3064 + SYS_NAND32 = 0xBF9 // 3065 + SYS_NAND64 = 0xBFA // 3066 + SYS_NAND128 = 0xBFB // 3067 + SYS_NEARBYINTD32 = 0xBFC // 3068 + SYS_NEARBYINTD64 = 0xBFD // 3069 + SYS_NEARBYINTD128 = 0xBFE // 3070 + SYS_NEXTAFTERD32 = 0xBFF // 3071 + SYS_NEXTAFTERD64 = 0xC00 // 3072 + SYS_NEXTAFTERD128 = 0xC01 // 3073 + SYS_NEXTTOWARDD32 = 0xC02 // 3074 + SYS_NEXTTOWARDD64 = 0xC03 // 3075 + SYS_NEXTTOWARDD128 = 0xC04 // 3076 + SYS_POWD32 = 0xC05 // 3077 + SYS_POWD64 = 0xC06 // 3078 + SYS_POWD128 = 0xC07 // 3079 + SYS_QUANTIZED32 = 0xC08 // 3080 + SYS_QUANTIZED64 = 0xC09 // 3081 + SYS_QUANTIZED128 = 0xC0A // 3082 + SYS_REMAINDERD32 = 0xC0B // 3083 + SYS_REMAINDERD64 = 0xC0C // 3084 + SYS_REMAINDERD128 = 0xC0D // 3085 + SYS___REMQUOD32 = 0xC0E // 3086 + SYS___REMQUOD64 = 0xC0F // 3087 + SYS___REMQUOD128 = 0xC10 // 3088 + SYS_RINTD32 = 0xC11 // 3089 + SYS_RINTD64 = 0xC12 // 3090 + SYS_RINTD128 = 0xC13 // 3091 + SYS_ROUNDD32 = 0xC14 // 3092 + SYS_ROUNDD64 = 0xC15 // 3093 + SYS_ROUNDD128 = 0xC16 // 3094 + SYS_SAMEQUANTUMD32 = 0xC17 // 3095 + SYS_SAMEQUANTUMD64 = 0xC18 // 3096 + SYS_SAMEQUANTUMD128 = 0xC19 // 3097 + SYS_SCALBLND32 = 0xC1A // 3098 + SYS_SCALBLND64 = 0xC1B // 3099 + SYS_SCALBLND128 = 0xC1C // 3100 + SYS_SCALBND32 = 0xC1D // 3101 + SYS_SCALBND64 = 0xC1E // 3102 + SYS_SCALBND128 = 0xC1F // 3103 + SYS_SIND32 = 0xC20 // 3104 + SYS_SIND64 = 0xC21 // 3105 + SYS_SIND128 = 0xC22 // 3106 + SYS_SINHD32 = 0xC23 // 3107 + SYS_SINHD64 = 0xC24 // 3108 + SYS_SINHD128 = 0xC25 // 3109 + SYS_SQRTD32 = 0xC26 // 3110 + SYS_SQRTD64 = 0xC27 // 3111 + SYS_SQRTD128 = 0xC28 // 3112 + SYS_STRTOD32 = 0xC29 // 3113 + SYS_STRTOD64 = 0xC2A // 3114 + SYS_STRTOD128 = 0xC2B // 3115 + SYS_TAND32 = 0xC2C // 3116 + SYS_TAND64 = 0xC2D // 3117 + SYS_TAND128 = 0xC2E // 3118 + SYS_TANHD32 = 0xC2F 
// 3119 + SYS_TANHD64 = 0xC30 // 3120 + SYS_TANHD128 = 0xC31 // 3121 + SYS_TGAMMAD32 = 0xC32 // 3122 + SYS_TGAMMAD64 = 0xC33 // 3123 + SYS_TGAMMAD128 = 0xC34 // 3124 + SYS_TRUNCD32 = 0xC3E // 3134 + SYS_TRUNCD64 = 0xC3F // 3135 + SYS_TRUNCD128 = 0xC40 // 3136 + SYS_WCSTOD32 = 0xC41 // 3137 + SYS_WCSTOD64 = 0xC42 // 3138 + SYS_WCSTOD128 = 0xC43 // 3139 + SYS___CODEPAGE_INFO = 0xC64 // 3172 + SYS_POSIX_OPENPT = 0xC66 // 3174 + SYS_PSELECT = 0xC67 // 3175 + SYS_SOCKATMARK = 0xC68 // 3176 + SYS_AIO_FSYNC = 0xC69 // 3177 + SYS_LIO_LISTIO = 0xC6A // 3178 + SYS___ATANPID32 = 0xC6B // 3179 + SYS___ATANPID64 = 0xC6C // 3180 + SYS___ATANPID128 = 0xC6D // 3181 + SYS___COSPID32 = 0xC6E // 3182 + SYS___COSPID64 = 0xC6F // 3183 + SYS___COSPID128 = 0xC70 // 3184 + SYS___SINPID32 = 0xC71 // 3185 + SYS___SINPID64 = 0xC72 // 3186 + SYS___SINPID128 = 0xC73 // 3187 + SYS_SETIPV4SOURCEFILTER = 0xC76 // 3190 + SYS_GETIPV4SOURCEFILTER = 0xC77 // 3191 + SYS_SETSOURCEFILTER = 0xC78 // 3192 + SYS_GETSOURCEFILTER = 0xC79 // 3193 + SYS_FWRITE_UNLOCKED = 0xC7A // 3194 + SYS_FREAD_UNLOCKED = 0xC7B // 3195 + SYS_FGETS_UNLOCKED = 0xC7C // 3196 + SYS_GETS_UNLOCKED = 0xC7D // 3197 + SYS_FPUTS_UNLOCKED = 0xC7E // 3198 + SYS_PUTS_UNLOCKED = 0xC7F // 3199 + SYS_FGETC_UNLOCKED = 0xC80 // 3200 + SYS_FPUTC_UNLOCKED = 0xC81 // 3201 + SYS_DLADDR = 0xC82 // 3202 + SYS_SHM_OPEN = 0xC8C // 3212 + SYS_SHM_UNLINK = 0xC8D // 3213 + SYS___CLASS2F = 0xC91 // 3217 + SYS___CLASS2L = 0xC92 // 3218 + SYS___CLASS2F_B = 0xC93 // 3219 + SYS___CLASS2F_H = 0xC94 // 3220 + SYS___CLASS2L_B = 0xC95 // 3221 + SYS___CLASS2L_H = 0xC96 // 3222 + SYS___CLASS2D32 = 0xC97 // 3223 + SYS___CLASS2D64 = 0xC98 // 3224 + SYS___CLASS2D128 = 0xC99 // 3225 + SYS___TOCSNAME2 = 0xC9A // 3226 + SYS___D1TOP = 0xC9B // 3227 + SYS___D2TOP = 0xC9C // 3228 + SYS___D4TOP = 0xC9D // 3229 + SYS___PTOD1 = 0xC9E // 3230 + SYS___PTOD2 = 0xC9F // 3231 + SYS___PTOD4 = 0xCA0 // 3232 + SYS_CLEARERR_UNLOCKED = 0xCA1 // 3233 + SYS_FDELREC_UNLOCKED = 0xCA2 // 3234 + SYS_FEOF_UNLOCKED = 0xCA3 // 3235 + SYS_FERROR_UNLOCKED = 0xCA4 // 3236 + SYS_FFLUSH_UNLOCKED = 0xCA5 // 3237 + SYS_FGETPOS_UNLOCKED = 0xCA6 // 3238 + SYS_FGETWC_UNLOCKED = 0xCA7 // 3239 + SYS_FGETWS_UNLOCKED = 0xCA8 // 3240 + SYS_FILENO_UNLOCKED = 0xCA9 // 3241 + SYS_FLDATA_UNLOCKED = 0xCAA // 3242 + SYS_FLOCATE_UNLOCKED = 0xCAB // 3243 + SYS_FPRINTF_UNLOCKED = 0xCAC // 3244 + SYS_FPUTWC_UNLOCKED = 0xCAD // 3245 + SYS_FPUTWS_UNLOCKED = 0xCAE // 3246 + SYS_FSCANF_UNLOCKED = 0xCAF // 3247 + SYS_FSEEK_UNLOCKED = 0xCB0 // 3248 + SYS_FSEEKO_UNLOCKED = 0xCB1 // 3249 + SYS_FSETPOS_UNLOCKED = 0xCB3 // 3251 + SYS_FTELL_UNLOCKED = 0xCB4 // 3252 + SYS_FTELLO_UNLOCKED = 0xCB5 // 3253 + SYS_FUPDATE_UNLOCKED = 0xCB7 // 3255 + SYS_FWIDE_UNLOCKED = 0xCB8 // 3256 + SYS_FWPRINTF_UNLOCKED = 0xCB9 // 3257 + SYS_FWSCANF_UNLOCKED = 0xCBA // 3258 + SYS_GETWC_UNLOCKED = 0xCBB // 3259 + SYS_GETWCHAR_UNLOCKED = 0xCBC // 3260 + SYS_PERROR_UNLOCKED = 0xCBD // 3261 + SYS_PRINTF_UNLOCKED = 0xCBE // 3262 + SYS_PUTWC_UNLOCKED = 0xCBF // 3263 + SYS_PUTWCHAR_UNLOCKED = 0xCC0 // 3264 + SYS_REWIND_UNLOCKED = 0xCC1 // 3265 + SYS_SCANF_UNLOCKED = 0xCC2 // 3266 + SYS_UNGETC_UNLOCKED = 0xCC3 // 3267 + SYS_UNGETWC_UNLOCKED = 0xCC4 // 3268 + SYS_VFPRINTF_UNLOCKED = 0xCC5 // 3269 + SYS_VFSCANF_UNLOCKED = 0xCC7 // 3271 + SYS_VFWPRINTF_UNLOCKED = 0xCC9 // 3273 + SYS_VFWSCANF_UNLOCKED = 0xCCB // 3275 + SYS_VPRINTF_UNLOCKED = 0xCCD // 3277 + SYS_VSCANF_UNLOCKED = 0xCCF // 3279 + SYS_VWPRINTF_UNLOCKED = 0xCD1 // 3281 + SYS_VWSCANF_UNLOCKED = 0xCD3 // 3283 + 
SYS_WPRINTF_UNLOCKED = 0xCD5 // 3285 + SYS_WSCANF_UNLOCKED = 0xCD6 // 3286 + SYS_ASCTIME64 = 0xCD7 // 3287 + SYS_ASCTIME64_R = 0xCD8 // 3288 + SYS_CTIME64 = 0xCD9 // 3289 + SYS_CTIME64_R = 0xCDA // 3290 + SYS_DIFFTIME64 = 0xCDB // 3291 + SYS_GMTIME64 = 0xCDC // 3292 + SYS_GMTIME64_R = 0xCDD // 3293 + SYS_LOCALTIME64 = 0xCDE // 3294 + SYS_LOCALTIME64_R = 0xCDF // 3295 + SYS_MKTIME64 = 0xCE0 // 3296 + SYS_TIME64 = 0xCE1 // 3297 + SYS___LOGIN_APPLID = 0xCE2 // 3298 + SYS___PASSWD_APPLID = 0xCE3 // 3299 + SYS_PTHREAD_SECURITY_APPLID_NP = 0xCE4 // 3300 + SYS___GETTHENT = 0xCE5 // 3301 + SYS_FREEIFADDRS = 0xCE6 // 3302 + SYS_GETIFADDRS = 0xCE7 // 3303 + SYS_POSIX_FALLOCATE = 0xCE8 // 3304 + SYS_POSIX_MEMALIGN = 0xCE9 // 3305 + SYS_SIZEOF_ALLOC = 0xCEA // 3306 + SYS_RESIZE_ALLOC = 0xCEB // 3307 + SYS_FREAD_NOUPDATE = 0xCEC // 3308 + SYS_FREAD_NOUPDATE_UNLOCKED = 0xCED // 3309 + SYS_FGETPOS64 = 0xCEE // 3310 + SYS_FSEEK64 = 0xCEF // 3311 + SYS_FSEEKO64 = 0xCF0 // 3312 + SYS_FSETPOS64 = 0xCF1 // 3313 + SYS_FTELL64 = 0xCF2 // 3314 + SYS_FTELLO64 = 0xCF3 // 3315 + SYS_FGETPOS64_UNLOCKED = 0xCF4 // 3316 + SYS_FSEEK64_UNLOCKED = 0xCF5 // 3317 + SYS_FSEEKO64_UNLOCKED = 0xCF6 // 3318 + SYS_FSETPOS64_UNLOCKED = 0xCF7 // 3319 + SYS_FTELL64_UNLOCKED = 0xCF8 // 3320 + SYS_FTELLO64_UNLOCKED = 0xCF9 // 3321 + SYS_FOPEN_UNLOCKED = 0xCFA // 3322 + SYS_FREOPEN_UNLOCKED = 0xCFB // 3323 + SYS_FDOPEN_UNLOCKED = 0xCFC // 3324 + SYS_TMPFILE_UNLOCKED = 0xCFD // 3325 + SYS___MOSERVICES = 0xD3D // 3389 + SYS___GETTOD = 0xD3E // 3390 + SYS_C16RTOMB = 0xD40 // 3392 + SYS_C32RTOMB = 0xD41 // 3393 + SYS_MBRTOC16 = 0xD42 // 3394 + SYS_MBRTOC32 = 0xD43 // 3395 + SYS_QUANTEXPD32 = 0xD44 // 3396 + SYS_QUANTEXPD64 = 0xD45 // 3397 + SYS_QUANTEXPD128 = 0xD46 // 3398 + SYS___LOCALE_CTL = 0xD47 // 3399 + SYS___SMF_RECORD2 = 0xD48 // 3400 + SYS_FOPEN64 = 0xD49 // 3401 + SYS_FOPEN64_UNLOCKED = 0xD4A // 3402 + SYS_FREOPEN64 = 0xD4B // 3403 + SYS_FREOPEN64_UNLOCKED = 0xD4C // 3404 + SYS_TMPFILE64 = 0xD4D // 3405 + SYS_TMPFILE64_UNLOCKED = 0xD4E // 3406 + SYS_GETDATE64 = 0xD4F // 3407 + SYS_GETTIMEOFDAY64 = 0xD50 // 3408 + SYS_BIND2ADDRSEL = 0xD59 // 3417 + SYS_INET6_IS_SRCADDR = 0xD5A // 3418 + SYS___GETGRGID1 = 0xD5B // 3419 + SYS___GETGRNAM1 = 0xD5C // 3420 + SYS___FBUFSIZE = 0xD60 // 3424 + SYS___FPENDING = 0xD61 // 3425 + SYS___FLBF = 0xD62 // 3426 + SYS___FREADABLE = 0xD63 // 3427 + SYS___FWRITABLE = 0xD64 // 3428 + SYS___FREADING = 0xD65 // 3429 + SYS___FWRITING = 0xD66 // 3430 + SYS___FSETLOCKING = 0xD67 // 3431 + SYS__FLUSHLBF = 0xD68 // 3432 + SYS___FPURGE = 0xD69 // 3433 + SYS___FREADAHEAD = 0xD6A // 3434 + SYS___FSETERR = 0xD6B // 3435 + SYS___FPENDING_UNLOCKED = 0xD6C // 3436 + SYS___FREADING_UNLOCKED = 0xD6D // 3437 + SYS___FWRITING_UNLOCKED = 0xD6E // 3438 + SYS__FLUSHLBF_UNLOCKED = 0xD6F // 3439 + SYS___FPURGE_UNLOCKED = 0xD70 // 3440 + SYS___FREADAHEAD_UNLOCKED = 0xD71 // 3441 + SYS___LE_CEEGTJS = 0xD72 // 3442 + SYS___LE_RECORD_DUMP = 0xD73 // 3443 + SYS_FSTAT64 = 0xD74 // 3444 + SYS_LSTAT64 = 0xD75 // 3445 + SYS_STAT64 = 0xD76 // 3446 + SYS___READDIR2_64 = 0xD77 // 3447 + SYS___OPEN_STAT64 = 0xD78 // 3448 + SYS_FTW64 = 0xD79 // 3449 + SYS_NFTW64 = 0xD7A // 3450 + SYS_UTIME64 = 0xD7B // 3451 + SYS_UTIMES64 = 0xD7C // 3452 + SYS___GETIPC64 = 0xD7D // 3453 + SYS_MSGCTL64 = 0xD7E // 3454 + SYS_SEMCTL64 = 0xD7F // 3455 + SYS_SHMCTL64 = 0xD80 // 3456 + SYS_MSGXRCV64 = 0xD81 // 3457 + SYS___MGXR64 = 0xD81 // 3457 + SYS_W_GETPSENT64 = 0xD82 // 3458 + SYS_PTHREAD_COND_TIMEDWAIT64 = 0xD83 // 3459 + SYS_FTIME64 = 0xD85 // 3461 + 
SYS_GETUTXENT64 = 0xD86 // 3462 + SYS_GETUTXID64 = 0xD87 // 3463 + SYS_GETUTXLINE64 = 0xD88 // 3464 + SYS_PUTUTXLINE64 = 0xD89 // 3465 + SYS_NEWLOCALE = 0xD8A // 3466 + SYS_FREELOCALE = 0xD8B // 3467 + SYS_USELOCALE = 0xD8C // 3468 + SYS_DUPLOCALE = 0xD8D // 3469 + SYS___CHATTR64 = 0xD9C // 3484 + SYS___LCHATTR64 = 0xD9D // 3485 + SYS___FCHATTR64 = 0xD9E // 3486 + SYS_____CHATTR64_A = 0xD9F // 3487 + SYS_____LCHATTR64_A = 0xDA0 // 3488 + SYS___LE_CEEUSGD = 0xDA1 // 3489 + SYS___LE_IFAM_CON = 0xDA2 // 3490 + SYS___LE_IFAM_DSC = 0xDA3 // 3491 + SYS___LE_IFAM_GET = 0xDA4 // 3492 + SYS___LE_IFAM_QRY = 0xDA5 // 3493 + SYS_ALIGNED_ALLOC = 0xDA6 // 3494 + SYS_ACCEPT4 = 0xDA7 // 3495 + SYS___ACCEPT4_A = 0xDA8 // 3496 + SYS_COPYFILERANGE = 0xDA9 // 3497 + SYS_GETLINE = 0xDAA // 3498 + SYS___GETLINE_A = 0xDAB // 3499 + SYS_DIRFD = 0xDAC // 3500 + SYS_CLOCK_GETTIME = 0xDAD // 3501 + SYS_DUP3 = 0xDAE // 3502 + SYS_EPOLL_CREATE = 0xDAF // 3503 + SYS_EPOLL_CREATE1 = 0xDB0 // 3504 + SYS_EPOLL_CTL = 0xDB1 // 3505 + SYS_EPOLL_WAIT = 0xDB2 // 3506 + SYS_EPOLL_PWAIT = 0xDB3 // 3507 + SYS_EVENTFD = 0xDB4 // 3508 + SYS_STATFS = 0xDB5 // 3509 + SYS___STATFS_A = 0xDB6 // 3510 + SYS_FSTATFS = 0xDB7 // 3511 + SYS_INOTIFY_INIT = 0xDB8 // 3512 + SYS_INOTIFY_INIT1 = 0xDB9 // 3513 + SYS_INOTIFY_ADD_WATCH = 0xDBA // 3514 + SYS___INOTIFY_ADD_WATCH_A = 0xDBB // 3515 + SYS_INOTIFY_RM_WATCH = 0xDBC // 3516 + SYS_PIPE2 = 0xDBD // 3517 + SYS_PIVOT_ROOT = 0xDBE // 3518 + SYS___PIVOT_ROOT_A = 0xDBF // 3519 + SYS_PRCTL = 0xDC0 // 3520 + SYS_PRLIMIT = 0xDC1 // 3521 + SYS_SETHOSTNAME = 0xDC2 // 3522 + SYS___SETHOSTNAME_A = 0xDC3 // 3523 + SYS_SETRESUID = 0xDC4 // 3524 + SYS_SETRESGID = 0xDC5 // 3525 + SYS_PTHREAD_CONDATTR_GETCLOCK = 0xDC6 // 3526 + SYS_FLOCK = 0xDC7 // 3527 + SYS_FGETXATTR = 0xDC8 // 3528 + SYS___FGETXATTR_A = 0xDC9 // 3529 + SYS_FLISTXATTR = 0xDCA // 3530 + SYS___FLISTXATTR_A = 0xDCB // 3531 + SYS_FREMOVEXATTR = 0xDCC // 3532 + SYS___FREMOVEXATTR_A = 0xDCD // 3533 + SYS_FSETXATTR = 0xDCE // 3534 + SYS___FSETXATTR_A = 0xDCF // 3535 + SYS_GETXATTR = 0xDD0 // 3536 + SYS___GETXATTR_A = 0xDD1 // 3537 + SYS_LGETXATTR = 0xDD2 // 3538 + SYS___LGETXATTR_A = 0xDD3 // 3539 + SYS_LISTXATTR = 0xDD4 // 3540 + SYS___LISTXATTR_A = 0xDD5 // 3541 + SYS_LLISTXATTR = 0xDD6 // 3542 + SYS___LLISTXATTR_A = 0xDD7 // 3543 + SYS_LREMOVEXATTR = 0xDD8 // 3544 + SYS___LREMOVEXATTR_A = 0xDD9 // 3545 + SYS_LSETXATTR = 0xDDA // 3546 + SYS___LSETXATTR_A = 0xDDB // 3547 + SYS_REMOVEXATTR = 0xDDC // 3548 + SYS___REMOVEXATTR_A = 0xDDD // 3549 + SYS_SETXATTR = 0xDDE // 3550 + SYS___SETXATTR_A = 0xDDF // 3551 + SYS_FDATASYNC = 0xDE0 // 3552 + SYS_SYNCFS = 0xDE1 // 3553 + SYS_FUTIMES = 0xDE2 // 3554 + SYS_FUTIMESAT = 0xDE3 // 3555 + SYS___FUTIMESAT_A = 0xDE4 // 3556 + SYS_LUTIMES = 0xDE5 // 3557 + SYS___LUTIMES_A = 0xDE6 // 3558 + SYS_INET_ATON = 0xDE7 // 3559 + SYS_GETRANDOM = 0xDE8 // 3560 + SYS_GETTID = 0xDE9 // 3561 + SYS_MEMFD_CREATE = 0xDEA // 3562 + SYS___MEMFD_CREATE_A = 0xDEB // 3563 + SYS_FACCESSAT = 0xDEC // 3564 + SYS___FACCESSAT_A = 0xDED // 3565 + SYS_FCHMODAT = 0xDEE // 3566 + SYS___FCHMODAT_A = 0xDEF // 3567 + SYS_FCHOWNAT = 0xDF0 // 3568 + SYS___FCHOWNAT_A = 0xDF1 // 3569 + SYS_FSTATAT = 0xDF2 // 3570 + SYS___FSTATAT_A = 0xDF3 // 3571 + SYS_LINKAT = 0xDF4 // 3572 + SYS___LINKAT_A = 0xDF5 // 3573 + SYS_MKDIRAT = 0xDF6 // 3574 + SYS___MKDIRAT_A = 0xDF7 // 3575 + SYS_MKFIFOAT = 0xDF8 // 3576 + SYS___MKFIFOAT_A = 0xDF9 // 3577 + SYS_MKNODAT = 0xDFA // 3578 + SYS___MKNODAT_A = 0xDFB // 3579 + SYS_OPENAT = 0xDFC // 3580 + SYS___OPENAT_A = 
0xDFD // 3581 + SYS_READLINKAT = 0xDFE // 3582 + SYS___READLINKAT_A = 0xDFF // 3583 + SYS_RENAMEAT = 0xE00 // 3584 + SYS___RENAMEAT_A = 0xE01 // 3585 + SYS_RENAMEAT2 = 0xE02 // 3586 + SYS___RENAMEAT2_A = 0xE03 // 3587 + SYS_SYMLINKAT = 0xE04 // 3588 + SYS___SYMLINKAT_A = 0xE05 // 3589 + SYS_UNLINKAT = 0xE06 // 3590 + SYS___UNLINKAT_A = 0xE07 // 3591 + SYS_SYSINFO = 0xE08 // 3592 + SYS_WAIT4 = 0xE0A // 3594 + SYS_CLONE = 0xE0B // 3595 + SYS_UNSHARE = 0xE0C // 3596 + SYS_SETNS = 0xE0D // 3597 + SYS_CAPGET = 0xE0E // 3598 + SYS_CAPSET = 0xE0F // 3599 + SYS_STRCHRNUL = 0xE10 // 3600 + SYS_PTHREAD_CONDATTR_SETCLOCK = 0xE12 // 3602 + SYS_OPEN_BY_HANDLE_AT = 0xE13 // 3603 + SYS___OPEN_BY_HANDLE_AT_A = 0xE14 // 3604 + SYS___INET_ATON_A = 0xE15 // 3605 + SYS_MOUNT1 = 0xE16 // 3606 + SYS___MOUNT1_A = 0xE17 // 3607 + SYS_UMOUNT1 = 0xE18 // 3608 + SYS___UMOUNT1_A = 0xE19 // 3609 + SYS_UMOUNT2 = 0xE1A // 3610 + SYS___UMOUNT2_A = 0xE1B // 3611 + SYS___PRCTL_A = 0xE1C // 3612 + SYS_LOCALTIME_R2 = 0xE1D // 3613 + SYS___LOCALTIME_R2_A = 0xE1E // 3614 + SYS_OPENAT2 = 0xE1F // 3615 + SYS___OPENAT2_A = 0xE20 // 3616 + SYS___LE_CEEMICT = 0xE21 // 3617 + SYS_GETENTROPY = 0xE22 // 3618 + SYS_NANOSLEEP = 0xE23 // 3619 + SYS_UTIMENSAT = 0xE24 // 3620 + SYS___UTIMENSAT_A = 0xE25 // 3621 + SYS_ASPRINTF = 0xE26 // 3622 + SYS___ASPRINTF_A = 0xE27 // 3623 + SYS_VASPRINTF = 0xE28 // 3624 + SYS___VASPRINTF_A = 0xE29 // 3625 + SYS_DPRINTF = 0xE2A // 3626 + SYS___DPRINTF_A = 0xE2B // 3627 + SYS_GETOPT_LONG = 0xE2C // 3628 + SYS___GETOPT_LONG_A = 0xE2D // 3629 + SYS_PSIGNAL = 0xE2E // 3630 + SYS___PSIGNAL_A = 0xE2F // 3631 + SYS_PSIGNAL_UNLOCKED = 0xE30 // 3632 + SYS___PSIGNAL_UNLOCKED_A = 0xE31 // 3633 + SYS_FSTATAT_O = 0xE32 // 3634 + SYS___FSTATAT_O_A = 0xE33 // 3635 + SYS_FSTATAT64 = 0xE34 // 3636 + SYS___FSTATAT64_A = 0xE35 // 3637 + SYS___CHATTRAT = 0xE36 // 3638 + SYS_____CHATTRAT_A = 0xE37 // 3639 + SYS___CHATTRAT64 = 0xE38 // 3640 + SYS_____CHATTRAT64_A = 0xE39 // 3641 + SYS_MADVISE = 0xE3A // 3642 + SYS___AUTHENTICATE = 0xE3B // 3643 + ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go index 7a8161c1..3e6d57ca 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc && aix -// +build ppc,aix package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go index 07ed733c..3a219bdc 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && aix -// +build ppc64,aix package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index 690cefc3..091d107f 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build amd64 && darwin -// +build amd64,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index 5bffc10e..28ff4ef7 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && darwin -// +build arm64,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go index d0ba8e9b..30e405bb 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && dragonfly -// +build amd64,dragonfly package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go index 29dc4833..6cbd094a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && freebsd -// +build 386,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go index 0a89b289..7c03b6ee 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && freebsd -// +build amd64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go index c8666bb1..422107ee 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && freebsd -// +build arm,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go index 88fb48a8..505a12ac 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && freebsd -// +build arm64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go index 698dc975..cc986c79 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && freebsd -// +build riscv64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 18aa70b4..0036746e 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -1,7 +1,6 @@ // Code generated by mkmerge; DO NOT EDIT. 
//go:build linux -// +build linux package unix @@ -175,7 +174,8 @@ type FscryptPolicyV2 struct { Contents_encryption_mode uint8 Filenames_encryption_mode uint8 Flags uint8 - _ [4]uint8 + Log2_data_unit_size uint8 + _ [3]uint8 Master_key_identifier [16]uint8 } @@ -456,60 +456,63 @@ type Ucred struct { } type TCPInfo struct { - State uint8 - Ca_state uint8 - Retransmits uint8 - Probes uint8 - Backoff uint8 - Options uint8 - Rto uint32 - Ato uint32 - Snd_mss uint32 - Rcv_mss uint32 - Unacked uint32 - Sacked uint32 - Lost uint32 - Retrans uint32 - Fackets uint32 - Last_data_sent uint32 - Last_ack_sent uint32 - Last_data_recv uint32 - Last_ack_recv uint32 - Pmtu uint32 - Rcv_ssthresh uint32 - Rtt uint32 - Rttvar uint32 - Snd_ssthresh uint32 - Snd_cwnd uint32 - Advmss uint32 - Reordering uint32 - Rcv_rtt uint32 - Rcv_space uint32 - Total_retrans uint32 - Pacing_rate uint64 - Max_pacing_rate uint64 - Bytes_acked uint64 - Bytes_received uint64 - Segs_out uint32 - Segs_in uint32 - Notsent_bytes uint32 - Min_rtt uint32 - Data_segs_in uint32 - Data_segs_out uint32 - Delivery_rate uint64 - Busy_time uint64 - Rwnd_limited uint64 - Sndbuf_limited uint64 - Delivered uint32 - Delivered_ce uint32 - Bytes_sent uint64 - Bytes_retrans uint64 - Dsack_dups uint32 - Reord_seen uint32 - Rcv_ooopack uint32 - Snd_wnd uint32 - Rcv_wnd uint32 - Rehash uint32 + State uint8 + Ca_state uint8 + Retransmits uint8 + Probes uint8 + Backoff uint8 + Options uint8 + Rto uint32 + Ato uint32 + Snd_mss uint32 + Rcv_mss uint32 + Unacked uint32 + Sacked uint32 + Lost uint32 + Retrans uint32 + Fackets uint32 + Last_data_sent uint32 + Last_ack_sent uint32 + Last_data_recv uint32 + Last_ack_recv uint32 + Pmtu uint32 + Rcv_ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Snd_ssthresh uint32 + Snd_cwnd uint32 + Advmss uint32 + Reordering uint32 + Rcv_rtt uint32 + Rcv_space uint32 + Total_retrans uint32 + Pacing_rate uint64 + Max_pacing_rate uint64 + Bytes_acked uint64 + Bytes_received uint64 + Segs_out uint32 + Segs_in uint32 + Notsent_bytes uint32 + Min_rtt uint32 + Data_segs_in uint32 + Data_segs_out uint32 + Delivery_rate uint64 + Busy_time uint64 + Rwnd_limited uint64 + Sndbuf_limited uint64 + Delivered uint32 + Delivered_ce uint32 + Bytes_sent uint64 + Bytes_retrans uint64 + Dsack_dups uint32 + Reord_seen uint32 + Rcv_ooopack uint32 + Snd_wnd uint32 + Rcv_wnd uint32 + Rehash uint32 + Total_rto uint16 + Total_rto_recoveries uint16 + Total_rto_time uint32 } type CanFilter struct { @@ -552,7 +555,7 @@ const ( SizeofIPv6MTUInfo = 0x20 SizeofICMPv6Filter = 0x20 SizeofUcred = 0xc - SizeofTCPInfo = 0xf0 + SizeofTCPInfo = 0xf8 SizeofCanFilter = 0x8 SizeofTCPRepairOpt = 0x8 ) @@ -833,6 +836,15 @@ const ( FSPICK_EMPTY_PATH = 0x8 FSMOUNT_CLOEXEC = 0x1 + + FSCONFIG_SET_FLAG = 0x0 + FSCONFIG_SET_STRING = 0x1 + FSCONFIG_SET_BINARY = 0x2 + FSCONFIG_SET_PATH = 0x3 + FSCONFIG_SET_PATH_EMPTY = 0x4 + FSCONFIG_SET_FD = 0x5 + FSCONFIG_CMD_CREATE = 0x6 + FSCONFIG_CMD_RECONFIGURE = 0x7 ) type OpenHow struct { @@ -1166,7 +1178,8 @@ const ( PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT = 0x10 PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT = 0x11 PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT = 0x12 - PERF_SAMPLE_BRANCH_MAX_SHIFT = 0x13 + PERF_SAMPLE_BRANCH_COUNTERS = 0x80000 + PERF_SAMPLE_BRANCH_MAX_SHIFT = 0x14 PERF_SAMPLE_BRANCH_USER = 0x1 PERF_SAMPLE_BRANCH_KERNEL = 0x2 PERF_SAMPLE_BRANCH_HV = 0x4 @@ -1186,7 +1199,7 @@ const ( PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000 PERF_SAMPLE_BRANCH_HW_INDEX = 0x20000 PERF_SAMPLE_BRANCH_PRIV_SAVE = 0x40000 - PERF_SAMPLE_BRANCH_MAX = 0x80000 + 
PERF_SAMPLE_BRANCH_MAX = 0x100000 PERF_BR_UNKNOWN = 0x0 PERF_BR_COND = 0x1 PERF_BR_UNCOND = 0x2 @@ -1547,6 +1560,7 @@ const ( IFLA_DEVLINK_PORT = 0x3e IFLA_GSO_IPV4_MAX_SIZE = 0x3f IFLA_GRO_IPV4_MAX_SIZE = 0x40 + IFLA_DPLL_PIN = 0x41 IFLA_PROTO_DOWN_REASON_UNSPEC = 0x0 IFLA_PROTO_DOWN_REASON_MASK = 0x1 IFLA_PROTO_DOWN_REASON_VALUE = 0x2 @@ -1562,6 +1576,7 @@ const ( IFLA_INET6_ICMP6STATS = 0x6 IFLA_INET6_TOKEN = 0x7 IFLA_INET6_ADDR_GEN_MODE = 0x8 + IFLA_INET6_RA_MTU = 0x9 IFLA_BR_UNSPEC = 0x0 IFLA_BR_FORWARD_DELAY = 0x1 IFLA_BR_HELLO_TIME = 0x2 @@ -1609,6 +1624,9 @@ const ( IFLA_BR_MCAST_MLD_VERSION = 0x2c IFLA_BR_VLAN_STATS_PER_PORT = 0x2d IFLA_BR_MULTI_BOOLOPT = 0x2e + IFLA_BR_MCAST_QUERIER_STATE = 0x2f + IFLA_BR_FDB_N_LEARNED = 0x30 + IFLA_BR_FDB_MAX_LEARNED = 0x31 IFLA_BRPORT_UNSPEC = 0x0 IFLA_BRPORT_STATE = 0x1 IFLA_BRPORT_PRIORITY = 0x2 @@ -1646,6 +1664,14 @@ const ( IFLA_BRPORT_BACKUP_PORT = 0x22 IFLA_BRPORT_MRP_RING_OPEN = 0x23 IFLA_BRPORT_MRP_IN_OPEN = 0x24 + IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT = 0x25 + IFLA_BRPORT_MCAST_EHT_HOSTS_CNT = 0x26 + IFLA_BRPORT_LOCKED = 0x27 + IFLA_BRPORT_MAB = 0x28 + IFLA_BRPORT_MCAST_N_GROUPS = 0x29 + IFLA_BRPORT_MCAST_MAX_GROUPS = 0x2a + IFLA_BRPORT_NEIGH_VLAN_SUPPRESS = 0x2b + IFLA_BRPORT_BACKUP_NHID = 0x2c IFLA_INFO_UNSPEC = 0x0 IFLA_INFO_KIND = 0x1 IFLA_INFO_DATA = 0x2 @@ -1667,6 +1693,9 @@ const ( IFLA_MACVLAN_MACADDR = 0x4 IFLA_MACVLAN_MACADDR_DATA = 0x5 IFLA_MACVLAN_MACADDR_COUNT = 0x6 + IFLA_MACVLAN_BC_QUEUE_LEN = 0x7 + IFLA_MACVLAN_BC_QUEUE_LEN_USED = 0x8 + IFLA_MACVLAN_BC_CUTOFF = 0x9 IFLA_VRF_UNSPEC = 0x0 IFLA_VRF_TABLE = 0x1 IFLA_VRF_PORT_UNSPEC = 0x0 @@ -1690,9 +1719,22 @@ const ( IFLA_XFRM_UNSPEC = 0x0 IFLA_XFRM_LINK = 0x1 IFLA_XFRM_IF_ID = 0x2 + IFLA_XFRM_COLLECT_METADATA = 0x3 IFLA_IPVLAN_UNSPEC = 0x0 IFLA_IPVLAN_MODE = 0x1 IFLA_IPVLAN_FLAGS = 0x2 + NETKIT_NEXT = -0x1 + NETKIT_PASS = 0x0 + NETKIT_DROP = 0x2 + NETKIT_REDIRECT = 0x7 + NETKIT_L2 = 0x0 + NETKIT_L3 = 0x1 + IFLA_NETKIT_UNSPEC = 0x0 + IFLA_NETKIT_PEER_INFO = 0x1 + IFLA_NETKIT_PRIMARY = 0x2 + IFLA_NETKIT_POLICY = 0x3 + IFLA_NETKIT_PEER_POLICY = 0x4 + IFLA_NETKIT_MODE = 0x5 IFLA_VXLAN_UNSPEC = 0x0 IFLA_VXLAN_ID = 0x1 IFLA_VXLAN_GROUP = 0x2 @@ -1723,6 +1765,8 @@ const ( IFLA_VXLAN_GPE = 0x1b IFLA_VXLAN_TTL_INHERIT = 0x1c IFLA_VXLAN_DF = 0x1d + IFLA_VXLAN_VNIFILTER = 0x1e + IFLA_VXLAN_LOCALBYPASS = 0x1f IFLA_GENEVE_UNSPEC = 0x0 IFLA_GENEVE_ID = 0x1 IFLA_GENEVE_REMOTE = 0x2 @@ -1737,6 +1781,7 @@ const ( IFLA_GENEVE_LABEL = 0xb IFLA_GENEVE_TTL_INHERIT = 0xc IFLA_GENEVE_DF = 0xd + IFLA_GENEVE_INNER_PROTO_INHERIT = 0xe IFLA_BAREUDP_UNSPEC = 0x0 IFLA_BAREUDP_PORT = 0x1 IFLA_BAREUDP_ETHERTYPE = 0x2 @@ -1749,6 +1794,8 @@ const ( IFLA_GTP_FD1 = 0x2 IFLA_GTP_PDP_HASHSIZE = 0x3 IFLA_GTP_ROLE = 0x4 + IFLA_GTP_CREATE_SOCKETS = 0x5 + IFLA_GTP_RESTART_COUNT = 0x6 IFLA_BOND_UNSPEC = 0x0 IFLA_BOND_MODE = 0x1 IFLA_BOND_ACTIVE_SLAVE = 0x2 @@ -1778,6 +1825,9 @@ const ( IFLA_BOND_AD_ACTOR_SYSTEM = 0x1a IFLA_BOND_TLB_DYNAMIC_LB = 0x1b IFLA_BOND_PEER_NOTIF_DELAY = 0x1c + IFLA_BOND_AD_LACP_ACTIVE = 0x1d + IFLA_BOND_MISSED_MAX = 0x1e + IFLA_BOND_NS_IP6_TARGET = 0x1f IFLA_BOND_AD_INFO_UNSPEC = 0x0 IFLA_BOND_AD_INFO_AGGREGATOR = 0x1 IFLA_BOND_AD_INFO_NUM_PORTS = 0x2 @@ -1793,6 +1843,7 @@ const ( IFLA_BOND_SLAVE_AD_AGGREGATOR_ID = 0x6 IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE = 0x7 IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE = 0x8 + IFLA_BOND_SLAVE_PRIO = 0x9 IFLA_VF_INFO_UNSPEC = 0x0 IFLA_VF_INFO = 0x1 IFLA_VF_UNSPEC = 0x0 @@ -1851,8 +1902,16 @@ const ( IFLA_STATS_LINK_XSTATS_SLAVE = 0x3 
IFLA_STATS_LINK_OFFLOAD_XSTATS = 0x4 IFLA_STATS_AF_SPEC = 0x5 + IFLA_STATS_GETSET_UNSPEC = 0x0 + IFLA_STATS_GET_FILTERS = 0x1 + IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS = 0x2 IFLA_OFFLOAD_XSTATS_UNSPEC = 0x0 IFLA_OFFLOAD_XSTATS_CPU_HIT = 0x1 + IFLA_OFFLOAD_XSTATS_HW_S_INFO = 0x2 + IFLA_OFFLOAD_XSTATS_L3_STATS = 0x3 + IFLA_OFFLOAD_XSTATS_HW_S_INFO_UNSPEC = 0x0 + IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST = 0x1 + IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED = 0x2 IFLA_XDP_UNSPEC = 0x0 IFLA_XDP_FD = 0x1 IFLA_XDP_ATTACHED = 0x2 @@ -1882,6 +1941,11 @@ const ( IFLA_RMNET_UNSPEC = 0x0 IFLA_RMNET_MUX_ID = 0x1 IFLA_RMNET_FLAGS = 0x2 + IFLA_MCTP_UNSPEC = 0x0 + IFLA_MCTP_NET = 0x1 + IFLA_DSA_UNSPEC = 0x0 + IFLA_DSA_CONDUIT = 0x1 + IFLA_DSA_MASTER = 0x1 ) const ( @@ -2418,6 +2482,15 @@ type XDPMmapOffsets struct { Cr XDPRingOffset } +type XDPUmemReg struct { + Addr uint64 + Len uint64 + Chunk_size uint32 + Headroom uint32 + Flags uint32 + Tx_metadata_len uint32 +} + type XDPStatistics struct { Rx_dropped uint64 Rx_invalid_descs uint64 @@ -2672,6 +2745,7 @@ const ( BPF_PROG_TYPE_LSM = 0x1d BPF_PROG_TYPE_SK_LOOKUP = 0x1e BPF_PROG_TYPE_SYSCALL = 0x1f + BPF_PROG_TYPE_NETFILTER = 0x20 BPF_CGROUP_INET_INGRESS = 0x0 BPF_CGROUP_INET_EGRESS = 0x1 BPF_CGROUP_INET_SOCK_CREATE = 0x2 @@ -2716,6 +2790,11 @@ const ( BPF_PERF_EVENT = 0x29 BPF_TRACE_KPROBE_MULTI = 0x2a BPF_LSM_CGROUP = 0x2b + BPF_STRUCT_OPS = 0x2c + BPF_NETFILTER = 0x2d + BPF_TCX_INGRESS = 0x2e + BPF_TCX_EGRESS = 0x2f + BPF_TRACE_UPROBE_MULTI = 0x30 BPF_LINK_TYPE_UNSPEC = 0x0 BPF_LINK_TYPE_RAW_TRACEPOINT = 0x1 BPF_LINK_TYPE_TRACING = 0x2 @@ -2726,6 +2805,18 @@ const ( BPF_LINK_TYPE_PERF_EVENT = 0x7 BPF_LINK_TYPE_KPROBE_MULTI = 0x8 BPF_LINK_TYPE_STRUCT_OPS = 0x9 + BPF_LINK_TYPE_NETFILTER = 0xa + BPF_LINK_TYPE_TCX = 0xb + BPF_LINK_TYPE_UPROBE_MULTI = 0xc + BPF_PERF_EVENT_UNSPEC = 0x0 + BPF_PERF_EVENT_UPROBE = 0x1 + BPF_PERF_EVENT_URETPROBE = 0x2 + BPF_PERF_EVENT_KPROBE = 0x3 + BPF_PERF_EVENT_KRETPROBE = 0x4 + BPF_PERF_EVENT_TRACEPOINT = 0x5 + BPF_PERF_EVENT_EVENT = 0x6 + BPF_F_KPROBE_MULTI_RETURN = 0x1 + BPF_F_UPROBE_MULTI_RETURN = 0x1 BPF_ANY = 0x0 BPF_NOEXIST = 0x1 BPF_EXIST = 0x2 @@ -2743,6 +2834,8 @@ const ( BPF_F_MMAPABLE = 0x400 BPF_F_PRESERVE_ELEMS = 0x800 BPF_F_INNER_MAP = 0x1000 + BPF_F_LINK = 0x2000 + BPF_F_PATH_FD = 0x4000 BPF_STATS_RUN_TIME = 0x0 BPF_STACK_BUILD_ID_EMPTY = 0x0 BPF_STACK_BUILD_ID_VALID = 0x1 @@ -2763,6 +2856,7 @@ const ( BPF_F_ZERO_CSUM_TX = 0x2 BPF_F_DONT_FRAGMENT = 0x4 BPF_F_SEQ_NUMBER = 0x8 + BPF_F_NO_TUNNEL_KEY = 0x10 BPF_F_TUNINFO_FLAGS = 0x10 BPF_F_INDEX_MASK = 0xffffffff BPF_F_CURRENT_CPU = 0xffffffff @@ -2779,6 +2873,8 @@ const ( BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 BPF_F_ADJ_ROOM_NO_CSUM_RESET = 0x20 BPF_F_ADJ_ROOM_ENCAP_L2_ETH = 0x40 + BPF_F_ADJ_ROOM_DECAP_L3_IPV4 = 0x80 + BPF_F_ADJ_ROOM_DECAP_L3_IPV6 = 0x100 BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_F_SYSCTL_BASE_NAME = 0x1 @@ -2849,7 +2945,7 @@ const ( BPF_TCP_LISTEN = 0xa BPF_TCP_CLOSING = 0xb BPF_TCP_NEW_SYN_RECV = 0xc - BPF_TCP_MAX_STATES = 0xd + BPF_TCP_MAX_STATES = 0xe TCP_BPF_IW = 0x3e9 TCP_BPF_SNDCWND_CLAMP = 0x3ea TCP_BPF_DELACK_MAX = 0x3eb @@ -2867,6 +2963,8 @@ const ( BPF_DEVCG_DEV_CHAR = 0x2 BPF_FIB_LOOKUP_DIRECT = 0x1 BPF_FIB_LOOKUP_OUTPUT = 0x2 + BPF_FIB_LOOKUP_SKIP_NEIGH = 0x4 + BPF_FIB_LOOKUP_TBID = 0x8 BPF_FIB_LKUP_RET_SUCCESS = 0x0 BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 @@ -2902,6 +3000,7 @@ const ( BPF_CORE_ENUMVAL_EXISTS = 0xa BPF_CORE_ENUMVAL_VALUE = 0xb BPF_CORE_TYPE_MATCHES = 0xc + BPF_F_TIMER_ABS = 
0x1 ) const ( @@ -2980,6 +3079,12 @@ type LoopInfo64 struct { Encrypt_key [32]uint8 Init [2]uint64 } +type LoopConfig struct { + Fd uint32 + Size uint32 + Info LoopInfo64 + _ [8]uint64 +} type TIPCSocketAddr struct { Ref uint32 @@ -3116,7 +3221,7 @@ const ( DEVLINK_CMD_LINECARD_NEW = 0x50 DEVLINK_CMD_LINECARD_DEL = 0x51 DEVLINK_CMD_SELFTESTS_GET = 0x52 - DEVLINK_CMD_MAX = 0x53 + DEVLINK_CMD_MAX = 0x54 DEVLINK_PORT_TYPE_NOTSET = 0x0 DEVLINK_PORT_TYPE_AUTO = 0x1 DEVLINK_PORT_TYPE_ETH = 0x2 @@ -3368,7 +3473,7 @@ const ( DEVLINK_PORT_FN_ATTR_STATE = 0x2 DEVLINK_PORT_FN_ATTR_OPSTATE = 0x3 DEVLINK_PORT_FN_ATTR_CAPS = 0x4 - DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x4 + DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x5 ) type FsverityDigest struct { @@ -4152,7 +4257,8 @@ const ( ) type LandlockRulesetAttr struct { - Access_fs uint64 + Access_fs uint64 + Access_net uint64 } type LandlockPathBeneathAttr struct { @@ -4499,7 +4605,7 @@ const ( NL80211_ATTR_MAC_HINT = 0xc8 NL80211_ATTR_MAC_MASK = 0xd7 NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca - NL80211_ATTR_MAX = 0x146 + NL80211_ATTR_MAX = 0x149 NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 NL80211_ATTR_MAX_CSA_COUNTERS = 0xce NL80211_ATTR_MAX_MATCH_SETS = 0x85 @@ -4765,7 +4871,7 @@ const ( NL80211_BSS_FREQUENCY_OFFSET = 0x14 NL80211_BSS_INFORMATION_ELEMENTS = 0x6 NL80211_BSS_LAST_SEEN_BOOTTIME = 0xf - NL80211_BSS_MAX = 0x16 + NL80211_BSS_MAX = 0x18 NL80211_BSS_MLD_ADDR = 0x16 NL80211_BSS_MLO_LINK_ID = 0x15 NL80211_BSS_PAD = 0x10 @@ -4869,7 +4975,7 @@ const ( NL80211_CMD_LEAVE_IBSS = 0x2c NL80211_CMD_LEAVE_MESH = 0x45 NL80211_CMD_LEAVE_OCB = 0x6d - NL80211_CMD_MAX = 0x9a + NL80211_CMD_MAX = 0x9b NL80211_CMD_MICHAEL_MIC_FAILURE = 0x29 NL80211_CMD_MODIFY_LINK_STA = 0x97 NL80211_CMD_NAN_MATCH = 0x78 @@ -5103,7 +5209,7 @@ const ( NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf - NL80211_FREQUENCY_ATTR_MAX = 0x1b + NL80211_FREQUENCY_ATTR_MAX = 0x1f NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6 NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11 NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc @@ -5516,7 +5622,7 @@ const ( NL80211_REGDOM_TYPE_CUSTOM_WORLD = 0x2 NL80211_REGDOM_TYPE_INTERSECTION = 0x3 NL80211_REGDOM_TYPE_WORLD = 0x1 - NL80211_REG_RULE_ATTR_MAX = 0x7 + NL80211_REG_RULE_ATTR_MAX = 0x8 NL80211_REKEY_DATA_AKM = 0x4 NL80211_REKEY_DATA_KCK = 0x2 NL80211_REKEY_DATA_KEK = 0x1 @@ -5883,3 +5989,15 @@ type SchedAttr struct { } const SizeofSchedAttr = 0x38 + +type Cachestat_t struct { + Cache uint64 + Dirty uint64 + Writeback uint64 + Evicted uint64 + Recently_evicted uint64 +} +type CachestatRange struct { + Off uint64 + Len uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index 6d8acbcc..fd402da4 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build 386 && linux -// +build 386,linux package unix @@ -478,14 +477,6 @@ const ( BLKPG = 0x1269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index 59293c68..eb7a5e18 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && linux -// +build amd64,linux package unix @@ -493,15 +492,6 @@ const ( BLKPG = 0x1269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index 40cfa38c..d78ac108 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && linux -// +build arm,linux package unix @@ -471,15 +470,6 @@ const ( BLKPG = 0x1269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]uint8 Driver_name [64]uint8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index 055bc421..cd06d47f 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && linux -// +build arm64,linux package unix @@ -472,15 +471,6 @@ const ( BLKPG = 0x1269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go index f28affbc..2f28fe26 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build loong64 && linux -// +build loong64,linux package unix @@ -473,15 +472,6 @@ const ( BLKPG = 0x1269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index 9d71e7cc..71d6cac2 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build mips && linux -// +build mips,linux package unix @@ -477,15 +476,6 @@ const ( BLKPG = 0x20001269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index fd5ccd33..8596d453 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64 && linux -// +build mips64,linux package unix @@ -475,15 +474,6 @@ const ( BLKPG = 0x20001269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index 7704de77..cd60ea18 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64le && linux -// +build mips64le,linux package unix @@ -475,15 +474,6 @@ const ( BLKPG = 0x20001269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index df00b875..b0ae420c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mipsle && linux -// +build mipsle,linux package unix @@ -477,15 +476,6 @@ const ( BLKPG = 0x20001269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go index 0942840d..83597287 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc && linux -// +build ppc,linux package unix @@ -483,15 +482,6 @@ const ( BLKPG = 0x20001269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]uint8 Driver_name [64]uint8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 03487439..69eb6a5c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build ppc64 && linux -// +build ppc64,linux package unix @@ -482,15 +481,6 @@ const ( BLKPG = 0x20001269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]uint8 Driver_name [64]uint8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index bad06704..5f583cb6 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64le && linux -// +build ppc64le,linux package unix @@ -482,15 +481,6 @@ const ( BLKPG = 0x20001269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]uint8 Driver_name [64]uint8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index 1b4c97c3..15adc041 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && linux -// +build riscv64,linux package unix @@ -500,15 +499,6 @@ const ( BLKPG = 0x1269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]uint8 Driver_name [64]uint8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index aa268d02..cf3ce900 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build s390x && linux -// +build s390x,linux package unix @@ -496,15 +495,6 @@ const ( BLKPG = 0x1269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index 444045b6..590b5673 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build sparc64 && linux -// +build sparc64,linux package unix @@ -477,15 +476,6 @@ const ( BLKPG = 0x20001269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go index 9bc4c8f9..f22e7947 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build 386 && netbsd -// +build 386,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go index bb05f655..066a7d83 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && netbsd -// +build amd64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go index db40e3a1..439548ec 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && netbsd -// +build arm,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go index 11121151..16085d3b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && netbsd -// +build arm64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go index 26eba23b..afd13a3a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && openbsd -// +build 386,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go index 5a547988..5d97f1f9 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && openbsd -// +build amd64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go index be58c4e1..34871cdc 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && openbsd -// +build arm,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go index 52338266..5911bceb 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && openbsd -// +build arm64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go index 605cfdb1..e4f24f3b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build mips64 && openbsd -// +build mips64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_ppc64.go index d6724c01..ca50a793 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && openbsd -// +build ppc64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_riscv64.go index ddfd27a4..d7d7f790 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && openbsd -// +build riscv64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go index 0400747c..14160576 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && solaris -// +build amd64,solaris package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go index aec1efcb..d9a13af4 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build zos && s390x -// +build zos,s390x // Hand edited based on ztypes_linux_s390x.go // TODO: auto-generate. 
@@ -26,10 +25,13 @@ const ( SizeofIPv6Mreq = 20 SizeofICMPv6Filter = 32 SizeofIPv6MTUInfo = 32 + SizeofInet4Pktinfo = 8 + SizeofInet6Pktinfo = 20 SizeofLinger = 8 SizeofSockaddrInet4 = 16 SizeofSockaddrInet6 = 28 SizeofTCPInfo = 0x68 + SizeofUcred = 12 ) type ( @@ -70,12 +72,17 @@ type Utimbuf struct { } type Utsname struct { - Sysname [65]byte - Nodename [65]byte - Release [65]byte - Version [65]byte - Machine [65]byte - Domainname [65]byte + Sysname [16]byte + Nodename [32]byte + Release [8]byte + Version [8]byte + Machine [16]byte +} + +type Ucred struct { + Pid int32 + Uid uint32 + Gid uint32 } type RawSockaddrInet4 struct { @@ -326,7 +333,7 @@ type Statvfs_t struct { } type Statfs_t struct { - Type uint32 + Type uint64 Bsize uint64 Blocks uint64 Bfree uint64 @@ -337,6 +344,7 @@ type Statfs_t struct { Namelen uint64 Frsize uint64 Flags uint64 + _ [4]uint64 } type direntLE struct { @@ -413,3 +421,126 @@ type W_Mntent struct { Quiesceowner [8]byte _ [38]byte } + +type EpollEvent struct { + Events uint32 + _ int32 + Fd int32 + Pad int32 +} + +type InotifyEvent struct { + Wd int32 + Mask uint32 + Cookie uint32 + Len uint32 + Name string +} + +const ( + SizeofInotifyEvent = 0x10 +) + +type ConsMsg2 struct { + Cm2Format uint16 + Cm2R1 uint16 + Cm2Msglength uint32 + Cm2Msg *byte + Cm2R2 [4]byte + Cm2R3 [4]byte + Cm2Routcde *uint32 + Cm2Descr *uint32 + Cm2Msgflag uint32 + Cm2Token uint32 + Cm2Msgid *uint32 + Cm2R4 [4]byte + Cm2DomToken uint32 + Cm2DomMsgid *uint32 + Cm2ModCartptr *byte + Cm2ModConsidptr *byte + Cm2MsgCart [8]byte + Cm2MsgConsid [4]byte + Cm2R5 [12]byte +} + +const ( + CC_modify = 1 + CC_stop = 2 + CONSOLE_FORMAT_2 = 2 + CONSOLE_FORMAT_3 = 3 + CONSOLE_HRDCPY = 0x80000000 +) + +type OpenHow struct { + Flags uint64 + Mode uint64 + Resolve uint64 +} + +const SizeofOpenHow = 0x18 + +const ( + RESOLVE_CACHED = 0x20 + RESOLVE_BENEATH = 0x8 + RESOLVE_IN_ROOT = 0x10 + RESOLVE_NO_MAGICLINKS = 0x2 + RESOLVE_NO_SYMLINKS = 0x4 + RESOLVE_NO_XDEV = 0x1 +) + +type Siginfo struct { + Signo int32 + Errno int32 + Code int32 + Pid int32 + Uid uint32 + _ [44]byte +} + +type SysvIpcPerm struct { + Uid uint32 + Gid uint32 + Cuid uint32 + Cgid uint32 + Mode int32 +} + +type SysvShmDesc struct { + Perm SysvIpcPerm + _ [4]byte + Lpid int32 + Cpid int32 + Nattch uint32 + _ [4]byte + _ [4]byte + _ [4]byte + _ int32 + _ uint8 + _ uint8 + _ uint16 + _ *byte + Segsz uint64 + Atime Time_t + Dtime Time_t + Ctime Time_t +} + +type SysvShmDesc64 struct { + Perm SysvIpcPerm + _ [4]byte + Lpid int32 + Cpid int32 + Nattch uint32 + _ [4]byte + _ [4]byte + _ [4]byte + _ int32 + _ byte + _ uint8 + _ uint16 + _ *byte + Segsz uint64 + Atime int64 + Dtime int64 + Ctime int64 +} diff --git a/vendor/golang.org/x/sys/windows/aliases.go b/vendor/golang.org/x/sys/windows/aliases.go index a20ebea6..16f90560 100644 --- a/vendor/golang.org/x/sys/windows/aliases.go +++ b/vendor/golang.org/x/sys/windows/aliases.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build windows && go1.9 -// +build windows,go1.9 +//go:build windows package windows diff --git a/vendor/golang.org/x/sys/windows/empty.s b/vendor/golang.org/x/sys/windows/empty.s deleted file mode 100644 index fdbbbcd3..00000000 --- a/vendor/golang.org/x/sys/windows/empty.s +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !go1.12 -// +build !go1.12 - -// This file is here to allow bodyless functions with go:linkname for Go 1.11 -// and earlier (see https://golang.org/issue/23311). diff --git a/vendor/golang.org/x/sys/windows/env_windows.go b/vendor/golang.org/x/sys/windows/env_windows.go index b8ad1925..d4577a42 100644 --- a/vendor/golang.org/x/sys/windows/env_windows.go +++ b/vendor/golang.org/x/sys/windows/env_windows.go @@ -37,14 +37,17 @@ func (token Token) Environ(inheritExisting bool) (env []string, err error) { return nil, err } defer DestroyEnvironmentBlock(block) - blockp := unsafe.Pointer(block) - for { - entry := UTF16PtrToString((*uint16)(blockp)) - if len(entry) == 0 { - break + size := unsafe.Sizeof(*block) + for *block != 0 { + // find NUL terminator + end := unsafe.Pointer(block) + for *(*uint16)(end) != 0 { + end = unsafe.Add(end, size) } - env = append(env, entry) - blockp = unsafe.Add(blockp, 2*(len(entry)+1)) + + entry := unsafe.Slice(block, (uintptr(end)-uintptr(unsafe.Pointer(block)))/size) + env = append(env, UTF16ToString(entry)) + block = (*uint16)(unsafe.Add(end, size)) } return env, nil } diff --git a/vendor/golang.org/x/sys/windows/eventlog.go b/vendor/golang.org/x/sys/windows/eventlog.go index 2cd60645..6c366955 100644 --- a/vendor/golang.org/x/sys/windows/eventlog.go +++ b/vendor/golang.org/x/sys/windows/eventlog.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build windows -// +build windows package windows diff --git a/vendor/golang.org/x/sys/windows/mksyscall.go b/vendor/golang.org/x/sys/windows/mksyscall.go index 8563f79c..dbcdb090 100644 --- a/vendor/golang.org/x/sys/windows/mksyscall.go +++ b/vendor/golang.org/x/sys/windows/mksyscall.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build generate -// +build generate package windows diff --git a/vendor/golang.org/x/sys/windows/race.go b/vendor/golang.org/x/sys/windows/race.go index 9196b089..0f1bdc38 100644 --- a/vendor/golang.org/x/sys/windows/race.go +++ b/vendor/golang.org/x/sys/windows/race.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build windows && race -// +build windows,race package windows diff --git a/vendor/golang.org/x/sys/windows/race0.go b/vendor/golang.org/x/sys/windows/race0.go index 7bae4817..0c78da78 100644 --- a/vendor/golang.org/x/sys/windows/race0.go +++ b/vendor/golang.org/x/sys/windows/race0.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build windows && !race -// +build windows,!race package windows diff --git a/vendor/golang.org/x/sys/windows/registry/key.go b/vendor/golang.org/x/sys/windows/registry/key.go index 6c8d97b6..fd863244 100644 --- a/vendor/golang.org/x/sys/windows/registry/key.go +++ b/vendor/golang.org/x/sys/windows/registry/key.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build windows -// +build windows // Package registry provides access to the Windows registry. // diff --git a/vendor/golang.org/x/sys/windows/registry/mksyscall.go b/vendor/golang.org/x/sys/windows/registry/mksyscall.go index ee74927d..bbf86ccf 100644 --- a/vendor/golang.org/x/sys/windows/registry/mksyscall.go +++ b/vendor/golang.org/x/sys/windows/registry/mksyscall.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build generate -// +build generate package registry diff --git a/vendor/golang.org/x/sys/windows/registry/syscall.go b/vendor/golang.org/x/sys/windows/registry/syscall.go index 41733512..f533091c 100644 --- a/vendor/golang.org/x/sys/windows/registry/syscall.go +++ b/vendor/golang.org/x/sys/windows/registry/syscall.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build windows -// +build windows package registry diff --git a/vendor/golang.org/x/sys/windows/registry/value.go b/vendor/golang.org/x/sys/windows/registry/value.go index 2789f6f1..74db26b9 100644 --- a/vendor/golang.org/x/sys/windows/registry/value.go +++ b/vendor/golang.org/x/sys/windows/registry/value.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build windows -// +build windows package registry diff --git a/vendor/golang.org/x/sys/windows/service.go b/vendor/golang.org/x/sys/windows/service.go index c44a1b96..a9dc6308 100644 --- a/vendor/golang.org/x/sys/windows/service.go +++ b/vendor/golang.org/x/sys/windows/service.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build windows -// +build windows package windows diff --git a/vendor/golang.org/x/sys/windows/str.go b/vendor/golang.org/x/sys/windows/str.go index 4fc01434..6a4f9ce6 100644 --- a/vendor/golang.org/x/sys/windows/str.go +++ b/vendor/golang.org/x/sys/windows/str.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build windows -// +build windows package windows diff --git a/vendor/golang.org/x/sys/windows/syscall.go b/vendor/golang.org/x/sys/windows/syscall.go index 8732cdb9..e85ed6b9 100644 --- a/vendor/golang.org/x/sys/windows/syscall.go +++ b/vendor/golang.org/x/sys/windows/syscall.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build windows -// +build windows // Package windows contains an interface to the low-level operating system // primitives. 
OS details vary depending on the underlying system, and diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 35cfc57c..6525c62f 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -125,8 +125,7 @@ func UTF16PtrToString(p *uint16) string { for ptr := unsafe.Pointer(p); *(*uint16)(ptr) != 0; n++ { ptr = unsafe.Pointer(uintptr(ptr) + unsafe.Sizeof(*p)) } - - return string(utf16.Decode(unsafe.Slice(p, n))) + return UTF16ToString(unsafe.Slice(p, n)) } func Getpagesize() int { return 4096 } @@ -155,6 +154,8 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys GetModuleFileName(module Handle, filename *uint16, size uint32) (n uint32, err error) = kernel32.GetModuleFileNameW //sys GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err error) = kernel32.GetModuleHandleExW //sys SetDefaultDllDirectories(directoryFlags uint32) (err error) +//sys AddDllDirectory(path *uint16) (cookie uintptr, err error) = kernel32.AddDllDirectory +//sys RemoveDllDirectory(cookie uintptr) (err error) = kernel32.RemoveDllDirectory //sys SetDllDirectory(path string) (err error) = kernel32.SetDllDirectoryW //sys GetVersion() (ver uint32, err error) //sys FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, buf []uint16, args *byte) (n uint32, err error) = FormatMessageW @@ -164,6 +165,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile Handle) (handle Handle, err error) [failretval==InvalidHandle] = CreateFileW //sys CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *SecurityAttributes) (handle Handle, err error) [failretval==InvalidHandle] = CreateNamedPipeW //sys ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) +//sys DisconnectNamedPipe(pipe Handle) (err error) //sys GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) //sys GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW //sys SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32) (err error) = SetNamedPipeHandleState @@ -192,6 +194,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys GetComputerName(buf *uint16, n *uint32) (err error) = GetComputerNameW //sys GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) = GetComputerNameExW //sys SetEndOfFile(handle Handle) (err error) +//sys SetFileValidData(handle Handle, validDataLength int64) (err error) //sys GetSystemTimeAsFileTime(time *Filetime) //sys GetSystemTimePreciseAsFileTime(time *Filetime) //sys GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) [failretval==0xffffffff] @@ -233,6 +236,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) (err error) = userenv.CreateEnvironmentBlock //sys DestroyEnvironmentBlock(block *uint16) (err error) = userenv.DestroyEnvironmentBlock //sys getTickCount64() (ms uint64) = kernel32.GetTickCount64 +//sys GetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime 
*Filetime) (err error) //sys SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) //sys GetFileAttributes(name *uint16) (attrs uint32, err error) [failretval==INVALID_FILE_ATTRIBUTES] = kernel32.GetFileAttributesW //sys SetFileAttributes(name *uint16, attrs uint32) (err error) = kernel32.SetFileAttributesW @@ -345,8 +349,19 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys SetProcessPriorityBoost(process Handle, disable bool) (err error) = kernel32.SetProcessPriorityBoost //sys GetProcessWorkingSetSizeEx(hProcess Handle, lpMinimumWorkingSetSize *uintptr, lpMaximumWorkingSetSize *uintptr, flags *uint32) //sys SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr, dwMaximumWorkingSetSize uintptr, flags uint32) (err error) +//sys ClearCommBreak(handle Handle) (err error) +//sys ClearCommError(handle Handle, lpErrors *uint32, lpStat *ComStat) (err error) +//sys EscapeCommFunction(handle Handle, dwFunc uint32) (err error) +//sys GetCommState(handle Handle, lpDCB *DCB) (err error) +//sys GetCommModemStatus(handle Handle, lpModemStat *uint32) (err error) //sys GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) +//sys PurgeComm(handle Handle, dwFlags uint32) (err error) +//sys SetCommBreak(handle Handle) (err error) +//sys SetCommMask(handle Handle, dwEvtMask uint32) (err error) +//sys SetCommState(handle Handle, lpDCB *DCB) (err error) //sys SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) +//sys SetupComm(handle Handle, dwInQueue uint32, dwOutQueue uint32) (err error) +//sys WaitCommEvent(handle Handle, lpEvtMask *uint32, lpOverlapped *Overlapped) (err error) //sys GetActiveProcessorCount(groupNumber uint16) (ret uint32) //sys GetMaximumProcessorCount(groupNumber uint16) (ret uint32) //sys EnumWindows(enumFunc uintptr, param unsafe.Pointer) (err error) = user32.EnumWindows @@ -969,7 +984,8 @@ func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, int32, error) { if n > 0 { sl += int32(n) + 1 } - if sa.raw.Path[0] == '@' { + if sa.raw.Path[0] == '@' || (sa.raw.Path[0] == 0 && sl > 3) { + // Check sl > 3 so we don't change unnamed socket behavior. sa.raw.Path[0] = 0 // Don't count trailing NUL for abstract address. sl-- @@ -1830,3 +1846,73 @@ func ResizePseudoConsole(pconsole Handle, size Coord) error { // accept arguments that can be casted to uintptr, and Coord can't. return resizePseudoConsole(pconsole, *((*uint32)(unsafe.Pointer(&size)))) } + +// DCB constants. See https://learn.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-dcb. +const ( + CBR_110 = 110 + CBR_300 = 300 + CBR_600 = 600 + CBR_1200 = 1200 + CBR_2400 = 2400 + CBR_4800 = 4800 + CBR_9600 = 9600 + CBR_14400 = 14400 + CBR_19200 = 19200 + CBR_38400 = 38400 + CBR_57600 = 57600 + CBR_115200 = 115200 + CBR_128000 = 128000 + CBR_256000 = 256000 + + DTR_CONTROL_DISABLE = 0x00000000 + DTR_CONTROL_ENABLE = 0x00000010 + DTR_CONTROL_HANDSHAKE = 0x00000020 + + RTS_CONTROL_DISABLE = 0x00000000 + RTS_CONTROL_ENABLE = 0x00001000 + RTS_CONTROL_HANDSHAKE = 0x00002000 + RTS_CONTROL_TOGGLE = 0x00003000 + + NOPARITY = 0 + ODDPARITY = 1 + EVENPARITY = 2 + MARKPARITY = 3 + SPACEPARITY = 4 + + ONESTOPBIT = 0 + ONE5STOPBITS = 1 + TWOSTOPBITS = 2 +) + +// EscapeCommFunction constants. See https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-escapecommfunction. +const ( + SETXOFF = 1 + SETXON = 2 + SETRTS = 3 + CLRRTS = 4 + SETDTR = 5 + CLRDTR = 6 + SETBREAK = 8 + CLRBREAK = 9 +) + +// PurgeComm constants. 
See https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-purgecomm. +const ( + PURGE_TXABORT = 0x0001 + PURGE_RXABORT = 0x0002 + PURGE_TXCLEAR = 0x0004 + PURGE_RXCLEAR = 0x0008 +) + +// SetCommMask constants. See https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-setcommmask. +const ( + EV_RXCHAR = 0x0001 + EV_RXFLAG = 0x0002 + EV_TXEMPTY = 0x0004 + EV_CTS = 0x0008 + EV_DSR = 0x0010 + EV_RLSD = 0x0020 + EV_BREAK = 0x0040 + EV_ERR = 0x0080 + EV_RING = 0x0100 +) diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index b88dc7c8..d8cb71db 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -1094,7 +1094,33 @@ const ( SOMAXCONN = 0x7fffffff - TCP_NODELAY = 1 + TCP_NODELAY = 1 + TCP_EXPEDITED_1122 = 2 + TCP_KEEPALIVE = 3 + TCP_MAXSEG = 4 + TCP_MAXRT = 5 + TCP_STDURG = 6 + TCP_NOURG = 7 + TCP_ATMARK = 8 + TCP_NOSYNRETRIES = 9 + TCP_TIMESTAMPS = 10 + TCP_OFFLOAD_PREFERENCE = 11 + TCP_CONGESTION_ALGORITHM = 12 + TCP_DELAY_FIN_ACK = 13 + TCP_MAXRTMS = 14 + TCP_FASTOPEN = 15 + TCP_KEEPCNT = 16 + TCP_KEEPIDLE = TCP_KEEPALIVE + TCP_KEEPINTVL = 17 + TCP_FAIL_CONNECT_ON_ICMP_ERROR = 18 + TCP_ICMP_ERROR_INFO = 19 + + UDP_NOCHECKSUM = 1 + UDP_SEND_MSG_SIZE = 2 + UDP_RECV_MAX_COALESCED_SIZE = 3 + UDP_CHECKSUM_COVERAGE = 20 + + UDP_COALESCED_INFO = 3 SHUT_RD = 0 SHUT_WR = 1 @@ -3354,3 +3380,27 @@ type BLOB struct { Size uint32 BlobData *byte } + +type ComStat struct { + Flags uint32 + CBInQue uint32 + CBOutQue uint32 +} + +type DCB struct { + DCBlength uint32 + BaudRate uint32 + Flags uint32 + wReserved uint16 + XonLim uint16 + XoffLim uint16 + ByteSize uint8 + Parity uint8 + StopBits uint8 + XonChar byte + XoffChar byte + ErrorChar byte + EofChar byte + EvtChar byte + wReserved1 uint16 +} diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 8b1688de..5c6035dd 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -184,9 +184,12 @@ var ( procGetAdaptersInfo = modiphlpapi.NewProc("GetAdaptersInfo") procGetBestInterfaceEx = modiphlpapi.NewProc("GetBestInterfaceEx") procGetIfEntry = modiphlpapi.NewProc("GetIfEntry") + procAddDllDirectory = modkernel32.NewProc("AddDllDirectory") procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject") procCancelIo = modkernel32.NewProc("CancelIo") procCancelIoEx = modkernel32.NewProc("CancelIoEx") + procClearCommBreak = modkernel32.NewProc("ClearCommBreak") + procClearCommError = modkernel32.NewProc("ClearCommError") procCloseHandle = modkernel32.NewProc("CloseHandle") procClosePseudoConsole = modkernel32.NewProc("ClosePseudoConsole") procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") @@ -211,7 +214,9 @@ var ( procDeleteProcThreadAttributeList = modkernel32.NewProc("DeleteProcThreadAttributeList") procDeleteVolumeMountPointW = modkernel32.NewProc("DeleteVolumeMountPointW") procDeviceIoControl = modkernel32.NewProc("DeviceIoControl") + procDisconnectNamedPipe = modkernel32.NewProc("DisconnectNamedPipe") procDuplicateHandle = modkernel32.NewProc("DuplicateHandle") + procEscapeCommFunction = modkernel32.NewProc("EscapeCommFunction") procExitProcess = modkernel32.NewProc("ExitProcess") procExpandEnvironmentStringsW = modkernel32.NewProc("ExpandEnvironmentStringsW") procFindClose = modkernel32.NewProc("FindClose") @@ -235,6 +240,8 @@ var ( 
procGenerateConsoleCtrlEvent = modkernel32.NewProc("GenerateConsoleCtrlEvent") procGetACP = modkernel32.NewProc("GetACP") procGetActiveProcessorCount = modkernel32.NewProc("GetActiveProcessorCount") + procGetCommModemStatus = modkernel32.NewProc("GetCommModemStatus") + procGetCommState = modkernel32.NewProc("GetCommState") procGetCommTimeouts = modkernel32.NewProc("GetCommTimeouts") procGetCommandLineW = modkernel32.NewProc("GetCommandLineW") procGetComputerNameExW = modkernel32.NewProc("GetComputerNameExW") @@ -253,6 +260,7 @@ var ( procGetFileAttributesW = modkernel32.NewProc("GetFileAttributesW") procGetFileInformationByHandle = modkernel32.NewProc("GetFileInformationByHandle") procGetFileInformationByHandleEx = modkernel32.NewProc("GetFileInformationByHandleEx") + procGetFileTime = modkernel32.NewProc("GetFileTime") procGetFileType = modkernel32.NewProc("GetFileType") procGetFinalPathNameByHandleW = modkernel32.NewProc("GetFinalPathNameByHandleW") procGetFullPathNameW = modkernel32.NewProc("GetFullPathNameW") @@ -320,6 +328,7 @@ var ( procProcess32NextW = modkernel32.NewProc("Process32NextW") procProcessIdToSessionId = modkernel32.NewProc("ProcessIdToSessionId") procPulseEvent = modkernel32.NewProc("PulseEvent") + procPurgeComm = modkernel32.NewProc("PurgeComm") procQueryDosDeviceW = modkernel32.NewProc("QueryDosDeviceW") procQueryFullProcessImageNameW = modkernel32.NewProc("QueryFullProcessImageNameW") procQueryInformationJobObject = modkernel32.NewProc("QueryInformationJobObject") @@ -329,9 +338,13 @@ var ( procReadProcessMemory = modkernel32.NewProc("ReadProcessMemory") procReleaseMutex = modkernel32.NewProc("ReleaseMutex") procRemoveDirectoryW = modkernel32.NewProc("RemoveDirectoryW") + procRemoveDllDirectory = modkernel32.NewProc("RemoveDllDirectory") procResetEvent = modkernel32.NewProc("ResetEvent") procResizePseudoConsole = modkernel32.NewProc("ResizePseudoConsole") procResumeThread = modkernel32.NewProc("ResumeThread") + procSetCommBreak = modkernel32.NewProc("SetCommBreak") + procSetCommMask = modkernel32.NewProc("SetCommMask") + procSetCommState = modkernel32.NewProc("SetCommState") procSetCommTimeouts = modkernel32.NewProc("SetCommTimeouts") procSetConsoleCursorPosition = modkernel32.NewProc("SetConsoleCursorPosition") procSetConsoleMode = modkernel32.NewProc("SetConsoleMode") @@ -347,6 +360,7 @@ var ( procSetFileInformationByHandle = modkernel32.NewProc("SetFileInformationByHandle") procSetFilePointer = modkernel32.NewProc("SetFilePointer") procSetFileTime = modkernel32.NewProc("SetFileTime") + procSetFileValidData = modkernel32.NewProc("SetFileValidData") procSetHandleInformation = modkernel32.NewProc("SetHandleInformation") procSetInformationJobObject = modkernel32.NewProc("SetInformationJobObject") procSetNamedPipeHandleState = modkernel32.NewProc("SetNamedPipeHandleState") @@ -357,6 +371,7 @@ var ( procSetStdHandle = modkernel32.NewProc("SetStdHandle") procSetVolumeLabelW = modkernel32.NewProc("SetVolumeLabelW") procSetVolumeMountPointW = modkernel32.NewProc("SetVolumeMountPointW") + procSetupComm = modkernel32.NewProc("SetupComm") procSizeofResource = modkernel32.NewProc("SizeofResource") procSleepEx = modkernel32.NewProc("SleepEx") procTerminateJobObject = modkernel32.NewProc("TerminateJobObject") @@ -375,6 +390,7 @@ var ( procVirtualQueryEx = modkernel32.NewProc("VirtualQueryEx") procVirtualUnlock = modkernel32.NewProc("VirtualUnlock") procWTSGetActiveConsoleSessionId = modkernel32.NewProc("WTSGetActiveConsoleSessionId") + procWaitCommEvent = 
modkernel32.NewProc("WaitCommEvent") procWaitForMultipleObjects = modkernel32.NewProc("WaitForMultipleObjects") procWaitForSingleObject = modkernel32.NewProc("WaitForSingleObject") procWriteConsoleW = modkernel32.NewProc("WriteConsoleW") @@ -1604,6 +1620,15 @@ func GetIfEntry(pIfRow *MibIfRow) (errcode error) { return } +func AddDllDirectory(path *uint16) (cookie uintptr, err error) { + r0, _, e1 := syscall.Syscall(procAddDllDirectory.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + cookie = uintptr(r0) + if cookie == 0 { + err = errnoErr(e1) + } + return +} + func AssignProcessToJobObject(job Handle, process Handle) (err error) { r1, _, e1 := syscall.Syscall(procAssignProcessToJobObject.Addr(), 2, uintptr(job), uintptr(process), 0) if r1 == 0 { @@ -1628,6 +1653,22 @@ func CancelIoEx(s Handle, o *Overlapped) (err error) { return } +func ClearCommBreak(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procClearCommBreak.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func ClearCommError(handle Handle, lpErrors *uint32, lpStat *ComStat) (err error) { + r1, _, e1 := syscall.Syscall(procClearCommError.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(lpErrors)), uintptr(unsafe.Pointer(lpStat))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func CloseHandle(handle Handle) (err error) { r1, _, e1 := syscall.Syscall(procCloseHandle.Addr(), 1, uintptr(handle), 0, 0) if r1 == 0 { @@ -1832,6 +1873,14 @@ func DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBuff return } +func DisconnectNamedPipe(pipe Handle) (err error) { + r1, _, e1 := syscall.Syscall(procDisconnectNamedPipe.Addr(), 1, uintptr(pipe), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetProcessHandle Handle, lpTargetHandle *Handle, dwDesiredAccess uint32, bInheritHandle bool, dwOptions uint32) (err error) { var _p0 uint32 if bInheritHandle { @@ -1844,6 +1893,14 @@ func DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetP return } +func EscapeCommFunction(handle Handle, dwFunc uint32) (err error) { + r1, _, e1 := syscall.Syscall(procEscapeCommFunction.Addr(), 2, uintptr(handle), uintptr(dwFunc), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func ExitProcess(exitcode uint32) { syscall.Syscall(procExitProcess.Addr(), 1, uintptr(exitcode), 0, 0) return @@ -2045,6 +2102,22 @@ func GetActiveProcessorCount(groupNumber uint16) (ret uint32) { return } +func GetCommModemStatus(handle Handle, lpModemStat *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetCommModemStatus.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpModemStat)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetCommState(handle Handle, lpDCB *DCB) (err error) { + r1, _, e1 := syscall.Syscall(procGetCommState.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpDCB)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { r1, _, e1 := syscall.Syscall(procGetCommTimeouts.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(timeouts)), 0) if r1 == 0 { @@ -2185,6 +2258,14 @@ func GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte, return } +func GetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) { + r1, _, e1 := syscall.Syscall6(procGetFileTime.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(ctime)), 
uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func GetFileType(filehandle Handle) (n uint32, err error) { r0, _, e1 := syscall.Syscall(procGetFileType.Addr(), 1, uintptr(filehandle), 0, 0) n = uint32(r0) @@ -2789,6 +2870,14 @@ func PulseEvent(event Handle) (err error) { return } +func PurgeComm(handle Handle, dwFlags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procPurgeComm.Addr(), 2, uintptr(handle), uintptr(dwFlags), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint32, err error) { r0, _, e1 := syscall.Syscall(procQueryDosDeviceW.Addr(), 3, uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)), uintptr(max)) n = uint32(r0) @@ -2870,6 +2959,14 @@ func RemoveDirectory(path *uint16) (err error) { return } +func RemoveDllDirectory(cookie uintptr) (err error) { + r1, _, e1 := syscall.Syscall(procRemoveDllDirectory.Addr(), 1, uintptr(cookie), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func ResetEvent(event Handle) (err error) { r1, _, e1 := syscall.Syscall(procResetEvent.Addr(), 1, uintptr(event), 0, 0) if r1 == 0 { @@ -2895,6 +2992,30 @@ func ResumeThread(thread Handle) (ret uint32, err error) { return } +func SetCommBreak(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procSetCommBreak.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetCommMask(handle Handle, dwEvtMask uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetCommMask.Addr(), 2, uintptr(handle), uintptr(dwEvtMask), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetCommState(handle Handle, lpDCB *DCB) (err error) { + r1, _, e1 := syscall.Syscall(procSetCommState.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpDCB)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { r1, _, e1 := syscall.Syscall(procSetCommTimeouts.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(timeouts)), 0) if r1 == 0 { @@ -3023,6 +3144,14 @@ func SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetim return } +func SetFileValidData(handle Handle, validDataLength int64) (err error) { + r1, _, e1 := syscall.Syscall(procSetFileValidData.Addr(), 2, uintptr(handle), uintptr(validDataLength), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) { r1, _, e1 := syscall.Syscall(procSetHandleInformation.Addr(), 3, uintptr(handle), uintptr(mask), uintptr(flags)) if r1 == 0 { @@ -3108,6 +3237,14 @@ func SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err erro return } +func SetupComm(handle Handle, dwInQueue uint32, dwOutQueue uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetupComm.Addr(), 3, uintptr(handle), uintptr(dwInQueue), uintptr(dwOutQueue)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func SizeofResource(module Handle, resInfo Handle) (size uint32, err error) { r0, _, e1 := syscall.Syscall(procSizeofResource.Addr(), 2, uintptr(module), uintptr(resInfo), 0) size = uint32(r0) @@ -3254,6 +3391,14 @@ func WTSGetActiveConsoleSessionId() (sessionID uint32) { return } +func WaitCommEvent(handle Handle, lpEvtMask *uint32, lpOverlapped *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall(procWaitCommEvent.Addr(), 3, uintptr(handle), 
uintptr(unsafe.Pointer(lpEvtMask)), uintptr(unsafe.Pointer(lpOverlapped))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func waitForMultipleObjects(count uint32, handles uintptr, waitAll bool, waitMilliseconds uint32) (event uint32, err error) { var _p0 uint32 if waitAll { diff --git a/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go b/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go index 8a7392c4..784bb880 100644 --- a/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go +++ b/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build go1.10 -// +build go1.10 package bidirule diff --git a/vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go b/vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go index bb0a9200..8e1e9439 100644 --- a/vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go +++ b/vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !go1.10 -// +build !go1.10 package bidirule diff --git a/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go index 42fa8d72..d2bd7118 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.10 && !go1.13 -// +build go1.10,!go1.13 package bidi diff --git a/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go index 56a0e1ea..f76bdca2 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.13 && !go1.14 -// +build go1.13,!go1.14 package bidi diff --git a/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go index baacf32b..3aa2c3bd 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.14 && !go1.16 -// +build go1.14,!go1.16 package bidi diff --git a/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go index ffadb7be..a7137579 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.16 && !go1.21 -// +build go1.16,!go1.21 package bidi diff --git a/vendor/golang.org/x/text/unicode/bidi/tables15.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables15.0.0.go index 92cce580..f15746f7 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables15.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables15.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. 
//go:build go1.21 -// +build go1.21 package bidi diff --git a/vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go index f517fdb2..c164d379 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build !go1.10 -// +build !go1.10 package bidi diff --git a/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go index f5a07882..1af161c7 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.10 && !go1.13 -// +build go1.10,!go1.13 package norm diff --git a/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go index cb7239c4..eb73ecc3 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.13 && !go1.14 -// +build go1.13,!go1.14 package norm diff --git a/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go index 11b27330..276cb8d8 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.14 && !go1.16 -// +build go1.14,!go1.16 package norm diff --git a/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go index f65785e8..0cceffd7 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.16 && !go1.21 -// +build go1.16,!go1.21 package norm diff --git a/vendor/golang.org/x/text/unicode/norm/tables15.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables15.0.0.go index e1858b87..b0819e42 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables15.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables15.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.21 -// +build go1.21 package norm diff --git a/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go index 0175eae5..bf65457d 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build !go1.10 -// +build !go1.10 package norm diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/LICENSE b/vendor/google.golang.org/genproto/googleapis/rpc/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/rpc/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go new file mode 100644 index 00000000..a6b50818 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go @@ -0,0 +1,203 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v3.21.9 +// source: google/rpc/status.proto + +package status + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The `Status` type defines a logical error model that is suitable for +// different programming environments, including REST APIs and RPC APIs. It is +// used by [gRPC](https://github.com/grpc). Each `Status` message contains +// three pieces of data: error code, error message, and error details. +// +// You can find out more about this error model and how to work with it in the +// [API Design Guide](https://cloud.google.com/apis/design/errors). +type Status struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The status code, which should be an enum value of + // [google.rpc.Code][google.rpc.Code]. + Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + // A developer-facing error message, which should be in English. Any + // user-facing error message should be localized and sent in the + // [google.rpc.Status.details][google.rpc.Status.details] field, or localized + // by the client. + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + // A list of messages that carry the error details. There is a common set of + // message types for APIs to use. 
+ Details []*anypb.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"` +} + +func (x *Status) Reset() { + *x = Status{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_status_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Status) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Status) ProtoMessage() {} + +func (x *Status) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_status_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Status.ProtoReflect.Descriptor instead. +func (*Status) Descriptor() ([]byte, []int) { + return file_google_rpc_status_proto_rawDescGZIP(), []int{0} +} + +func (x *Status) GetCode() int32 { + if x != nil { + return x.Code + } + return 0 +} + +func (x *Status) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +func (x *Status) GetDetails() []*anypb.Any { + if x != nil { + return x.Details + } + return nil +} + +var File_google_rpc_status_proto protoreflect.FileDescriptor + +var file_google_rpc_status_proto_rawDesc = []byte{ + 0x0a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x72, 0x70, 0x63, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0x66, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, + 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, + 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2e, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, + 0x69, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, + 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x61, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x42, 0x0b, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x37, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x3b, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var ( + file_google_rpc_status_proto_rawDescOnce sync.Once + file_google_rpc_status_proto_rawDescData = file_google_rpc_status_proto_rawDesc +) + +func file_google_rpc_status_proto_rawDescGZIP() []byte { + file_google_rpc_status_proto_rawDescOnce.Do(func() { + file_google_rpc_status_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_rpc_status_proto_rawDescData) + }) + return file_google_rpc_status_proto_rawDescData +} + +var file_google_rpc_status_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var 
file_google_rpc_status_proto_goTypes = []interface{}{ + (*Status)(nil), // 0: google.rpc.Status + (*anypb.Any)(nil), // 1: google.protobuf.Any +} +var file_google_rpc_status_proto_depIdxs = []int32{ + 1, // 0: google.rpc.Status.details:type_name -> google.protobuf.Any + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_google_rpc_status_proto_init() } +func file_google_rpc_status_proto_init() { + if File_google_rpc_status_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_rpc_status_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Status); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_rpc_status_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_rpc_status_proto_goTypes, + DependencyIndexes: file_google_rpc_status_proto_depIdxs, + MessageInfos: file_google_rpc_status_proto_msgTypes, + }.Build() + File_google_rpc_status_proto = out.File + file_google_rpc_status_proto_rawDesc = nil + file_google_rpc_status_proto_goTypes = nil + file_google_rpc_status_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/grpc/AUTHORS b/vendor/google.golang.org/grpc/AUTHORS new file mode 100644 index 00000000..e491a9e7 --- /dev/null +++ b/vendor/google.golang.org/grpc/AUTHORS @@ -0,0 +1 @@ +Google Inc. diff --git a/vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md b/vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md new file mode 100644 index 00000000..9d4213eb --- /dev/null +++ b/vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md @@ -0,0 +1,3 @@ +## Community Code of Conduct + +gRPC follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md new file mode 100644 index 00000000..608aa6e1 --- /dev/null +++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -0,0 +1,73 @@ +# How to contribute + +We definitely welcome your patches and contributions to gRPC! Please read the gRPC +organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md) +and [contribution guidelines](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) before proceeding. + +If you are new to github, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/) + +## Legal requirements + +In order to protect both you and ourselves, you will need to sign the +[Contributor License Agreement](https://identity.linuxfoundation.org/projects/cncf). + +## Guidelines for Pull Requests +How to get your contributions merged smoothly and quickly. + +- Create **small PRs** that are narrowly focused on **addressing a single + concern**. We often times receive PRs that are trying to fix several things at + a time, but only one fix is considered acceptable, nothing gets merged and + both author's & review's time is wasted. Create more PRs to address different + concerns and everyone will be happy. 
+ +- If you are searching for features to work on, issues labeled [Status: Help + Wanted](https://github.com/grpc/grpc-go/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc+label%3A%22Status%3A+Help+Wanted%22) + is a great place to start. These issues are well-documented and usually can be + resolved with a single pull request. + +- If you are adding a new file, make sure it has the copyright message template + at the top as a comment. You can copy over the message from an existing file + and update the year. + +- The grpc package should only depend on standard Go packages and a small number + of exceptions. If your contribution introduces new dependencies which are NOT + in the [list](https://godoc.org/google.golang.org/grpc?imports), you need a + discussion with gRPC-Go authors and consultants. + +- For speculative changes, consider opening an issue and discussing it first. If + you are suggesting a behavioral or API change, consider starting with a [gRFC + proposal](https://github.com/grpc/proposal). + +- Provide a good **PR description** as a record of **what** change is being made + and **why** it was made. Link to a github issue if it exists. + +- If you want to fix formatting or style, consider whether your changes are an + obvious improvement or might be considered a personal preference. If a style + change is based on preference, it likely will not be accepted. If it corrects + widely agreed-upon anti-patterns, then please do create a PR and explain the + benefits of the change. + +- Unless your PR is trivial, you should expect there will be reviewer comments + that you'll need to address before merging. We'll mark it as `Status: Requires + Reporter Clarification` if we expect you to respond to these comments in a + timely manner. If the PR remains inactive for 6 days, it will be marked as + `stale` and automatically close 7 days after that if we don't hear back from + you. + +- Maintain **clean commit history** and use **meaningful commit messages**. PRs + with messy commit history are difficult to review and won't be merged. Use + `rebase -i upstream/master` to curate your commit history and/or to bring in + latest changes from master (but avoid rebasing in the middle of a code + review). + +- Keep your PR up to date with upstream/master (if there are merge conflicts, we + can't really merge your change). + +- **All tests need to be passing** before your change can be merged. We + recommend you **run tests locally** before creating your PR to catch breakages + early on. + - `VET_SKIP_PROTO=1 ./vet.sh` to catch vet errors + - `go test -cpu 1,4 -timeout 7m ./...` to run the tests + - `go test -race -cpu 1,4 -timeout 7m ./...` to run tests in race mode + +- Exceptions to the rules can be made if there's a compelling reason for doing so. diff --git a/vendor/google.golang.org/grpc/GOVERNANCE.md b/vendor/google.golang.org/grpc/GOVERNANCE.md new file mode 100644 index 00000000..d6ff2674 --- /dev/null +++ b/vendor/google.golang.org/grpc/GOVERNANCE.md @@ -0,0 +1 @@ +This repository is governed by the gRPC organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md). diff --git a/vendor/google.golang.org/grpc/LICENSE b/vendor/google.golang.org/grpc/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/google.golang.org/grpc/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/google.golang.org/grpc/MAINTAINERS.md b/vendor/google.golang.org/grpc/MAINTAINERS.md new file mode 100644 index 00000000..c6672c0a --- /dev/null +++ b/vendor/google.golang.org/grpc/MAINTAINERS.md @@ -0,0 +1,28 @@ +This page lists all active maintainers of this repository. If you were a +maintainer and would like to add your name to the Emeritus list, please send us a +PR. + +See [GOVERNANCE.md](https://github.com/grpc/grpc-community/blob/master/governance.md) +for governance guidelines and how to become a maintainer. +See [CONTRIBUTING.md](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) +for general contribution guidelines. + +## Maintainers (in alphabetical order) + +- [cesarghali](https://github.com/cesarghali), Google LLC +- [dfawley](https://github.com/dfawley), Google LLC +- [easwars](https://github.com/easwars), Google LLC +- [menghanl](https://github.com/menghanl), Google LLC +- [srini100](https://github.com/srini100), Google LLC + +## Emeritus Maintainers (in alphabetical order) +- [adelez](https://github.com/adelez), Google LLC +- [canguler](https://github.com/canguler), Google LLC +- [iamqizhao](https://github.com/iamqizhao), Google LLC +- [jadekler](https://github.com/jadekler), Google LLC +- [jtattermusch](https://github.com/jtattermusch), Google LLC +- [lyuxuan](https://github.com/lyuxuan), Google LLC +- [makmukhi](https://github.com/makmukhi), Google LLC +- [matt-kwong](https://github.com/matt-kwong), Google LLC +- [nicolasnoble](https://github.com/nicolasnoble), Google LLC +- [yongni](https://github.com/yongni), Google LLC diff --git a/vendor/google.golang.org/grpc/Makefile b/vendor/google.golang.org/grpc/Makefile new file mode 100644 index 00000000..1f896092 --- /dev/null +++ b/vendor/google.golang.org/grpc/Makefile @@ -0,0 +1,46 @@ +all: vet test testrace + +build: + go build google.golang.org/grpc/... + +clean: + go clean -i google.golang.org/grpc/... + +deps: + GO111MODULE=on go get -d -v google.golang.org/grpc/... + +proto: + @ if ! which protoc > /dev/null; then \ + echo "error: protoc not installed" >&2; \ + exit 1; \ + fi + go generate google.golang.org/grpc/... + +test: + go test -cpu 1,4 -timeout 7m google.golang.org/grpc/... + +testsubmodule: + cd security/advancedtls && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/advancedtls/... + cd security/authorization && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/authorization/... + +testrace: + go test -race -cpu 1,4 -timeout 7m google.golang.org/grpc/... + +testdeps: + GO111MODULE=on go get -d -v -t google.golang.org/grpc/... + +vet: vetdeps + ./vet.sh + +vetdeps: + ./vet.sh -install + +.PHONY: \ + all \ + build \ + clean \ + proto \ + test \ + testrace \ + vet \ + vetdeps diff --git a/vendor/google.golang.org/grpc/NOTICE.txt b/vendor/google.golang.org/grpc/NOTICE.txt new file mode 100644 index 00000000..53019774 --- /dev/null +++ b/vendor/google.golang.org/grpc/NOTICE.txt @@ -0,0 +1,13 @@ +Copyright 2014 gRPC authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md new file mode 100644 index 00000000..ab0fbb79 --- /dev/null +++ b/vendor/google.golang.org/grpc/README.md @@ -0,0 +1,107 @@ +# gRPC-Go + +[![GoDoc](https://pkg.go.dev/badge/google.golang.org/grpc)][API] +[![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go) +[![codecov](https://codecov.io/gh/grpc/grpc-go/graph/badge.svg)](https://codecov.io/gh/grpc/grpc-go) + +The [Go][] implementation of [gRPC][]: A high performance, open source, general +RPC framework that puts mobile and HTTP/2 first. For more information see the +[Go gRPC docs][], or jump directly into the [quick start][]. + +## Prerequisites + +- **[Go][]**: any one of the **three latest major** [releases][go-releases]. + +## Installation + +Simply add the following import to your code, and then `go [build|run|test]` +will automatically fetch the necessary dependencies: + + +```go +import "google.golang.org/grpc" +``` + +> **Note:** If you are trying to access `grpc-go` from **China**, see the +> [FAQ](#FAQ) below. + +## Learn more + +- [Go gRPC docs][], which include a [quick start][] and [API + reference][API] among other resources +- [Low-level technical docs](Documentation) from this repository +- [Performance benchmark][] +- [Examples](examples) + +## FAQ + +### I/O Timeout Errors + +The `golang.org` domain may be blocked from some countries. `go get` usually +produces an error like the following when this happens: + +```console +$ go get -u google.golang.org/grpc +package google.golang.org/grpc: unrecognized import path "google.golang.org/grpc" (https fetch: Get https://google.golang.org/grpc?go-get=1: dial tcp 216.239.37.1:443: i/o timeout) +``` + +To build Go code, there are several options: + +- Set up a VPN and access google.golang.org through that. + +- With Go module support: it is possible to use the `replace` feature of `go + mod` to create aliases for golang.org packages. In your project's directory: + + ```sh + go mod edit -replace=google.golang.org/grpc=github.com/grpc/grpc-go@latest + go mod tidy + go mod vendor + go build -mod=vendor + ``` + + Again, this will need to be done for all transitive dependencies hosted on + golang.org as well. For details, refer to [golang/go issue + #28652](https://github.com/golang/go/issues/28652). + +### Compiling error, undefined: grpc.SupportPackageIsVersion + +Please update to the latest version of gRPC-Go using +`go get google.golang.org/grpc`. + +### How to turn on logging + +The default logger is controlled by environment variables. Turn everything on +like this: + +```console +$ export GRPC_GO_LOG_VERBOSITY_LEVEL=99 +$ export GRPC_GO_LOG_SEVERITY_LEVEL=info +``` + +### The RPC failed with error `"code = Unavailable desc = transport is closing"` + +This error means the connection the RPC is using was closed, and there are many +possible reasons, including: + 1. mis-configured transport credentials, connection failed on handshaking + 1. bytes disrupted, possibly by a proxy in between + 1. server shutdown + 1. Keepalive parameters caused connection shutdown, for example if you have + configured your server to terminate connections regularly to [trigger DNS + lookups](https://github.com/grpc/grpc-go/issues/3170#issuecomment-552517779). 
+ If this is the case, you may want to increase your + [MaxConnectionAgeGrace](https://pkg.go.dev/google.golang.org/grpc/keepalive?tab=doc#ServerParameters), + to allow longer RPC calls to finish. + +It can be tricky to debug this because the error happens on the client side but +the root cause of the connection being closed is on the server side. Turn on +logging on __both client and server__, and see if there are any transport +errors. + +[API]: https://pkg.go.dev/google.golang.org/grpc +[Go]: https://golang.org +[Go module]: https://github.com/golang/go/wiki/Modules +[gRPC]: https://grpc.io +[Go gRPC docs]: https://grpc.io/docs/languages/go +[Performance benchmark]: https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5180705743044608 +[quick start]: https://grpc.io/docs/languages/go/quickstart +[go-releases]: https://golang.org/doc/devel/release.html diff --git a/vendor/google.golang.org/grpc/SECURITY.md b/vendor/google.golang.org/grpc/SECURITY.md new file mode 100644 index 00000000..be6e1087 --- /dev/null +++ b/vendor/google.golang.org/grpc/SECURITY.md @@ -0,0 +1,3 @@ +# Security Policy + +For information on gRPC Security Policy and reporting potentional security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md). diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go new file mode 100644 index 00000000..52d530d7 --- /dev/null +++ b/vendor/google.golang.org/grpc/attributes/attributes.go @@ -0,0 +1,141 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package attributes defines a generic key/value store used in various gRPC +// components. +// +// # Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed in a +// later release. +package attributes + +import ( + "fmt" + "strings" +) + +// Attributes is an immutable struct for storing and retrieving generic +// key/value pairs. Keys must be hashable, and users should define their own +// types for keys. Values should not be modified after they are added to an +// Attributes or if they were received from one. If values implement 'Equal(o +// any) bool', it will be called by (*Attributes).Equal to determine whether +// two values with the same key should be considered equal. +type Attributes struct { + m map[any]any +} + +// New returns a new Attributes containing the key/value pair. +func New(key, value any) *Attributes { + return &Attributes{m: map[any]any{key: value}} +} + +// WithValue returns a new Attributes containing the previous keys and values +// and the new key/value pair. If the same key appears multiple times, the +// last value overwrites all previous values for that key. To remove an +// existing key, use a nil value. value should not be modified later. 
+func (a *Attributes) WithValue(key, value any) *Attributes { + if a == nil { + return New(key, value) + } + n := &Attributes{m: make(map[any]any, len(a.m)+1)} + for k, v := range a.m { + n.m[k] = v + } + n.m[key] = value + return n +} + +// Value returns the value associated with these attributes for key, or nil if +// no value is associated with key. The returned value should not be modified. +func (a *Attributes) Value(key any) any { + if a == nil { + return nil + } + return a.m[key] +} + +// Equal returns whether a and o are equivalent. If 'Equal(o any) bool' is +// implemented for a value in the attributes, it is called to determine if the +// value matches the one stored in the other attributes. If Equal is not +// implemented, standard equality is used to determine if the two values are +// equal. Note that some types (e.g. maps) aren't comparable by default, so +// they must be wrapped in a struct, or in an alias type, with Equal defined. +func (a *Attributes) Equal(o *Attributes) bool { + if a == nil && o == nil { + return true + } + if a == nil || o == nil { + return false + } + if len(a.m) != len(o.m) { + return false + } + for k, v := range a.m { + ov, ok := o.m[k] + if !ok { + // o missing element of a + return false + } + if eq, ok := v.(interface{ Equal(o any) bool }); ok { + if !eq.Equal(ov) { + return false + } + } else if v != ov { + // Fallback to a standard equality check if Value is unimplemented. + return false + } + } + return true +} + +// String prints the attribute map. If any key or values throughout the map +// implement fmt.Stringer, it calls that method and appends. +func (a *Attributes) String() string { + var sb strings.Builder + sb.WriteString("{") + first := true + for k, v := range a.m { + if !first { + sb.WriteString(", ") + } + sb.WriteString(fmt.Sprintf("%q: %q ", str(k), str(v))) + first = false + } + sb.WriteString("}") + return sb.String() +} + +func str(x any) (s string) { + if v, ok := x.(fmt.Stringer); ok { + return fmt.Sprint(v) + } else if v, ok := x.(string); ok { + return v + } + return fmt.Sprintf("<%p>", x) +} + +// MarshalJSON helps implement the json.Marshaler interface, thereby rendering +// the Attributes correctly when printing (via pretty.JSON) structs containing +// Attributes as fields. +// +// Is it impossible to unmarshal attributes from a JSON representation and this +// method is meant only for debugging purposes. +func (a *Attributes) MarshalJSON() ([]byte, error) { + return []byte(a.String()), nil +} diff --git a/vendor/google.golang.org/grpc/backoff.go b/vendor/google.golang.org/grpc/backoff.go new file mode 100644 index 00000000..29475e31 --- /dev/null +++ b/vendor/google.golang.org/grpc/backoff.go @@ -0,0 +1,61 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// See internal/backoff package for the backoff implementation. This file is +// kept for the exported types and API backward compatibility. 
+ +package grpc + +import ( + "time" + + "google.golang.org/grpc/backoff" +) + +// DefaultBackoffConfig uses values specified for backoff in +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +// +// Deprecated: use ConnectParams instead. Will be supported throughout 1.x. +var DefaultBackoffConfig = BackoffConfig{ + MaxDelay: 120 * time.Second, +} + +// BackoffConfig defines the parameters for the default gRPC backoff strategy. +// +// Deprecated: use ConnectParams instead. Will be supported throughout 1.x. +type BackoffConfig struct { + // MaxDelay is the upper bound of backoff delay. + MaxDelay time.Duration +} + +// ConnectParams defines the parameters for connecting and retrying. Users are +// encouraged to use this instead of the BackoffConfig type defined above. See +// here for more details: +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ConnectParams struct { + // Backoff specifies the configuration options for connection backoff. + Backoff backoff.Config + // MinConnectTimeout is the minimum amount of time we are willing to give a + // connection to complete. + MinConnectTimeout time.Duration +} diff --git a/vendor/google.golang.org/grpc/backoff/backoff.go b/vendor/google.golang.org/grpc/backoff/backoff.go new file mode 100644 index 00000000..0787d0b5 --- /dev/null +++ b/vendor/google.golang.org/grpc/backoff/backoff.go @@ -0,0 +1,52 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package backoff provides configuration options for backoff. +// +// More details can be found at: +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +// +// All APIs in this package are experimental. +package backoff + +import "time" + +// Config defines the configuration options for backoff. +type Config struct { + // BaseDelay is the amount of time to backoff after the first failure. + BaseDelay time.Duration + // Multiplier is the factor with which to multiply backoffs after a + // failed retry. Should ideally be greater than 1. + Multiplier float64 + // Jitter is the factor with which backoffs are randomized. + Jitter float64 + // MaxDelay is the upper bound of backoff delay. + MaxDelay time.Duration +} + +// DefaultConfig is a backoff configuration with the default values specfied +// at https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +// +// This should be useful for callers who want to configure backoff with +// non-default values only for a subset of the options. 
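The `DefaultConfig` comment above suggests overriding only a subset of the backoff options. A minimal dial sketch along those lines follows; it is illustrative rather than part of the vendored file, the `localhost:50051` target is hypothetical, and `grpc.WithConnectParams` plus the `credentials/insecure` helper are assumed to be available in the same grpc-go release.

```go
package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/backoff"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Start from the defaults and change only the delay cap.
	bc := backoff.DefaultConfig
	bc.MaxDelay = 30 * time.Second

	conn, err := grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithConnectParams(grpc.ConnectParams{
			Backoff:           bc,
			MinConnectTimeout: 5 * time.Second,
		}),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}
```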
+var DefaultConfig = Config{ + BaseDelay: 1.0 * time.Second, + Multiplier: 1.6, + Jitter: 0.2, + MaxDelay: 120 * time.Second, +} diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go new file mode 100644 index 00000000..d79560a2 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -0,0 +1,442 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package balancer defines APIs for load balancing in gRPC. +// All APIs in this package are experimental. +package balancer + +import ( + "context" + "encoding/json" + "errors" + "net" + "strings" + + "google.golang.org/grpc/channelz" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +var ( + // m is a map from name to balancer builder. + m = make(map[string]Builder) + + logger = grpclog.Component("balancer") +) + +// Register registers the balancer builder to the balancer map. b.Name +// (lowercased) will be used as the name registered with this builder. If the +// Builder implements ConfigParser, ParseConfig will be called when new service +// configs are received by the resolver, and the result will be provided to the +// Balancer in UpdateClientConnState. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple Balancers are +// registered with the same name, the one registered last will take effect. +func Register(b Builder) { + if strings.ToLower(b.Name()) != b.Name() { + // TODO: Skip the use of strings.ToLower() to index the map after v1.59 + // is released to switch to case sensitive balancer registry. Also, + // remove this warning and update the docstrings for Register and Get. + logger.Warningf("Balancer registered with name %q. grpc-go will be switching to case sensitive balancer registries soon", b.Name()) + } + m[strings.ToLower(b.Name())] = b +} + +// unregisterForTesting deletes the balancer with the given name from the +// balancer map. +// +// This function is not thread-safe. +func unregisterForTesting(name string) { + delete(m, name) +} + +func init() { + internal.BalancerUnregister = unregisterForTesting +} + +// Get returns the resolver builder registered with the given name. +// Note that the compare is done in a case-insensitive fashion. +// If no builder is register with the name, nil will be returned. +func Get(name string) Builder { + if strings.ToLower(name) != name { + // TODO: Skip the use of strings.ToLower() to index the map after v1.59 + // is released to switch to case sensitive balancer registry. Also, + // remove this warning and update the docstrings for Register and Get. + logger.Warningf("Balancer retrieved for name %q. 
grpc-go will be switching to case sensitive balancer registries soon", name) + } + if b, ok := m[strings.ToLower(name)]; ok { + return b + } + return nil +} + +// A SubConn represents a single connection to a gRPC backend service. +// +// Each SubConn contains a list of addresses. +// +// All SubConns start in IDLE, and will not try to connect. To trigger the +// connecting, Balancers must call Connect. If a connection re-enters IDLE, +// Balancers must call Connect again to trigger a new connection attempt. +// +// gRPC will try to connect to the addresses in sequence, and stop trying the +// remainder once the first connection is successful. If an attempt to connect +// to all addresses encounters an error, the SubConn will enter +// TRANSIENT_FAILURE for a backoff period, and then transition to IDLE. +// +// Once established, if a connection is lost, the SubConn will transition +// directly to IDLE. +// +// This interface is to be implemented by gRPC. Users should not need their own +// implementation of this interface. For situations like testing, any +// implementations should embed this interface. This allows gRPC to add new +// methods to this interface. +type SubConn interface { + // UpdateAddresses updates the addresses used in this SubConn. + // gRPC checks if currently-connected address is still in the new list. + // If it's in the list, the connection will be kept. + // If it's not in the list, the connection will gracefully closed, and + // a new connection will be created. + // + // This will trigger a state transition for the SubConn. + // + // Deprecated: this method will be removed. Create new SubConns for new + // addresses instead. + UpdateAddresses([]resolver.Address) + // Connect starts the connecting for this SubConn. + Connect() + // GetOrBuildProducer returns a reference to the existing Producer for this + // ProducerBuilder in this SubConn, or, if one does not currently exist, + // creates a new one and returns it. Returns a close function which must + // be called when the Producer is no longer needed. + GetOrBuildProducer(ProducerBuilder) (p Producer, close func()) + // Shutdown shuts down the SubConn gracefully. Any started RPCs will be + // allowed to complete. No future calls should be made on the SubConn. + // One final state update will be delivered to the StateListener (or + // UpdateSubConnState; deprecated) with ConnectivityState of Shutdown to + // indicate the shutdown operation. This may be delivered before + // in-progress RPCs are complete and the actual connection is closed. + Shutdown() +} + +// NewSubConnOptions contains options to create new SubConn. +type NewSubConnOptions struct { + // CredsBundle is the credentials bundle that will be used in the created + // SubConn. If it's nil, the original creds from grpc DialOptions will be + // used. + // + // Deprecated: Use the Attributes field in resolver.Address to pass + // arbitrary data to the credential handshaker. + CredsBundle credentials.Bundle + // HealthCheckEnabled indicates whether health check service should be + // enabled on this SubConn + HealthCheckEnabled bool + // StateListener is called when the state of the subconn changes. If nil, + // Balancer.UpdateSubConnState will be called instead. Will never be + // invoked until after Connect() is called on the SubConn created with + // these options. + StateListener func(SubConnState) +} + +// State contains the balancer's state relevant to the gRPC ClientConn. 
+type State struct { + // State contains the connectivity state of the balancer, which is used to + // determine the state of the ClientConn. + ConnectivityState connectivity.State + // Picker is used to choose connections (SubConns) for RPCs. + Picker Picker +} + +// ClientConn represents a gRPC ClientConn. +// +// This interface is to be implemented by gRPC. Users should not need a +// brand new implementation of this interface. For the situations like +// testing, the new implementation should embed this interface. This allows +// gRPC to add new methods to this interface. +type ClientConn interface { + // NewSubConn is called by balancer to create a new SubConn. + // It doesn't block and wait for the connections to be established. + // Behaviors of the SubConn can be controlled by options. + // + // Deprecated: please be aware that in a future version, SubConns will only + // support one address per SubConn. + NewSubConn([]resolver.Address, NewSubConnOptions) (SubConn, error) + // RemoveSubConn removes the SubConn from ClientConn. + // The SubConn will be shutdown. + // + // Deprecated: use SubConn.Shutdown instead. + RemoveSubConn(SubConn) + // UpdateAddresses updates the addresses used in the passed in SubConn. + // gRPC checks if the currently connected address is still in the new list. + // If so, the connection will be kept. Else, the connection will be + // gracefully closed, and a new connection will be created. + // + // This may trigger a state transition for the SubConn. + // + // Deprecated: this method will be removed. Create new SubConns for new + // addresses instead. + UpdateAddresses(SubConn, []resolver.Address) + + // UpdateState notifies gRPC that the balancer's internal state has + // changed. + // + // gRPC will update the connectivity state of the ClientConn, and will call + // Pick on the new Picker to pick new SubConns. + UpdateState(State) + + // ResolveNow is called by balancer to notify gRPC to do a name resolving. + ResolveNow(resolver.ResolveNowOptions) + + // Target returns the dial target for this ClientConn. + // + // Deprecated: Use the Target field in the BuildOptions instead. + Target() string +} + +// BuildOptions contains additional information for Build. +type BuildOptions struct { + // DialCreds is the transport credentials to use when communicating with a + // remote load balancer server. Balancer implementations which do not + // communicate with a remote load balancer server can ignore this field. + DialCreds credentials.TransportCredentials + // CredsBundle is the credentials bundle to use when communicating with a + // remote load balancer server. Balancer implementations which do not + // communicate with a remote load balancer server can ignore this field. + CredsBundle credentials.Bundle + // Dialer is the custom dialer to use when communicating with a remote load + // balancer server. Balancer implementations which do not communicate with a + // remote load balancer server can ignore this field. + Dialer func(context.Context, string) (net.Conn, error) + // Authority is the server name to use as part of the authentication + // handshake when communicating with a remote load balancer server. Balancer + // implementations which do not communicate with a remote load balancer + // server can ignore this field. + Authority string + // ChannelzParentID is the parent ClientConn's channelz ID. + ChannelzParentID *channelz.Identifier + // CustomUserAgent is the custom user agent set on the parent ClientConn. 
+ // The balancer should set the same custom user agent if it creates a + // ClientConn. + CustomUserAgent string + // Target contains the parsed address info of the dial target. It is the + // same resolver.Target as passed to the resolver. See the documentation for + // the resolver.Target type for details about what it contains. + Target resolver.Target +} + +// Builder creates a balancer. +type Builder interface { + // Build creates a new balancer with the ClientConn. + Build(cc ClientConn, opts BuildOptions) Balancer + // Name returns the name of balancers built by this builder. + // It will be used to pick balancers (for example in service config). + Name() string +} + +// ConfigParser parses load balancer configs. +type ConfigParser interface { + // ParseConfig parses the JSON load balancer config provided into an + // internal form or returns an error if the config is invalid. For future + // compatibility reasons, unknown fields in the config should be ignored. + ParseConfig(LoadBalancingConfigJSON json.RawMessage) (serviceconfig.LoadBalancingConfig, error) +} + +// PickInfo contains additional information for the Pick operation. +type PickInfo struct { + // FullMethodName is the method name that NewClientStream() is called + // with. The canonical format is /service/Method. + FullMethodName string + // Ctx is the RPC's context, and may contain relevant RPC-level information + // like the outgoing header metadata. + Ctx context.Context +} + +// DoneInfo contains additional information for done. +type DoneInfo struct { + // Err is the rpc error the RPC finished with. It could be nil. + Err error + // Trailer contains the metadata from the RPC's trailer, if present. + Trailer metadata.MD + // BytesSent indicates if any bytes have been sent to the server. + BytesSent bool + // BytesReceived indicates if any byte has been received from the server. + BytesReceived bool + // ServerLoad is the load received from server. It's usually sent as part of + // trailing metadata. + // + // The only supported type now is *orca_v3.LoadReport. + ServerLoad any +} + +var ( + // ErrNoSubConnAvailable indicates no SubConn is available for pick(). + // gRPC will block the RPC until a new picker is available via UpdateState(). + ErrNoSubConnAvailable = errors.New("no SubConn is available") + // ErrTransientFailure indicates all SubConns are in TransientFailure. + // WaitForReady RPCs will block, non-WaitForReady RPCs will fail. + // + // Deprecated: return an appropriate error based on the last resolution or + // connection attempt instead. The behavior is the same for any non-gRPC + // status error. + ErrTransientFailure = errors.New("all SubConns are in TransientFailure") +) + +// PickResult contains information related to a connection chosen for an RPC. +type PickResult struct { + // SubConn is the connection to use for this pick, if its state is Ready. + // If the state is not Ready, gRPC will block the RPC until a new Picker is + // provided by the balancer (using ClientConn.UpdateState). The SubConn + // must be one returned by ClientConn.NewSubConn. + SubConn SubConn + + // Done is called when the RPC is completed. If the SubConn is not ready, + // this will be called with a nil parameter. If the SubConn is not a valid + // type, Done may not be called. May be nil if the balancer does not wish + // to be notified when the RPC completes. + Done func(DoneInfo) + + // Metadata provides a way for LB policies to inject arbitrary per-call + // metadata. 
Any metadata returned here will be merged with existing + // metadata added by the client application. + // + // LB policies with child policies are responsible for propagating metadata + // injected by their children to the ClientConn, as part of Pick(). + Metadata metadata.MD +} + +// TransientFailureError returns e. It exists for backward compatibility and +// will be deleted soon. +// +// Deprecated: no longer necessary, picker errors are treated this way by +// default. +func TransientFailureError(e error) error { return e } + +// Picker is used by gRPC to pick a SubConn to send an RPC. +// Balancer is expected to generate a new picker from its snapshot every time its +// internal state has changed. +// +// The pickers used by gRPC can be updated by ClientConn.UpdateState(). +type Picker interface { + // Pick returns the connection to use for this RPC and related information. + // + // Pick should not block. If the balancer needs to do I/O or any blocking + // or time-consuming work to service this call, it should return + // ErrNoSubConnAvailable, and the Pick call will be repeated by gRPC when + // the Picker is updated (using ClientConn.UpdateState). + // + // If an error is returned: + // + // - If the error is ErrNoSubConnAvailable, gRPC will block until a new + // Picker is provided by the balancer (using ClientConn.UpdateState). + // + // - If the error is a status error (implemented by the grpc/status + // package), gRPC will terminate the RPC with the code and message + // provided. + // + // - For all other errors, wait for ready RPCs will wait, but non-wait for + // ready RPCs will be terminated with this error's Error() string and + // status code Unavailable. + Pick(info PickInfo) (PickResult, error) +} + +// Balancer takes input from gRPC, manages SubConns, and collects and aggregates +// the connectivity states. +// +// It also generates and updates the Picker used by gRPC to pick SubConns for RPCs. +// +// UpdateClientConnState, ResolverError, UpdateSubConnState, and Close are +// guaranteed to be called synchronously from the same goroutine. There's no +// guarantee on picker.Pick, it may be called anytime. +type Balancer interface { + // UpdateClientConnState is called by gRPC when the state of the ClientConn + // changes. If the error returned is ErrBadResolverState, the ClientConn + // will begin calling ResolveNow on the active name resolver with + // exponential backoff until a subsequent call to UpdateClientConnState + // returns a nil error. Any other errors are currently ignored. + UpdateClientConnState(ClientConnState) error + // ResolverError is called by gRPC when the name resolver reports an error. + ResolverError(error) + // UpdateSubConnState is called by gRPC when the state of a SubConn + // changes. + // + // Deprecated: Use NewSubConnOptions.StateListener when creating the + // SubConn instead. + UpdateSubConnState(SubConn, SubConnState) + // Close closes the balancer. The balancer is not currently required to + // call SubConn.Shutdown for its existing SubConns; however, this will be + // required in a future release, so it is recommended. + Close() +} + +// ExitIdler is an optional interface for balancers to implement. If +// implemented, ExitIdle will be called when ClientConn.Connect is called, if +// the ClientConn is idle. If unimplemented, ClientConn.Connect will cause +// all SubConns to connect. +// +// Notice: it will be required for all balancers to implement this in a future +// release. 
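To make the `Pick` contract above concrete, here is a deliberately tiny picker. It is an illustrative sketch, not taken from the vendored sources; `onePicker` and its single-SubConn assumption are invented. While nothing is ready it returns `ErrNoSubConnAvailable`, which, per the comment above, makes gRPC block the RPC until the balancer pushes a new picker via `ClientConn.UpdateState`.

```go
package pickersketch

import "google.golang.org/grpc/balancer"

// onePicker serves every RPC from the single SubConn it was built with.
type onePicker struct {
	sc balancer.SubConn // nil until the balancer has a READY SubConn
}

func (p *onePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
	if p.sc == nil {
		// Nothing ready yet: block the RPC until a new picker is provided.
		return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
	}
	return balancer.PickResult{SubConn: p.sc}, nil
}
```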
+type ExitIdler interface { + // ExitIdle instructs the LB policy to reconnect to backends / exit the + // IDLE state, if appropriate and possible. Note that SubConns that enter + // the IDLE state will not reconnect until SubConn.Connect is called. + ExitIdle() +} + +// SubConnState describes the state of a SubConn. +type SubConnState struct { + // ConnectivityState is the connectivity state of the SubConn. + ConnectivityState connectivity.State + // ConnectionError is set if the ConnectivityState is TransientFailure, + // describing the reason the SubConn failed. Otherwise, it is nil. + ConnectionError error +} + +// ClientConnState describes the state of a ClientConn relevant to the +// balancer. +type ClientConnState struct { + ResolverState resolver.State + // The parsed load balancing configuration returned by the builder's + // ParseConfig method, if implemented. + BalancerConfig serviceconfig.LoadBalancingConfig +} + +// ErrBadResolverState may be returned by UpdateClientConnState to indicate a +// problem with the provided name resolver data. +var ErrBadResolverState = errors.New("bad resolver state") + +// A ProducerBuilder is a simple constructor for a Producer. It is used by the +// SubConn to create producers when needed. +type ProducerBuilder interface { + // Build creates a Producer. The first parameter is always a + // grpc.ClientConnInterface (a type to allow creating RPCs/streams on the + // associated SubConn), but is declared as `any` to avoid a dependency + // cycle. Should also return a close function that will be called when all + // references to the Producer have been given up. + Build(grpcClientConnInterface any) (p Producer, close func()) +} + +// A Producer is a type shared among potentially many consumers. It is +// associated with a SubConn, and an implementation will typically contain +// other methods to provide additional functionality, e.g. configuration or +// subscription registration. +type Producer any diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go new file mode 100644 index 00000000..a7f1eeec --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -0,0 +1,264 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package base + +import ( + "errors" + "fmt" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" +) + +var logger = grpclog.Component("balancer") + +type baseBuilder struct { + name string + pickerBuilder PickerBuilder + config Config +} + +func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { + bal := &baseBalancer{ + cc: cc, + pickerBuilder: bb.pickerBuilder, + + subConns: resolver.NewAddressMap(), + scStates: make(map[balancer.SubConn]connectivity.State), + csEvltr: &balancer.ConnectivityStateEvaluator{}, + config: bb.config, + state: connectivity.Connecting, + } + // Initialize picker to a picker that always returns + // ErrNoSubConnAvailable, because when state of a SubConn changes, we + // may call UpdateState with this picker. + bal.picker = NewErrPicker(balancer.ErrNoSubConnAvailable) + return bal +} + +func (bb *baseBuilder) Name() string { + return bb.name +} + +type baseBalancer struct { + cc balancer.ClientConn + pickerBuilder PickerBuilder + + csEvltr *balancer.ConnectivityStateEvaluator + state connectivity.State + + subConns *resolver.AddressMap + scStates map[balancer.SubConn]connectivity.State + picker balancer.Picker + config Config + + resolverErr error // the last error reported by the resolver; cleared on successful resolution + connErr error // the last connection error; cleared upon leaving TransientFailure +} + +func (b *baseBalancer) ResolverError(err error) { + b.resolverErr = err + if b.subConns.Len() == 0 { + b.state = connectivity.TransientFailure + } + + if b.state != connectivity.TransientFailure { + // The picker will not change since the balancer does not currently + // report an error. + return + } + b.regeneratePicker() + b.cc.UpdateState(balancer.State{ + ConnectivityState: b.state, + Picker: b.picker, + }) +} + +func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { + // TODO: handle s.ResolverState.ServiceConfig? + if logger.V(2) { + logger.Info("base.baseBalancer: got new ClientConn state: ", s) + } + // Successful resolution; clear resolver error and ensure we return nil. + b.resolverErr = nil + // addrsSet is the set converted from addrs, it's used for quick lookup of an address. + addrsSet := resolver.NewAddressMap() + for _, a := range s.ResolverState.Addresses { + addrsSet.Set(a, nil) + if _, ok := b.subConns.Get(a); !ok { + // a is a new address (not existing in b.subConns). + var sc balancer.SubConn + opts := balancer.NewSubConnOptions{ + HealthCheckEnabled: b.config.HealthCheck, + StateListener: func(scs balancer.SubConnState) { b.updateSubConnState(sc, scs) }, + } + sc, err := b.cc.NewSubConn([]resolver.Address{a}, opts) + if err != nil { + logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err) + continue + } + b.subConns.Set(a, sc) + b.scStates[sc] = connectivity.Idle + b.csEvltr.RecordTransition(connectivity.Shutdown, connectivity.Idle) + sc.Connect() + } + } + for _, a := range b.subConns.Keys() { + sci, _ := b.subConns.Get(a) + sc := sci.(balancer.SubConn) + // a was removed by resolver. + if _, ok := addrsSet.Get(a); !ok { + sc.Shutdown() + b.subConns.Delete(a) + // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. + // The entry will be deleted in updateSubConnState. + } + } + // If resolver state contains no addresses, return an error so ClientConn + // will trigger re-resolve. 
Also records this as an resolver error, so when + // the overall state turns transient failure, the error message will have + // the zero address information. + if len(s.ResolverState.Addresses) == 0 { + b.ResolverError(errors.New("produced zero addresses")) + return balancer.ErrBadResolverState + } + + b.regeneratePicker() + b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker}) + return nil +} + +// mergeErrors builds an error from the last connection error and the last +// resolver error. Must only be called if b.state is TransientFailure. +func (b *baseBalancer) mergeErrors() error { + // connErr must always be non-nil unless there are no SubConns, in which + // case resolverErr must be non-nil. + if b.connErr == nil { + return fmt.Errorf("last resolver error: %v", b.resolverErr) + } + if b.resolverErr == nil { + return fmt.Errorf("last connection error: %v", b.connErr) + } + return fmt.Errorf("last connection error: %v; last resolver error: %v", b.connErr, b.resolverErr) +} + +// regeneratePicker takes a snapshot of the balancer, and generates a picker +// from it. The picker is +// - errPicker if the balancer is in TransientFailure, +// - built by the pickerBuilder with all READY SubConns otherwise. +func (b *baseBalancer) regeneratePicker() { + if b.state == connectivity.TransientFailure { + b.picker = NewErrPicker(b.mergeErrors()) + return + } + readySCs := make(map[balancer.SubConn]SubConnInfo) + + // Filter out all ready SCs from full subConn map. + for _, addr := range b.subConns.Keys() { + sci, _ := b.subConns.Get(addr) + sc := sci.(balancer.SubConn) + if st, ok := b.scStates[sc]; ok && st == connectivity.Ready { + readySCs[sc] = SubConnInfo{Address: addr} + } + } + b.picker = b.pickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs}) +} + +// UpdateSubConnState is a nop because a StateListener is always set in NewSubConn. +func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + logger.Errorf("base.baseBalancer: UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) +} + +func (b *baseBalancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + s := state.ConnectivityState + if logger.V(2) { + logger.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s) + } + oldS, ok := b.scStates[sc] + if !ok { + if logger.V(2) { + logger.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s) + } + return + } + if oldS == connectivity.TransientFailure && + (s == connectivity.Connecting || s == connectivity.Idle) { + // Once a subconn enters TRANSIENT_FAILURE, ignore subsequent IDLE or + // CONNECTING transitions to prevent the aggregated state from being + // always CONNECTING when many backends exist but are all down. + if s == connectivity.Idle { + sc.Connect() + } + return + } + b.scStates[sc] = s + switch s { + case connectivity.Idle: + sc.Connect() + case connectivity.Shutdown: + // When an address was removed by resolver, b called Shutdown but kept + // the sc's state in scStates. Remove state for this sc here. + delete(b.scStates, sc) + case connectivity.TransientFailure: + // Save error to be reported via picker. 
+ b.connErr = state.ConnectionError + } + + b.state = b.csEvltr.RecordTransition(oldS, s) + + // Regenerate picker when one of the following happens: + // - this sc entered or left ready + // - the aggregated state of balancer is TransientFailure + // (may need to update error message) + if (s == connectivity.Ready) != (oldS == connectivity.Ready) || + b.state == connectivity.TransientFailure { + b.regeneratePicker() + } + b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker}) +} + +// Close is a nop because base balancer doesn't have internal state to clean up, +// and it doesn't need to call Shutdown for the SubConns. +func (b *baseBalancer) Close() { +} + +// ExitIdle is a nop because the base balancer attempts to stay connected to +// all SubConns at all times. +func (b *baseBalancer) ExitIdle() { +} + +// NewErrPicker returns a Picker that always returns err on Pick(). +func NewErrPicker(err error) balancer.Picker { + return &errPicker{err: err} +} + +// NewErrPickerV2 is temporarily defined for backward compatibility reasons. +// +// Deprecated: use NewErrPicker instead. +var NewErrPickerV2 = NewErrPicker + +type errPicker struct { + err error // Pick() always returns this err. +} + +func (p *errPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + return balancer.PickResult{}, p.err +} diff --git a/vendor/google.golang.org/grpc/balancer/base/base.go b/vendor/google.golang.org/grpc/balancer/base/base.go new file mode 100644 index 00000000..e31d76e3 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/base/base.go @@ -0,0 +1,71 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package base defines a balancer base that can be used to build balancers with +// different picking algorithms. +// +// The base balancer creates a new SubConn for each resolved address. The +// provided picker will only be notified about READY SubConns. +// +// This package is the base of round_robin balancer, its purpose is to be used +// to build round_robin like balancers with complex picking algorithms. +// Balancers with more complicated logic should try to implement a balancer +// builder from scratch. +// +// All APIs in this package are experimental. +package base + +import ( + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/resolver" +) + +// PickerBuilder creates balancer.Picker. +type PickerBuilder interface { + // Build returns a picker that will be used by gRPC to pick a SubConn. + Build(info PickerBuildInfo) balancer.Picker +} + +// PickerBuildInfo contains information needed by the picker builder to +// construct a picker. +type PickerBuildInfo struct { + // ReadySCs is a map from all ready SubConns to the Addresses used to + // create them. + ReadySCs map[balancer.SubConn]SubConnInfo +} + +// SubConnInfo contains information about a SubConn created by the base +// balancer. 
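Since the base package only asks callers for a `PickerBuilder` (together with the `Config` and `NewBalancerBuilder` helpers that follow just below), a complete if naive policy can be registered in a few lines. The sketch below is illustrative and not part of the vendored code; the `firstready` package and the `"first_ready_example"` policy name are invented.

```go
package firstready

import (
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/balancer/base"
)

// Name is what a service config would reference to select this policy.
const Name = "first_ready_example"

type pickerBuilder struct{}

// Build only ever sees READY SubConns; with none ready it falls back to the
// error picker so RPCs block until the next picker update.
func (pickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker {
	for sc := range info.ReadySCs { // map iteration order is arbitrary
		return &picker{sc: sc}
	}
	return base.NewErrPicker(balancer.ErrNoSubConnAvailable)
}

type picker struct{ sc balancer.SubConn }

func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
	return balancer.PickResult{SubConn: p.sc}, nil
}

func init() {
	balancer.Register(base.NewBalancerBuilder(Name, pickerBuilder{}, base.Config{HealthCheck: true}))
}
```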
+type SubConnInfo struct { + Address resolver.Address // the address used to create this SubConn +} + +// Config contains the config info about the base balancer builder. +type Config struct { + // HealthCheck indicates whether health checking should be enabled for this specific balancer. + HealthCheck bool +} + +// NewBalancerBuilder returns a base balancer builder configured by the provided config. +func NewBalancerBuilder(name string, pb PickerBuilder, config Config) balancer.Builder { + return &baseBuilder{ + name: name, + pickerBuilder: pb, + config: config, + } +} diff --git a/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go b/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go new file mode 100644 index 00000000..c3341358 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go @@ -0,0 +1,74 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package balancer + +import "google.golang.org/grpc/connectivity" + +// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns +// and returns one aggregated connectivity state. +// +// It's not thread safe. +type ConnectivityStateEvaluator struct { + numReady uint64 // Number of addrConns in ready state. + numConnecting uint64 // Number of addrConns in connecting state. + numTransientFailure uint64 // Number of addrConns in transient failure state. + numIdle uint64 // Number of addrConns in idle state. +} + +// RecordTransition records state change happening in subConn and based on that +// it evaluates what aggregated state should be. +// +// - If at least one SubConn in Ready, the aggregated state is Ready; +// - Else if at least one SubConn in Connecting, the aggregated state is Connecting; +// - Else if at least one SubConn is Idle, the aggregated state is Idle; +// - Else if at least one SubConn is TransientFailure (or there are no SubConns), the aggregated state is Transient Failure. +// +// Shutdown is not considered. +func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State { + // Update counters. + for idx, state := range []connectivity.State{oldState, newState} { + updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. + switch state { + case connectivity.Ready: + cse.numReady += updateVal + case connectivity.Connecting: + cse.numConnecting += updateVal + case connectivity.TransientFailure: + cse.numTransientFailure += updateVal + case connectivity.Idle: + cse.numIdle += updateVal + } + } + return cse.CurrentState() +} + +// CurrentState returns the current aggregate conn state by evaluating the counters +func (cse *ConnectivityStateEvaluator) CurrentState() connectivity.State { + // Evaluate. 
+ if cse.numReady > 0 { + return connectivity.Ready + } + if cse.numConnecting > 0 { + return connectivity.Connecting + } + if cse.numIdle > 0 { + return connectivity.Idle + } + return connectivity.TransientFailure +} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go b/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go new file mode 100644 index 00000000..4ecfa1c2 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go @@ -0,0 +1,51 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package state declares grpclb types to be set by resolvers wishing to pass +// information to grpclb via resolver.State Attributes. +package state + +import ( + "google.golang.org/grpc/resolver" +) + +// keyType is the key to use for storing State in Attributes. +type keyType string + +const key = keyType("grpc.grpclb.state") + +// State contains gRPCLB-relevant data passed from the name resolver. +type State struct { + // BalancerAddresses contains the remote load balancer address(es). If + // set, overrides any resolver-provided addresses with Type of GRPCLB. + BalancerAddresses []resolver.Address +} + +// Set returns a copy of the provided state with attributes containing s. s's +// data should not be mutated after calling Set. +func Set(state resolver.State, s *State) resolver.State { + state.Attributes = state.Attributes.WithValue(key, s) + return state +} + +// Get returns the grpclb State in the resolver.State, or nil if not present. +// The returned data should not be mutated. +func Get(state resolver.State) *State { + s, _ := state.Attributes.Value(key).(*State) + return s +} diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go new file mode 100644 index 00000000..f7031ad2 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -0,0 +1,81 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package roundrobin defines a roundrobin balancer. Roundrobin balancer is +// installed as one of the default balancers in gRPC, users don't need to +// explicitly install this balancer. 
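The package comment above notes that round_robin is installed by default; what callers still have to do is select it, typically through the service config. The dial sketch below is illustrative and not part of the vendored file; the `dns:///my-service.example.com:50051` target is hypothetical, and `grpc.WithDefaultServiceConfig` is assumed to be available in this grpc-go release.

```go
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.Dial(
		"dns:///my-service.example.com:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		// round_robin is already registered; this only selects it as the
		// channel's default LB policy.
		grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"round_robin":{}}]}`),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}
```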
+package roundrobin + +import ( + "sync/atomic" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/grpcrand" +) + +// Name is the name of round_robin balancer. +const Name = "round_robin" + +var logger = grpclog.Component("roundrobin") + +// newBuilder creates a new roundrobin balancer builder. +func newBuilder() balancer.Builder { + return base.NewBalancerBuilder(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true}) +} + +func init() { + balancer.Register(newBuilder()) +} + +type rrPickerBuilder struct{} + +func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker { + logger.Infof("roundrobinPicker: Build called with info: %v", info) + if len(info.ReadySCs) == 0 { + return base.NewErrPicker(balancer.ErrNoSubConnAvailable) + } + scs := make([]balancer.SubConn, 0, len(info.ReadySCs)) + for sc := range info.ReadySCs { + scs = append(scs, sc) + } + return &rrPicker{ + subConns: scs, + // Start at a random index, as the same RR balancer rebuilds a new + // picker when SubConn states change, and we don't want to apply excess + // load to the first server in the list. + next: uint32(grpcrand.Intn(len(scs))), + } +} + +type rrPicker struct { + // subConns is the snapshot of the roundrobin balancer when this picker was + // created. The slice is immutable. Each Get() will do a round robin + // selection from it and return the selected SubConn. + subConns []balancer.SubConn + next uint32 +} + +func (p *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + subConnsLen := uint32(len(p.subConns)) + nextIndex := atomic.AddUint32(&p.next, 1) + + sc := p.subConns[nextIndex%subConnsLen] + return balancer.PickResult{SubConn: sc}, nil +} diff --git a/vendor/google.golang.org/grpc/balancer_wrapper.go b/vendor/google.golang.org/grpc/balancer_wrapper.go new file mode 100644 index 00000000..b5e30cff --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer_wrapper.go @@ -0,0 +1,380 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "fmt" + "strings" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/balancer/gracefulswitch" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/resolver" +) + +// ccBalancerWrapper sits between the ClientConn and the Balancer. +// +// ccBalancerWrapper implements methods corresponding to the ones on the +// balancer.Balancer interface. The ClientConn is free to call these methods +// concurrently and the ccBalancerWrapper ensures that calls from the ClientConn +// to the Balancer happen in order by performing them in the serializer, without +// any mutexes held. +// +// ccBalancerWrapper also implements the balancer.ClientConn interface and is +// passed to the Balancer implementations. 
It invokes unexported methods on the +// ClientConn to handle these calls from the Balancer. +// +// It uses the gracefulswitch.Balancer internally to ensure that balancer +// switches happen in a graceful manner. +type ccBalancerWrapper struct { + // The following fields are initialized when the wrapper is created and are + // read-only afterwards, and therefore can be accessed without a mutex. + cc *ClientConn + opts balancer.BuildOptions + serializer *grpcsync.CallbackSerializer + serializerCancel context.CancelFunc + + // The following fields are only accessed within the serializer or during + // initialization. + curBalancerName string + balancer *gracefulswitch.Balancer + + // The following field is protected by mu. Caller must take cc.mu before + // taking mu. + mu sync.Mutex + closed bool +} + +// newCCBalancerWrapper creates a new balancer wrapper in idle state. The +// underlying balancer is not created until the switchTo() method is invoked. +func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper { + ctx, cancel := context.WithCancel(cc.ctx) + ccb := &ccBalancerWrapper{ + cc: cc, + opts: balancer.BuildOptions{ + DialCreds: cc.dopts.copts.TransportCredentials, + CredsBundle: cc.dopts.copts.CredsBundle, + Dialer: cc.dopts.copts.Dialer, + Authority: cc.authority, + CustomUserAgent: cc.dopts.copts.UserAgent, + ChannelzParentID: cc.channelzID, + Target: cc.parsedTarget, + }, + serializer: grpcsync.NewCallbackSerializer(ctx), + serializerCancel: cancel, + } + ccb.balancer = gracefulswitch.NewBalancer(ccb, ccb.opts) + return ccb +} + +// updateClientConnState is invoked by grpc to push a ClientConnState update to +// the underlying balancer. This is always executed from the serializer, so +// it is safe to call into the balancer here. +func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { + errCh := make(chan error) + ok := ccb.serializer.Schedule(func(ctx context.Context) { + defer close(errCh) + if ctx.Err() != nil || ccb.balancer == nil { + return + } + err := ccb.balancer.UpdateClientConnState(*ccs) + if logger.V(2) && err != nil { + logger.Infof("error from balancer.UpdateClientConnState: %v", err) + } + errCh <- err + }) + if !ok { + return nil + } + return <-errCh +} + +// resolverError is invoked by grpc to push a resolver error to the underlying +// balancer. The call to the balancer is executed from the serializer. +func (ccb *ccBalancerWrapper) resolverError(err error) { + ccb.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil || ccb.balancer == nil { + return + } + ccb.balancer.ResolverError(err) + }) +} + +// switchTo is invoked by grpc to instruct the balancer wrapper to switch to the +// LB policy identified by name. +// +// ClientConn calls newCCBalancerWrapper() at creation time. Upon receipt of the +// first good update from the name resolver, it determines the LB policy to use +// and invokes the switchTo() method. Upon receipt of every subsequent update +// from the name resolver, it invokes this method. +// +// the ccBalancerWrapper keeps track of the current LB policy name, and skips +// the graceful balancer switching process if the name does not change. +func (ccb *ccBalancerWrapper) switchTo(name string) { + ccb.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil || ccb.balancer == nil { + return + } + // TODO: Other languages use case-sensitive balancer registries. We should + // switch as well. See: https://github.com/grpc/grpc-go/issues/5288. 
+ if strings.EqualFold(ccb.curBalancerName, name) { + return + } + ccb.buildLoadBalancingPolicy(name) + }) +} + +// buildLoadBalancingPolicy performs the following: +// - retrieve a balancer builder for the given name. Use the default LB +// policy, pick_first, if no LB policy with name is found in the registry. +// - instruct the gracefulswitch balancer to switch to the above builder. This +// will actually build the new balancer. +// - update the `curBalancerName` field +// +// Must be called from a serializer callback. +func (ccb *ccBalancerWrapper) buildLoadBalancingPolicy(name string) { + builder := balancer.Get(name) + if builder == nil { + channelz.Warningf(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q, since the specified LB policy %q was not registered", PickFirstBalancerName, name) + builder = newPickfirstBuilder() + } else { + channelz.Infof(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q", name) + } + + if err := ccb.balancer.SwitchTo(builder); err != nil { + channelz.Errorf(logger, ccb.cc.channelzID, "Channel failed to build new LB policy %q: %v", name, err) + return + } + ccb.curBalancerName = builder.Name() +} + +// close initiates async shutdown of the wrapper. cc.mu must be held when +// calling this function. To determine the wrapper has finished shutting down, +// the channel should block on ccb.serializer.Done() without cc.mu held. +func (ccb *ccBalancerWrapper) close() { + ccb.mu.Lock() + ccb.closed = true + ccb.mu.Unlock() + channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: closing") + ccb.serializer.Schedule(func(context.Context) { + if ccb.balancer == nil { + return + } + ccb.balancer.Close() + ccb.balancer = nil + }) + ccb.serializerCancel() +} + +// exitIdle invokes the balancer's exitIdle method in the serializer. +func (ccb *ccBalancerWrapper) exitIdle() { + ccb.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil || ccb.balancer == nil { + return + } + ccb.balancer.ExitIdle() + }) +} + +func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + ccb.cc.mu.Lock() + defer ccb.cc.mu.Unlock() + + ccb.mu.Lock() + if ccb.closed { + ccb.mu.Unlock() + return nil, fmt.Errorf("balancer is being closed; no new SubConns allowed") + } + ccb.mu.Unlock() + + if len(addrs) == 0 { + return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list") + } + ac, err := ccb.cc.newAddrConnLocked(addrs, opts) + if err != nil { + channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err) + return nil, err + } + acbw := &acBalancerWrapper{ + ccb: ccb, + ac: ac, + producers: make(map[balancer.ProducerBuilder]*refCountedProducer), + stateListener: opts.StateListener, + } + ac.acbw = acbw + return acbw, nil +} + +func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { + // The graceful switch balancer will never call this. + logger.Errorf("ccb RemoveSubConn(%v) called unexpectedly, sc") +} + +func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { + acbw, ok := sc.(*acBalancerWrapper) + if !ok { + return + } + acbw.UpdateAddresses(addrs) +} + +func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { + ccb.cc.mu.Lock() + defer ccb.cc.mu.Unlock() + + ccb.mu.Lock() + if ccb.closed { + ccb.mu.Unlock() + return + } + ccb.mu.Unlock() + // Update picker before updating state. 
Even though the ordering here does + // not matter, it can lead to multiple calls of Pick in the common start-up + // case where we wait for ready and then perform an RPC. If the picker is + // updated later, we could call the "connecting" picker when the state is + // updated, and then call the "ready" picker after the picker gets updated. + + // Note that there is no need to check if the balancer wrapper was closed, + // as we know the graceful switch LB policy will not call cc if it has been + // closed. + ccb.cc.pickerWrapper.updatePicker(s.Picker) + ccb.cc.csMgr.updateState(s.ConnectivityState) +} + +func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) { + ccb.cc.mu.RLock() + defer ccb.cc.mu.RUnlock() + + ccb.mu.Lock() + if ccb.closed { + ccb.mu.Unlock() + return + } + ccb.mu.Unlock() + ccb.cc.resolveNowLocked(o) +} + +func (ccb *ccBalancerWrapper) Target() string { + return ccb.cc.target +} + +// acBalancerWrapper is a wrapper on top of ac for balancers. +// It implements balancer.SubConn interface. +type acBalancerWrapper struct { + ac *addrConn // read-only + ccb *ccBalancerWrapper // read-only + stateListener func(balancer.SubConnState) + + mu sync.Mutex + producers map[balancer.ProducerBuilder]*refCountedProducer +} + +// updateState is invoked by grpc to push a subConn state update to the +// underlying balancer. +func (acbw *acBalancerWrapper) updateState(s connectivity.State, err error) { + acbw.ccb.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil || acbw.ccb.balancer == nil { + return + } + // Even though it is optional for balancers, gracefulswitch ensures + // opts.StateListener is set, so this cannot ever be nil. + // TODO: delete this comment when UpdateSubConnState is removed. + acbw.stateListener(balancer.SubConnState{ConnectivityState: s, ConnectionError: err}) + }) +} + +func (acbw *acBalancerWrapper) String() string { + return fmt.Sprintf("SubConn(id:%d)", acbw.ac.channelzID.Int()) +} + +func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { + acbw.ac.updateAddrs(addrs) +} + +func (acbw *acBalancerWrapper) Connect() { + go acbw.ac.connect() +} + +func (acbw *acBalancerWrapper) Shutdown() { + acbw.ccb.cc.removeAddrConn(acbw.ac, errConnDrain) +} + +// NewStream begins a streaming RPC on the addrConn. If the addrConn is not +// ready, blocks until it is or ctx expires. Returns an error when the context +// expires or the addrConn is shut down. +func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { + transport, err := acbw.ac.getTransport(ctx) + if err != nil { + return nil, err + } + return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...) +} + +// Invoke performs a unary RPC. If the addrConn is not ready, returns +// errSubConnNotReady. +func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args any, reply any, opts ...CallOption) error { + cs, err := acbw.NewStream(ctx, unaryStreamDesc, method, opts...) 
+ if err != nil { + return err + } + if err := cs.SendMsg(args); err != nil { + return err + } + return cs.RecvMsg(reply) +} + +type refCountedProducer struct { + producer balancer.Producer + refs int // number of current refs to the producer + close func() // underlying producer's close function +} + +func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (balancer.Producer, func()) { + acbw.mu.Lock() + defer acbw.mu.Unlock() + + // Look up existing producer from this builder. + pData := acbw.producers[pb] + if pData == nil { + // Not found; create a new one and add it to the producers map. + p, close := pb.Build(acbw) + pData = &refCountedProducer{producer: p, close: close} + acbw.producers[pb] = pData + } + // Account for this new reference. + pData.refs++ + + // Return a cleanup function wrapped in a OnceFunc to remove this reference + // and delete the refCountedProducer from the map if the total reference + // count goes to zero. + unref := func() { + acbw.mu.Lock() + pData.refs-- + if pData.refs == 0 { + defer pData.close() // Run outside the acbw mutex + delete(acbw.producers, pb) + } + acbw.mu.Unlock() + } + return pData.producer, grpcsync.OnceFunc(unref) +} diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go new file mode 100644 index 00000000..e9e97d45 --- /dev/null +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -0,0 +1,1183 @@ +// Copyright 2018 The gRPC Authors +// All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/binlog/v1/binarylog.proto + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.31.0 +// protoc v4.22.0 +// source: grpc/binlog/v1/binarylog.proto + +package grpc_binarylog_v1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Enumerates the type of event +// Note the terminology is different from the RPC semantics +// definition, but the same meaning is expressed here. 
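For orientation, the generated types defined below (GrpcLogEntry, its Payload oneof, ClientHeader, and the event-type and logger enums) can be used roughly as in this minimal sketch, which is not part of the vendored file; the call id, method name, and authority are made-up values. It builds the first client-side event of a call, a client-header entry, and marshals it.

    package main

    import (
    	"log"
    	"time"

    	binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
    	"google.golang.org/protobuf/proto"
    	"google.golang.org/protobuf/types/known/durationpb"
    	"google.golang.org/protobuf/types/known/timestamppb"
    )

    func main() {
    	// First client-side event of a call: EVENT_TYPE_CLIENT_HEADER.
    	entry := &binlogpb.GrpcLogEntry{
    		Timestamp:            timestamppb.Now(),
    		CallId:               1, // hypothetical; must be non-zero and shared by all entries of this call
    		SequenceIdWithinCall: 1, // the first entry of a call starts at 1
    		Type:                 binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER,
    		Logger:               binlogpb.GrpcLogEntry_LOGGER_CLIENT,
    		Payload: &binlogpb.GrpcLogEntry_ClientHeader{
    			ClientHeader: &binlogpb.ClientHeader{
    				MethodName: "/my.pkg.MyService/MyMethod", // hypothetical method path
    				Authority:  "example.com:443",            // hypothetical authority
    				Timeout:    durationpb.New(5 * time.Second),
    			},
    		},
    	}
    	b, err := proto.Marshal(entry)
    	if err != nil {
    		log.Fatal(err)
    	}
    	log.Printf("encoded GrpcLogEntry: %d bytes", len(b))
    }

Each event carries exactly one payload variant from the oneof, and all entries belonging to the same call share the same non-zero call_id.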
+type GrpcLogEntry_EventType int32 + +const ( + GrpcLogEntry_EVENT_TYPE_UNKNOWN GrpcLogEntry_EventType = 0 + // Header sent from client to server + GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER GrpcLogEntry_EventType = 1 + // Header sent from server to client + GrpcLogEntry_EVENT_TYPE_SERVER_HEADER GrpcLogEntry_EventType = 2 + // Message sent from client to server + GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE GrpcLogEntry_EventType = 3 + // Message sent from server to client + GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE GrpcLogEntry_EventType = 4 + // A signal that client is done sending + GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE GrpcLogEntry_EventType = 5 + // Trailer indicates the end of the RPC. + // On client side, this event means a trailer was either received + // from the network or the gRPC library locally generated a status + // to inform the application about a failure. + // On server side, this event means the server application requested + // to send a trailer. Note: EVENT_TYPE_CANCEL may still arrive after + // this due to races on server side. + GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER GrpcLogEntry_EventType = 6 + // A signal that the RPC is cancelled. On client side, this + // indicates the client application requests a cancellation. + // On server side, this indicates that cancellation was detected. + // Note: This marks the end of the RPC. Events may arrive after + // this due to races. For example, on client side a trailer + // may arrive even though the application requested to cancel the RPC. + GrpcLogEntry_EVENT_TYPE_CANCEL GrpcLogEntry_EventType = 7 +) + +// Enum value maps for GrpcLogEntry_EventType. +var ( + GrpcLogEntry_EventType_name = map[int32]string{ + 0: "EVENT_TYPE_UNKNOWN", + 1: "EVENT_TYPE_CLIENT_HEADER", + 2: "EVENT_TYPE_SERVER_HEADER", + 3: "EVENT_TYPE_CLIENT_MESSAGE", + 4: "EVENT_TYPE_SERVER_MESSAGE", + 5: "EVENT_TYPE_CLIENT_HALF_CLOSE", + 6: "EVENT_TYPE_SERVER_TRAILER", + 7: "EVENT_TYPE_CANCEL", + } + GrpcLogEntry_EventType_value = map[string]int32{ + "EVENT_TYPE_UNKNOWN": 0, + "EVENT_TYPE_CLIENT_HEADER": 1, + "EVENT_TYPE_SERVER_HEADER": 2, + "EVENT_TYPE_CLIENT_MESSAGE": 3, + "EVENT_TYPE_SERVER_MESSAGE": 4, + "EVENT_TYPE_CLIENT_HALF_CLOSE": 5, + "EVENT_TYPE_SERVER_TRAILER": 6, + "EVENT_TYPE_CANCEL": 7, + } +) + +func (x GrpcLogEntry_EventType) Enum() *GrpcLogEntry_EventType { + p := new(GrpcLogEntry_EventType) + *p = x + return p +} + +func (x GrpcLogEntry_EventType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (GrpcLogEntry_EventType) Descriptor() protoreflect.EnumDescriptor { + return file_grpc_binlog_v1_binarylog_proto_enumTypes[0].Descriptor() +} + +func (GrpcLogEntry_EventType) Type() protoreflect.EnumType { + return &file_grpc_binlog_v1_binarylog_proto_enumTypes[0] +} + +func (x GrpcLogEntry_EventType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use GrpcLogEntry_EventType.Descriptor instead. +func (GrpcLogEntry_EventType) EnumDescriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{0, 0} +} + +// Enumerates the entity that generates the log entry +type GrpcLogEntry_Logger int32 + +const ( + GrpcLogEntry_LOGGER_UNKNOWN GrpcLogEntry_Logger = 0 + GrpcLogEntry_LOGGER_CLIENT GrpcLogEntry_Logger = 1 + GrpcLogEntry_LOGGER_SERVER GrpcLogEntry_Logger = 2 +) + +// Enum value maps for GrpcLogEntry_Logger. 
+var ( + GrpcLogEntry_Logger_name = map[int32]string{ + 0: "LOGGER_UNKNOWN", + 1: "LOGGER_CLIENT", + 2: "LOGGER_SERVER", + } + GrpcLogEntry_Logger_value = map[string]int32{ + "LOGGER_UNKNOWN": 0, + "LOGGER_CLIENT": 1, + "LOGGER_SERVER": 2, + } +) + +func (x GrpcLogEntry_Logger) Enum() *GrpcLogEntry_Logger { + p := new(GrpcLogEntry_Logger) + *p = x + return p +} + +func (x GrpcLogEntry_Logger) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (GrpcLogEntry_Logger) Descriptor() protoreflect.EnumDescriptor { + return file_grpc_binlog_v1_binarylog_proto_enumTypes[1].Descriptor() +} + +func (GrpcLogEntry_Logger) Type() protoreflect.EnumType { + return &file_grpc_binlog_v1_binarylog_proto_enumTypes[1] +} + +func (x GrpcLogEntry_Logger) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use GrpcLogEntry_Logger.Descriptor instead. +func (GrpcLogEntry_Logger) EnumDescriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{0, 1} +} + +type Address_Type int32 + +const ( + Address_TYPE_UNKNOWN Address_Type = 0 + // address is in 1.2.3.4 form + Address_TYPE_IPV4 Address_Type = 1 + // address is in IPv6 canonical form (RFC5952 section 4) + // The scope is NOT included in the address string. + Address_TYPE_IPV6 Address_Type = 2 + // address is UDS string + Address_TYPE_UNIX Address_Type = 3 +) + +// Enum value maps for Address_Type. +var ( + Address_Type_name = map[int32]string{ + 0: "TYPE_UNKNOWN", + 1: "TYPE_IPV4", + 2: "TYPE_IPV6", + 3: "TYPE_UNIX", + } + Address_Type_value = map[string]int32{ + "TYPE_UNKNOWN": 0, + "TYPE_IPV4": 1, + "TYPE_IPV6": 2, + "TYPE_UNIX": 3, + } +) + +func (x Address_Type) Enum() *Address_Type { + p := new(Address_Type) + *p = x + return p +} + +func (x Address_Type) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Address_Type) Descriptor() protoreflect.EnumDescriptor { + return file_grpc_binlog_v1_binarylog_proto_enumTypes[2].Descriptor() +} + +func (Address_Type) Type() protoreflect.EnumType { + return &file_grpc_binlog_v1_binarylog_proto_enumTypes[2] +} + +func (x Address_Type) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Address_Type.Descriptor instead. +func (Address_Type) EnumDescriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{7, 0} +} + +// Log entry we store in binary logs +type GrpcLogEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The timestamp of the binary log message + Timestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // Uniquely identifies a call. The value must not be 0 in order to disambiguate + // from an unset value. + // Each call may have several log entries, they will all have the same call_id. + // Nothing is guaranteed about their value other than they are unique across + // different RPCs in the same gRPC process. + CallId uint64 `protobuf:"varint,2,opt,name=call_id,json=callId,proto3" json:"call_id,omitempty"` + // The entry sequence id for this call. The first GrpcLogEntry has a + // value of 1, to disambiguate from an unset value. The purpose of + // this field is to detect missing entries in environments where + // durability or ordering is not guaranteed. 
+ SequenceIdWithinCall uint64 `protobuf:"varint,3,opt,name=sequence_id_within_call,json=sequenceIdWithinCall,proto3" json:"sequence_id_within_call,omitempty"` + Type GrpcLogEntry_EventType `protobuf:"varint,4,opt,name=type,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_EventType" json:"type,omitempty"` + Logger GrpcLogEntry_Logger `protobuf:"varint,5,opt,name=logger,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_Logger" json:"logger,omitempty"` // One of the above Logger enum + // The logger uses one of the following fields to record the payload, + // according to the type of the log entry. + // + // Types that are assignable to Payload: + // + // *GrpcLogEntry_ClientHeader + // *GrpcLogEntry_ServerHeader + // *GrpcLogEntry_Message + // *GrpcLogEntry_Trailer + Payload isGrpcLogEntry_Payload `protobuf_oneof:"payload"` + // true if payload does not represent the full message or metadata. + PayloadTruncated bool `protobuf:"varint,10,opt,name=payload_truncated,json=payloadTruncated,proto3" json:"payload_truncated,omitempty"` + // Peer address information, will only be recorded on the first + // incoming event. On client side, peer is logged on + // EVENT_TYPE_SERVER_HEADER normally or EVENT_TYPE_SERVER_TRAILER in + // the case of trailers-only. On server side, peer is always + // logged on EVENT_TYPE_CLIENT_HEADER. + Peer *Address `protobuf:"bytes,11,opt,name=peer,proto3" json:"peer,omitempty"` +} + +func (x *GrpcLogEntry) Reset() { + *x = GrpcLogEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GrpcLogEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrpcLogEntry) ProtoMessage() {} + +func (x *GrpcLogEntry) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GrpcLogEntry.ProtoReflect.Descriptor instead. 
+func (*GrpcLogEntry) Descriptor() ([]byte, []int) {
+ return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *GrpcLogEntry) GetTimestamp() *timestamppb.Timestamp {
+ if x != nil {
+ return x.Timestamp
+ }
+ return nil
+}
+
+func (x *GrpcLogEntry) GetCallId() uint64 {
+ if x != nil {
+ return x.CallId
+ }
+ return 0
+}
+
+func (x *GrpcLogEntry) GetSequenceIdWithinCall() uint64 {
+ if x != nil {
+ return x.SequenceIdWithinCall
+ }
+ return 0
+}
+
+func (x *GrpcLogEntry) GetType() GrpcLogEntry_EventType {
+ if x != nil {
+ return x.Type
+ }
+ return GrpcLogEntry_EVENT_TYPE_UNKNOWN
+}
+
+func (x *GrpcLogEntry) GetLogger() GrpcLogEntry_Logger {
+ if x != nil {
+ return x.Logger
+ }
+ return GrpcLogEntry_LOGGER_UNKNOWN
+}
+
+func (m *GrpcLogEntry) GetPayload() isGrpcLogEntry_Payload {
+ if m != nil {
+ return m.Payload
+ }
+ return nil
+}
+
+func (x *GrpcLogEntry) GetClientHeader() *ClientHeader {
+ if x, ok := x.GetPayload().(*GrpcLogEntry_ClientHeader); ok {
+ return x.ClientHeader
+ }
+ return nil
+}
+
+func (x *GrpcLogEntry) GetServerHeader() *ServerHeader {
+ if x, ok := x.GetPayload().(*GrpcLogEntry_ServerHeader); ok {
+ return x.ServerHeader
+ }
+ return nil
+}
+
+func (x *GrpcLogEntry) GetMessage() *Message {
+ if x, ok := x.GetPayload().(*GrpcLogEntry_Message); ok {
+ return x.Message
+ }
+ return nil
+}
+
+func (x *GrpcLogEntry) GetTrailer() *Trailer {
+ if x, ok := x.GetPayload().(*GrpcLogEntry_Trailer); ok {
+ return x.Trailer
+ }
+ return nil
+}
+
+func (x *GrpcLogEntry) GetPayloadTruncated() bool {
+ if x != nil {
+ return x.PayloadTruncated
+ }
+ return false
+}
+
+func (x *GrpcLogEntry) GetPeer() *Address {
+ if x != nil {
+ return x.Peer
+ }
+ return nil
+}
+
+type isGrpcLogEntry_Payload interface {
+ isGrpcLogEntry_Payload()
+}
+
+type GrpcLogEntry_ClientHeader struct {
+ ClientHeader *ClientHeader `protobuf:"bytes,6,opt,name=client_header,json=clientHeader,proto3,oneof"`
+}
+
+type GrpcLogEntry_ServerHeader struct {
+ ServerHeader *ServerHeader `protobuf:"bytes,7,opt,name=server_header,json=serverHeader,proto3,oneof"`
+}
+
+type GrpcLogEntry_Message struct {
+ // Used by EVENT_TYPE_CLIENT_MESSAGE, EVENT_TYPE_SERVER_MESSAGE
+ Message *Message `protobuf:"bytes,8,opt,name=message,proto3,oneof"`
+}
+
+type GrpcLogEntry_Trailer struct {
+ Trailer *Trailer `protobuf:"bytes,9,opt,name=trailer,proto3,oneof"`
+}
+
+func (*GrpcLogEntry_ClientHeader) isGrpcLogEntry_Payload() {}
+
+func (*GrpcLogEntry_ServerHeader) isGrpcLogEntry_Payload() {}
+
+func (*GrpcLogEntry_Message) isGrpcLogEntry_Payload() {}
+
+func (*GrpcLogEntry_Trailer) isGrpcLogEntry_Payload() {}
+
+type ClientHeader struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // This contains only the metadata from the application.
+ Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
+ // The name of the RPC method, which looks something like:
+ // /<service>/<method>
+ // Note the leading "/" character.
+ MethodName string `protobuf:"bytes,2,opt,name=method_name,json=methodName,proto3" json:"method_name,omitempty"`
+ // A single process may be used to run multiple virtual
+ // servers with different identities.
+ // The authority is the name of such a server identity.
+ // It is typically a portion of the URI in the form of
+ // <host> or <host>:<port> .
+ Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"` + // the RPC timeout + Timeout *durationpb.Duration `protobuf:"bytes,4,opt,name=timeout,proto3" json:"timeout,omitempty"` +} + +func (x *ClientHeader) Reset() { + *x = ClientHeader{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClientHeader) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClientHeader) ProtoMessage() {} + +func (x *ClientHeader) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClientHeader.ProtoReflect.Descriptor instead. +func (*ClientHeader) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{1} +} + +func (x *ClientHeader) GetMetadata() *Metadata { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *ClientHeader) GetMethodName() string { + if x != nil { + return x.MethodName + } + return "" +} + +func (x *ClientHeader) GetAuthority() string { + if x != nil { + return x.Authority + } + return "" +} + +func (x *ClientHeader) GetTimeout() *durationpb.Duration { + if x != nil { + return x.Timeout + } + return nil +} + +type ServerHeader struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This contains only the metadata from the application. + Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` +} + +func (x *ServerHeader) Reset() { + *x = ServerHeader{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerHeader) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerHeader) ProtoMessage() {} + +func (x *ServerHeader) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerHeader.ProtoReflect.Descriptor instead. +func (*ServerHeader) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{2} +} + +func (x *ServerHeader) GetMetadata() *Metadata { + if x != nil { + return x.Metadata + } + return nil +} + +type Trailer struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This contains only the metadata from the application. + Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` + // The gRPC status code. + StatusCode uint32 `protobuf:"varint,2,opt,name=status_code,json=statusCode,proto3" json:"status_code,omitempty"` + // An original status message before any transport specific + // encoding. + StatusMessage string `protobuf:"bytes,3,opt,name=status_message,json=statusMessage,proto3" json:"status_message,omitempty"` + // The value of the 'grpc-status-details-bin' metadata key. 
If + // present, this is always an encoded 'google.rpc.Status' message. + StatusDetails []byte `protobuf:"bytes,4,opt,name=status_details,json=statusDetails,proto3" json:"status_details,omitempty"` +} + +func (x *Trailer) Reset() { + *x = Trailer{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Trailer) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Trailer) ProtoMessage() {} + +func (x *Trailer) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Trailer.ProtoReflect.Descriptor instead. +func (*Trailer) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{3} +} + +func (x *Trailer) GetMetadata() *Metadata { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *Trailer) GetStatusCode() uint32 { + if x != nil { + return x.StatusCode + } + return 0 +} + +func (x *Trailer) GetStatusMessage() string { + if x != nil { + return x.StatusMessage + } + return "" +} + +func (x *Trailer) GetStatusDetails() []byte { + if x != nil { + return x.StatusDetails + } + return nil +} + +// Message payload, used by CLIENT_MESSAGE and SERVER_MESSAGE +type Message struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Length of the message. It may not be the same as the length of the + // data field, as the logging payload can be truncated or omitted. + Length uint32 `protobuf:"varint,1,opt,name=length,proto3" json:"length,omitempty"` + // May be truncated or omitted. + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` +} + +func (x *Message) Reset() { + *x = Message{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Message) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Message) ProtoMessage() {} + +func (x *Message) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Message.ProtoReflect.Descriptor instead. +func (*Message) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{4} +} + +func (x *Message) GetLength() uint32 { + if x != nil { + return x.Length + } + return 0 +} + +func (x *Message) GetData() []byte { + if x != nil { + return x.Data + } + return nil +} + +// A list of metadata pairs, used in the payload of client header, +// server header, and server trailer. +// Implementations may omit some entries to honor the header limits +// of GRPC_BINARY_LOG_CONFIG. +// +// Header keys added by gRPC are omitted. 
To be more specific, +// implementations will not log the following entries, and this is +// not to be treated as a truncation: +// - entries handled by grpc that are not user visible, such as those +// that begin with 'grpc-' (with exception of grpc-trace-bin) +// or keys like 'lb-token' +// - transport specific entries, including but not limited to: +// ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc +// - entries added for call credentials +// +// Implementations must always log grpc-trace-bin if it is present. +// Practically speaking it will only be visible on server side because +// grpc-trace-bin is managed by low level client side mechanisms +// inaccessible from the application level. On server side, the +// header is just a normal metadata key. +// The pair will not count towards the size limit. +type Metadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Entry []*MetadataEntry `protobuf:"bytes,1,rep,name=entry,proto3" json:"entry,omitempty"` +} + +func (x *Metadata) Reset() { + *x = Metadata{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Metadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Metadata) ProtoMessage() {} + +func (x *Metadata) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Metadata.ProtoReflect.Descriptor instead. +func (*Metadata) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{5} +} + +func (x *Metadata) GetEntry() []*MetadataEntry { + if x != nil { + return x.Entry + } + return nil +} + +// A metadata key value pair +type MetadataEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *MetadataEntry) Reset() { + *x = MetadataEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MetadataEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MetadataEntry) ProtoMessage() {} + +func (x *MetadataEntry) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MetadataEntry.ProtoReflect.Descriptor instead. 
+func (*MetadataEntry) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{6} +} + +func (x *MetadataEntry) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *MetadataEntry) GetValue() []byte { + if x != nil { + return x.Value + } + return nil +} + +// Address information +type Address struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type Address_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.binarylog.v1.Address_Type" json:"type,omitempty"` + Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` + // only for TYPE_IPV4 and TYPE_IPV6 + IpPort uint32 `protobuf:"varint,3,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"` +} + +func (x *Address) Reset() { + *x = Address{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Address) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Address) ProtoMessage() {} + +func (x *Address) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Address.ProtoReflect.Descriptor instead. +func (*Address) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{7} +} + +func (x *Address) GetType() Address_Type { + if x != nil { + return x.Type + } + return Address_TYPE_UNKNOWN +} + +func (x *Address) GetAddress() string { + if x != nil { + return x.Address + } + return "" +} + +func (x *Address) GetIpPort() uint32 { + if x != nil { + return x.IpPort + } + return 0 +} + +var File_grpc_binlog_v1_binarylog_proto protoreflect.FileDescriptor + +var file_grpc_binlog_v1_binarylog_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x31, + 0x2f, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x11, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, + 0x2e, 0x76, 0x31, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xbb, 0x07, 0x0a, 0x0c, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, + 0x17, 0x0a, 0x07, 0x63, 0x61, 0x6c, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x06, 0x63, 0x61, 0x6c, 0x6c, 0x49, 0x64, 0x12, 0x35, 0x0a, 0x17, 0x73, 0x65, 0x71, 0x75, + 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x5f, 0x77, 0x69, 0x74, 
0x68, 0x69, 0x6e, 0x5f, 0x63, + 0x61, 0x6c, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x14, 0x73, 0x65, 0x71, 0x75, 0x65, + 0x6e, 0x63, 0x65, 0x49, 0x64, 0x57, 0x69, 0x74, 0x68, 0x69, 0x6e, 0x43, 0x61, 0x6c, 0x6c, 0x12, + 0x3d, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, + 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x45, + 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x3e, + 0x0a, 0x06, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, + 0x76, 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, + 0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x52, 0x06, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x12, 0x46, + 0x0a, 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, + 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x46, 0x0a, 0x0d, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, + 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00, + 0x52, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x36, + 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, + 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x07, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, + 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, + 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x69, + 0x6c, 0x65, 0x72, 0x48, 0x00, 0x52, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x12, 0x2b, + 0x0a, 0x11, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x74, 0x72, 0x75, 0x6e, 0x63, 0x61, + 0x74, 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x70, 0x61, 0x79, 0x6c, 0x6f, + 0x61, 0x64, 0x54, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x04, 0x70, + 0x65, 0x65, 0x72, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x04, 0x70, 0x65, 0x65, 0x72, 0x22, 0xf5, 0x01, 0x0a, 0x09, + 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x45, 0x56, 0x45, + 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, + 0x00, 0x12, 0x1c, 0x0a, 0x18, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x01, 
0x12, + 0x1c, 0x0a, 0x18, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, + 0x52, 0x56, 0x45, 0x52, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x02, 0x12, 0x1d, 0x0a, + 0x19, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, + 0x4e, 0x54, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, 0x12, 0x1d, 0x0a, 0x19, + 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, + 0x52, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x04, 0x12, 0x20, 0x0a, 0x1c, 0x45, + 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, + 0x5f, 0x48, 0x41, 0x4c, 0x46, 0x5f, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x10, 0x05, 0x12, 0x1d, 0x0a, + 0x19, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, + 0x45, 0x52, 0x5f, 0x54, 0x52, 0x41, 0x49, 0x4c, 0x45, 0x52, 0x10, 0x06, 0x12, 0x15, 0x0a, 0x11, + 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x41, 0x4e, 0x43, 0x45, + 0x4c, 0x10, 0x07, 0x22, 0x42, 0x0a, 0x06, 0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x12, 0x12, 0x0a, + 0x0e, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, + 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x43, 0x4c, 0x49, 0x45, + 0x4e, 0x54, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x53, + 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, + 0x61, 0x64, 0x22, 0xbb, 0x01, 0x0a, 0x0c, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, + 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1f, 0x0a, 0x0b, + 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, + 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x33, 0x0a, 0x07, 0x74, + 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x22, 0x47, 0x0a, 0x0c, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, + 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, + 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0xb1, 0x01, 0x0a, 0x07, 0x54, 0x72, + 0x61, 0x69, 0x6c, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, + 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 
0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1f, + 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, + 0x25, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0x35, 0x0a, + 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, + 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, + 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, + 0x64, 0x61, 0x74, 0x61, 0x22, 0x42, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x12, 0x36, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x20, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, + 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x22, 0x37, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x22, 0xb8, 0x01, 0x0a, 0x07, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x33, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1f, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, + 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, + 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x17, 0x0a, 0x07, + 0x69, 0x70, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x69, + 0x70, 0x50, 0x6f, 0x72, 0x74, 0x22, 0x45, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, + 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, + 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x50, 0x56, 0x34, 0x10, 0x01, 0x12, 0x0d, + 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x50, 0x56, 0x36, 0x10, 0x02, 0x12, 0x0d, 0x0a, + 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x49, 0x58, 0x10, 0x03, 0x42, 0x5c, 0x0a, 0x14, + 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, + 0x67, 0x2e, 0x76, 0x31, 0x42, 0x0e, 0x42, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x4c, 0x6f, 0x67, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, + 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x62, + 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x62, 0x69, + 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 
0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_grpc_binlog_v1_binarylog_proto_rawDescOnce sync.Once + file_grpc_binlog_v1_binarylog_proto_rawDescData = file_grpc_binlog_v1_binarylog_proto_rawDesc +) + +func file_grpc_binlog_v1_binarylog_proto_rawDescGZIP() []byte { + file_grpc_binlog_v1_binarylog_proto_rawDescOnce.Do(func() { + file_grpc_binlog_v1_binarylog_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_binlog_v1_binarylog_proto_rawDescData) + }) + return file_grpc_binlog_v1_binarylog_proto_rawDescData +} + +var file_grpc_binlog_v1_binarylog_proto_enumTypes = make([]protoimpl.EnumInfo, 3) +var file_grpc_binlog_v1_binarylog_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_grpc_binlog_v1_binarylog_proto_goTypes = []interface{}{ + (GrpcLogEntry_EventType)(0), // 0: grpc.binarylog.v1.GrpcLogEntry.EventType + (GrpcLogEntry_Logger)(0), // 1: grpc.binarylog.v1.GrpcLogEntry.Logger + (Address_Type)(0), // 2: grpc.binarylog.v1.Address.Type + (*GrpcLogEntry)(nil), // 3: grpc.binarylog.v1.GrpcLogEntry + (*ClientHeader)(nil), // 4: grpc.binarylog.v1.ClientHeader + (*ServerHeader)(nil), // 5: grpc.binarylog.v1.ServerHeader + (*Trailer)(nil), // 6: grpc.binarylog.v1.Trailer + (*Message)(nil), // 7: grpc.binarylog.v1.Message + (*Metadata)(nil), // 8: grpc.binarylog.v1.Metadata + (*MetadataEntry)(nil), // 9: grpc.binarylog.v1.MetadataEntry + (*Address)(nil), // 10: grpc.binarylog.v1.Address + (*timestamppb.Timestamp)(nil), // 11: google.protobuf.Timestamp + (*durationpb.Duration)(nil), // 12: google.protobuf.Duration +} +var file_grpc_binlog_v1_binarylog_proto_depIdxs = []int32{ + 11, // 0: grpc.binarylog.v1.GrpcLogEntry.timestamp:type_name -> google.protobuf.Timestamp + 0, // 1: grpc.binarylog.v1.GrpcLogEntry.type:type_name -> grpc.binarylog.v1.GrpcLogEntry.EventType + 1, // 2: grpc.binarylog.v1.GrpcLogEntry.logger:type_name -> grpc.binarylog.v1.GrpcLogEntry.Logger + 4, // 3: grpc.binarylog.v1.GrpcLogEntry.client_header:type_name -> grpc.binarylog.v1.ClientHeader + 5, // 4: grpc.binarylog.v1.GrpcLogEntry.server_header:type_name -> grpc.binarylog.v1.ServerHeader + 7, // 5: grpc.binarylog.v1.GrpcLogEntry.message:type_name -> grpc.binarylog.v1.Message + 6, // 6: grpc.binarylog.v1.GrpcLogEntry.trailer:type_name -> grpc.binarylog.v1.Trailer + 10, // 7: grpc.binarylog.v1.GrpcLogEntry.peer:type_name -> grpc.binarylog.v1.Address + 8, // 8: grpc.binarylog.v1.ClientHeader.metadata:type_name -> grpc.binarylog.v1.Metadata + 12, // 9: grpc.binarylog.v1.ClientHeader.timeout:type_name -> google.protobuf.Duration + 8, // 10: grpc.binarylog.v1.ServerHeader.metadata:type_name -> grpc.binarylog.v1.Metadata + 8, // 11: grpc.binarylog.v1.Trailer.metadata:type_name -> grpc.binarylog.v1.Metadata + 9, // 12: grpc.binarylog.v1.Metadata.entry:type_name -> grpc.binarylog.v1.MetadataEntry + 2, // 13: grpc.binarylog.v1.Address.type:type_name -> grpc.binarylog.v1.Address.Type + 14, // [14:14] is the sub-list for method output_type + 14, // [14:14] is the sub-list for method input_type + 14, // [14:14] is the sub-list for extension type_name + 14, // [14:14] is the sub-list for extension extendee + 0, // [0:14] is the sub-list for field type_name +} + +func init() { file_grpc_binlog_v1_binarylog_proto_init() } +func file_grpc_binlog_v1_binarylog_proto_init() { + if File_grpc_binlog_v1_binarylog_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_grpc_binlog_v1_binarylog_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*GrpcLogEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClientHeader); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerHeader); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Trailer); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Message); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Metadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MetadataEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Address); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*GrpcLogEntry_ClientHeader)(nil), + (*GrpcLogEntry_ServerHeader)(nil), + (*GrpcLogEntry_Message)(nil), + (*GrpcLogEntry_Trailer)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_grpc_binlog_v1_binarylog_proto_rawDesc, + NumEnums: 3, + NumMessages: 8, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_grpc_binlog_v1_binarylog_proto_goTypes, + DependencyIndexes: file_grpc_binlog_v1_binarylog_proto_depIdxs, + EnumInfos: file_grpc_binlog_v1_binarylog_proto_enumTypes, + MessageInfos: file_grpc_binlog_v1_binarylog_proto_msgTypes, + }.Build() + File_grpc_binlog_v1_binarylog_proto = out.File + file_grpc_binlog_v1_binarylog_proto_rawDesc = nil + file_grpc_binlog_v1_binarylog_proto_goTypes = nil + file_grpc_binlog_v1_binarylog_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go new file mode 100644 index 00000000..788c89c1 --- /dev/null +++ b/vendor/google.golang.org/grpc/call.go @@ -0,0 +1,74 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" +) + +// Invoke sends the RPC request on the wire and returns after response is +// received. This is typically called by generated code. +// +// All errors returned by Invoke are compatible with the status package. +func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply any, opts ...CallOption) error { + // allow interceptor to see all applicable call options, which means those + // configured as defaults from dial option as well as per-call options + opts = combine(cc.dopts.callOptions, opts) + + if cc.dopts.unaryInt != nil { + return cc.dopts.unaryInt(ctx, method, args, reply, cc, invoke, opts...) + } + return invoke(ctx, method, args, reply, cc, opts...) +} + +func combine(o1 []CallOption, o2 []CallOption) []CallOption { + // we don't use append because o1 could have extra capacity whose + // elements would be overwritten, which could cause inadvertent + // sharing (and race conditions) between concurrent calls + if len(o1) == 0 { + return o2 + } else if len(o2) == 0 { + return o1 + } + ret := make([]CallOption, len(o1)+len(o2)) + copy(ret, o1) + copy(ret[len(o1):], o2) + return ret +} + +// Invoke sends the RPC request on the wire and returns after response is +// received. This is typically called by generated code. +// +// DEPRECATED: Use ClientConn.Invoke instead. +func Invoke(ctx context.Context, method string, args, reply any, cc *ClientConn, opts ...CallOption) error { + return cc.Invoke(ctx, method, args, reply, opts...) +} + +var unaryStreamDesc = &StreamDesc{ServerStreams: false, ClientStreams: false} + +func invoke(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error { + cs, err := newClientStream(ctx, unaryStreamDesc, cc, method, opts...) + if err != nil { + return err + } + if err := cs.SendMsg(req); err != nil { + return err + } + return cs.RecvMsg(reply) +} diff --git a/vendor/google.golang.org/grpc/channelz/channelz.go b/vendor/google.golang.org/grpc/channelz/channelz.go new file mode 100644 index 00000000..32b7fa57 --- /dev/null +++ b/vendor/google.golang.org/grpc/channelz/channelz.go @@ -0,0 +1,36 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package channelz exports internals of the channelz implementation as required +// by other gRPC packages. +// +// The implementation of the channelz spec as defined in +// https://github.com/grpc/proposal/blob/master/A14-channelz.md, is provided by +// the `internal/channelz` package. 
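As a quick illustration of the Invoke helper in call.go above, which generated stubs call for unary RPCs, a hand-written unary call might look like the following sketch; the server address, method path, and use of emptypb messages here are hypothetical.

    package main

    import (
    	"context"
    	"log"
    	"time"

    	"google.golang.org/grpc"
    	"google.golang.org/grpc/credentials/insecure"
    	"google.golang.org/protobuf/types/known/emptypb"
    )

    func main() {
    	conn, err := grpc.Dial("localhost:50051", grpc.WithTransportCredentials(insecure.NewCredentials()))
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer conn.Close()

    	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
    	defer cancel()

    	// Invoke sends one request and waits for one response, exactly as a
    	// generated client stub would for a unary method.
    	in, out := &emptypb.Empty{}, &emptypb.Empty{}
    	if err := conn.Invoke(ctx, "/my.package.MyService/Ping", in, out); err != nil {
    		log.Fatalf("unary RPC failed: %v", err)
    	}
    }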
+// +// # Experimental +// +// Notice: All APIs in this package are experimental and may be removed in a +// later release. +package channelz + +import "google.golang.org/grpc/internal/channelz" + +// Identifier is an opaque identifier which uniquely identifies an entity in the +// channelz database. +type Identifier = channelz.Identifier diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go new file mode 100644 index 00000000..f6e815e6 --- /dev/null +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -0,0 +1,1876 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "errors" + "fmt" + "math" + "net/url" + "strings" + "sync" + "sync/atomic" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/idle" + "google.golang.org/grpc/internal/pretty" + iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" + "google.golang.org/grpc/status" + + _ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin. + _ "google.golang.org/grpc/internal/resolver/passthrough" // To register passthrough resolver. + _ "google.golang.org/grpc/internal/resolver/unix" // To register unix resolver. + _ "google.golang.org/grpc/resolver/dns" // To register dns resolver. +) + +const ( + // minimum time to give a connection to complete + minConnectTimeout = 20 * time.Second +) + +var ( + // ErrClientConnClosing indicates that the operation is illegal because + // the ClientConn is closing. + // + // Deprecated: this error should not be relied upon by users; use the status + // code of Canceled instead. + ErrClientConnClosing = status.Error(codes.Canceled, "grpc: the client connection is closing") + // errConnDrain indicates that the connection starts to be drained and does not accept any new RPCs. + errConnDrain = errors.New("grpc: the connection is drained") + // errConnClosing indicates that the connection is closing. + errConnClosing = errors.New("grpc: the connection is closing") + // errConnIdling indicates the the connection is being closed as the channel + // is moving to an idle mode due to inactivity. + errConnIdling = errors.New("grpc: the connection is closing due to channel idleness") + // invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default + // service config. 
+ invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid" +) + +// The following errors are returned from Dial and DialContext +var ( + // errNoTransportSecurity indicates that there is no transport security + // being set for ClientConn. Users should either set one or explicitly + // call WithInsecure DialOption to disable security. + errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithTransportCredentials(insecure.NewCredentials()) explicitly or set credentials)") + // errTransportCredsAndBundle indicates that creds bundle is used together + // with other individual Transport Credentials. + errTransportCredsAndBundle = errors.New("grpc: credentials.Bundle may not be used with individual TransportCredentials") + // errNoTransportCredsInBundle indicated that the configured creds bundle + // returned a transport credentials which was nil. + errNoTransportCredsInBundle = errors.New("grpc: credentials.Bundle must return non-nil transport credentials") + // errTransportCredentialsMissing indicates that users want to transmit + // security information (e.g., OAuth2 token) which requires secure + // connection on an insecure connection. + errTransportCredentialsMissing = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportCredentials() to set)") +) + +const ( + defaultClientMaxReceiveMessageSize = 1024 * 1024 * 4 + defaultClientMaxSendMessageSize = math.MaxInt32 + // http2IOBufSize specifies the buffer size for sending frames. + defaultWriteBufSize = 32 * 1024 + defaultReadBufSize = 32 * 1024 +) + +// Dial creates a client connection to the given target. +func Dial(target string, opts ...DialOption) (*ClientConn, error) { + return DialContext(context.Background(), target, opts...) +} + +type defaultConfigSelector struct { + sc *ServiceConfig +} + +func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*iresolver.RPCConfig, error) { + return &iresolver.RPCConfig{ + Context: rpcInfo.Context, + MethodConfig: getMethodConfig(dcs.sc, rpcInfo.Method), + }, nil +} + +// newClient returns a new client in idle mode. +func newClient(target string, opts ...DialOption) (conn *ClientConn, err error) { + cc := &ClientConn{ + target: target, + conns: make(map[*addrConn]struct{}), + dopts: defaultDialOptions(), + czData: new(channelzData), + } + + cc.retryThrottler.Store((*retryThrottler)(nil)) + cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) + cc.ctx, cc.cancel = context.WithCancel(context.Background()) + + // Apply dial options. + disableGlobalOpts := false + for _, opt := range opts { + if _, ok := opt.(*disableGlobalDialOptions); ok { + disableGlobalOpts = true + break + } + } + + if !disableGlobalOpts { + for _, opt := range globalDialOptions { + opt.apply(&cc.dopts) + } + } + + for _, opt := range opts { + opt.apply(&cc.dopts) + } + chainUnaryClientInterceptors(cc) + chainStreamClientInterceptors(cc) + + if err := cc.validateTransportCredentials(); err != nil { + return nil, err + } + + if cc.dopts.defaultServiceConfigRawJSON != nil { + scpr := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON) + if scpr.Err != nil { + return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, scpr.Err) + } + cc.dopts.defaultServiceConfig, _ = scpr.Config.(*ServiceConfig) + } + cc.mkp = cc.dopts.copts.KeepaliveParams + + // Register ClientConn with channelz. 
+ cc.channelzRegistration(target) + + // TODO: Ideally it should be impossible to error from this function after + // channelz registration. This will require removing some channelz logs + // from the following functions that can error. Errors can be returned to + // the user, and successful logs can be emitted here, after the checks have + // passed and channelz is subsequently registered. + + // Determine the resolver to use. + if err := cc.parseTargetAndFindResolver(); err != nil { + channelz.RemoveEntry(cc.channelzID) + return nil, err + } + if err = cc.determineAuthority(); err != nil { + channelz.RemoveEntry(cc.channelzID) + return nil, err + } + + cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelzID) + cc.pickerWrapper = newPickerWrapper(cc.dopts.copts.StatsHandlers) + + cc.initIdleStateLocked() // Safe to call without the lock, since nothing else has a reference to cc. + cc.idlenessMgr = idle.NewManager((*idler)(cc), cc.dopts.idleTimeout) + return cc, nil +} + +// DialContext creates a client connection to the given target. By default, it's +// a non-blocking dial (the function won't wait for connections to be +// established, and connecting happens in the background). To make it a blocking +// dial, use WithBlock() dial option. +// +// In the non-blocking case, the ctx does not act against the connection. It +// only controls the setup steps. +// +// In the blocking case, ctx can be used to cancel or expire the pending +// connection. Once this function returns, the cancellation and expiration of +// ctx will be noop. Users should call ClientConn.Close to terminate all the +// pending operations after this function returns. +// +// The target name syntax is defined in +// https://github.com/grpc/grpc/blob/master/doc/naming.md. +// e.g. to use dns resolver, a "dns:///" prefix should be applied to the target. +func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { + cc, err := newClient(target, opts...) + if err != nil { + return nil, err + } + + // We start the channel off in idle mode, but kick it out of idle now, + // instead of waiting for the first RPC. Other gRPC implementations do wait + // for the first RPC to kick the channel out of idle. But doing so would be + // a major behavior change for our users who are used to seeing the channel + // active after Dial. + // + // Taking this approach of kicking it out of idle at the end of this method + // allows us to share the code between channel creation and exiting idle + // mode. This will also make it easy for us to switch to starting the + // channel off in idle, i.e. by making newClient exported. + + defer func() { + if err != nil { + cc.Close() + } + }() + + // This creates the name resolver, load balancer, etc. + if err := cc.idlenessMgr.ExitIdleMode(); err != nil { + return nil, err + } + + // Return now for non-blocking dials. + if !cc.dopts.block { + return cc, nil + } + + if cc.dopts.timeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, cc.dopts.timeout) + defer cancel() + } + defer func() { + select { + case <-ctx.Done(): + switch { + case ctx.Err() == err: + conn = nil + case err == nil || !cc.dopts.returnLastError: + conn, err = nil, ctx.Err() + default: + conn, err = nil, fmt.Errorf("%v: %v", ctx.Err(), err) + } + default: + } + }() + + // A blocking dial blocks until the clientConn is ready. 
+ for { + s := cc.GetState() + if s == connectivity.Idle { + cc.Connect() + } + if s == connectivity.Ready { + return cc, nil + } else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure { + if err = cc.connectionError(); err != nil { + terr, ok := err.(interface { + Temporary() bool + }) + if ok && !terr.Temporary() { + return nil, err + } + } + } + if !cc.WaitForStateChange(ctx, s) { + // ctx got timeout or canceled. + if err = cc.connectionError(); err != nil && cc.dopts.returnLastError { + return nil, err + } + return nil, ctx.Err() + } + } +} + +// addTraceEvent is a helper method to add a trace event on the channel. If the +// channel is a nested one, the same event is also added on the parent channel. +func (cc *ClientConn) addTraceEvent(msg string) { + ted := &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Channel %s", msg), + Severity: channelz.CtInfo, + } + if cc.dopts.channelzParentID != nil { + ted.Parent = &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Nested channel(id:%d) %s", cc.channelzID.Int(), msg), + Severity: channelz.CtInfo, + } + } + channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) +} + +type idler ClientConn + +func (i *idler) EnterIdleMode() { + (*ClientConn)(i).enterIdleMode() +} + +func (i *idler) ExitIdleMode() error { + return (*ClientConn)(i).exitIdleMode() +} + +// exitIdleMode moves the channel out of idle mode by recreating the name +// resolver and load balancer. This should never be called directly; use +// cc.idlenessMgr.ExitIdleMode instead. +func (cc *ClientConn) exitIdleMode() (err error) { + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return errConnClosing + } + cc.mu.Unlock() + + // This needs to be called without cc.mu because this builds a new resolver + // which might update state or report error inline, which would then need to + // acquire cc.mu. + if err := cc.resolverWrapper.start(); err != nil { + return err + } + + cc.addTraceEvent("exiting idle mode") + return nil +} + +// initIdleStateLocked initializes common state to how it should be while idle. +func (cc *ClientConn) initIdleStateLocked() { + cc.resolverWrapper = newCCResolverWrapper(cc) + cc.balancerWrapper = newCCBalancerWrapper(cc) + cc.firstResolveEvent = grpcsync.NewEvent() + // cc.conns == nil is a proxy for the ClientConn being closed. So, instead + // of setting it to nil here, we recreate the map. This also means that we + // don't have to do this when exiting idle mode. + cc.conns = make(map[*addrConn]struct{}) +} + +// enterIdleMode puts the channel in idle mode, and as part of it shuts down the +// name resolver, load balancer, and any subchannels. This should never be +// called directly; use cc.idlenessMgr.EnterIdleMode instead. +func (cc *ClientConn) enterIdleMode() { + cc.mu.Lock() + + if cc.conns == nil { + cc.mu.Unlock() + return + } + + conns := cc.conns + + rWrapper := cc.resolverWrapper + rWrapper.close() + cc.pickerWrapper.reset() + bWrapper := cc.balancerWrapper + bWrapper.close() + cc.csMgr.updateState(connectivity.Idle) + cc.addTraceEvent("entering idle mode") + + cc.initIdleStateLocked() + + cc.mu.Unlock() + + // Block until the name resolver and LB policy are closed. + <-rWrapper.serializer.Done() + <-bWrapper.serializer.Done() + + // Close all subchannels after the LB policy is closed. + for ac := range conns { + ac.tearDown(errConnIdling) + } +} + +// validateTransportCredentials performs a series of checks on the configured +// transport credentials. 
It returns a non-nil error if any of these conditions +// are met: +// - no transport creds and no creds bundle is configured +// - both transport creds and creds bundle are configured +// - creds bundle is configured, but it lacks a transport credentials +// - insecure transport creds configured alongside call creds that require +// transport level security +// +// If none of the above conditions are met, the configured credentials are +// deemed valid and a nil error is returned. +func (cc *ClientConn) validateTransportCredentials() error { + if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil { + return errNoTransportSecurity + } + if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil { + return errTransportCredsAndBundle + } + if cc.dopts.copts.CredsBundle != nil && cc.dopts.copts.CredsBundle.TransportCredentials() == nil { + return errNoTransportCredsInBundle + } + transportCreds := cc.dopts.copts.TransportCredentials + if transportCreds == nil { + transportCreds = cc.dopts.copts.CredsBundle.TransportCredentials() + } + if transportCreds.Info().SecurityProtocol == "insecure" { + for _, cd := range cc.dopts.copts.PerRPCCredentials { + if cd.RequireTransportSecurity() { + return errTransportCredentialsMissing + } + } + } + return nil +} + +// channelzRegistration registers the newly created ClientConn with channelz and +// stores the returned identifier in `cc.channelzID` and `cc.csMgr.channelzID`. +// A channelz trace event is emitted for ClientConn creation. If the newly +// created ClientConn is a nested one, i.e a valid parent ClientConn ID is +// specified via a dial option, the trace event is also added to the parent. +// +// Doesn't grab cc.mu as this method is expected to be called only at Dial time. +func (cc *ClientConn) channelzRegistration(target string) { + cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) + cc.addTraceEvent("created") +} + +// chainUnaryClientInterceptors chains all unary client interceptors into one. +func chainUnaryClientInterceptors(cc *ClientConn) { + interceptors := cc.dopts.chainUnaryInts + // Prepend dopts.unaryInt to the chaining interceptors if it exists, since unaryInt will + // be executed before any other chained interceptors. + if cc.dopts.unaryInt != nil { + interceptors = append([]UnaryClientInterceptor{cc.dopts.unaryInt}, interceptors...) + } + var chainedInt UnaryClientInterceptor + if len(interceptors) == 0 { + chainedInt = nil + } else if len(interceptors) == 1 { + chainedInt = interceptors[0] + } else { + chainedInt = func(ctx context.Context, method string, req, reply any, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error { + return interceptors[0](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, 0, invoker), opts...) + } + } + cc.dopts.unaryInt = chainedInt +} + +// getChainUnaryInvoker recursively generate the chained unary invoker. +func getChainUnaryInvoker(interceptors []UnaryClientInterceptor, curr int, finalInvoker UnaryInvoker) UnaryInvoker { + if curr == len(interceptors)-1 { + return finalInvoker + } + return func(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error { + return interceptors[curr+1](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, curr+1, finalInvoker), opts...) + } +} + +// chainStreamClientInterceptors chains all stream client interceptors into one. 
+func chainStreamClientInterceptors(cc *ClientConn) { + interceptors := cc.dopts.chainStreamInts + // Prepend dopts.streamInt to the chaining interceptors if it exists, since streamInt will + // be executed before any other chained interceptors. + if cc.dopts.streamInt != nil { + interceptors = append([]StreamClientInterceptor{cc.dopts.streamInt}, interceptors...) + } + var chainedInt StreamClientInterceptor + if len(interceptors) == 0 { + chainedInt = nil + } else if len(interceptors) == 1 { + chainedInt = interceptors[0] + } else { + chainedInt = func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) { + return interceptors[0](ctx, desc, cc, method, getChainStreamer(interceptors, 0, streamer), opts...) + } + } + cc.dopts.streamInt = chainedInt +} + +// getChainStreamer recursively generate the chained client stream constructor. +func getChainStreamer(interceptors []StreamClientInterceptor, curr int, finalStreamer Streamer) Streamer { + if curr == len(interceptors)-1 { + return finalStreamer + } + return func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) { + return interceptors[curr+1](ctx, desc, cc, method, getChainStreamer(interceptors, curr+1, finalStreamer), opts...) + } +} + +// newConnectivityStateManager creates an connectivityStateManager with +// the specified id. +func newConnectivityStateManager(ctx context.Context, id *channelz.Identifier) *connectivityStateManager { + return &connectivityStateManager{ + channelzID: id, + pubSub: grpcsync.NewPubSub(ctx), + } +} + +// connectivityStateManager keeps the connectivity.State of ClientConn. +// This struct will eventually be exported so the balancers can access it. +// +// TODO: If possible, get rid of the `connectivityStateManager` type, and +// provide this functionality using the `PubSub`, to avoid keeping track of +// the connectivity state at two places. +type connectivityStateManager struct { + mu sync.Mutex + state connectivity.State + notifyChan chan struct{} + channelzID *channelz.Identifier + pubSub *grpcsync.PubSub +} + +// updateState updates the connectivity.State of ClientConn. +// If there's a change it notifies goroutines waiting on state change to +// happen. +func (csm *connectivityStateManager) updateState(state connectivity.State) { + csm.mu.Lock() + defer csm.mu.Unlock() + if csm.state == connectivity.Shutdown { + return + } + if csm.state == state { + return + } + csm.state = state + csm.pubSub.Publish(state) + + channelz.Infof(logger, csm.channelzID, "Channel Connectivity change to %v", state) + if csm.notifyChan != nil { + // There are other goroutines waiting on this channel. + close(csm.notifyChan) + csm.notifyChan = nil + } +} + +func (csm *connectivityStateManager) getState() connectivity.State { + csm.mu.Lock() + defer csm.mu.Unlock() + return csm.state +} + +func (csm *connectivityStateManager) getNotifyChan() <-chan struct{} { + csm.mu.Lock() + defer csm.mu.Unlock() + if csm.notifyChan == nil { + csm.notifyChan = make(chan struct{}) + } + return csm.notifyChan +} + +// ClientConnInterface defines the functions clients need to perform unary and +// streaming RPCs. It is implemented by *ClientConn, and is only intended to +// be referenced by generated code. +type ClientConnInterface interface { + // Invoke performs a unary RPC and returns after the response is received + // into reply. 
+ Invoke(ctx context.Context, method string, args any, reply any, opts ...CallOption) error + // NewStream begins a streaming RPC. + NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) +} + +// Assert *ClientConn implements ClientConnInterface. +var _ ClientConnInterface = (*ClientConn)(nil) + +// ClientConn represents a virtual connection to a conceptual endpoint, to +// perform RPCs. +// +// A ClientConn is free to have zero or more actual connections to the endpoint +// based on configuration, load, etc. It is also free to determine which actual +// endpoints to use and may change it every RPC, permitting client-side load +// balancing. +// +// A ClientConn encapsulates a range of functionality including name +// resolution, TCP connection establishment (with retries and backoff) and TLS +// handshakes. It also handles errors on established connections by +// re-resolving the name and reconnecting. +type ClientConn struct { + ctx context.Context // Initialized using the background context at dial time. + cancel context.CancelFunc // Cancelled on close. + + // The following are initialized at dial time, and are read-only after that. + target string // User's dial target. + parsedTarget resolver.Target // See parseTargetAndFindResolver(). + authority string // See determineAuthority(). + dopts dialOptions // Default and user specified dial options. + channelzID *channelz.Identifier // Channelz identifier for the channel. + resolverBuilder resolver.Builder // See parseTargetAndFindResolver(). + idlenessMgr *idle.Manager + + // The following provide their own synchronization, and therefore don't + // require cc.mu to be held to access them. + csMgr *connectivityStateManager + pickerWrapper *pickerWrapper + safeConfigSelector iresolver.SafeConfigSelector + czData *channelzData + retryThrottler atomic.Value // Updated from service config. + + // mu protects the following fields. + // TODO: split mu so the same mutex isn't used for everything. + mu sync.RWMutex + resolverWrapper *ccResolverWrapper // Always recreated whenever entering idle to simplify Close. + balancerWrapper *ccBalancerWrapper // Always recreated whenever entering idle to simplify Close. + sc *ServiceConfig // Latest service config received from the resolver. + conns map[*addrConn]struct{} // Set to nil on close. + mkp keepalive.ClientParameters // May be updated upon receipt of a GoAway. + // firstResolveEvent is used to track whether the name resolver sent us at + // least one update. RPCs block on this event. May be accessed without mu + // if we know we cannot be asked to enter idle mode while accessing it (e.g. + // when the idle manager has already been closed, or if we are already + // entering idle mode). + firstResolveEvent *grpcsync.Event + + lceMu sync.Mutex // protects lastConnectionError + lastConnectionError error +} + +// WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or +// ctx expires. A true value is returned in former case and false in latter. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool { + ch := cc.csMgr.getNotifyChan() + if cc.csMgr.getState() != sourceState { + return true + } + select { + case <-ctx.Done(): + return false + case <-ch: + return true + } +} + +// GetState returns the connectivity.State of ClientConn. 
+// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a later +// release. +func (cc *ClientConn) GetState() connectivity.State { + return cc.csMgr.getState() +} + +// Connect causes all subchannels in the ClientConn to attempt to connect if +// the channel is idle. Does not wait for the connection attempts to begin +// before returning. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a later +// release. +func (cc *ClientConn) Connect() { + if err := cc.idlenessMgr.ExitIdleMode(); err != nil { + cc.addTraceEvent(err.Error()) + return + } + // If the ClientConn was not in idle mode, we need to call ExitIdle on the + // LB policy so that connections can be created. + cc.mu.Lock() + cc.balancerWrapper.exitIdle() + cc.mu.Unlock() +} + +// waitForResolvedAddrs blocks until the resolver has provided addresses or the +// context expires. Returns nil unless the context expires first; otherwise +// returns a status error based on the context. +func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) error { + // This is on the RPC path, so we use a fast path to avoid the + // more-expensive "select" below after the resolver has returned once. + if cc.firstResolveEvent.HasFired() { + return nil + } + select { + case <-cc.firstResolveEvent.Done(): + return nil + case <-ctx.Done(): + return status.FromContextError(ctx.Err()).Err() + case <-cc.ctx.Done(): + return ErrClientConnClosing + } +} + +var emptyServiceConfig *ServiceConfig + +func init() { + cfg := parseServiceConfig("{}") + if cfg.Err != nil { + panic(fmt.Sprintf("impossible error parsing empty service config: %v", cfg.Err)) + } + emptyServiceConfig = cfg.Config.(*ServiceConfig) + + internal.SubscribeToConnectivityStateChanges = func(cc *ClientConn, s grpcsync.Subscriber) func() { + return cc.csMgr.pubSub.Subscribe(s) + } + internal.EnterIdleModeForTesting = func(cc *ClientConn) { + cc.idlenessMgr.EnterIdleModeForTesting() + } + internal.ExitIdleModeForTesting = func(cc *ClientConn) error { + return cc.idlenessMgr.ExitIdleMode() + } +} + +func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) { + if cc.sc != nil { + cc.applyServiceConfigAndBalancer(cc.sc, nil, addrs) + return + } + if cc.dopts.defaultServiceConfig != nil { + cc.applyServiceConfigAndBalancer(cc.dopts.defaultServiceConfig, &defaultConfigSelector{cc.dopts.defaultServiceConfig}, addrs) + } else { + cc.applyServiceConfigAndBalancer(emptyServiceConfig, &defaultConfigSelector{emptyServiceConfig}, addrs) + } +} + +func (cc *ClientConn) updateResolverStateAndUnlock(s resolver.State, err error) error { + defer cc.firstResolveEvent.Fire() + // Check if the ClientConn is already closed. Some fields (e.g. + // balancerWrapper) are set to nil when closing the ClientConn, and could + // cause nil pointer panic if we don't have this check. + if cc.conns == nil { + cc.mu.Unlock() + return nil + } + + if err != nil { + // May need to apply the initial service config in case the resolver + // doesn't support service configs, or doesn't provide a service config + // with the new addresses. + cc.maybeApplyDefaultServiceConfig(nil) + + cc.balancerWrapper.resolverError(err) + + // No addresses are valid with err set; return early. 
+ cc.mu.Unlock() + return balancer.ErrBadResolverState + } + + var ret error + if cc.dopts.disableServiceConfig { + channelz.Infof(logger, cc.channelzID, "ignoring service config from resolver (%v) and applying the default because service config is disabled", s.ServiceConfig) + cc.maybeApplyDefaultServiceConfig(s.Addresses) + } else if s.ServiceConfig == nil { + cc.maybeApplyDefaultServiceConfig(s.Addresses) + // TODO: do we need to apply a failing LB policy if there is no + // default, per the error handling design? + } else { + if sc, ok := s.ServiceConfig.Config.(*ServiceConfig); s.ServiceConfig.Err == nil && ok { + configSelector := iresolver.GetConfigSelector(s) + if configSelector != nil { + if len(s.ServiceConfig.Config.(*ServiceConfig).Methods) != 0 { + channelz.Infof(logger, cc.channelzID, "method configs in service config will be ignored due to presence of config selector") + } + } else { + configSelector = &defaultConfigSelector{sc} + } + cc.applyServiceConfigAndBalancer(sc, configSelector, s.Addresses) + } else { + ret = balancer.ErrBadResolverState + if cc.sc == nil { + // Apply the failing LB only if we haven't received valid service config + // from the name resolver in the past. + cc.applyFailingLBLocked(s.ServiceConfig) + cc.mu.Unlock() + return ret + } + } + } + + var balCfg serviceconfig.LoadBalancingConfig + if cc.sc != nil && cc.sc.lbConfig != nil { + balCfg = cc.sc.lbConfig.cfg + } + bw := cc.balancerWrapper + cc.mu.Unlock() + + uccsErr := bw.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg}) + if ret == nil { + ret = uccsErr // prefer ErrBadResolver state since any other error is + // currently meaningless to the caller. + } + return ret +} + +// applyFailingLBLocked is akin to configuring an LB policy on the channel which +// always fails RPCs. Here, an actual LB policy is not configured, but an always +// erroring picker is configured, which returns errors with information about +// what was invalid in the received service config. A config selector with no +// service config is configured, and the connectivity state of the channel is +// set to TransientFailure. +func (cc *ClientConn) applyFailingLBLocked(sc *serviceconfig.ParseResult) { + var err error + if sc.Err != nil { + err = status.Errorf(codes.Unavailable, "error parsing service config: %v", sc.Err) + } else { + err = status.Errorf(codes.Unavailable, "illegal service config type: %T", sc.Config) + } + cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) + cc.pickerWrapper.updatePicker(base.NewErrPicker(err)) + cc.csMgr.updateState(connectivity.TransientFailure) +} + +// Makes a copy of the input addresses slice and clears out the balancer +// attributes field. Addresses are passed during subconn creation and address +// update operations. In both cases, we will clear the balancer attributes by +// calling this function, and therefore we will be able to use the Equal method +// provided by the resolver.Address type for comparison. +func copyAddressesWithoutBalancerAttributes(in []resolver.Address) []resolver.Address { + out := make([]resolver.Address, len(in)) + for i := range in { + out[i] = in[i] + out[i].BalancerAttributes = nil + } + return out +} + +// newAddrConnLocked creates an addrConn for addrs and adds it to cc.conns. +// +// Caller needs to make sure len(addrs) > 0. 
+func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) { + if cc.conns == nil { + return nil, ErrClientConnClosing + } + + ac := &addrConn{ + state: connectivity.Idle, + cc: cc, + addrs: copyAddressesWithoutBalancerAttributes(addrs), + scopts: opts, + dopts: cc.dopts, + czData: new(channelzData), + resetBackoff: make(chan struct{}), + stateChan: make(chan struct{}), + } + ac.ctx, ac.cancel = context.WithCancel(cc.ctx) + + var err error + ac.channelzID, err = channelz.RegisterSubChannel(ac, cc.channelzID, "") + if err != nil { + return nil, err + } + channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ + Desc: "Subchannel created", + Severity: channelz.CtInfo, + Parent: &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID.Int()), + Severity: channelz.CtInfo, + }, + }) + + // Track ac in cc. This needs to be done before any getTransport(...) is called. + cc.conns[ac] = struct{}{} + return ac, nil +} + +// removeAddrConn removes the addrConn in the subConn from clientConn. +// It also tears down the ac with the given error. +func (cc *ClientConn) removeAddrConn(ac *addrConn, err error) { + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return + } + delete(cc.conns, ac) + cc.mu.Unlock() + ac.tearDown(err) +} + +func (cc *ClientConn) channelzMetric() *channelz.ChannelInternalMetric { + return &channelz.ChannelInternalMetric{ + State: cc.GetState(), + Target: cc.target, + CallsStarted: atomic.LoadInt64(&cc.czData.callsStarted), + CallsSucceeded: atomic.LoadInt64(&cc.czData.callsSucceeded), + CallsFailed: atomic.LoadInt64(&cc.czData.callsFailed), + LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&cc.czData.lastCallStartedTime)), + } +} + +// Target returns the target string of the ClientConn. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func (cc *ClientConn) Target() string { + return cc.target +} + +func (cc *ClientConn) incrCallsStarted() { + atomic.AddInt64(&cc.czData.callsStarted, 1) + atomic.StoreInt64(&cc.czData.lastCallStartedTime, time.Now().UnixNano()) +} + +func (cc *ClientConn) incrCallsSucceeded() { + atomic.AddInt64(&cc.czData.callsSucceeded, 1) +} + +func (cc *ClientConn) incrCallsFailed() { + atomic.AddInt64(&cc.czData.callsFailed, 1) +} + +// connect starts creating a transport. +// It does nothing if the ac is not IDLE. +// TODO(bar) Move this to the addrConn section. +func (ac *addrConn) connect() error { + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + if logger.V(2) { + logger.Infof("connect called on shutdown addrConn; ignoring.") + } + ac.mu.Unlock() + return errConnClosing + } + if ac.state != connectivity.Idle { + if logger.V(2) { + logger.Infof("connect called on addrConn in non-idle state (%v); ignoring.", ac.state) + } + ac.mu.Unlock() + return nil + } + ac.mu.Unlock() + + ac.resetTransport() + return nil +} + +func equalAddresses(a, b []resolver.Address) bool { + if len(a) != len(b) { + return false + } + for i, v := range a { + if !v.Equal(b[i]) { + return false + } + } + return true +} + +// updateAddrs updates ac.addrs with the new addresses list and handles active +// connections or connection attempts. 
+func (ac *addrConn) updateAddrs(addrs []resolver.Address) { + ac.mu.Lock() + channelz.Infof(logger, ac.channelzID, "addrConn: updateAddrs curAddr: %v, addrs: %v", pretty.ToJSON(ac.curAddr), pretty.ToJSON(addrs)) + + addrs = copyAddressesWithoutBalancerAttributes(addrs) + if equalAddresses(ac.addrs, addrs) { + ac.mu.Unlock() + return + } + + ac.addrs = addrs + + if ac.state == connectivity.Shutdown || + ac.state == connectivity.TransientFailure || + ac.state == connectivity.Idle { + // We were not connecting, so do nothing but update the addresses. + ac.mu.Unlock() + return + } + + if ac.state == connectivity.Ready { + // Try to find the connected address. + for _, a := range addrs { + a.ServerName = ac.cc.getServerName(a) + if a.Equal(ac.curAddr) { + // We are connected to a valid address, so do nothing but + // update the addresses. + ac.mu.Unlock() + return + } + } + } + + // We are either connected to the wrong address or currently connecting. + // Stop the current iteration and restart. + + ac.cancel() + ac.ctx, ac.cancel = context.WithCancel(ac.cc.ctx) + + // We have to defer here because GracefulClose => onClose, which requires + // locking ac.mu. + if ac.transport != nil { + defer ac.transport.GracefulClose() + ac.transport = nil + } + + if len(addrs) == 0 { + ac.updateConnectivityState(connectivity.Idle, nil) + } + + ac.mu.Unlock() + + // Since we were connecting/connected, we should start a new connection + // attempt. + go ac.resetTransport() +} + +// getServerName determines the serverName to be used in the connection +// handshake. The default value for the serverName is the authority on the +// ClientConn, which either comes from the user's dial target or through an +// authority override specified using the WithAuthority dial option. Name +// resolvers can specify a per-address override for the serverName through the +// resolver.Address.ServerName field which is used only if the WithAuthority +// dial option was not used. The rationale is that per-address authority +// overrides specified by the name resolver can represent a security risk, while +// an override specified by the user is more dependable since they probably know +// what they are doing. +func (cc *ClientConn) getServerName(addr resolver.Address) string { + if cc.dopts.authority != "" { + return cc.dopts.authority + } + if addr.ServerName != "" { + return addr.ServerName + } + return cc.authority +} + +func getMethodConfig(sc *ServiceConfig, method string) MethodConfig { + if sc == nil { + return MethodConfig{} + } + if m, ok := sc.Methods[method]; ok { + return m + } + i := strings.LastIndex(method, "/") + if m, ok := sc.Methods[method[:i+1]]; ok { + return m + } + return sc.Methods[""] +} + +// GetMethodConfig gets the method config of the input method. +// If there's an exact match for input method (i.e. /service/method), we return +// the corresponding MethodConfig. +// If there isn't an exact match for the input method, we look for the service's default +// config under the service (i.e /service/) and then for the default for all services (empty string). +// +// If there is a default MethodConfig for the service, we return it. +// Otherwise, we return an empty MethodConfig. +func (cc *ClientConn) GetMethodConfig(method string) MethodConfig { + // TODO: Avoid the locking here. 
+ cc.mu.RLock() + defer cc.mu.RUnlock() + return getMethodConfig(cc.sc, method) +} + +func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { + cc.mu.RLock() + defer cc.mu.RUnlock() + if cc.sc == nil { + return nil + } + return cc.sc.healthCheckConfig +} + +func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, balancer.PickResult, error) { + return cc.pickerWrapper.pick(ctx, failfast, balancer.PickInfo{ + Ctx: ctx, + FullMethodName: method, + }) +} + +func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSelector iresolver.ConfigSelector, addrs []resolver.Address) { + if sc == nil { + // should never reach here. + return + } + cc.sc = sc + if configSelector != nil { + cc.safeConfigSelector.UpdateConfigSelector(configSelector) + } + + if cc.sc.retryThrottling != nil { + newThrottler := &retryThrottler{ + tokens: cc.sc.retryThrottling.MaxTokens, + max: cc.sc.retryThrottling.MaxTokens, + thresh: cc.sc.retryThrottling.MaxTokens / 2, + ratio: cc.sc.retryThrottling.TokenRatio, + } + cc.retryThrottler.Store(newThrottler) + } else { + cc.retryThrottler.Store((*retryThrottler)(nil)) + } + + var newBalancerName string + if cc.sc == nil || (cc.sc.lbConfig == nil && cc.sc.LB == nil) { + // No service config or no LB policy specified in config. + newBalancerName = PickFirstBalancerName + } else if cc.sc.lbConfig != nil { + newBalancerName = cc.sc.lbConfig.name + } else { // cc.sc.LB != nil + newBalancerName = *cc.sc.LB + } + cc.balancerWrapper.switchTo(newBalancerName) +} + +func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) { + cc.mu.RLock() + cc.resolverWrapper.resolveNow(o) + cc.mu.RUnlock() +} + +func (cc *ClientConn) resolveNowLocked(o resolver.ResolveNowOptions) { + cc.resolverWrapper.resolveNow(o) +} + +// ResetConnectBackoff wakes up all subchannels in transient failure and causes +// them to attempt another connection immediately. It also resets the backoff +// times used for subsequent attempts regardless of the current state. +// +// In general, this function should not be used. Typical service or network +// outages result in a reasonable client reconnection strategy by default. +// However, if a previously unavailable network becomes available, this may be +// used to trigger an immediate reconnect. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func (cc *ClientConn) ResetConnectBackoff() { + cc.mu.Lock() + conns := cc.conns + cc.mu.Unlock() + for ac := range conns { + ac.resetConnectBackoff() + } +} + +// Close tears down the ClientConn and all underlying connections. +func (cc *ClientConn) Close() error { + defer func() { + cc.cancel() + <-cc.csMgr.pubSub.Done() + }() + + // Prevent calls to enter/exit idle immediately, and ensure we are not + // currently entering/exiting idle mode. + cc.idlenessMgr.Close() + + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return ErrClientConnClosing + } + + conns := cc.conns + cc.conns = nil + cc.csMgr.updateState(connectivity.Shutdown) + + // We can safely unlock and continue to access all fields now as + // cc.conns==nil, preventing any further operations on cc. + cc.mu.Unlock() + + cc.resolverWrapper.close() + // The order of closing matters here since the balancer wrapper assumes the + // picker is closed before it is closed. 
+ cc.pickerWrapper.close() + cc.balancerWrapper.close() + + <-cc.resolverWrapper.serializer.Done() + <-cc.balancerWrapper.serializer.Done() + + for ac := range conns { + ac.tearDown(ErrClientConnClosing) + } + cc.addTraceEvent("deleted") + // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add + // trace reference to the entity being deleted, and thus prevent it from being + // deleted right away. + channelz.RemoveEntry(cc.channelzID) + + return nil +} + +// addrConn is a network connection to a given address. +type addrConn struct { + ctx context.Context + cancel context.CancelFunc + + cc *ClientConn + dopts dialOptions + acbw *acBalancerWrapper + scopts balancer.NewSubConnOptions + + // transport is set when there's a viable transport (note: ac state may not be READY as LB channel + // health checking may require server to report healthy to set ac to READY), and is reset + // to nil when the current transport should no longer be used to create a stream (e.g. after GoAway + // is received, transport is closed, ac has been torn down). + transport transport.ClientTransport // The current transport. + + mu sync.Mutex + curAddr resolver.Address // The current address. + addrs []resolver.Address // All addresses that the resolver resolved to. + + // Use updateConnectivityState for updating addrConn's connectivity state. + state connectivity.State + stateChan chan struct{} // closed and recreated on every state change. + + backoffIdx int // Needs to be stateful for resetConnectBackoff. + resetBackoff chan struct{} + + channelzID *channelz.Identifier + czData *channelzData +} + +// Note: this requires a lock on ac.mu. +func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) { + if ac.state == s { + return + } + // When changing states, reset the state change channel. + close(ac.stateChan) + ac.stateChan = make(chan struct{}) + ac.state = s + if lastErr == nil { + channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v", s) + } else { + channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v, last error: %s", s, lastErr) + } + ac.acbw.updateState(s, lastErr) +} + +// adjustParams updates parameters used to create transports upon +// receiving a GoAway. +func (ac *addrConn) adjustParams(r transport.GoAwayReason) { + switch r { + case transport.GoAwayTooManyPings: + v := 2 * ac.dopts.copts.KeepaliveParams.Time + ac.cc.mu.Lock() + if v > ac.cc.mkp.Time { + ac.cc.mkp.Time = v + } + ac.cc.mu.Unlock() + } +} + +func (ac *addrConn) resetTransport() { + ac.mu.Lock() + acCtx := ac.ctx + if acCtx.Err() != nil { + ac.mu.Unlock() + return + } + + addrs := ac.addrs + backoffFor := ac.dopts.bs.Backoff(ac.backoffIdx) + // This will be the duration that dial gets to finish. + dialDuration := minConnectTimeout + if ac.dopts.minConnectTimeout != nil { + dialDuration = ac.dopts.minConnectTimeout() + } + + if dialDuration < backoffFor { + // Give dial more time as we keep failing to connect. + dialDuration = backoffFor + } + // We can potentially spend all the time trying the first address, and + // if the server accepts the connection and then hangs, the following + // addresses will never be tried. + // + // The spec doesn't mention what should be done for multiple addresses. 
+ // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm + connectDeadline := time.Now().Add(dialDuration) + + ac.updateConnectivityState(connectivity.Connecting, nil) + ac.mu.Unlock() + + if err := ac.tryAllAddrs(acCtx, addrs, connectDeadline); err != nil { + ac.cc.resolveNow(resolver.ResolveNowOptions{}) + ac.mu.Lock() + if acCtx.Err() != nil { + // addrConn was torn down. + ac.mu.Unlock() + return + } + // After exhausting all addresses, the addrConn enters + // TRANSIENT_FAILURE. + ac.updateConnectivityState(connectivity.TransientFailure, err) + + // Backoff. + b := ac.resetBackoff + ac.mu.Unlock() + + timer := time.NewTimer(backoffFor) + select { + case <-timer.C: + ac.mu.Lock() + ac.backoffIdx++ + ac.mu.Unlock() + case <-b: + timer.Stop() + case <-acCtx.Done(): + timer.Stop() + return + } + + ac.mu.Lock() + if acCtx.Err() == nil { + ac.updateConnectivityState(connectivity.Idle, err) + } + ac.mu.Unlock() + return + } + // Success; reset backoff. + ac.mu.Lock() + ac.backoffIdx = 0 + ac.mu.Unlock() +} + +// tryAllAddrs tries to creates a connection to the addresses, and stop when at +// the first successful one. It returns an error if no address was successfully +// connected, or updates ac appropriately with the new transport. +func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, connectDeadline time.Time) error { + var firstConnErr error + for _, addr := range addrs { + if ctx.Err() != nil { + return errConnClosing + } + ac.mu.Lock() + + ac.cc.mu.RLock() + ac.dopts.copts.KeepaliveParams = ac.cc.mkp + ac.cc.mu.RUnlock() + + copts := ac.dopts.copts + if ac.scopts.CredsBundle != nil { + copts.CredsBundle = ac.scopts.CredsBundle + } + ac.mu.Unlock() + + channelz.Infof(logger, ac.channelzID, "Subchannel picks a new address %q to connect", addr.Addr) + + err := ac.createTransport(ctx, addr, copts, connectDeadline) + if err == nil { + return nil + } + if firstConnErr == nil { + firstConnErr = err + } + ac.cc.updateConnectionError(err) + } + + // Couldn't connect to any address. + return firstConnErr +} + +// createTransport creates a connection to addr. It returns an error if the +// address was not successfully connected, or updates ac appropriately with the +// new transport. +func (ac *addrConn) createTransport(ctx context.Context, addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error { + addr.ServerName = ac.cc.getServerName(addr) + hctx, hcancel := context.WithCancel(ctx) + + onClose := func(r transport.GoAwayReason) { + ac.mu.Lock() + defer ac.mu.Unlock() + // adjust params based on GoAwayReason + ac.adjustParams(r) + if ctx.Err() != nil { + // Already shut down or connection attempt canceled. tearDown() or + // updateAddrs() already cleared the transport and canceled hctx + // via ac.ctx, and we expected this connection to be closed, so do + // nothing here. + return + } + hcancel() + if ac.transport == nil { + // We're still connecting to this address, which could error. Do + // not update the connectivity state or resolve; these will happen + // at the end of the tryAllAddrs connection loop in the event of an + // error. + return + } + ac.transport = nil + // Refresh the name resolver on any connection loss. + ac.cc.resolveNow(resolver.ResolveNowOptions{}) + // Always go idle and wait for the LB policy to initiate a new + // connection attempt. 
+ ac.updateConnectivityState(connectivity.Idle, nil) + } + + connectCtx, cancel := context.WithDeadline(ctx, connectDeadline) + defer cancel() + copts.ChannelzParentID = ac.channelzID + + newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onClose) + if err != nil { + if logger.V(2) { + logger.Infof("Creating new client transport to %q: %v", addr, err) + } + // newTr is either nil, or closed. + hcancel() + channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s. Err: %v", addr, err) + return err + } + + ac.mu.Lock() + defer ac.mu.Unlock() + if ctx.Err() != nil { + // This can happen if the subConn was removed while in `Connecting` + // state. tearDown() would have set the state to `Shutdown`, but + // would not have closed the transport since ac.transport would not + // have been set at that point. + // + // We run this in a goroutine because newTr.Close() calls onClose() + // inline, which requires locking ac.mu. + // + // The error we pass to Close() is immaterial since there are no open + // streams at this point, so no trailers with error details will be sent + // out. We just need to pass a non-nil error. + // + // This can also happen when updateAddrs is called during a connection + // attempt. + go newTr.Close(transport.ErrConnClosing) + return nil + } + if hctx.Err() != nil { + // onClose was already called for this connection, but the connection + // was successfully established first. Consider it a success and set + // the new state to Idle. + ac.updateConnectivityState(connectivity.Idle, nil) + return nil + } + ac.curAddr = addr + ac.transport = newTr + ac.startHealthCheck(hctx) // Will set state to READY if appropriate. + return nil +} + +// startHealthCheck starts the health checking stream (RPC) to watch the health +// stats of this connection if health checking is requested and configured. +// +// LB channel health checking is enabled when all requirements below are met: +// 1. it is not disabled by the user with the WithDisableHealthCheck DialOption +// 2. internal.HealthCheckFunc is set by importing the grpc/health package +// 3. a service config with non-empty healthCheckConfig field is provided +// 4. the load balancer requests it +// +// It sets addrConn to READY if the health checking stream is not started. +// +// Caller must hold ac.mu. +func (ac *addrConn) startHealthCheck(ctx context.Context) { + var healthcheckManagingState bool + defer func() { + if !healthcheckManagingState { + ac.updateConnectivityState(connectivity.Ready, nil) + } + }() + + if ac.cc.dopts.disableHealthCheck { + return + } + healthCheckConfig := ac.cc.healthCheckConfig() + if healthCheckConfig == nil { + return + } + if !ac.scopts.HealthCheckEnabled { + return + } + healthCheckFunc := ac.cc.dopts.healthCheckFunc + if healthCheckFunc == nil { + // The health package is not imported to set health check function. + // + // TODO: add a link to the health check doc in the error message. + channelz.Error(logger, ac.channelzID, "Health check is requested but health check function is not set.") + return + } + + healthcheckManagingState = true + + // Set up the health check helper functions. 
+ currentTr := ac.transport + newStream := func(method string) (any, error) { + ac.mu.Lock() + if ac.transport != currentTr { + ac.mu.Unlock() + return nil, status.Error(codes.Canceled, "the provided transport is no longer valid to use") + } + ac.mu.Unlock() + return newNonRetryClientStream(ctx, &StreamDesc{ServerStreams: true}, method, currentTr, ac) + } + setConnectivityState := func(s connectivity.State, lastErr error) { + ac.mu.Lock() + defer ac.mu.Unlock() + if ac.transport != currentTr { + return + } + ac.updateConnectivityState(s, lastErr) + } + // Start the health checking stream. + go func() { + err := ac.cc.dopts.healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName) + if err != nil { + if status.Code(err) == codes.Unimplemented { + channelz.Error(logger, ac.channelzID, "Subchannel health check is unimplemented at server side, thus health check is disabled") + } else { + channelz.Errorf(logger, ac.channelzID, "Health checking failed: %v", err) + } + } + }() +} + +func (ac *addrConn) resetConnectBackoff() { + ac.mu.Lock() + close(ac.resetBackoff) + ac.backoffIdx = 0 + ac.resetBackoff = make(chan struct{}) + ac.mu.Unlock() +} + +// getReadyTransport returns the transport if ac's state is READY or nil if not. +func (ac *addrConn) getReadyTransport() transport.ClientTransport { + ac.mu.Lock() + defer ac.mu.Unlock() + if ac.state == connectivity.Ready { + return ac.transport + } + return nil +} + +// getTransport waits until the addrconn is ready and returns the transport. +// If the context expires first, returns an appropriate status. If the +// addrConn is stopped first, returns an Unavailable status error. +func (ac *addrConn) getTransport(ctx context.Context) (transport.ClientTransport, error) { + for ctx.Err() == nil { + ac.mu.Lock() + t, state, sc := ac.transport, ac.state, ac.stateChan + ac.mu.Unlock() + if state == connectivity.Ready { + return t, nil + } + if state == connectivity.Shutdown { + return nil, status.Errorf(codes.Unavailable, "SubConn shutting down") + } + + select { + case <-ctx.Done(): + case <-sc: + } + } + return nil, status.FromContextError(ctx.Err()).Err() +} + +// tearDown starts to tear down the addrConn. +// +// Note that tearDown doesn't remove ac from ac.cc.conns, so the addrConn struct +// will leak. In most cases, call cc.removeAddrConn() instead. +func (ac *addrConn) tearDown(err error) { + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return + } + curTr := ac.transport + ac.transport = nil + // We have to set the state to Shutdown before anything else to prevent races + // between setting the state and logic that waits on context cancellation / etc. + ac.updateConnectivityState(connectivity.Shutdown, nil) + ac.cancel() + ac.curAddr = resolver.Address{} + + channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ + Desc: "Subchannel deleted", + Severity: channelz.CtInfo, + Parent: &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Subchannel(id:%d) deleted", ac.channelzID.Int()), + Severity: channelz.CtInfo, + }, + }) + // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add + // trace reference to the entity being deleted, and thus prevent it from + // being deleted right away. + channelz.RemoveEntry(ac.channelzID) + ac.mu.Unlock() + + // We have to release the lock before the call to GracefulClose/Close here + // because both of them call onClose(), which requires locking ac.mu. 
+ if curTr != nil { + if err == errConnDrain { + // Close the transport gracefully when the subConn is being shutdown. + // + // GracefulClose() may be executed multiple times if: + // - multiple GoAway frames are received from the server + // - there are concurrent name resolver or balancer triggered + // address removal and GoAway + curTr.GracefulClose() + } else { + // Hard close the transport when the channel is entering idle or is + // being shutdown. In the case where the channel is being shutdown, + // closing of transports is also taken care of by cancelation of cc.ctx. + // But in the case where the channel is entering idle, we need to + // explicitly close the transports here. Instead of distinguishing + // between these two cases, it is simpler to close the transport + // unconditionally here. + curTr.Close(err) + } + } +} + +func (ac *addrConn) getState() connectivity.State { + ac.mu.Lock() + defer ac.mu.Unlock() + return ac.state +} + +func (ac *addrConn) ChannelzMetric() *channelz.ChannelInternalMetric { + ac.mu.Lock() + addr := ac.curAddr.Addr + ac.mu.Unlock() + return &channelz.ChannelInternalMetric{ + State: ac.getState(), + Target: addr, + CallsStarted: atomic.LoadInt64(&ac.czData.callsStarted), + CallsSucceeded: atomic.LoadInt64(&ac.czData.callsSucceeded), + CallsFailed: atomic.LoadInt64(&ac.czData.callsFailed), + LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&ac.czData.lastCallStartedTime)), + } +} + +func (ac *addrConn) incrCallsStarted() { + atomic.AddInt64(&ac.czData.callsStarted, 1) + atomic.StoreInt64(&ac.czData.lastCallStartedTime, time.Now().UnixNano()) +} + +func (ac *addrConn) incrCallsSucceeded() { + atomic.AddInt64(&ac.czData.callsSucceeded, 1) +} + +func (ac *addrConn) incrCallsFailed() { + atomic.AddInt64(&ac.czData.callsFailed, 1) +} + +type retryThrottler struct { + max float64 + thresh float64 + ratio float64 + + mu sync.Mutex + tokens float64 // TODO(dfawley): replace with atomic and remove lock. +} + +// throttle subtracts a retry token from the pool and returns whether a retry +// should be throttled (disallowed) based upon the retry throttling policy in +// the service config. +func (rt *retryThrottler) throttle() bool { + if rt == nil { + return false + } + rt.mu.Lock() + defer rt.mu.Unlock() + rt.tokens-- + if rt.tokens < 0 { + rt.tokens = 0 + } + return rt.tokens <= rt.thresh +} + +func (rt *retryThrottler) successfulRPC() { + if rt == nil { + return + } + rt.mu.Lock() + defer rt.mu.Unlock() + rt.tokens += rt.ratio + if rt.tokens > rt.max { + rt.tokens = rt.max + } +} + +type channelzChannel struct { + cc *ClientConn +} + +func (c *channelzChannel) ChannelzMetric() *channelz.ChannelInternalMetric { + return c.cc.channelzMetric() +} + +// ErrClientConnTimeout indicates that the ClientConn cannot establish the +// underlying connections within the specified timeout. +// +// Deprecated: This error is never returned by grpc and should not be +// referenced by users. +var ErrClientConnTimeout = errors.New("grpc: timed out when dialing") + +// getResolver finds the scheme in the cc's resolvers or the global registry. +// scheme should always be lowercase (typically by virtue of url.Parse() +// performing proper RFC3986 behavior). 
+func (cc *ClientConn) getResolver(scheme string) resolver.Builder { + for _, rb := range cc.dopts.resolvers { + if scheme == rb.Scheme() { + return rb + } + } + return resolver.Get(scheme) +} + +func (cc *ClientConn) updateConnectionError(err error) { + cc.lceMu.Lock() + cc.lastConnectionError = err + cc.lceMu.Unlock() +} + +func (cc *ClientConn) connectionError() error { + cc.lceMu.Lock() + defer cc.lceMu.Unlock() + return cc.lastConnectionError +} + +// parseTargetAndFindResolver parses the user's dial target and stores the +// parsed target in `cc.parsedTarget`. +// +// The resolver to use is determined based on the scheme in the parsed target +// and the same is stored in `cc.resolverBuilder`. +// +// Doesn't grab cc.mu as this method is expected to be called only at Dial time. +func (cc *ClientConn) parseTargetAndFindResolver() error { + channelz.Infof(logger, cc.channelzID, "original dial target is: %q", cc.target) + + var rb resolver.Builder + parsedTarget, err := parseTarget(cc.target) + if err != nil { + channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", cc.target, err) + } else { + channelz.Infof(logger, cc.channelzID, "parsed dial target is: %#v", parsedTarget) + rb = cc.getResolver(parsedTarget.URL.Scheme) + if rb != nil { + cc.parsedTarget = parsedTarget + cc.resolverBuilder = rb + return nil + } + } + + // We are here because the user's dial target did not contain a scheme or + // specified an unregistered scheme. We should fallback to the default + // scheme, except when a custom dialer is specified in which case, we should + // always use passthrough scheme. + defScheme := resolver.GetDefaultScheme() + channelz.Infof(logger, cc.channelzID, "fallback to scheme %q", defScheme) + canonicalTarget := defScheme + ":///" + cc.target + + parsedTarget, err = parseTarget(canonicalTarget) + if err != nil { + channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", canonicalTarget, err) + return err + } + channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) + rb = cc.getResolver(parsedTarget.URL.Scheme) + if rb == nil { + return fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme) + } + cc.parsedTarget = parsedTarget + cc.resolverBuilder = rb + return nil +} + +// parseTarget uses RFC 3986 semantics to parse the given target into a +// resolver.Target struct containing url. Query params are stripped from the +// endpoint. +func parseTarget(target string) (resolver.Target, error) { + u, err := url.Parse(target) + if err != nil { + return resolver.Target{}, err + } + + return resolver.Target{URL: *u}, nil +} + +func encodeAuthority(authority string) string { + const upperhex = "0123456789ABCDEF" + + // Return for characters that must be escaped as per + // Valid chars are mentioned here: + // https://datatracker.ietf.org/doc/html/rfc3986#section-3.2 + shouldEscape := func(c byte) bool { + // Alphanum are always allowed. + if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' { + return false + } + switch c { + case '-', '_', '.', '~': // Unreserved characters + return false + case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // Subdelim characters + return false + case ':', '[', ']', '@': // Authority related delimeters + return false + } + // Everything else must be escaped. 
+ return true + } + + hexCount := 0 + for i := 0; i < len(authority); i++ { + c := authority[i] + if shouldEscape(c) { + hexCount++ + } + } + + if hexCount == 0 { + return authority + } + + required := len(authority) + 2*hexCount + t := make([]byte, required) + + j := 0 + // This logic is a barebones version of escape in the go net/url library. + for i := 0; i < len(authority); i++ { + switch c := authority[i]; { + case shouldEscape(c): + t[j] = '%' + t[j+1] = upperhex[c>>4] + t[j+2] = upperhex[c&15] + j += 3 + default: + t[j] = authority[i] + j++ + } + } + return string(t) +} + +// Determine channel authority. The order of precedence is as follows: +// - user specified authority override using `WithAuthority` dial option +// - creds' notion of server name for the authentication handshake +// - endpoint from dial target of the form "scheme://[authority]/endpoint" +// +// Stores the determined authority in `cc.authority`. +// +// Returns a non-nil error if the authority returned by the transport +// credentials do not match the authority configured through the dial option. +// +// Doesn't grab cc.mu as this method is expected to be called only at Dial time. +func (cc *ClientConn) determineAuthority() error { + dopts := cc.dopts + // Historically, we had two options for users to specify the serverName or + // authority for a channel. One was through the transport credentials + // (either in its constructor, or through the OverrideServerName() method). + // The other option (for cases where WithInsecure() dial option was used) + // was to use the WithAuthority() dial option. + // + // A few things have changed since: + // - `insecure` package with an implementation of the `TransportCredentials` + // interface for the insecure case + // - WithAuthority() dial option support for secure credentials + authorityFromCreds := "" + if creds := dopts.copts.TransportCredentials; creds != nil && creds.Info().ServerName != "" { + authorityFromCreds = creds.Info().ServerName + } + authorityFromDialOption := dopts.authority + if (authorityFromCreds != "" && authorityFromDialOption != "") && authorityFromCreds != authorityFromDialOption { + return fmt.Errorf("ClientConn's authority from transport creds %q and dial option %q don't match", authorityFromCreds, authorityFromDialOption) + } + + endpoint := cc.parsedTarget.Endpoint() + if authorityFromDialOption != "" { + cc.authority = authorityFromDialOption + } else if authorityFromCreds != "" { + cc.authority = authorityFromCreds + } else if auth, ok := cc.resolverBuilder.(resolver.AuthorityOverrider); ok { + cc.authority = auth.OverrideAuthority(cc.parsedTarget) + } else if strings.HasPrefix(endpoint, ":") { + cc.authority = "localhost" + endpoint + } else { + cc.authority = encodeAuthority(endpoint) + } + channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority) + return nil +} diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go new file mode 100644 index 00000000..411e3dfd --- /dev/null +++ b/vendor/google.golang.org/grpc/codec.go @@ -0,0 +1,50 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "google.golang.org/grpc/encoding" + _ "google.golang.org/grpc/encoding/proto" // to register the Codec for "proto" +) + +// baseCodec contains the functionality of both Codec and encoding.Codec, but +// omits the name/string, which vary between the two and are not needed for +// anything besides the registry in the encoding package. +type baseCodec interface { + Marshal(v any) ([]byte, error) + Unmarshal(data []byte, v any) error +} + +var _ baseCodec = Codec(nil) +var _ baseCodec = encoding.Codec(nil) + +// Codec defines the interface gRPC uses to encode and decode messages. +// Note that implementations of this interface must be thread safe; +// a Codec's methods can be called from concurrent goroutines. +// +// Deprecated: use encoding.Codec instead. +type Codec interface { + // Marshal returns the wire format of v. + Marshal(v any) ([]byte, error) + // Unmarshal parses the wire format into v. + Unmarshal(data []byte, v any) error + // String returns the name of the Codec implementation. This is unused by + // gRPC. + String() string +} diff --git a/vendor/google.golang.org/grpc/codegen.sh b/vendor/google.golang.org/grpc/codegen.sh new file mode 100644 index 00000000..4cdc6ba7 --- /dev/null +++ b/vendor/google.golang.org/grpc/codegen.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash + +# This script serves as an example to demonstrate how to generate the gRPC-Go +# interface and the related messages from .proto file. +# +# It assumes the installation of i) Google proto buffer compiler at +# https://github.com/google/protobuf (after v2.6.1) and ii) the Go codegen +# plugin at https://github.com/golang/protobuf (after 2015-02-20). If you have +# not, please install them first. +# +# We recommend running this script at $GOPATH/src. +# +# If this is not what you need, feel free to make your own scripts. Again, this +# script is for demonstration purpose. +# +proto=$1 +protoc --go_out=plugins=grpc:. $proto diff --git a/vendor/google.golang.org/grpc/codes/code_string.go b/vendor/google.golang.org/grpc/codes/code_string.go new file mode 100644 index 00000000..934fac2b --- /dev/null +++ b/vendor/google.golang.org/grpc/codes/code_string.go @@ -0,0 +1,111 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package codes + +import ( + "strconv" + + "google.golang.org/grpc/internal" +) + +func init() { + internal.CanonicalString = canonicalString +} + +func (c Code) String() string { + switch c { + case OK: + return "OK" + case Canceled: + return "Canceled" + case Unknown: + return "Unknown" + case InvalidArgument: + return "InvalidArgument" + case DeadlineExceeded: + return "DeadlineExceeded" + case NotFound: + return "NotFound" + case AlreadyExists: + return "AlreadyExists" + case PermissionDenied: + return "PermissionDenied" + case ResourceExhausted: + return "ResourceExhausted" + case FailedPrecondition: + return "FailedPrecondition" + case Aborted: + return "Aborted" + case OutOfRange: + return "OutOfRange" + case Unimplemented: + return "Unimplemented" + case Internal: + return "Internal" + case Unavailable: + return "Unavailable" + case DataLoss: + return "DataLoss" + case Unauthenticated: + return "Unauthenticated" + default: + return "Code(" + strconv.FormatInt(int64(c), 10) + ")" + } +} + +func canonicalString(c Code) string { + switch c { + case OK: + return "OK" + case Canceled: + return "CANCELLED" + case Unknown: + return "UNKNOWN" + case InvalidArgument: + return "INVALID_ARGUMENT" + case DeadlineExceeded: + return "DEADLINE_EXCEEDED" + case NotFound: + return "NOT_FOUND" + case AlreadyExists: + return "ALREADY_EXISTS" + case PermissionDenied: + return "PERMISSION_DENIED" + case ResourceExhausted: + return "RESOURCE_EXHAUSTED" + case FailedPrecondition: + return "FAILED_PRECONDITION" + case Aborted: + return "ABORTED" + case OutOfRange: + return "OUT_OF_RANGE" + case Unimplemented: + return "UNIMPLEMENTED" + case Internal: + return "INTERNAL" + case Unavailable: + return "UNAVAILABLE" + case DataLoss: + return "DATA_LOSS" + case Unauthenticated: + return "UNAUTHENTICATED" + default: + return "CODE(" + strconv.FormatInt(int64(c), 10) + ")" + } +} diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go new file mode 100644 index 00000000..08476ad1 --- /dev/null +++ b/vendor/google.golang.org/grpc/codes/codes.go @@ -0,0 +1,250 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package codes defines the canonical error codes used by gRPC. It is +// consistent across various languages. +package codes // import "google.golang.org/grpc/codes" + +import ( + "fmt" + "strconv" +) + +// A Code is a status code defined according to the [gRPC documentation]. +// +// Only the codes defined as consts in this package are valid codes. Do not use +// other code values. Behavior of other codes is implementation-specific and +// interoperability between implementations is not guaranteed. +// +// [gRPC documentation]: https://github.com/grpc/grpc/blob/master/doc/statuscodes.md +type Code uint32 + +const ( + // OK is returned on success. + OK Code = 0 + + // Canceled indicates the operation was canceled (typically by the caller). 
+ // + // The gRPC framework will generate this error code when cancellation + // is requested. + Canceled Code = 1 + + // Unknown error. An example of where this error may be returned is + // if a Status value received from another address space belongs to + // an error-space that is not known in this address space. Also + // errors raised by APIs that do not return enough error information + // may be converted to this error. + // + // The gRPC framework will generate this error code in the above two + // mentioned cases. + Unknown Code = 2 + + // InvalidArgument indicates client specified an invalid argument. + // Note that this differs from FailedPrecondition. It indicates arguments + // that are problematic regardless of the state of the system + // (e.g., a malformed file name). + // + // This error code will not be generated by the gRPC framework. + InvalidArgument Code = 3 + + // DeadlineExceeded means operation expired before completion. + // For operations that change the state of the system, this error may be + // returned even if the operation has completed successfully. For + // example, a successful response from a server could have been delayed + // long enough for the deadline to expire. + // + // The gRPC framework will generate this error code when the deadline is + // exceeded. + DeadlineExceeded Code = 4 + + // NotFound means some requested entity (e.g., file or directory) was + // not found. + // + // This error code will not be generated by the gRPC framework. + NotFound Code = 5 + + // AlreadyExists means an attempt to create an entity failed because one + // already exists. + // + // This error code will not be generated by the gRPC framework. + AlreadyExists Code = 6 + + // PermissionDenied indicates the caller does not have permission to + // execute the specified operation. It must not be used for rejections + // caused by exhausting some resource (use ResourceExhausted + // instead for those errors). It must not be + // used if the caller cannot be identified (use Unauthenticated + // instead for those errors). + // + // This error code will not be generated by the gRPC core framework, + // but expect authentication middleware to use it. + PermissionDenied Code = 7 + + // ResourceExhausted indicates some resource has been exhausted, perhaps + // a per-user quota, or perhaps the entire file system is out of space. + // + // This error code will be generated by the gRPC framework in + // out-of-memory and server overload situations, or when a message is + // larger than the configured maximum size. + ResourceExhausted Code = 8 + + // FailedPrecondition indicates operation was rejected because the + // system is not in a state required for the operation's execution. + // For example, directory to be deleted may be non-empty, an rmdir + // operation is applied to a non-directory, etc. + // + // A litmus test that may help a service implementor in deciding + // between FailedPrecondition, Aborted, and Unavailable: + // (a) Use Unavailable if the client can retry just the failing call. + // (b) Use Aborted if the client should retry at a higher-level + // (e.g., restarting a read-modify-write sequence). + // (c) Use FailedPrecondition if the client should not retry until + // the system state has been explicitly fixed. E.g., if an "rmdir" + // fails because the directory is non-empty, FailedPrecondition + // should be returned since the client should not retry unless + // they have first fixed up the directory by deleting files from it. 
+ // (d) Use FailedPrecondition if the client performs conditional + // REST Get/Update/Delete on a resource and the resource on the + // server does not match the condition. E.g., conflicting + // read-modify-write on the same resource. + // + // This error code will not be generated by the gRPC framework. + FailedPrecondition Code = 9 + + // Aborted indicates the operation was aborted, typically due to a + // concurrency issue like sequencer check failures, transaction aborts, + // etc. + // + // See litmus test above for deciding between FailedPrecondition, + // Aborted, and Unavailable. + // + // This error code will not be generated by the gRPC framework. + Aborted Code = 10 + + // OutOfRange means operation was attempted past the valid range. + // E.g., seeking or reading past end of file. + // + // Unlike InvalidArgument, this error indicates a problem that may + // be fixed if the system state changes. For example, a 32-bit file + // system will generate InvalidArgument if asked to read at an + // offset that is not in the range [0,2^32-1], but it will generate + // OutOfRange if asked to read from an offset past the current + // file size. + // + // There is a fair bit of overlap between FailedPrecondition and + // OutOfRange. We recommend using OutOfRange (the more specific + // error) when it applies so that callers who are iterating through + // a space can easily look for an OutOfRange error to detect when + // they are done. + // + // This error code will not be generated by the gRPC framework. + OutOfRange Code = 11 + + // Unimplemented indicates operation is not implemented or not + // supported/enabled in this service. + // + // This error code will be generated by the gRPC framework. Most + // commonly, you will see this error code when a method implementation + // is missing on the server. It can also be generated for unknown + // compression algorithms or a disagreement as to whether an RPC should + // be streaming. + Unimplemented Code = 12 + + // Internal errors. Means some invariants expected by underlying + // system has been broken. If you see one of these errors, + // something is very broken. + // + // This error code will be generated by the gRPC framework in several + // internal error conditions. + Internal Code = 13 + + // Unavailable indicates the service is currently unavailable. + // This is a most likely a transient condition and may be corrected + // by retrying with a backoff. Note that it is not always safe to retry + // non-idempotent operations. + // + // See litmus test above for deciding between FailedPrecondition, + // Aborted, and Unavailable. + // + // This error code will be generated by the gRPC framework during + // abrupt shutdown of a server process or network connection. + Unavailable Code = 14 + + // DataLoss indicates unrecoverable data loss or corruption. + // + // This error code will not be generated by the gRPC framework. + DataLoss Code = 15 + + // Unauthenticated indicates the request does not have valid + // authentication credentials for the operation. + // + // The gRPC framework will generate this error code when the + // authentication metadata is invalid or a Credentials callback fails, + // but also expect authentication middleware to generate it. 
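+	//
+	// As an illustrative sketch (not part of this package; tokenValid and md
+	// are hypothetical placeholders), a server-side auth interceptor could
+	// reject a request with this code:
+	//
+	//	if !tokenValid(md) {
+	//		return nil, status.Error(codes.Unauthenticated, "invalid token")
+	//	}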
+ Unauthenticated Code = 16 + + _maxCode = 17 +) + +var strToCode = map[string]Code{ + `"OK"`: OK, + `"CANCELLED"`:/* [sic] */ Canceled, + `"UNKNOWN"`: Unknown, + `"INVALID_ARGUMENT"`: InvalidArgument, + `"DEADLINE_EXCEEDED"`: DeadlineExceeded, + `"NOT_FOUND"`: NotFound, + `"ALREADY_EXISTS"`: AlreadyExists, + `"PERMISSION_DENIED"`: PermissionDenied, + `"RESOURCE_EXHAUSTED"`: ResourceExhausted, + `"FAILED_PRECONDITION"`: FailedPrecondition, + `"ABORTED"`: Aborted, + `"OUT_OF_RANGE"`: OutOfRange, + `"UNIMPLEMENTED"`: Unimplemented, + `"INTERNAL"`: Internal, + `"UNAVAILABLE"`: Unavailable, + `"DATA_LOSS"`: DataLoss, + `"UNAUTHENTICATED"`: Unauthenticated, +} + +// UnmarshalJSON unmarshals b into the Code. +func (c *Code) UnmarshalJSON(b []byte) error { + // From json.Unmarshaler: By convention, to approximate the behavior of + // Unmarshal itself, Unmarshalers implement UnmarshalJSON([]byte("null")) as + // a no-op. + if string(b) == "null" { + return nil + } + if c == nil { + return fmt.Errorf("nil receiver passed to UnmarshalJSON") + } + + if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil { + if ci >= _maxCode { + return fmt.Errorf("invalid code: %q", ci) + } + + *c = Code(ci) + return nil + } + + if jc, ok := strToCode[string(b)]; ok { + *c = jc + return nil + } + return fmt.Errorf("invalid code: %q", string(b)) +} diff --git a/vendor/google.golang.org/grpc/connectivity/connectivity.go b/vendor/google.golang.org/grpc/connectivity/connectivity.go new file mode 100644 index 00000000..4a899264 --- /dev/null +++ b/vendor/google.golang.org/grpc/connectivity/connectivity.go @@ -0,0 +1,94 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package connectivity defines connectivity semantics. +// For details, see https://github.com/grpc/grpc/blob/master/doc/connectivity-semantics-and-api.md. +package connectivity + +import ( + "google.golang.org/grpc/grpclog" +) + +var logger = grpclog.Component("core") + +// State indicates the state of connectivity. +// It can be the state of a ClientConn or SubConn. +type State int + +func (s State) String() string { + switch s { + case Idle: + return "IDLE" + case Connecting: + return "CONNECTING" + case Ready: + return "READY" + case TransientFailure: + return "TRANSIENT_FAILURE" + case Shutdown: + return "SHUTDOWN" + default: + logger.Errorf("unknown connectivity state: %d", s) + return "INVALID_STATE" + } +} + +const ( + // Idle indicates the ClientConn is idle. + Idle State = iota + // Connecting indicates the ClientConn is connecting. + Connecting + // Ready indicates the ClientConn is ready for work. + Ready + // TransientFailure indicates the ClientConn has seen a failure but expects to recover. + TransientFailure + // Shutdown indicates the ClientConn has started shutting down. + Shutdown +) + +// ServingMode indicates the current mode of operation of the server. +// +// Only xDS enabled gRPC servers currently report their serving mode. 
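+//
+// As an illustrative sketch (not part of this package; update is a
+// hypothetical value carrying the reported mode), a serving mode watcher
+// might react to server status updates like this:
+//
+//	switch update.Mode {
+//	case connectivity.ServingModeServing:
+//		// Safe to send new RPCs to this server.
+//	case connectivity.ServingModeNotServing:
+//		// Stop sending new RPCs; in-flight RPCs may still complete.
+//	}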
+type ServingMode int + +const ( + // ServingModeStarting indicates that the server is starting up. + ServingModeStarting ServingMode = iota + // ServingModeServing indicates that the server contains all required + // configuration and is serving RPCs. + ServingModeServing + // ServingModeNotServing indicates that the server is not accepting new + // connections. Existing connections will be closed gracefully, allowing + // in-progress RPCs to complete. A server enters this mode when it does not + // contain the required configuration to serve RPCs. + ServingModeNotServing +) + +func (s ServingMode) String() string { + switch s { + case ServingModeStarting: + return "STARTING" + case ServingModeServing: + return "SERVING" + case ServingModeNotServing: + return "NOT_SERVING" + default: + logger.Errorf("unknown serving mode: %d", s) + return "INVALID_MODE" + } +} diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go new file mode 100644 index 00000000..5feac3aa --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -0,0 +1,291 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package credentials implements various credentials supported by gRPC library, +// which encapsulate all the state needed by a client to authenticate with a +// server and make various assertions, e.g., about the client's identity, role, +// or whether it is authorized to make a particular call. +package credentials // import "google.golang.org/grpc/credentials" + +import ( + "context" + "errors" + "fmt" + "net" + + "github.com/golang/protobuf/proto" + "google.golang.org/grpc/attributes" + icredentials "google.golang.org/grpc/internal/credentials" +) + +// PerRPCCredentials defines the common interface for the credentials which need to +// attach security information to every RPC (e.g., oauth2). +type PerRPCCredentials interface { + // GetRequestMetadata gets the current request metadata, refreshing tokens + // if required. This should be called by the transport layer on each + // request, and the data should be populated in headers or other + // context. If a status code is returned, it will be used as the status for + // the RPC (restricted to an allowable set of codes as defined by gRFC + // A54). uri is the URI of the entry point for the request. When supported + // by the underlying implementation, ctx can be used for timeout and + // cancellation. Additionally, RequestInfo data will be available via ctx + // to this call. TODO(zhaoq): Define the set of the qualified keys instead + // of leaving it as an arbitrary string. + GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) + // RequireTransportSecurity indicates whether the credentials requires + // transport security. + RequireTransportSecurity() bool +} + +// SecurityLevel defines the protection level on an established connection. +// +// This API is experimental. 
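+//
+// As an illustrative sketch (authInfo is a hypothetical AuthInfo taken from
+// the peer of an incoming RPC), a handler can require a minimum protection
+// level on the connection using CheckSecurityLevel, defined later in this
+// package:
+//
+//	if err := credentials.CheckSecurityLevel(authInfo, credentials.PrivacyAndIntegrity); err != nil {
+//		return nil, status.Error(codes.PermissionDenied, err.Error())
+//	}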
+type SecurityLevel int + +const ( + // InvalidSecurityLevel indicates an invalid security level. + // The zero SecurityLevel value is invalid for backward compatibility. + InvalidSecurityLevel SecurityLevel = iota + // NoSecurity indicates a connection is insecure. + NoSecurity + // IntegrityOnly indicates a connection only provides integrity protection. + IntegrityOnly + // PrivacyAndIntegrity indicates a connection provides both privacy and integrity protection. + PrivacyAndIntegrity +) + +// String returns SecurityLevel in a string format. +func (s SecurityLevel) String() string { + switch s { + case NoSecurity: + return "NoSecurity" + case IntegrityOnly: + return "IntegrityOnly" + case PrivacyAndIntegrity: + return "PrivacyAndIntegrity" + } + return fmt.Sprintf("invalid SecurityLevel: %v", int(s)) +} + +// CommonAuthInfo contains authenticated information common to AuthInfo implementations. +// It should be embedded in a struct implementing AuthInfo to provide additional information +// about the credentials. +// +// This API is experimental. +type CommonAuthInfo struct { + SecurityLevel SecurityLevel +} + +// GetCommonAuthInfo returns the pointer to CommonAuthInfo struct. +func (c CommonAuthInfo) GetCommonAuthInfo() CommonAuthInfo { + return c +} + +// ProtocolInfo provides information regarding the gRPC wire protocol version, +// security protocol, security protocol version in use, server name, etc. +type ProtocolInfo struct { + // ProtocolVersion is the gRPC wire protocol version. + ProtocolVersion string + // SecurityProtocol is the security protocol in use. + SecurityProtocol string + // SecurityVersion is the security protocol version. It is a static version string from the + // credentials, not a value that reflects per-connection protocol negotiation. To retrieve + // details about the credentials used for a connection, use the Peer's AuthInfo field instead. + // + // Deprecated: please use Peer.AuthInfo. + SecurityVersion string + // ServerName is the user-configured server name. + ServerName string +} + +// AuthInfo defines the common interface for the auth information the users are interested in. +// A struct that implements AuthInfo should embed CommonAuthInfo by including additional +// information about the credentials in it. +type AuthInfo interface { + AuthType() string +} + +// ErrConnDispatched indicates that rawConn has been dispatched out of gRPC +// and the caller should not close rawConn. +var ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gRPC") + +// TransportCredentials defines the common interface for all the live gRPC wire +// protocols and supported transport security protocols (e.g., TLS, SSL). +type TransportCredentials interface { + // ClientHandshake does the authentication handshake specified by the + // corresponding authentication protocol on rawConn for clients. It returns + // the authenticated connection and the corresponding auth information + // about the connection. The auth information should embed CommonAuthInfo + // to return additional information about the credentials. Implementations + // must use the provided context to implement timely cancellation. gRPC + // will try to reconnect if the error returned is a temporary error + // (io.EOF, context.DeadlineExceeded or err.Temporary() == true). If the + // returned error is a wrapper error, implementations should make sure that + // the error implements Temporary() to have the correct retry behaviors. 
+ // Additionally, ClientHandshakeInfo data will be available via the context + // passed to this call. + // + // The second argument to this method is the `:authority` header value used + // while creating new streams on this connection after authentication + // succeeds. Implementations must use this as the server name during the + // authentication handshake. + // + // If the returned net.Conn is closed, it MUST close the net.Conn provided. + ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error) + // ServerHandshake does the authentication handshake for servers. It returns + // the authenticated connection and the corresponding auth information about + // the connection. The auth information should embed CommonAuthInfo to return additional information + // about the credentials. + // + // If the returned net.Conn is closed, it MUST close the net.Conn provided. + ServerHandshake(net.Conn) (net.Conn, AuthInfo, error) + // Info provides the ProtocolInfo of this TransportCredentials. + Info() ProtocolInfo + // Clone makes a copy of this TransportCredentials. + Clone() TransportCredentials + // OverrideServerName specifies the value used for the following: + // - verifying the hostname on the returned certificates + // - as SNI in the client's handshake to support virtual hosting + // - as the value for `:authority` header at stream creation time + // + // Deprecated: use grpc.WithAuthority instead. Will be supported + // throughout 1.x. + OverrideServerName(string) error +} + +// Bundle is a combination of TransportCredentials and PerRPCCredentials. +// +// It also contains a mode switching method, so it can be used as a combination +// of different credential policies. +// +// Bundle cannot be used together with individual TransportCredentials. +// PerRPCCredentials from Bundle will be appended to other PerRPCCredentials. +// +// This API is experimental. +type Bundle interface { + // TransportCredentials returns the transport credentials from the Bundle. + // + // Implementations must return non-nil transport credentials. If transport + // security is not needed by the Bundle, implementations may choose to + // return insecure.NewCredentials(). + TransportCredentials() TransportCredentials + + // PerRPCCredentials returns the per-RPC credentials from the Bundle. + // + // May be nil if per-RPC credentials are not needed. + PerRPCCredentials() PerRPCCredentials + + // NewWithMode should make a copy of Bundle, and switch mode. Modifying the + // existing Bundle may cause races. + // + // NewWithMode returns nil if the requested mode is not supported. + NewWithMode(mode string) (Bundle, error) +} + +// RequestInfo contains request data attached to the context passed to GetRequestMetadata calls. +// +// This API is experimental. +type RequestInfo struct { + // The method passed to Invoke or NewStream for this RPC. (For proto methods, this has the format "/some.Service/Method") + Method string + // AuthInfo contains the information from a security handshake (TransportCredentials.ClientHandshake, TransportCredentials.ServerHandshake) + AuthInfo AuthInfo +} + +// RequestInfoFromContext extracts the RequestInfo from the context if it exists. +// +// This API is experimental. +func RequestInfoFromContext(ctx context.Context) (ri RequestInfo, ok bool) { + ri, ok = icredentials.RequestInfoFromContext(ctx).(RequestInfo) + return ri, ok +} + +// ClientHandshakeInfo holds data to be passed to ClientHandshake. 
This makes +// it possible to pass arbitrary data to the handshaker from gRPC, resolver, +// balancer etc. Individual credential implementations control the actual +// format of the data that they are willing to receive. +// +// This API is experimental. +type ClientHandshakeInfo struct { + // Attributes contains the attributes for the address. It could be provided + // by the gRPC, resolver, balancer etc. + Attributes *attributes.Attributes +} + +// ClientHandshakeInfoFromContext returns the ClientHandshakeInfo struct stored +// in ctx. +// +// This API is experimental. +func ClientHandshakeInfoFromContext(ctx context.Context) ClientHandshakeInfo { + chi, _ := icredentials.ClientHandshakeInfoFromContext(ctx).(ClientHandshakeInfo) + return chi +} + +// CheckSecurityLevel checks if a connection's security level is greater than or equal to the specified one. +// It returns success if 1) the condition is satisified or 2) AuthInfo struct does not implement GetCommonAuthInfo() method +// or 3) CommonAuthInfo.SecurityLevel has an invalid zero value. For 2) and 3), it is for the purpose of backward-compatibility. +// +// This API is experimental. +func CheckSecurityLevel(ai AuthInfo, level SecurityLevel) error { + type internalInfo interface { + GetCommonAuthInfo() CommonAuthInfo + } + if ai == nil { + return errors.New("AuthInfo is nil") + } + if ci, ok := ai.(internalInfo); ok { + // CommonAuthInfo.SecurityLevel has an invalid value. + if ci.GetCommonAuthInfo().SecurityLevel == InvalidSecurityLevel { + return nil + } + if ci.GetCommonAuthInfo().SecurityLevel < level { + return fmt.Errorf("requires SecurityLevel %v; connection has %v", level, ci.GetCommonAuthInfo().SecurityLevel) + } + } + // The condition is satisfied or AuthInfo struct does not implement GetCommonAuthInfo() method. + return nil +} + +// ChannelzSecurityInfo defines the interface that security protocols should implement +// in order to provide security info to channelz. +// +// This API is experimental. +type ChannelzSecurityInfo interface { + GetSecurityValue() ChannelzSecurityValue +} + +// ChannelzSecurityValue defines the interface that GetSecurityValue() return value +// should satisfy. This interface should only be satisfied by *TLSChannelzSecurityValue +// and *OtherChannelzSecurityValue. +// +// This API is experimental. +type ChannelzSecurityValue interface { + isChannelzSecurityValue() +} + +// OtherChannelzSecurityValue defines the struct that non-TLS protocol should return +// from GetSecurityValue(), which contains protocol specific security info. Note +// the Value field will be sent to users of channelz requesting channel info, and +// thus sensitive info should better be avoided. +// +// This API is experimental. +type OtherChannelzSecurityValue struct { + ChannelzSecurityValue + Name string + Value proto.Message +} diff --git a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go new file mode 100644 index 00000000..82bee144 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go @@ -0,0 +1,98 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package insecure provides an implementation of the +// credentials.TransportCredentials interface which disables transport security. +package insecure + +import ( + "context" + "net" + + "google.golang.org/grpc/credentials" +) + +// NewCredentials returns a credentials which disables transport security. +// +// Note that using this credentials with per-RPC credentials which require +// transport security is incompatible and will cause grpc.Dial() to fail. +func NewCredentials() credentials.TransportCredentials { + return insecureTC{} +} + +// insecureTC implements the insecure transport credentials. The handshake +// methods simply return the passed in net.Conn and set the security level to +// NoSecurity. +type insecureTC struct{} + +func (insecureTC) ClientHandshake(ctx context.Context, _ string, conn net.Conn) (net.Conn, credentials.AuthInfo, error) { + return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil +} + +func (insecureTC) ServerHandshake(conn net.Conn) (net.Conn, credentials.AuthInfo, error) { + return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil +} + +func (insecureTC) Info() credentials.ProtocolInfo { + return credentials.ProtocolInfo{SecurityProtocol: "insecure"} +} + +func (insecureTC) Clone() credentials.TransportCredentials { + return insecureTC{} +} + +func (insecureTC) OverrideServerName(string) error { + return nil +} + +// info contains the auth information for an insecure connection. +// It implements the AuthInfo interface. +type info struct { + credentials.CommonAuthInfo +} + +// AuthType returns the type of info as a string. +func (info) AuthType() string { + return "insecure" +} + +// insecureBundle implements an insecure bundle. +// An insecure bundle provides a thin wrapper around insecureTC to support +// the credentials.Bundle interface. +type insecureBundle struct{} + +// NewBundle returns a bundle with disabled transport security and no per rpc credential. +func NewBundle() credentials.Bundle { + return insecureBundle{} +} + +// NewWithMode returns a new insecure Bundle. The mode is ignored. +func (insecureBundle) NewWithMode(string) (credentials.Bundle, error) { + return insecureBundle{}, nil +} + +// PerRPCCredentials returns an nil implementation as insecure +// bundle does not support a per rpc credential. +func (insecureBundle) PerRPCCredentials() credentials.PerRPCCredentials { + return nil +} + +// TransportCredentials returns the underlying insecure transport credential. +func (insecureBundle) TransportCredentials() credentials.TransportCredentials { + return NewCredentials() +} diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go new file mode 100644 index 00000000..5dafd34e --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -0,0 +1,251 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package credentials + +import ( + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "net" + "net/url" + "os" + + credinternal "google.golang.org/grpc/internal/credentials" +) + +// TLSInfo contains the auth information for a TLS authenticated connection. +// It implements the AuthInfo interface. +type TLSInfo struct { + State tls.ConnectionState + CommonAuthInfo + // This API is experimental. + SPIFFEID *url.URL +} + +// AuthType returns the type of TLSInfo as a string. +func (t TLSInfo) AuthType() string { + return "tls" +} + +// cipherSuiteLookup returns the string version of a TLS cipher suite ID. +func cipherSuiteLookup(cipherSuiteID uint16) string { + for _, s := range tls.CipherSuites() { + if s.ID == cipherSuiteID { + return s.Name + } + } + for _, s := range tls.InsecureCipherSuites() { + if s.ID == cipherSuiteID { + return s.Name + } + } + return fmt.Sprintf("unknown ID: %v", cipherSuiteID) +} + +// GetSecurityValue returns security info requested by channelz. +func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue { + v := &TLSChannelzSecurityValue{ + StandardName: cipherSuiteLookup(t.State.CipherSuite), + } + // Currently there's no way to get LocalCertificate info from tls package. + if len(t.State.PeerCertificates) > 0 { + v.RemoteCertificate = t.State.PeerCertificates[0].Raw + } + return v +} + +// tlsCreds is the credentials required for authenticating a connection using TLS. +type tlsCreds struct { + // TLS configuration + config *tls.Config +} + +func (c tlsCreds) Info() ProtocolInfo { + return ProtocolInfo{ + SecurityProtocol: "tls", + SecurityVersion: "1.2", + ServerName: c.config.ServerName, + } +} + +func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) { + // use local cfg to avoid clobbering ServerName if using multiple endpoints + cfg := credinternal.CloneTLSConfig(c.config) + if cfg.ServerName == "" { + serverName, _, err := net.SplitHostPort(authority) + if err != nil { + // If the authority had no host port or if the authority cannot be parsed, use it as-is. 
+ serverName = authority + } + cfg.ServerName = serverName + } + conn := tls.Client(rawConn, cfg) + errChannel := make(chan error, 1) + go func() { + errChannel <- conn.Handshake() + close(errChannel) + }() + select { + case err := <-errChannel: + if err != nil { + conn.Close() + return nil, nil, err + } + case <-ctx.Done(): + conn.Close() + return nil, nil, ctx.Err() + } + tlsInfo := TLSInfo{ + State: conn.ConnectionState(), + CommonAuthInfo: CommonAuthInfo{ + SecurityLevel: PrivacyAndIntegrity, + }, + } + id := credinternal.SPIFFEIDFromState(conn.ConnectionState()) + if id != nil { + tlsInfo.SPIFFEID = id + } + return credinternal.WrapSyscallConn(rawConn, conn), tlsInfo, nil +} + +func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) { + conn := tls.Server(rawConn, c.config) + if err := conn.Handshake(); err != nil { + conn.Close() + return nil, nil, err + } + tlsInfo := TLSInfo{ + State: conn.ConnectionState(), + CommonAuthInfo: CommonAuthInfo{ + SecurityLevel: PrivacyAndIntegrity, + }, + } + id := credinternal.SPIFFEIDFromState(conn.ConnectionState()) + if id != nil { + tlsInfo.SPIFFEID = id + } + return credinternal.WrapSyscallConn(rawConn, conn), tlsInfo, nil +} + +func (c *tlsCreds) Clone() TransportCredentials { + return NewTLS(c.config) +} + +func (c *tlsCreds) OverrideServerName(serverNameOverride string) error { + c.config.ServerName = serverNameOverride + return nil +} + +// The following cipher suites are forbidden for use with HTTP/2 by +// https://datatracker.ietf.org/doc/html/rfc7540#appendix-A +var tls12ForbiddenCipherSuites = map[uint16]struct{}{ + tls.TLS_RSA_WITH_AES_128_CBC_SHA: {}, + tls.TLS_RSA_WITH_AES_256_CBC_SHA: {}, + tls.TLS_RSA_WITH_AES_128_GCM_SHA256: {}, + tls.TLS_RSA_WITH_AES_256_GCM_SHA384: {}, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: {}, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: {}, + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: {}, + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: {}, +} + +// NewTLS uses c to construct a TransportCredentials based on TLS. +func NewTLS(c *tls.Config) TransportCredentials { + tc := &tlsCreds{credinternal.CloneTLSConfig(c)} + tc.config.NextProtos = credinternal.AppendH2ToNextProtos(tc.config.NextProtos) + // If the user did not configure a MinVersion and did not configure a + // MaxVersion < 1.2, use MinVersion=1.2, which is required by + // https://datatracker.ietf.org/doc/html/rfc7540#section-9.2 + if tc.config.MinVersion == 0 && (tc.config.MaxVersion == 0 || tc.config.MaxVersion >= tls.VersionTLS12) { + tc.config.MinVersion = tls.VersionTLS12 + } + // If the user did not configure CipherSuites, use all "secure" cipher + // suites reported by the TLS package, but remove some explicitly forbidden + // by https://datatracker.ietf.org/doc/html/rfc7540#appendix-A + if tc.config.CipherSuites == nil { + for _, cs := range tls.CipherSuites() { + if _, ok := tls12ForbiddenCipherSuites[cs.ID]; !ok { + tc.config.CipherSuites = append(tc.config.CipherSuites, cs.ID) + } + } + } + return tc +} + +// NewClientTLSFromCert constructs TLS credentials from the provided root +// certificate authority certificate(s) to validate server connections. If +// certificates to establish the identity of the client need to be included in +// the credentials (eg: for mTLS), use NewTLS instead, where a complete +// tls.Config can be specified. +// serverNameOverride is for testing only. If set to a non empty string, +// it will override the virtual host name of authority (e.g. :authority header +// field) in requests. 
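+//
+// As an illustrative sketch (the file name and target below are placeholders),
+// a client can build a pool from a CA certificate and dial with the result:
+//
+//	pem, err := os.ReadFile("ca.pem")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	cp := x509.NewCertPool()
+//	if !cp.AppendCertsFromPEM(pem) {
+//		log.Fatal("failed to append CA certificate")
+//	}
+//	creds := credentials.NewClientTLSFromCert(cp, "")
+//	conn, err := grpc.Dial("dns:///example.com:443", grpc.WithTransportCredentials(creds))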
+func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials { + return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}) +} + +// NewClientTLSFromFile constructs TLS credentials from the provided root +// certificate authority certificate file(s) to validate server connections. If +// certificates to establish the identity of the client need to be included in +// the credentials (eg: for mTLS), use NewTLS instead, where a complete +// tls.Config can be specified. +// serverNameOverride is for testing only. If set to a non empty string, +// it will override the virtual host name of authority (e.g. :authority header +// field) in requests. +func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) { + b, err := os.ReadFile(certFile) + if err != nil { + return nil, err + } + cp := x509.NewCertPool() + if !cp.AppendCertsFromPEM(b) { + return nil, fmt.Errorf("credentials: failed to append certificates") + } + return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}), nil +} + +// NewServerTLSFromCert constructs TLS credentials from the input certificate for server. +func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials { + return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}}) +} + +// NewServerTLSFromFile constructs TLS credentials from the input certificate file and key +// file for server. +func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) { + cert, err := tls.LoadX509KeyPair(certFile, keyFile) + if err != nil { + return nil, err + } + return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil +} + +// TLSChannelzSecurityValue defines the struct that TLS protocol should return +// from GetSecurityValue(), containing security info like cipher and certificate used. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type TLSChannelzSecurityValue struct { + ChannelzSecurityValue + StandardName string + LocalCertificate []byte + RemoteCertificate []byte +} diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go new file mode 100644 index 00000000..ba242618 --- /dev/null +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -0,0 +1,718 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpc + +import ( + "context" + "net" + "time" + + "google.golang.org/grpc/backoff" + "google.golang.org/grpc/channelz" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal" + internalbackoff "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/binarylog" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/stats" +) + +func init() { + internal.AddGlobalDialOptions = func(opt ...DialOption) { + globalDialOptions = append(globalDialOptions, opt...) + } + internal.ClearGlobalDialOptions = func() { + globalDialOptions = nil + } + internal.WithBinaryLogger = withBinaryLogger + internal.JoinDialOptions = newJoinDialOption + internal.DisableGlobalDialOptions = newDisableGlobalDialOptions + internal.WithRecvBufferPool = withRecvBufferPool +} + +// dialOptions configure a Dial call. dialOptions are set by the DialOption +// values passed to Dial. +type dialOptions struct { + unaryInt UnaryClientInterceptor + streamInt StreamClientInterceptor + + chainUnaryInts []UnaryClientInterceptor + chainStreamInts []StreamClientInterceptor + + cp Compressor + dc Decompressor + bs internalbackoff.Strategy + block bool + returnLastError bool + timeout time.Duration + authority string + binaryLogger binarylog.Logger + copts transport.ConnectOptions + callOptions []CallOption + channelzParentID *channelz.Identifier + disableServiceConfig bool + disableRetry bool + disableHealthCheck bool + healthCheckFunc internal.HealthChecker + minConnectTimeout func() time.Duration + defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON. + defaultServiceConfigRawJSON *string + resolvers []resolver.Builder + idleTimeout time.Duration + recvBufferPool SharedBufferPool +} + +// DialOption configures how we set up the connection. +type DialOption interface { + apply(*dialOptions) +} + +var globalDialOptions []DialOption + +// EmptyDialOption does not alter the dial configuration. It can be embedded in +// another structure to build custom dial options. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type EmptyDialOption struct{} + +func (EmptyDialOption) apply(*dialOptions) {} + +type disableGlobalDialOptions struct{} + +func (disableGlobalDialOptions) apply(*dialOptions) {} + +// newDisableGlobalDialOptions returns a DialOption that prevents the ClientConn +// from applying the global DialOptions (set via AddGlobalDialOptions). +func newDisableGlobalDialOptions() DialOption { + return &disableGlobalDialOptions{} +} + +// funcDialOption wraps a function that modifies dialOptions into an +// implementation of the DialOption interface. +type funcDialOption struct { + f func(*dialOptions) +} + +func (fdo *funcDialOption) apply(do *dialOptions) { + fdo.f(do) +} + +func newFuncDialOption(f func(*dialOptions)) *funcDialOption { + return &funcDialOption{ + f: f, + } +} + +type joinDialOption struct { + opts []DialOption +} + +func (jdo *joinDialOption) apply(do *dialOptions) { + for _, opt := range jdo.opts { + opt.apply(do) + } +} + +func newJoinDialOption(opts ...DialOption) DialOption { + return &joinDialOption{opts: opts} +} + +// WithSharedWriteBuffer allows reusing per-connection transport write buffer. 
+// If this option is set to true every connection will release the buffer after +// flushing the data on the wire. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithSharedWriteBuffer(val bool) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.SharedWriteBuffer = val + }) +} + +// WithWriteBufferSize determines how much data can be batched before doing a +// write on the wire. The corresponding memory allocation for this buffer will +// be twice the size to keep syscalls low. The default value for this buffer is +// 32KB. +// +// Zero or negative values will disable the write buffer such that each write +// will be on underlying connection. Note: A Send call may not directly +// translate to a write. +func WithWriteBufferSize(s int) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.WriteBufferSize = s + }) +} + +// WithReadBufferSize lets you set the size of read buffer, this determines how +// much data can be read at most for each read syscall. +// +// The default value for this buffer is 32KB. Zero or negative values will +// disable read buffer for a connection so data framer can access the +// underlying conn directly. +func WithReadBufferSize(s int) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.ReadBufferSize = s + }) +} + +// WithInitialWindowSize returns a DialOption which sets the value for initial +// window size on a stream. The lower bound for window size is 64K and any value +// smaller than that will be ignored. +func WithInitialWindowSize(s int32) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.InitialWindowSize = s + }) +} + +// WithInitialConnWindowSize returns a DialOption which sets the value for +// initial window size on a connection. The lower bound for window size is 64K +// and any value smaller than that will be ignored. +func WithInitialConnWindowSize(s int32) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.InitialConnWindowSize = s + }) +} + +// WithMaxMsgSize returns a DialOption which sets the maximum message size the +// client can receive. +// +// Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead. Will +// be supported throughout 1.x. +func WithMaxMsgSize(s int) DialOption { + return WithDefaultCallOptions(MaxCallRecvMsgSize(s)) +} + +// WithDefaultCallOptions returns a DialOption which sets the default +// CallOptions for calls over the connection. +func WithDefaultCallOptions(cos ...CallOption) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.callOptions = append(o.callOptions, cos...) + }) +} + +// WithCodec returns a DialOption which sets a codec for message marshaling and +// unmarshaling. +// +// Deprecated: use WithDefaultCallOptions(ForceCodec(_)) instead. Will be +// supported throughout 1.x. +func WithCodec(c Codec) DialOption { + return WithDefaultCallOptions(CallCustomCodec(c)) +} + +// WithCompressor returns a DialOption which sets a Compressor to use for +// message compression. It has lower priority than the compressor set by the +// UseCompressor CallOption. +// +// Deprecated: use UseCompressor instead. Will be supported throughout 1.x. +func WithCompressor(cp Compressor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.cp = cp + }) +} + +// WithDecompressor returns a DialOption which sets a Decompressor to use for +// incoming message decompression. 
If incoming response messages are encoded +// using the decompressor's Type(), it will be used. Otherwise, the message +// encoding will be used to look up the compressor registered via +// encoding.RegisterCompressor, which will then be used to decompress the +// message. If no compressor is registered for the encoding, an Unimplemented +// status error will be returned. +// +// Deprecated: use encoding.RegisterCompressor instead. Will be supported +// throughout 1.x. +func WithDecompressor(dc Decompressor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.dc = dc + }) +} + +// WithConnectParams configures the ClientConn to use the provided ConnectParams +// for creating and maintaining connections to servers. +// +// The backoff configuration specified as part of the ConnectParams overrides +// all defaults specified in +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. Consider +// using the backoff.DefaultConfig as a base, in cases where you want to +// override only a subset of the backoff configuration. +func WithConnectParams(p ConnectParams) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.bs = internalbackoff.Exponential{Config: p.Backoff} + o.minConnectTimeout = func() time.Duration { + return p.MinConnectTimeout + } + }) +} + +// WithBackoffMaxDelay configures the dialer to use the provided maximum delay +// when backing off after failed connection attempts. +// +// Deprecated: use WithConnectParams instead. Will be supported throughout 1.x. +func WithBackoffMaxDelay(md time.Duration) DialOption { + return WithBackoffConfig(BackoffConfig{MaxDelay: md}) +} + +// WithBackoffConfig configures the dialer to use the provided backoff +// parameters after connection failures. +// +// Deprecated: use WithConnectParams instead. Will be supported throughout 1.x. +func WithBackoffConfig(b BackoffConfig) DialOption { + bc := backoff.DefaultConfig + bc.MaxDelay = b.MaxDelay + return withBackoff(internalbackoff.Exponential{Config: bc}) +} + +// withBackoff sets the backoff strategy used for connectRetryNum after a failed +// connection attempt. +// +// This can be exported if arbitrary backoff strategies are allowed by gRPC. +func withBackoff(bs internalbackoff.Strategy) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.bs = bs + }) +} + +// WithBlock returns a DialOption which makes callers of Dial block until the +// underlying connection is up. Without this, Dial returns immediately and +// connecting the server happens in background. +// +// Use of this feature is not recommended. For more information, please see: +// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md +func WithBlock() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.block = true + }) +} + +// WithReturnConnectionError returns a DialOption which makes the client connection +// return a string containing both the last connection error that occurred and +// the context.DeadlineExceeded error. +// Implies WithBlock() +// +// Use of this feature is not recommended. For more information, please see: +// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. 
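+//
+// As an illustrative sketch (the target is a placeholder), combining this
+// option with a dial deadline surfaces the last connection error when the
+// deadline expires:
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//	defer cancel()
+//	conn, err := grpc.DialContext(ctx, "dns:///example.com:443",
+//		grpc.WithTransportCredentials(insecure.NewCredentials()),
+//		grpc.WithReturnConnectionError())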
+func WithReturnConnectionError() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.block = true + o.returnLastError = true + }) +} + +// WithInsecure returns a DialOption which disables transport security for this +// ClientConn. Under the hood, it uses insecure.NewCredentials(). +// +// Note that using this DialOption with per-RPC credentials (through +// WithCredentialsBundle or WithPerRPCCredentials) which require transport +// security is incompatible and will cause grpc.Dial() to fail. +// +// Deprecated: use WithTransportCredentials and insecure.NewCredentials() +// instead. Will be supported throughout 1.x. +func WithInsecure() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.TransportCredentials = insecure.NewCredentials() + }) +} + +// WithNoProxy returns a DialOption which disables the use of proxies for this +// ClientConn. This is ignored if WithDialer or WithContextDialer are used. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithNoProxy() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.UseProxy = false + }) +} + +// WithTransportCredentials returns a DialOption which configures a connection +// level security credentials (e.g., TLS/SSL). This should not be used together +// with WithCredentialsBundle. +func WithTransportCredentials(creds credentials.TransportCredentials) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.TransportCredentials = creds + }) +} + +// WithPerRPCCredentials returns a DialOption which sets credentials and places +// auth state on each outbound RPC. +func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.PerRPCCredentials = append(o.copts.PerRPCCredentials, creds) + }) +} + +// WithCredentialsBundle returns a DialOption to set a credentials bundle for +// the ClientConn.WithCreds. This should not be used together with +// WithTransportCredentials. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithCredentialsBundle(b credentials.Bundle) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.CredsBundle = b + }) +} + +// WithTimeout returns a DialOption that configures a timeout for dialing a +// ClientConn initially. This is valid if and only if WithBlock() is present. +// +// Deprecated: use DialContext instead of Dial and context.WithTimeout +// instead. Will be supported throughout 1.x. +func WithTimeout(d time.Duration) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.timeout = d + }) +} + +// WithContextDialer returns a DialOption that sets a dialer to create +// connections. If FailOnNonTempDialError() is set to true, and an error is +// returned by f, gRPC checks the error's Temporary() method to decide if it +// should try to reconnect to the network address. +// +// Note: All supported releases of Go (as of December 2023) override the OS +// defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive +// with OS defaults for keepalive time and interval, use a net.Dialer that sets +// the KeepAlive field to a negative value, and sets the SO_KEEPALIVE socket +// option to true from the Control field. For a concrete example of how to do +// this, see internal.NetDialerWithTCPKeepalive(). +// +// For more information, please see [issue 23459] in the Go github repo. 
+// +// [issue 23459]: https://github.com/golang/go/issues/23459 +func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.Dialer = f + }) +} + +func init() { + internal.WithHealthCheckFunc = withHealthCheckFunc +} + +// WithDialer returns a DialOption that specifies a function to use for dialing +// network addresses. If FailOnNonTempDialError() is set to true, and an error +// is returned by f, gRPC checks the error's Temporary() method to decide if it +// should try to reconnect to the network address. +// +// Deprecated: use WithContextDialer instead. Will be supported throughout +// 1.x. +func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption { + return WithContextDialer( + func(ctx context.Context, addr string) (net.Conn, error) { + if deadline, ok := ctx.Deadline(); ok { + return f(addr, time.Until(deadline)) + } + return f(addr, 0) + }) +} + +// WithStatsHandler returns a DialOption that specifies the stats handler for +// all the RPCs and underlying network connections in this ClientConn. +func WithStatsHandler(h stats.Handler) DialOption { + return newFuncDialOption(func(o *dialOptions) { + if h == nil { + logger.Error("ignoring nil parameter in grpc.WithStatsHandler ClientOption") + // Do not allow a nil stats handler, which would otherwise cause + // panics. + return + } + o.copts.StatsHandlers = append(o.copts.StatsHandlers, h) + }) +} + +// withBinaryLogger returns a DialOption that specifies the binary logger for +// this ClientConn. +func withBinaryLogger(bl binarylog.Logger) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.binaryLogger = bl + }) +} + +// FailOnNonTempDialError returns a DialOption that specifies if gRPC fails on +// non-temporary dial errors. If f is true, and dialer returns a non-temporary +// error, gRPC will fail the connection to the network address and won't try to +// reconnect. The default value of FailOnNonTempDialError is false. +// +// FailOnNonTempDialError only affects the initial dial, and does not do +// anything useful unless you are also using WithBlock(). +// +// Use of this feature is not recommended. For more information, please see: +// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func FailOnNonTempDialError(f bool) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.FailOnNonTempDialError = f + }) +} + +// WithUserAgent returns a DialOption that specifies a user agent string for all +// the RPCs. +func WithUserAgent(s string) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.UserAgent = s + " " + grpcUA + }) +} + +// WithKeepaliveParams returns a DialOption that specifies keepalive parameters +// for the client transport. +func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption { + if kp.Time < internal.KeepaliveMinPingTime { + logger.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime) + kp.Time = internal.KeepaliveMinPingTime + } + return newFuncDialOption(func(o *dialOptions) { + o.copts.KeepaliveParams = kp + }) +} + +// WithUnaryInterceptor returns a DialOption that specifies the interceptor for +// unary RPCs. 
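Because WithContextDialer hands the dialer the raw target string (grpc.Dial's default passthrough resolver leaves it untouched), one sketch, assuming a hypothetical Unix-socket path, is:

package main

import (
    "context"
    "log"
    "net"

    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials/insecure"
)

func main() {
    // Interpret the dial target as a Unix-domain socket path.
    dialer := func(ctx context.Context, addr string) (net.Conn, error) {
        return (&net.Dialer{}).DialContext(ctx, "unix", addr)
    }
    conn, err := grpc.Dial("/var/run/demo.sock", // hypothetical socket path
        grpc.WithTransportCredentials(insecure.NewCredentials()),
        grpc.WithContextDialer(dialer),
    )
    if err != nil {
        log.Fatalf("grpc.Dial: %v", err)
    }
    defer conn.Close()
}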
+func WithUnaryInterceptor(f UnaryClientInterceptor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.unaryInt = f + }) +} + +// WithChainUnaryInterceptor returns a DialOption that specifies the chained +// interceptor for unary RPCs. The first interceptor will be the outer most, +// while the last interceptor will be the inner most wrapper around the real call. +// All interceptors added by this method will be chained, and the interceptor +// defined by WithUnaryInterceptor will always be prepended to the chain. +func WithChainUnaryInterceptor(interceptors ...UnaryClientInterceptor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.chainUnaryInts = append(o.chainUnaryInts, interceptors...) + }) +} + +// WithStreamInterceptor returns a DialOption that specifies the interceptor for +// streaming RPCs. +func WithStreamInterceptor(f StreamClientInterceptor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.streamInt = f + }) +} + +// WithChainStreamInterceptor returns a DialOption that specifies the chained +// interceptor for streaming RPCs. The first interceptor will be the outer most, +// while the last interceptor will be the inner most wrapper around the real call. +// All interceptors added by this method will be chained, and the interceptor +// defined by WithStreamInterceptor will always be prepended to the chain. +func WithChainStreamInterceptor(interceptors ...StreamClientInterceptor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.chainStreamInts = append(o.chainStreamInts, interceptors...) + }) +} + +// WithAuthority returns a DialOption that specifies the value to be used as the +// :authority pseudo-header and as the server name in authentication handshake. +func WithAuthority(a string) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.authority = a + }) +} + +// WithChannelzParentID returns a DialOption that specifies the channelz ID of +// current ClientConn's parent. This function is used in nested channel creation +// (e.g. grpclb dial). +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithChannelzParentID(id *channelz.Identifier) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.channelzParentID = id + }) +} + +// WithDisableServiceConfig returns a DialOption that causes gRPC to ignore any +// service config provided by the resolver and provides a hint to the resolver +// to not fetch service configs. +// +// Note that this dial option only disables service config from resolver. If +// default service config is provided, gRPC will use the default service config. +func WithDisableServiceConfig() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.disableServiceConfig = true + }) +} + +// WithDefaultServiceConfig returns a DialOption that configures the default +// service config, which will be used in cases where: +// +// 1. WithDisableServiceConfig is also used, or +// +// 2. The name resolver does not provide a service config or provides an +// invalid service config. +// +// The parameter s is the JSON representation of the default service config. 
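To illustrate the unary interceptor chaining described above, a sketch of a timing interceptor installed with WithChainUnaryInterceptor (target and credentials are placeholders):

package main

import (
    "context"
    "log"
    "time"

    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials/insecure"
)

// timingInterceptor logs each unary RPC's method and duration, then returns
// the invoker's error unchanged.
func timingInterceptor(ctx context.Context, method string, req, reply any,
    cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
    start := time.Now()
    err := invoker(ctx, method, req, reply, cc, opts...)
    log.Printf("%s took %v (err=%v)", method, time.Since(start), err)
    return err
}

func main() {
    conn, err := grpc.Dial("localhost:50051", // placeholder target
        grpc.WithTransportCredentials(insecure.NewCredentials()),
        grpc.WithChainUnaryInterceptor(timingInterceptor),
    )
    if err != nil {
        log.Fatalf("grpc.Dial: %v", err)
    }
    defer conn.Close()
}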
+// For more information about service configs, see: +// https://github.com/grpc/grpc/blob/master/doc/service_config.md +// For a simple example of usage, see: +// examples/features/load_balancing/client/main.go +func WithDefaultServiceConfig(s string) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.defaultServiceConfigRawJSON = &s + }) +} + +// WithDisableRetry returns a DialOption that disables retries, even if the +// service config enables them. This does not impact transparent retries, which +// will happen automatically if no data is written to the wire or if the RPC is +// unprocessed by the remote server. +func WithDisableRetry() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.disableRetry = true + }) +} + +// WithMaxHeaderListSize returns a DialOption that specifies the maximum +// (uncompressed) size of header list that the client is prepared to accept. +func WithMaxHeaderListSize(s uint32) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.MaxHeaderListSize = &s + }) +} + +// WithDisableHealthCheck disables the LB channel health checking for all +// SubConns of this ClientConn. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithDisableHealthCheck() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.disableHealthCheck = true + }) +} + +// withHealthCheckFunc replaces the default health check function with the +// provided one. It makes tests easier to change the health check function. +// +// For testing purpose only. +func withHealthCheckFunc(f internal.HealthChecker) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.healthCheckFunc = f + }) +} + +func defaultDialOptions() dialOptions { + return dialOptions{ + copts: transport.ConnectOptions{ + ReadBufferSize: defaultReadBufSize, + WriteBufferSize: defaultWriteBufSize, + UseProxy: true, + UserAgent: grpcUA, + }, + bs: internalbackoff.DefaultExponential, + healthCheckFunc: internal.HealthCheckFunc, + idleTimeout: 30 * time.Minute, + recvBufferPool: nopBufferPool{}, + } +} + +// withGetMinConnectDeadline specifies the function that clientconn uses to +// get minConnectDeadline. This can be used to make connection attempts happen +// faster/slower. +// +// For testing purpose only. +func withMinConnectDeadline(f func() time.Duration) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.minConnectTimeout = f + }) +} + +// WithResolvers allows a list of resolver implementations to be registered +// locally with the ClientConn without needing to be globally registered via +// resolver.Register. They will be matched against the scheme used for the +// current Dial only, and will take precedence over the global registry. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithResolvers(rs ...resolver.Builder) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.resolvers = append(o.resolvers, rs...) + }) +} + +// WithIdleTimeout returns a DialOption that configures an idle timeout for the +// channel. If the channel is idle for the configured timeout, i.e there are no +// ongoing RPCs and no new RPCs are initiated, the channel will enter idle mode +// and as a result the name resolver and load balancer will be shut down. The +// channel will exit idle mode when the Connect() method is called or when an +// RPC is initiated. 
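A sketch of the JSON that WithDefaultServiceConfig accepts, here selecting the round_robin load-balancing policy; the target is a placeholder and the full schema lives in the service_config.md document linked above:

package main

import (
    "log"

    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials/insecure"
)

const defaultServiceConfig = `{"loadBalancingConfig": [{"round_robin":{}}]}`

func main() {
    conn, err := grpc.Dial("dns:///demo.example.com:50051", // placeholder target
        grpc.WithTransportCredentials(insecure.NewCredentials()),
        grpc.WithDefaultServiceConfig(defaultServiceConfig),
    )
    if err != nil {
        log.Fatalf("grpc.Dial: %v", err)
    }
    defer conn.Close()
}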
+// +// A default timeout of 30 minutes will be used if this dial option is not set +// at dial time and idleness can be disabled by passing a timeout of zero. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithIdleTimeout(d time.Duration) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.idleTimeout = d + }) +} + +// WithRecvBufferPool returns a DialOption that configures the ClientConn +// to use the provided shared buffer pool for parsing incoming messages. Depending +// on the application's workload, this could result in reduced memory allocation. +// +// If you are unsure about how to implement a memory pool but want to utilize one, +// begin with grpc.NewSharedBufferPool. +// +// Note: The shared buffer pool feature will not be active if any of the following +// options are used: WithStatsHandler, EnableTracing, or binary logging. In such +// cases, the shared buffer pool will be ignored. +// +// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in +// v1.60.0 or later. +func WithRecvBufferPool(bufferPool SharedBufferPool) DialOption { + return withRecvBufferPool(bufferPool) +} + +func withRecvBufferPool(bufferPool SharedBufferPool) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.recvBufferPool = bufferPool + }) +} diff --git a/vendor/google.golang.org/grpc/doc.go b/vendor/google.golang.org/grpc/doc.go new file mode 100644 index 00000000..0022859a --- /dev/null +++ b/vendor/google.golang.org/grpc/doc.go @@ -0,0 +1,26 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +//go:generate ./regenerate.sh + +/* +Package grpc implements an RPC system called gRPC. + +See grpc.io for more information about gRPC. +*/ +package grpc // import "google.golang.org/grpc" diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go new file mode 100644 index 00000000..5ebf88d7 --- /dev/null +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -0,0 +1,130 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package encoding defines the interface for the compressor and codec, and +// functions to register and retrieve compressors and codecs. +// +// # Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed in a +// later release. 
+package encoding + +import ( + "io" + "strings" + + "google.golang.org/grpc/internal/grpcutil" +) + +// Identity specifies the optional encoding for uncompressed streams. +// It is intended for grpc internal use only. +const Identity = "identity" + +// Compressor is used for compressing and decompressing when sending or +// receiving messages. +// +// If a Compressor implements `DecompressedSize(compressedBytes []byte) int`, +// gRPC will invoke it to determine the size of the buffer allocated for the +// result of decompression. A return value of -1 indicates unknown size. +type Compressor interface { + // Compress writes the data written to wc to w after compressing it. If an + // error occurs while initializing the compressor, that error is returned + // instead. + Compress(w io.Writer) (io.WriteCloser, error) + // Decompress reads data from r, decompresses it, and provides the + // uncompressed data via the returned io.Reader. If an error occurs while + // initializing the decompressor, that error is returned instead. + Decompress(r io.Reader) (io.Reader, error) + // Name is the name of the compression codec and is used to set the content + // coding header. The result must be static; the result cannot change + // between calls. + Name() string +} + +var registeredCompressor = make(map[string]Compressor) + +// RegisterCompressor registers the compressor with gRPC by its name. It can +// be activated when sending an RPC via grpc.UseCompressor(). It will be +// automatically accessed when receiving a message based on the content coding +// header. Servers also use it to send a response with the same encoding as +// the request. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple Compressors are +// registered with the same name, the one registered last will take effect. +func RegisterCompressor(c Compressor) { + registeredCompressor[c.Name()] = c + if !grpcutil.IsCompressorNameRegistered(c.Name()) { + grpcutil.RegisteredCompressorNames = append(grpcutil.RegisteredCompressorNames, c.Name()) + } +} + +// GetCompressor returns Compressor for the given compressor name. +func GetCompressor(name string) Compressor { + return registeredCompressor[name] +} + +// Codec defines the interface gRPC uses to encode and decode messages. Note +// that implementations of this interface must be thread safe; a Codec's +// methods can be called from concurrent goroutines. +type Codec interface { + // Marshal returns the wire format of v. + Marshal(v any) ([]byte, error) + // Unmarshal parses the wire format into v. + Unmarshal(data []byte, v any) error + // Name returns the name of the Codec implementation. The returned string + // will be used as part of content type in transmission. The result must be + // static; the result cannot change between calls. + Name() string +} + +var registeredCodecs = make(map[string]Codec) + +// RegisterCodec registers the provided Codec for use with all gRPC clients and +// servers. +// +// The Codec will be stored and looked up by result of its Name() method, which +// should match the content-subtype of the encoding handled by the Codec. This +// is case-insensitive, and is stored and looked up as lowercase. If the +// result of calling Name() is an empty string, RegisterCodec will panic. See +// Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. 
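As a sketch of the Compressor interface above, a hypothetical compressor named "demo-gzip" built on compress/gzip (gRPC already ships an official gzip implementation, so this is purely illustrative); a client could then opt in per call with grpc.UseCompressor("demo-gzip"):

package demo // hypothetical example package

import (
    "compress/gzip"
    "io"

    "google.golang.org/grpc/encoding"
)

type demoGzip struct{}

// Compress wraps w in a gzip writer; gRPC writes the outgoing message through it.
func (demoGzip) Compress(w io.Writer) (io.WriteCloser, error) {
    return gzip.NewWriter(w), nil
}

// Decompress wraps r so gRPC can read the uncompressed payload back out.
func (demoGzip) Decompress(r io.Reader) (io.Reader, error) {
    return gzip.NewReader(r)
}

func (demoGzip) Name() string { return "demo-gzip" } // hypothetical encoding name

func init() {
    // Per the RegisterCompressor note, registration happens at init time.
    encoding.RegisterCompressor(demoGzip{})
}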
+// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple Codecs are +// registered with the same name, the one registered last will take effect. +func RegisterCodec(codec Codec) { + if codec == nil { + panic("cannot register a nil Codec") + } + if codec.Name() == "" { + panic("cannot register Codec with empty string result for Name()") + } + contentSubtype := strings.ToLower(codec.Name()) + registeredCodecs[contentSubtype] = codec +} + +// GetCodec gets a registered Codec by content-subtype, or nil if no Codec is +// registered for the content-subtype. +// +// The content-subtype is expected to be lowercase. +func GetCodec(contentSubtype string) Codec { + return registeredCodecs[contentSubtype] +} diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go new file mode 100644 index 00000000..0ee3d3ba --- /dev/null +++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go @@ -0,0 +1,58 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package proto defines the protobuf codec. Importing this package will +// register the codec. +package proto + +import ( + "fmt" + + "github.com/golang/protobuf/proto" + "google.golang.org/grpc/encoding" +) + +// Name is the name registered for the proto compressor. +const Name = "proto" + +func init() { + encoding.RegisterCodec(codec{}) +} + +// codec is a Codec implementation with protobuf. It is the default codec for gRPC. +type codec struct{} + +func (codec) Marshal(v any) ([]byte, error) { + vv, ok := v.(proto.Message) + if !ok { + return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v) + } + return proto.Marshal(vv) +} + +func (codec) Unmarshal(data []byte, v any) error { + vv, ok := v.(proto.Message) + if !ok { + return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v) + } + return proto.Unmarshal(data, vv) +} + +func (codec) Name() string { + return Name +} diff --git a/vendor/google.golang.org/grpc/grpclog/component.go b/vendor/google.golang.org/grpc/grpclog/component.go new file mode 100644 index 00000000..ac73c9ce --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/component.go @@ -0,0 +1,117 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
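A sketch of a custom Codec, assuming a hypothetical example package: it serializes messages with encoding/json, so its content-subtype becomes "json"; a call could then select it with grpc.CallContentSubtype("json"), provided the server registers a matching codec:

package demo // hypothetical example package

import (
    "encoding/json"

    "google.golang.org/grpc/encoding"
)

// jsonCodec implements the Codec interface above using encoding/json.
type jsonCodec struct{}

func (jsonCodec) Marshal(v any) ([]byte, error) { return json.Marshal(v) }

func (jsonCodec) Unmarshal(data []byte, v any) error { return json.Unmarshal(data, v) }

func (jsonCodec) Name() string { return "json" }

func init() {
    // RegisterCodec stores the codec under the lowercased Name().
    encoding.RegisterCodec(jsonCodec{})
}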
+ * + */ + +package grpclog + +import ( + "fmt" + + "google.golang.org/grpc/internal/grpclog" +) + +// componentData records the settings for a component. +type componentData struct { + name string +} + +var cache = map[string]*componentData{} + +func (c *componentData) InfoDepth(depth int, args ...any) { + args = append([]any{"[" + string(c.name) + "]"}, args...) + grpclog.InfoDepth(depth+1, args...) +} + +func (c *componentData) WarningDepth(depth int, args ...any) { + args = append([]any{"[" + string(c.name) + "]"}, args...) + grpclog.WarningDepth(depth+1, args...) +} + +func (c *componentData) ErrorDepth(depth int, args ...any) { + args = append([]any{"[" + string(c.name) + "]"}, args...) + grpclog.ErrorDepth(depth+1, args...) +} + +func (c *componentData) FatalDepth(depth int, args ...any) { + args = append([]any{"[" + string(c.name) + "]"}, args...) + grpclog.FatalDepth(depth+1, args...) +} + +func (c *componentData) Info(args ...any) { + c.InfoDepth(1, args...) +} + +func (c *componentData) Warning(args ...any) { + c.WarningDepth(1, args...) +} + +func (c *componentData) Error(args ...any) { + c.ErrorDepth(1, args...) +} + +func (c *componentData) Fatal(args ...any) { + c.FatalDepth(1, args...) +} + +func (c *componentData) Infof(format string, args ...any) { + c.InfoDepth(1, fmt.Sprintf(format, args...)) +} + +func (c *componentData) Warningf(format string, args ...any) { + c.WarningDepth(1, fmt.Sprintf(format, args...)) +} + +func (c *componentData) Errorf(format string, args ...any) { + c.ErrorDepth(1, fmt.Sprintf(format, args...)) +} + +func (c *componentData) Fatalf(format string, args ...any) { + c.FatalDepth(1, fmt.Sprintf(format, args...)) +} + +func (c *componentData) Infoln(args ...any) { + c.InfoDepth(1, args...) +} + +func (c *componentData) Warningln(args ...any) { + c.WarningDepth(1, args...) +} + +func (c *componentData) Errorln(args ...any) { + c.ErrorDepth(1, args...) +} + +func (c *componentData) Fatalln(args ...any) { + c.FatalDepth(1, args...) +} + +func (c *componentData) V(l int) bool { + return V(l) +} + +// Component creates a new component and returns it for logging. If a component +// with the name already exists, nothing will be created and it will be +// returned. SetLoggerV2 will panic if it is called with a logger created by +// Component. +func Component(componentName string) DepthLoggerV2 { + if cData, ok := cache[componentName]; ok { + return cData + } + c := &componentData{componentName} + cache[componentName] = c + return c +} diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go new file mode 100644 index 00000000..16928c9c --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go @@ -0,0 +1,132 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpclog defines logging for grpc. +// +// All logs in transport and grpclb packages only go to verbose level 2. 
+// All logs in other packages in grpc are logged in spite of the verbosity level. +// +// In the default logger, +// severity level can be set by environment variable GRPC_GO_LOG_SEVERITY_LEVEL, +// verbosity level can be set by GRPC_GO_LOG_VERBOSITY_LEVEL. +package grpclog // import "google.golang.org/grpc/grpclog" + +import ( + "os" + + "google.golang.org/grpc/internal/grpclog" +) + +func init() { + SetLoggerV2(newLoggerV2()) +} + +// V reports whether verbosity level l is at least the requested verbose level. +func V(l int) bool { + return grpclog.Logger.V(l) +} + +// Info logs to the INFO log. +func Info(args ...any) { + grpclog.Logger.Info(args...) +} + +// Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf. +func Infof(format string, args ...any) { + grpclog.Logger.Infof(format, args...) +} + +// Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println. +func Infoln(args ...any) { + grpclog.Logger.Infoln(args...) +} + +// Warning logs to the WARNING log. +func Warning(args ...any) { + grpclog.Logger.Warning(args...) +} + +// Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf. +func Warningf(format string, args ...any) { + grpclog.Logger.Warningf(format, args...) +} + +// Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println. +func Warningln(args ...any) { + grpclog.Logger.Warningln(args...) +} + +// Error logs to the ERROR log. +func Error(args ...any) { + grpclog.Logger.Error(args...) +} + +// Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf. +func Errorf(format string, args ...any) { + grpclog.Logger.Errorf(format, args...) +} + +// Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println. +func Errorln(args ...any) { + grpclog.Logger.Errorln(args...) +} + +// Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print. +// It calls os.Exit() with exit code 1. +func Fatal(args ...any) { + grpclog.Logger.Fatal(args...) + // Make sure fatal logs will exit. + os.Exit(1) +} + +// Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf. +// It calls os.Exit() with exit code 1. +func Fatalf(format string, args ...any) { + grpclog.Logger.Fatalf(format, args...) + // Make sure fatal logs will exit. + os.Exit(1) +} + +// Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println. +// It calle os.Exit()) with exit code 1. +func Fatalln(args ...any) { + grpclog.Logger.Fatalln(args...) + // Make sure fatal logs will exit. + os.Exit(1) +} + +// Print prints to the logger. Arguments are handled in the manner of fmt.Print. +// +// Deprecated: use Info. +func Print(args ...any) { + grpclog.Logger.Info(args...) +} + +// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf. +// +// Deprecated: use Infof. +func Printf(format string, args ...any) { + grpclog.Logger.Infof(format, args...) +} + +// Println prints to the logger. Arguments are handled in the manner of fmt.Println. +// +// Deprecated: use Infoln. +func Println(args ...any) { + grpclog.Logger.Infoln(args...) +} diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go new file mode 100644 index 00000000..b1674d82 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/logger.go @@ -0,0 +1,87 @@ +/* + * + * Copyright 2015 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclog + +import "google.golang.org/grpc/internal/grpclog" + +// Logger mimics golang's standard Logger as an interface. +// +// Deprecated: use LoggerV2. +type Logger interface { + Fatal(args ...any) + Fatalf(format string, args ...any) + Fatalln(args ...any) + Print(args ...any) + Printf(format string, args ...any) + Println(args ...any) +} + +// SetLogger sets the logger that is used in grpc. Call only from +// init() functions. +// +// Deprecated: use SetLoggerV2. +func SetLogger(l Logger) { + grpclog.Logger = &loggerWrapper{Logger: l} +} + +// loggerWrapper wraps Logger into a LoggerV2. +type loggerWrapper struct { + Logger +} + +func (g *loggerWrapper) Info(args ...any) { + g.Logger.Print(args...) +} + +func (g *loggerWrapper) Infoln(args ...any) { + g.Logger.Println(args...) +} + +func (g *loggerWrapper) Infof(format string, args ...any) { + g.Logger.Printf(format, args...) +} + +func (g *loggerWrapper) Warning(args ...any) { + g.Logger.Print(args...) +} + +func (g *loggerWrapper) Warningln(args ...any) { + g.Logger.Println(args...) +} + +func (g *loggerWrapper) Warningf(format string, args ...any) { + g.Logger.Printf(format, args...) +} + +func (g *loggerWrapper) Error(args ...any) { + g.Logger.Print(args...) +} + +func (g *loggerWrapper) Errorln(args ...any) { + g.Logger.Println(args...) +} + +func (g *loggerWrapper) Errorf(format string, args ...any) { + g.Logger.Printf(format, args...) +} + +func (g *loggerWrapper) V(l int) bool { + // Returns true for all verbose level. + return true +} diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go new file mode 100644 index 00000000..ecfd36d7 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -0,0 +1,258 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclog + +import ( + "encoding/json" + "fmt" + "io" + "log" + "os" + "strconv" + "strings" + + "google.golang.org/grpc/internal/grpclog" +) + +// LoggerV2 does underlying logging work for grpclog. +type LoggerV2 interface { + // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. + Info(args ...any) + // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. + Infoln(args ...any) + // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. 
+ Infof(format string, args ...any) + // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. + Warning(args ...any) + // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. + Warningln(args ...any) + // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. + Warningf(format string, args ...any) + // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. + Error(args ...any) + // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + Errorln(args ...any) + // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + Errorf(format string, args ...any) + // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatal(args ...any) + // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalln(args ...any) + // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalf(format string, args ...any) + // V reports whether verbosity level l is at least the requested verbose level. + V(l int) bool +} + +// SetLoggerV2 sets logger that is used in grpc to a V2 logger. +// Not mutex-protected, should be called before any gRPC functions. +func SetLoggerV2(l LoggerV2) { + if _, ok := l.(*componentData); ok { + panic("cannot use component logger as grpclog logger") + } + grpclog.Logger = l + grpclog.DepthLogger, _ = l.(grpclog.DepthLoggerV2) +} + +const ( + // infoLog indicates Info severity. + infoLog int = iota + // warningLog indicates Warning severity. + warningLog + // errorLog indicates Error severity. + errorLog + // fatalLog indicates Fatal severity. + fatalLog +) + +// severityName contains the string representation of each severity. +var severityName = []string{ + infoLog: "INFO", + warningLog: "WARNING", + errorLog: "ERROR", + fatalLog: "FATAL", +} + +// loggerT is the default logger used by grpclog. +type loggerT struct { + m []*log.Logger + v int + jsonFormat bool +} + +// NewLoggerV2 creates a loggerV2 with the provided writers. +// Fatal logs will be written to errorW, warningW, infoW, followed by exit(1). +// Error logs will be written to errorW, warningW and infoW. +// Warning logs will be written to warningW and infoW. +// Info logs will be written to infoW. +func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 { + return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{}) +} + +// NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and +// verbosity level. 
+func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 { + return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{verbose: v}) +} + +type loggerV2Config struct { + verbose int + jsonFormat bool +} + +func newLoggerV2WithConfig(infoW, warningW, errorW io.Writer, c loggerV2Config) LoggerV2 { + var m []*log.Logger + flag := log.LstdFlags + if c.jsonFormat { + flag = 0 + } + m = append(m, log.New(infoW, "", flag)) + m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag)) + ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. + m = append(m, log.New(ew, "", flag)) + m = append(m, log.New(ew, "", flag)) + return &loggerT{m: m, v: c.verbose, jsonFormat: c.jsonFormat} +} + +// newLoggerV2 creates a loggerV2 to be used as default logger. +// All logs are written to stderr. +func newLoggerV2() LoggerV2 { + errorW := io.Discard + warningW := io.Discard + infoW := io.Discard + + logLevel := os.Getenv("GRPC_GO_LOG_SEVERITY_LEVEL") + switch logLevel { + case "", "ERROR", "error": // If env is unset, set level to ERROR. + errorW = os.Stderr + case "WARNING", "warning": + warningW = os.Stderr + case "INFO", "info": + infoW = os.Stderr + } + + var v int + vLevel := os.Getenv("GRPC_GO_LOG_VERBOSITY_LEVEL") + if vl, err := strconv.Atoi(vLevel); err == nil { + v = vl + } + + jsonFormat := strings.EqualFold(os.Getenv("GRPC_GO_LOG_FORMATTER"), "json") + + return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{ + verbose: v, + jsonFormat: jsonFormat, + }) +} + +func (g *loggerT) output(severity int, s string) { + sevStr := severityName[severity] + if !g.jsonFormat { + g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s)) + return + } + // TODO: we can also include the logging component, but that needs more + // (API) changes. + b, _ := json.Marshal(map[string]string{ + "severity": sevStr, + "message": s, + }) + g.m[severity].Output(2, string(b)) +} + +func (g *loggerT) Info(args ...any) { + g.output(infoLog, fmt.Sprint(args...)) +} + +func (g *loggerT) Infoln(args ...any) { + g.output(infoLog, fmt.Sprintln(args...)) +} + +func (g *loggerT) Infof(format string, args ...any) { + g.output(infoLog, fmt.Sprintf(format, args...)) +} + +func (g *loggerT) Warning(args ...any) { + g.output(warningLog, fmt.Sprint(args...)) +} + +func (g *loggerT) Warningln(args ...any) { + g.output(warningLog, fmt.Sprintln(args...)) +} + +func (g *loggerT) Warningf(format string, args ...any) { + g.output(warningLog, fmt.Sprintf(format, args...)) +} + +func (g *loggerT) Error(args ...any) { + g.output(errorLog, fmt.Sprint(args...)) +} + +func (g *loggerT) Errorln(args ...any) { + g.output(errorLog, fmt.Sprintln(args...)) +} + +func (g *loggerT) Errorf(format string, args ...any) { + g.output(errorLog, fmt.Sprintf(format, args...)) +} + +func (g *loggerT) Fatal(args ...any) { + g.output(fatalLog, fmt.Sprint(args...)) + os.Exit(1) +} + +func (g *loggerT) Fatalln(args ...any) { + g.output(fatalLog, fmt.Sprintln(args...)) + os.Exit(1) +} + +func (g *loggerT) Fatalf(format string, args ...any) { + g.output(fatalLog, fmt.Sprintf(format, args...)) + os.Exit(1) +} + +func (g *loggerT) V(l int) bool { + return l <= g.v +} + +// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements +// DepthLoggerV2, the below functions will be called with the appropriate stack +// depth set for trivial functions the logger may ignore. 
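Instead of relying on the environment variables handled by newLoggerV2 above, a program can install its own logger; a minimal sketch (hypothetical example package, run before any other gRPC call as the SetLoggerV2 comment requires):

package demo // hypothetical example package

import (
    "io"
    "os"

    "google.golang.org/grpc/grpclog"
)

func init() {
    // Each writer also receives the more severe levels, so io.Discard for
    // warningW and errorW prints every severity exactly once to stderr.
    // Verbosity 2 surfaces the transport/grpclb logs noted in the package
    // comment.
    grpclog.SetLoggerV2(grpclog.NewLoggerV2WithVerbosity(os.Stderr, io.Discard, io.Discard, 2))
}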
+// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type DepthLoggerV2 interface { + LoggerV2 + // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. + InfoDepth(depth int, args ...any) + // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. + WarningDepth(depth int, args ...any) + // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. + ErrorDepth(depth int, args ...any) + // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. + FatalDepth(depth int, args ...any) +} diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go new file mode 100644 index 00000000..877d78fc --- /dev/null +++ b/vendor/google.golang.org/grpc/interceptor.go @@ -0,0 +1,104 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" +) + +// UnaryInvoker is called by UnaryClientInterceptor to complete RPCs. +type UnaryInvoker func(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error + +// UnaryClientInterceptor intercepts the execution of a unary RPC on the client. +// Unary interceptors can be specified as a DialOption, using +// WithUnaryInterceptor() or WithChainUnaryInterceptor(), when creating a +// ClientConn. When a unary interceptor(s) is set on a ClientConn, gRPC +// delegates all unary RPC invocations to the interceptor, and it is the +// responsibility of the interceptor to call invoker to complete the processing +// of the RPC. +// +// method is the RPC name. req and reply are the corresponding request and +// response messages. cc is the ClientConn on which the RPC was invoked. invoker +// is the handler to complete the RPC and it is the responsibility of the +// interceptor to call it. opts contain all applicable call options, including +// defaults from the ClientConn as well as per-call options. +// +// The returned error must be compatible with the status package. +type UnaryClientInterceptor func(ctx context.Context, method string, req, reply any, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error + +// Streamer is called by StreamClientInterceptor to create a ClientStream. +type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) + +// StreamClientInterceptor intercepts the creation of a ClientStream. Stream +// interceptors can be specified as a DialOption, using WithStreamInterceptor() +// or WithChainStreamInterceptor(), when creating a ClientConn. When a stream +// interceptor(s) is set on the ClientConn, gRPC delegates all stream creations +// to the interceptor, and it is the responsibility of the interceptor to call +// streamer. 
+// +// desc contains a description of the stream. cc is the ClientConn on which the +// RPC was invoked. streamer is the handler to create a ClientStream and it is +// the responsibility of the interceptor to call it. opts contain all applicable +// call options, including defaults from the ClientConn as well as per-call +// options. +// +// StreamClientInterceptor may return a custom ClientStream to intercept all I/O +// operations. The returned error must be compatible with the status package. +type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) + +// UnaryServerInfo consists of various information about a unary RPC on +// server side. All per-rpc information may be mutated by the interceptor. +type UnaryServerInfo struct { + // Server is the service implementation the user provides. This is read-only. + Server any + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string +} + +// UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal +// execution of a unary RPC. +// +// If a UnaryHandler returns an error, it should either be produced by the +// status package, or be one of the context errors. Otherwise, gRPC will use +// codes.Unknown as the status code and err.Error() as the status message of the +// RPC. +type UnaryHandler func(ctx context.Context, req any) (any, error) + +// UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info +// contains all the information of this RPC the interceptor can operate on. And handler is the wrapper +// of the service method implementation. It is the responsibility of the interceptor to invoke handler +// to complete the RPC. +type UnaryServerInterceptor func(ctx context.Context, req any, info *UnaryServerInfo, handler UnaryHandler) (resp any, err error) + +// StreamServerInfo consists of various information about a streaming RPC on +// server side. All per-rpc information may be mutated by the interceptor. +type StreamServerInfo struct { + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string + // IsClientStream indicates whether the RPC is a client streaming RPC. + IsClientStream bool + // IsServerStream indicates whether the RPC is a server streaming RPC. + IsServerStream bool +} + +// StreamServerInterceptor provides a hook to intercept the execution of a streaming RPC on the server. +// info contains all the information of this RPC the interceptor can operate on. And handler is the +// service method implementation. It is the responsibility of the interceptor to invoke handler to +// complete the RPC. +type StreamServerInterceptor func(srv any, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go new file mode 100644 index 00000000..fed1c011 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/backoff/backoff.go @@ -0,0 +1,109 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
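The server-side counterpart to the client interceptors above, as a sketch: a unary server interceptor that logs the full method name before delegating to the handler (the package and constructor names are hypothetical):

package demo // hypothetical example package

import (
    "context"
    "log"

    "google.golang.org/grpc"
)

// loggingUnary matches grpc.UnaryServerInterceptor: log, delegate, pass through.
func loggingUnary(ctx context.Context, req any,
    info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
    log.Printf("unary call: %s", info.FullMethod)
    return handler(ctx, req)
}

// newServer installs the interceptor; grpc.ChainUnaryInterceptor would be used
// instead to stack several of them.
func newServer() *grpc.Server {
    return grpc.NewServer(grpc.UnaryInterceptor(loggingUnary))
}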
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package backoff implement the backoff strategy for gRPC. +// +// This is kept in internal until the gRPC project decides whether or not to +// allow alternative backoff strategies. +package backoff + +import ( + "context" + "errors" + "time" + + grpcbackoff "google.golang.org/grpc/backoff" + "google.golang.org/grpc/internal/grpcrand" +) + +// Strategy defines the methodology for backing off after a grpc connection +// failure. +type Strategy interface { + // Backoff returns the amount of time to wait before the next retry given + // the number of consecutive failures. + Backoff(retries int) time.Duration +} + +// DefaultExponential is an exponential backoff implementation using the +// default values for all the configurable knobs defined in +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +var DefaultExponential = Exponential{Config: grpcbackoff.DefaultConfig} + +// Exponential implements exponential backoff algorithm as defined in +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +type Exponential struct { + // Config contains all options to configure the backoff algorithm. + Config grpcbackoff.Config +} + +// Backoff returns the amount of time to wait before the next retry given the +// number of retries. +func (bc Exponential) Backoff(retries int) time.Duration { + if retries == 0 { + return bc.Config.BaseDelay + } + backoff, max := float64(bc.Config.BaseDelay), float64(bc.Config.MaxDelay) + for backoff < max && retries > 0 { + backoff *= bc.Config.Multiplier + retries-- + } + if backoff > max { + backoff = max + } + // Randomize backoff delays so that if a cluster of requests start at + // the same time, they won't operate in lockstep. + backoff *= 1 + bc.Config.Jitter*(grpcrand.Float64()*2-1) + if backoff < 0 { + return 0 + } + return time.Duration(backoff) +} + +// ErrResetBackoff is the error to be returned by the function executed by RunF, +// to instruct the latter to reset its backoff state. +var ErrResetBackoff = errors.New("reset backoff state") + +// RunF provides a convenient way to run a function f repeatedly until the +// context expires or f returns a non-nil error that is not ErrResetBackoff. +// When f returns ErrResetBackoff, RunF continues to run f, but resets its +// backoff state before doing so. backoff accepts an integer representing the +// number of retries, and returns the amount of time to backoff. 
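For orientation, with the defaults referenced above (1s base delay, 1.6 multiplier, 0.2 jitter, 120s cap), the nominal delays produced by Backoff grow roughly as 1s, 1.6s, 2.56s, 4.1s, 6.6s, and so on until they saturate at 120s, with the jitter term then perturbing each value by up to ±20%.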
+func RunF(ctx context.Context, f func() error, backoff func(int) time.Duration) { + attempt := 0 + timer := time.NewTimer(0) + for ctx.Err() == nil { + select { + case <-timer.C: + case <-ctx.Done(): + timer.Stop() + return + } + + err := f() + if errors.Is(err, ErrResetBackoff) { + timer.Reset(0) + attempt = 0 + continue + } + if err != nil { + return + } + timer.Reset(backoff(attempt)) + attempt++ + } +} diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go new file mode 100644 index 00000000..3c594e6e --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go @@ -0,0 +1,385 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package gracefulswitch implements a graceful switch load balancer. +package gracefulswitch + +import ( + "errors" + "fmt" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/resolver" +) + +var errBalancerClosed = errors.New("gracefulSwitchBalancer is closed") +var _ balancer.Balancer = (*Balancer)(nil) + +// NewBalancer returns a graceful switch Balancer. +func NewBalancer(cc balancer.ClientConn, opts balancer.BuildOptions) *Balancer { + return &Balancer{ + cc: cc, + bOpts: opts, + } +} + +// Balancer is a utility to gracefully switch from one balancer to +// a new balancer. It implements the balancer.Balancer interface. +type Balancer struct { + bOpts balancer.BuildOptions + cc balancer.ClientConn + + // mu protects the following fields and all fields within balancerCurrent + // and balancerPending. mu does not need to be held when calling into the + // child balancers, as all calls into these children happen only as a direct + // result of a call into the gracefulSwitchBalancer, which are also + // guaranteed to be synchronous. There is one exception: an UpdateState call + // from a child balancer when current and pending are populated can lead to + // calling Close() on the current. To prevent that racing with an + // UpdateSubConnState from the channel, we hold currentMu during Close and + // UpdateSubConnState calls. + mu sync.Mutex + balancerCurrent *balancerWrapper + balancerPending *balancerWrapper + closed bool // set to true when this balancer is closed + + // currentMu must be locked before mu. This mutex guards against this + // sequence of events: UpdateSubConnState() called, finds the + // balancerCurrent, gives up lock, updateState comes in, causes Close() on + // balancerCurrent before the UpdateSubConnState is called on the + // balancerCurrent. + currentMu sync.Mutex +} + +// swap swaps out the current lb with the pending lb and updates the ClientConn. +// The caller must hold gsb.mu. 
+func (gsb *Balancer) swap() { + gsb.cc.UpdateState(gsb.balancerPending.lastState) + cur := gsb.balancerCurrent + gsb.balancerCurrent = gsb.balancerPending + gsb.balancerPending = nil + go func() { + gsb.currentMu.Lock() + defer gsb.currentMu.Unlock() + cur.Close() + }() +} + +// Helper function that checks if the balancer passed in is current or pending. +// The caller must hold gsb.mu. +func (gsb *Balancer) balancerCurrentOrPending(bw *balancerWrapper) bool { + return bw == gsb.balancerCurrent || bw == gsb.balancerPending +} + +// SwitchTo initializes the graceful switch process, which completes based on +// connectivity state changes on the current/pending balancer. Thus, the switch +// process is not complete when this method returns. This method must be called +// synchronously alongside the rest of the balancer.Balancer methods this +// Graceful Switch Balancer implements. +func (gsb *Balancer) SwitchTo(builder balancer.Builder) error { + gsb.mu.Lock() + if gsb.closed { + gsb.mu.Unlock() + return errBalancerClosed + } + bw := &balancerWrapper{ + gsb: gsb, + lastState: balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable), + }, + subconns: make(map[balancer.SubConn]bool), + } + balToClose := gsb.balancerPending // nil if there is no pending balancer + if gsb.balancerCurrent == nil { + gsb.balancerCurrent = bw + } else { + gsb.balancerPending = bw + } + gsb.mu.Unlock() + balToClose.Close() + // This function takes a builder instead of a balancer because builder.Build + // can call back inline, and this utility needs to handle the callbacks. + newBalancer := builder.Build(bw, gsb.bOpts) + if newBalancer == nil { + // This is illegal and should never happen; we clear the balancerWrapper + // we were constructing if it happens to avoid a potential panic. + gsb.mu.Lock() + if gsb.balancerPending != nil { + gsb.balancerPending = nil + } else { + gsb.balancerCurrent = nil + } + gsb.mu.Unlock() + return balancer.ErrBadResolverState + } + + // This write doesn't need to take gsb.mu because this field never gets read + // or written to on any calls from the current or pending. Calls from grpc + // to this balancer are guaranteed to be called synchronously, so this + // bw.Balancer field will never be forwarded to until this SwitchTo() + // function returns. + bw.Balancer = newBalancer + return nil +} + +// Returns nil if the graceful switch balancer is closed. +func (gsb *Balancer) latestBalancer() *balancerWrapper { + gsb.mu.Lock() + defer gsb.mu.Unlock() + if gsb.balancerPending != nil { + return gsb.balancerPending + } + return gsb.balancerCurrent +} + +// UpdateClientConnState forwards the update to the latest balancer created. +func (gsb *Balancer) UpdateClientConnState(state balancer.ClientConnState) error { + // The resolver data is only relevant to the most recent LB Policy. + balToUpdate := gsb.latestBalancer() + if balToUpdate == nil { + return errBalancerClosed + } + // Perform this call without gsb.mu to prevent deadlocks if the child calls + // back into the channel. The latest balancer can never be closed during a + // call from the channel, even without gsb.mu held. + return balToUpdate.UpdateClientConnState(state) +} + +// ResolverError forwards the error to the latest balancer created. +func (gsb *Balancer) ResolverError(err error) { + // The resolver data is only relevant to the most recent LB Policy. 
+ balToUpdate := gsb.latestBalancer() + if balToUpdate == nil { + return + } + // Perform this call without gsb.mu to prevent deadlocks if the child calls + // back into the channel. The latest balancer can never be closed during a + // call from the channel, even without gsb.mu held. + balToUpdate.ResolverError(err) +} + +// ExitIdle forwards the call to the latest balancer created. +// +// If the latest balancer does not support ExitIdle, the subConns are +// re-connected to manually. +func (gsb *Balancer) ExitIdle() { + balToUpdate := gsb.latestBalancer() + if balToUpdate == nil { + return + } + // There is no need to protect this read with a mutex, as the write to the + // Balancer field happens in SwitchTo, which completes before this can be + // called. + if ei, ok := balToUpdate.Balancer.(balancer.ExitIdler); ok { + ei.ExitIdle() + return + } + gsb.mu.Lock() + defer gsb.mu.Unlock() + for sc := range balToUpdate.subconns { + sc.Connect() + } +} + +// updateSubConnState forwards the update to the appropriate child. +func (gsb *Balancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState, cb func(balancer.SubConnState)) { + gsb.currentMu.Lock() + defer gsb.currentMu.Unlock() + gsb.mu.Lock() + // Forward update to the appropriate child. Even if there is a pending + // balancer, the current balancer should continue to get SubConn updates to + // maintain the proper state while the pending is still connecting. + var balToUpdate *balancerWrapper + if gsb.balancerCurrent != nil && gsb.balancerCurrent.subconns[sc] { + balToUpdate = gsb.balancerCurrent + } else if gsb.balancerPending != nil && gsb.balancerPending.subconns[sc] { + balToUpdate = gsb.balancerPending + } + if balToUpdate == nil { + // SubConn belonged to a stale lb policy that has not yet fully closed, + // or the balancer was already closed. + gsb.mu.Unlock() + return + } + if state.ConnectivityState == connectivity.Shutdown { + delete(balToUpdate.subconns, sc) + } + gsb.mu.Unlock() + if cb != nil { + cb(state) + } else { + balToUpdate.UpdateSubConnState(sc, state) + } +} + +// UpdateSubConnState forwards the update to the appropriate child. +func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + gsb.updateSubConnState(sc, state, nil) +} + +// Close closes any active child balancers. +func (gsb *Balancer) Close() { + gsb.mu.Lock() + gsb.closed = true + currentBalancerToClose := gsb.balancerCurrent + gsb.balancerCurrent = nil + pendingBalancerToClose := gsb.balancerPending + gsb.balancerPending = nil + gsb.mu.Unlock() + + currentBalancerToClose.Close() + pendingBalancerToClose.Close() +} + +// balancerWrapper wraps a balancer.Balancer, and overrides some Balancer +// methods to help cleanup SubConns created by the wrapped balancer. +// +// It implements the balancer.ClientConn interface and is passed down in that +// capacity to the wrapped balancer. It maintains a set of subConns created by +// the wrapped balancer and calls from the latter to create/update/shutdown +// SubConns update this set before being forwarded to the parent ClientConn. +// State updates from the wrapped balancer can result in invocation of the +// graceful switch logic. +type balancerWrapper struct { + balancer.Balancer + gsb *Balancer + + lastState balancer.State + subconns map[balancer.SubConn]bool // subconns created by this balancer +} + +// Close closes the underlying LB policy and shuts down the subconns it +// created. 
bw must not be referenced via balancerCurrent or balancerPending in +// gsb when called. gsb.mu must not be held. Does not panic with a nil +// receiver. +func (bw *balancerWrapper) Close() { + // before Close is called. + if bw == nil { + return + } + // There is no need to protect this read with a mutex, as Close() is + // impossible to be called concurrently with the write in SwitchTo(). The + // callsites of Close() for this balancer in Graceful Switch Balancer will + // never be called until SwitchTo() returns. + bw.Balancer.Close() + bw.gsb.mu.Lock() + for sc := range bw.subconns { + sc.Shutdown() + } + bw.gsb.mu.Unlock() +} + +func (bw *balancerWrapper) UpdateState(state balancer.State) { + // Hold the mutex for this entire call to ensure it cannot occur + // concurrently with other updateState() calls. This causes updates to + // lastState and calls to cc.UpdateState to happen atomically. + bw.gsb.mu.Lock() + defer bw.gsb.mu.Unlock() + bw.lastState = state + + if !bw.gsb.balancerCurrentOrPending(bw) { + return + } + + if bw == bw.gsb.balancerCurrent { + // In the case that the current balancer exits READY, and there is a pending + // balancer, you can forward the pending balancer's cached State up to + // ClientConn and swap the pending into the current. This is because there + // is no reason to gracefully switch from and keep using the old policy as + // the ClientConn is not connected to any backends. + if state.ConnectivityState != connectivity.Ready && bw.gsb.balancerPending != nil { + bw.gsb.swap() + return + } + // Even if there is a pending balancer waiting to be gracefully switched to, + // continue to forward current balancer updates to the Client Conn. Ignoring + // state + picker from the current would cause undefined behavior/cause the + // system to behave incorrectly from the current LB policies perspective. + // Also, the current LB is still being used by grpc to choose SubConns per + // RPC, and thus should use the most updated form of the current balancer. + bw.gsb.cc.UpdateState(state) + return + } + // This method is now dealing with a state update from the pending balancer. + // If the current balancer is currently in a state other than READY, the new + // policy can be swapped into place immediately. This is because there is no + // reason to gracefully switch from and keep using the old policy as the + // ClientConn is not connected to any backends. 
+ if state.ConnectivityState != connectivity.Connecting || bw.gsb.balancerCurrent.lastState.ConnectivityState != connectivity.Ready { + bw.gsb.swap() + } +} + +func (bw *balancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { + bw.gsb.mu.Unlock() + return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw) + } + bw.gsb.mu.Unlock() + + var sc balancer.SubConn + oldListener := opts.StateListener + opts.StateListener = func(state balancer.SubConnState) { bw.gsb.updateSubConnState(sc, state, oldListener) } + sc, err := bw.gsb.cc.NewSubConn(addrs, opts) + if err != nil { + return nil, err + } + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { // balancer was closed during this call + sc.Shutdown() + bw.gsb.mu.Unlock() + return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw) + } + bw.subconns[sc] = true + bw.gsb.mu.Unlock() + return sc, nil +} + +func (bw *balancerWrapper) ResolveNow(opts resolver.ResolveNowOptions) { + // Ignore ResolveNow requests from anything other than the most recent + // balancer, because older balancers were already removed from the config. + if bw != bw.gsb.latestBalancer() { + return + } + bw.gsb.cc.ResolveNow(opts) +} + +func (bw *balancerWrapper) RemoveSubConn(sc balancer.SubConn) { + // Note: existing third party balancers may call this, so it must remain + // until RemoveSubConn is fully removed. + sc.Shutdown() +} + +func (bw *balancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { + bw.gsb.mu.Unlock() + return + } + bw.gsb.mu.Unlock() + bw.gsb.cc.UpdateAddresses(sc, addrs) +} + +func (bw *balancerWrapper) Target() string { + return bw.gsb.cc.Target() +} diff --git a/vendor/google.golang.org/grpc/internal/balancerload/load.go b/vendor/google.golang.org/grpc/internal/balancerload/load.go new file mode 100644 index 00000000..94a08d68 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/balancerload/load.go @@ -0,0 +1,46 @@ +/* + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package balancerload defines APIs to parse server loads in trailers. The +// parsed loads are sent to balancers in DoneInfo. +package balancerload + +import ( + "google.golang.org/grpc/metadata" +) + +// Parser converts loads from metadata into a concrete type. +type Parser interface { + // Parse parses loads from metadata. + Parse(md metadata.MD) any +} + +var parser Parser + +// SetParser sets the load parser. +// +// Not mutex-protected, should be called before any gRPC functions. +func SetParser(lr Parser) { + parser = lr +} + +// Parse calls parser.Read(). 
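[Editor's aside — usage sketch, not part of the vendored file.] A Parser implementation would normally be registered from an init function of another grpc-internal package, before any RPCs start; the trailer key below is hypothetical, and the returned value is what the LB policy later receives in balancer.DoneInfo.ServerLoad. Assumes imports of "google.golang.org/grpc/metadata" and this internal balancerload package.

type rawLoadParser struct{}

// Parse returns the raw value of a hypothetical "x-server-load" trailer, or
// nil when the trailer is absent.
func (rawLoadParser) Parse(md metadata.MD) any {
	if vals := md.Get("x-server-load"); len(vals) > 0 {
		return vals[0]
	}
	return nil
}

func init() {
	// Must run before any gRPC functions, per the SetParser contract above.
	balancerload.SetParser(rawLoadParser{})
}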
+func Parse(md metadata.MD) any { + if parser == nil { + return nil + } + return parser.Parse(md) +} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go new file mode 100644 index 00000000..755fdebc --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go @@ -0,0 +1,192 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package binarylog implementation binary logging as defined in +// https://github.com/grpc/proposal/blob/master/A16-binary-logging.md. +package binarylog + +import ( + "fmt" + "os" + + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/grpcutil" +) + +var grpclogLogger = grpclog.Component("binarylog") + +// Logger specifies MethodLoggers for method names with a Log call that +// takes a context. +// +// This is used in the 1.0 release of gcp/observability, and thus must not be +// deleted or changed. +type Logger interface { + GetMethodLogger(methodName string) MethodLogger +} + +// binLogger is the global binary logger for the binary. One of this should be +// built at init time from the configuration (environment variable or flags). +// +// It is used to get a MethodLogger for each individual method. +var binLogger Logger + +// SetLogger sets the binary logger. +// +// Only call this at init time. +func SetLogger(l Logger) { + binLogger = l +} + +// GetLogger gets the binary logger. +// +// Only call this at init time. +func GetLogger() Logger { + return binLogger +} + +// GetMethodLogger returns the MethodLogger for the given methodName. +// +// methodName should be in the format of "/service/method". +// +// Each MethodLogger returned by this method is a new instance. This is to +// generate sequence id within the call. +func GetMethodLogger(methodName string) MethodLogger { + if binLogger == nil { + return nil + } + return binLogger.GetMethodLogger(methodName) +} + +func init() { + const envStr = "GRPC_BINARY_LOG_FILTER" + configStr := os.Getenv(envStr) + binLogger = NewLoggerFromConfigString(configStr) +} + +// MethodLoggerConfig contains the setting for logging behavior of a method +// logger. Currently, it contains the max length of header and message. +type MethodLoggerConfig struct { + // Max length of header and message. + Header, Message uint64 +} + +// LoggerConfig contains the config for loggers to create method loggers. +type LoggerConfig struct { + All *MethodLoggerConfig + Services map[string]*MethodLoggerConfig + Methods map[string]*MethodLoggerConfig + + Blacklist map[string]struct{} +} + +type logger struct { + config LoggerConfig +} + +// NewLoggerFromConfig builds a logger with the given LoggerConfig. +func NewLoggerFromConfig(config LoggerConfig) Logger { + return &logger{config: config} +} + +// newEmptyLogger creates an empty logger. The map fields need to be filled in +// using the set* functions. 
+func newEmptyLogger() *logger { + return &logger{} +} + +// Set method logger for "*". +func (l *logger) setDefaultMethodLogger(ml *MethodLoggerConfig) error { + if l.config.All != nil { + return fmt.Errorf("conflicting global rules found") + } + l.config.All = ml + return nil +} + +// Set method logger for "service/*". +// +// New MethodLogger with same service overrides the old one. +func (l *logger) setServiceMethodLogger(service string, ml *MethodLoggerConfig) error { + if _, ok := l.config.Services[service]; ok { + return fmt.Errorf("conflicting service rules for service %v found", service) + } + if l.config.Services == nil { + l.config.Services = make(map[string]*MethodLoggerConfig) + } + l.config.Services[service] = ml + return nil +} + +// Set method logger for "service/method". +// +// New MethodLogger with same method overrides the old one. +func (l *logger) setMethodMethodLogger(method string, ml *MethodLoggerConfig) error { + if _, ok := l.config.Blacklist[method]; ok { + return fmt.Errorf("conflicting blacklist rules for method %v found", method) + } + if _, ok := l.config.Methods[method]; ok { + return fmt.Errorf("conflicting method rules for method %v found", method) + } + if l.config.Methods == nil { + l.config.Methods = make(map[string]*MethodLoggerConfig) + } + l.config.Methods[method] = ml + return nil +} + +// Set blacklist method for "-service/method". +func (l *logger) setBlacklist(method string) error { + if _, ok := l.config.Blacklist[method]; ok { + return fmt.Errorf("conflicting blacklist rules for method %v found", method) + } + if _, ok := l.config.Methods[method]; ok { + return fmt.Errorf("conflicting method rules for method %v found", method) + } + if l.config.Blacklist == nil { + l.config.Blacklist = make(map[string]struct{}) + } + l.config.Blacklist[method] = struct{}{} + return nil +} + +// getMethodLogger returns the MethodLogger for the given methodName. +// +// methodName should be in the format of "/service/method". +// +// Each MethodLogger returned by this method is a new instance. This is to +// generate sequence id within the call. +func (l *logger) GetMethodLogger(methodName string) MethodLogger { + s, m, err := grpcutil.ParseMethod(methodName) + if err != nil { + grpclogLogger.Infof("binarylogging: failed to parse %q: %v", methodName, err) + return nil + } + if ml, ok := l.config.Methods[s+"/"+m]; ok { + return NewTruncatingMethodLogger(ml.Header, ml.Message) + } + if _, ok := l.config.Blacklist[s+"/"+m]; ok { + return nil + } + if ml, ok := l.config.Services[s]; ok { + return NewTruncatingMethodLogger(ml.Header, ml.Message) + } + if l.config.All == nil { + return nil + } + return NewTruncatingMethodLogger(l.config.All.Header, l.config.All.Message) +} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go new file mode 100644 index 00000000..1ee00a39 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go @@ -0,0 +1,42 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// This file contains exported variables/functions that are exported for testing +// only. +// +// An ideal way for this would be to put those in a *_test.go but in binarylog +// package. But this doesn't work with staticcheck with go module. Error was: +// "MdToMetadataProto not declared by package binarylog". This could be caused +// by the way staticcheck looks for files for a certain package, which doesn't +// support *_test.go files. +// +// Move those to binary_test.go when staticcheck is fixed. + +package binarylog + +var ( + // AllLogger is a logger that logs all headers/messages for all RPCs. It's + // for testing only. + AllLogger = NewLoggerFromConfigString("*") + // MdToMetadataProto converts metadata to a binary logging proto message. + // It's for testing only. + MdToMetadataProto = mdToMetadataProto + // AddrToProto converts an address to a binary logging proto message. It's + // for testing only. + AddrToProto = addrToProto +) diff --git a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go new file mode 100644 index 00000000..f9e80e27 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go @@ -0,0 +1,208 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package binarylog + +import ( + "errors" + "fmt" + "regexp" + "strconv" + "strings" +) + +// NewLoggerFromConfigString reads the string and build a logger. It can be used +// to build a new logger and assign it to binarylog.Logger. +// +// Example filter config strings: +// - "" Nothing will be logged +// - "*" All headers and messages will be fully logged. +// - "*{h}" Only headers will be logged. +// - "*{m:256}" Only the first 256 bytes of each message will be logged. +// - "Foo/*" Logs every method in service Foo +// - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar +// - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method +// /Foo/Bar, logs all headers and messages in every other method in service +// Foo. +// +// If two configs exist for one certain method or service, the one specified +// later overrides the previous config. +func NewLoggerFromConfigString(s string) Logger { + if s == "" { + return nil + } + l := newEmptyLogger() + methods := strings.Split(s, ",") + for _, method := range methods { + if err := l.fillMethodLoggerWithConfigString(method); err != nil { + grpclogLogger.Warningf("failed to parse binary log config: %v", err) + return nil + } + } + return l +} + +// fillMethodLoggerWithConfigString parses config, creates TruncatingMethodLogger and adds +// it to the right map in the logger. +func (l *logger) fillMethodLoggerWithConfigString(config string) error { + // "" is invalid. 
+ if config == "" { + return errors.New("empty string is not a valid method binary logging config") + } + + // "-service/method", blacklist, no * or {} allowed. + if config[0] == '-' { + s, m, suffix, err := parseMethodConfigAndSuffix(config[1:]) + if err != nil { + return fmt.Errorf("invalid config: %q, %v", config, err) + } + if m == "*" { + return fmt.Errorf("invalid config: %q, %v", config, "* not allowed in blacklist config") + } + if suffix != "" { + return fmt.Errorf("invalid config: %q, %v", config, "header/message limit not allowed in blacklist config") + } + if err := l.setBlacklist(s + "/" + m); err != nil { + return fmt.Errorf("invalid config: %v", err) + } + return nil + } + + // "*{h:256;m:256}" + if config[0] == '*' { + hdr, msg, err := parseHeaderMessageLengthConfig(config[1:]) + if err != nil { + return fmt.Errorf("invalid config: %q, %v", config, err) + } + if err := l.setDefaultMethodLogger(&MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { + return fmt.Errorf("invalid config: %v", err) + } + return nil + } + + s, m, suffix, err := parseMethodConfigAndSuffix(config) + if err != nil { + return fmt.Errorf("invalid config: %q, %v", config, err) + } + hdr, msg, err := parseHeaderMessageLengthConfig(suffix) + if err != nil { + return fmt.Errorf("invalid header/message length config: %q, %v", suffix, err) + } + if m == "*" { + if err := l.setServiceMethodLogger(s, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { + return fmt.Errorf("invalid config: %v", err) + } + } else { + if err := l.setMethodMethodLogger(s+"/"+m, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { + return fmt.Errorf("invalid config: %v", err) + } + } + return nil +} + +const ( + // TODO: this const is only used by env_config now. But could be useful for + // other config. Move to binarylog.go if necessary. + maxUInt = ^uint64(0) + + // For "p.s/m" plus any suffix. Suffix will be parsed again. See test for + // expected output. + longMethodConfigRegexpStr = `^([\w./]+)/((?:\w+)|[*])(.+)?$` + + // For suffix from above, "{h:123,m:123}". See test for expected output. + optionalLengthRegexpStr = `(?::(\d+))?` // Optional ":123". + headerConfigRegexpStr = `^{h` + optionalLengthRegexpStr + `}$` + messageConfigRegexpStr = `^{m` + optionalLengthRegexpStr + `}$` + headerMessageConfigRegexpStr = `^{h` + optionalLengthRegexpStr + `;m` + optionalLengthRegexpStr + `}$` +) + +var ( + longMethodConfigRegexp = regexp.MustCompile(longMethodConfigRegexpStr) + headerConfigRegexp = regexp.MustCompile(headerConfigRegexpStr) + messageConfigRegexp = regexp.MustCompile(messageConfigRegexpStr) + headerMessageConfigRegexp = regexp.MustCompile(headerMessageConfigRegexpStr) +) + +// Turn "service/method{h;m}" into "service", "method", "{h;m}". +func parseMethodConfigAndSuffix(c string) (service, method, suffix string, _ error) { + // Regexp result: + // + // in: "p.s/m{h:123,m:123}", + // out: []string{"p.s/m{h:123,m:123}", "p.s", "m", "{h:123,m:123}"}, + match := longMethodConfigRegexp.FindStringSubmatch(c) + if match == nil { + return "", "", "", fmt.Errorf("%q contains invalid substring", c) + } + service = match[1] + method = match[2] + suffix = match[3] + return +} + +// Turn "{h:123;m:345}" into 123, 345. +// +// Return maxUInt if length is unspecified. +func parseHeaderMessageLengthConfig(c string) (hdrLenStr, msgLenStr uint64, err error) { + if c == "" { + return maxUInt, maxUInt, nil + } + // Header config only. 
+ if match := headerConfigRegexp.FindStringSubmatch(c); match != nil { + if s := match[1]; s != "" { + hdrLenStr, err = strconv.ParseUint(s, 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("failed to convert %q to uint", s) + } + return hdrLenStr, 0, nil + } + return maxUInt, 0, nil + } + + // Message config only. + if match := messageConfigRegexp.FindStringSubmatch(c); match != nil { + if s := match[1]; s != "" { + msgLenStr, err = strconv.ParseUint(s, 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("failed to convert %q to uint", s) + } + return 0, msgLenStr, nil + } + return 0, maxUInt, nil + } + + // Header and message config both. + if match := headerMessageConfigRegexp.FindStringSubmatch(c); match != nil { + // Both hdr and msg are specified, but one or two of them might be empty. + hdrLenStr = maxUInt + msgLenStr = maxUInt + if s := match[1]; s != "" { + hdrLenStr, err = strconv.ParseUint(s, 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("failed to convert %q to uint", s) + } + } + if s := match[2]; s != "" { + msgLenStr, err = strconv.ParseUint(s, 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("failed to convert %q to uint", s) + } + } + return hdrLenStr, msgLenStr, nil + } + return 0, 0, fmt.Errorf("%q contains invalid substring", c) +} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go new file mode 100644 index 00000000..0f31274a --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -0,0 +1,445 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package binarylog + +import ( + "context" + "net" + "strings" + "sync/atomic" + "time" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +type callIDGenerator struct { + id uint64 +} + +func (g *callIDGenerator) next() uint64 { + id := atomic.AddUint64(&g.id, 1) + return id +} + +// reset is for testing only, and doesn't need to be thread safe. +func (g *callIDGenerator) reset() { + g.id = 0 +} + +var idGen callIDGenerator + +// MethodLogger is the sub-logger for each method. +// +// This is used in the 1.0 release of gcp/observability, and thus must not be +// deleted or changed. +type MethodLogger interface { + Log(context.Context, LogEntryConfig) +} + +// TruncatingMethodLogger is a method logger that truncates headers and messages +// based on configured fields. +type TruncatingMethodLogger struct { + headerMaxLen, messageMaxLen uint64 + + callID uint64 + idWithinCallGen *callIDGenerator + + sink Sink // TODO(blog): make this plugable. +} + +// NewTruncatingMethodLogger returns a new truncating method logger. +// +// This is used in the 1.0 release of gcp/observability, and thus must not be +// deleted or changed. 
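[Editor's aside — illustrative sketch, not part of the vendored file.] The grammar parsed above is normally supplied through the GRPC_BINARY_LOG_FILTER environment variable read in this package's init; the service and method names below are made up, and the snippet assumes it runs in a package that can import "google.golang.org/grpc/internal/binarylog".

func demoFilterConfig() {
	l := binarylog.NewLoggerFromConfigString("Foo/*,-Foo/Bar,Baz/Qux{m:256}")
	_ = l.GetMethodLogger("/Foo/Other") // full headers and messages for service Foo
	_ = l.GetMethodLogger("/Foo/Bar")   // nil: the method is blacklisted
	_ = l.GetMethodLogger("/Baz/Qux")   // messages capped at 256 bytes (header limit 0)
}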
+func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger { + return &TruncatingMethodLogger{ + headerMaxLen: h, + messageMaxLen: m, + + callID: idGen.next(), + idWithinCallGen: &callIDGenerator{}, + + sink: DefaultSink, // TODO(blog): make it plugable. + } +} + +// Build is an internal only method for building the proto message out of the +// input event. It's made public to enable other library to reuse as much logic +// in TruncatingMethodLogger as possible. +func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry { + m := c.toProto() + timestamp, _ := ptypes.TimestampProto(time.Now()) + m.Timestamp = timestamp + m.CallId = ml.callID + m.SequenceIdWithinCall = ml.idWithinCallGen.next() + + switch pay := m.Payload.(type) { + case *binlogpb.GrpcLogEntry_ClientHeader: + m.PayloadTruncated = ml.truncateMetadata(pay.ClientHeader.GetMetadata()) + case *binlogpb.GrpcLogEntry_ServerHeader: + m.PayloadTruncated = ml.truncateMetadata(pay.ServerHeader.GetMetadata()) + case *binlogpb.GrpcLogEntry_Message: + m.PayloadTruncated = ml.truncateMessage(pay.Message) + } + return m +} + +// Log creates a proto binary log entry, and logs it to the sink. +func (ml *TruncatingMethodLogger) Log(ctx context.Context, c LogEntryConfig) { + ml.sink.Write(ml.Build(c)) +} + +func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *binlogpb.Metadata) (truncated bool) { + if ml.headerMaxLen == maxUInt { + return false + } + var ( + bytesLimit = ml.headerMaxLen + index int + ) + // At the end of the loop, index will be the first entry where the total + // size is greater than the limit: + // + // len(entry[:index]) <= ml.hdr && len(entry[:index+1]) > ml.hdr. + for ; index < len(mdPb.Entry); index++ { + entry := mdPb.Entry[index] + if entry.Key == "grpc-trace-bin" { + // "grpc-trace-bin" is a special key. It's kept in the log entry, + // but not counted towards the size limit. + continue + } + currentEntryLen := uint64(len(entry.GetKey())) + uint64(len(entry.GetValue())) + if currentEntryLen > bytesLimit { + break + } + bytesLimit -= currentEntryLen + } + truncated = index < len(mdPb.Entry) + mdPb.Entry = mdPb.Entry[:index] + return truncated +} + +func (ml *TruncatingMethodLogger) truncateMessage(msgPb *binlogpb.Message) (truncated bool) { + if ml.messageMaxLen == maxUInt { + return false + } + if ml.messageMaxLen >= uint64(len(msgPb.Data)) { + return false + } + msgPb.Data = msgPb.Data[:ml.messageMaxLen] + return true +} + +// LogEntryConfig represents the configuration for binary log entry. +// +// This is used in the 1.0 release of gcp/observability, and thus must not be +// deleted or changed. +type LogEntryConfig interface { + toProto() *binlogpb.GrpcLogEntry +} + +// ClientHeader configs the binary log entry to be a ClientHeader entry. +type ClientHeader struct { + OnClientSide bool + Header metadata.MD + MethodName string + Authority string + Timeout time.Duration + // PeerAddr is required only when it's on server side. + PeerAddr net.Addr +} + +func (c *ClientHeader) toProto() *binlogpb.GrpcLogEntry { + // This function doesn't need to set all the fields (e.g. seq ID). The Log + // function will set the fields when necessary. 
+ clientHeader := &binlogpb.ClientHeader{ + Metadata: mdToMetadataProto(c.Header), + MethodName: c.MethodName, + Authority: c.Authority, + } + if c.Timeout > 0 { + clientHeader.Timeout = ptypes.DurationProto(c.Timeout) + } + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER, + Payload: &binlogpb.GrpcLogEntry_ClientHeader{ + ClientHeader: clientHeader, + }, + } + if c.OnClientSide { + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT + } else { + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER + } + if c.PeerAddr != nil { + ret.Peer = addrToProto(c.PeerAddr) + } + return ret +} + +// ServerHeader configs the binary log entry to be a ServerHeader entry. +type ServerHeader struct { + OnClientSide bool + Header metadata.MD + // PeerAddr is required only when it's on client side. + PeerAddr net.Addr +} + +func (c *ServerHeader) toProto() *binlogpb.GrpcLogEntry { + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER, + Payload: &binlogpb.GrpcLogEntry_ServerHeader{ + ServerHeader: &binlogpb.ServerHeader{ + Metadata: mdToMetadataProto(c.Header), + }, + }, + } + if c.OnClientSide { + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT + } else { + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER + } + if c.PeerAddr != nil { + ret.Peer = addrToProto(c.PeerAddr) + } + return ret +} + +// ClientMessage configs the binary log entry to be a ClientMessage entry. +type ClientMessage struct { + OnClientSide bool + // Message can be a proto.Message or []byte. Other messages formats are not + // supported. + Message any +} + +func (c *ClientMessage) toProto() *binlogpb.GrpcLogEntry { + var ( + data []byte + err error + ) + if m, ok := c.Message.(proto.Message); ok { + data, err = proto.Marshal(m) + if err != nil { + grpclogLogger.Infof("binarylogging: failed to marshal proto message: %v", err) + } + } else if b, ok := c.Message.([]byte); ok { + data = b + } else { + grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte") + } + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE, + Payload: &binlogpb.GrpcLogEntry_Message{ + Message: &binlogpb.Message{ + Length: uint32(len(data)), + Data: data, + }, + }, + } + if c.OnClientSide { + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT + } else { + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER + } + return ret +} + +// ServerMessage configs the binary log entry to be a ServerMessage entry. +type ServerMessage struct { + OnClientSide bool + // Message can be a proto.Message or []byte. Other messages formats are not + // supported. + Message any +} + +func (c *ServerMessage) toProto() *binlogpb.GrpcLogEntry { + var ( + data []byte + err error + ) + if m, ok := c.Message.(proto.Message); ok { + data, err = proto.Marshal(m) + if err != nil { + grpclogLogger.Infof("binarylogging: failed to marshal proto message: %v", err) + } + } else if b, ok := c.Message.([]byte); ok { + data = b + } else { + grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte") + } + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE, + Payload: &binlogpb.GrpcLogEntry_Message{ + Message: &binlogpb.Message{ + Length: uint32(len(data)), + Data: data, + }, + }, + } + if c.OnClientSide { + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT + } else { + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER + } + return ret +} + +// ClientHalfClose configs the binary log entry to be a ClientHalfClose entry. 
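[Editor's aside — illustrative sketch, not part of the vendored file.] One way a caller feeds an entry through a method logger; the method name, authority and timeout are invented, the logger is assumed to come from GetMethodLogger, and the snippet assumes imports of context, time, "google.golang.org/grpc/metadata" and this internal binarylog package.

func logClientHeader(ctx context.Context, ml binarylog.MethodLogger, md metadata.MD) {
	ml.Log(ctx, &binarylog.ClientHeader{
		OnClientSide: true,
		Header:       md,
		MethodName:   "/Foo/Bar",        // hypothetical fully-qualified method
		Authority:    "example.com:443", // hypothetical authority
		Timeout:      5 * time.Second,
	})
}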
+type ClientHalfClose struct { + OnClientSide bool +} + +func (c *ClientHalfClose) toProto() *binlogpb.GrpcLogEntry { + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE, + Payload: nil, // No payload here. + } + if c.OnClientSide { + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT + } else { + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER + } + return ret +} + +// ServerTrailer configs the binary log entry to be a ServerTrailer entry. +type ServerTrailer struct { + OnClientSide bool + Trailer metadata.MD + // Err is the status error. + Err error + // PeerAddr is required only when it's on client side and the RPC is trailer + // only. + PeerAddr net.Addr +} + +func (c *ServerTrailer) toProto() *binlogpb.GrpcLogEntry { + st, ok := status.FromError(c.Err) + if !ok { + grpclogLogger.Info("binarylogging: error in trailer is not a status error") + } + var ( + detailsBytes []byte + err error + ) + stProto := st.Proto() + if stProto != nil && len(stProto.Details) != 0 { + detailsBytes, err = proto.Marshal(stProto) + if err != nil { + grpclogLogger.Infof("binarylogging: failed to marshal status proto: %v", err) + } + } + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER, + Payload: &binlogpb.GrpcLogEntry_Trailer{ + Trailer: &binlogpb.Trailer{ + Metadata: mdToMetadataProto(c.Trailer), + StatusCode: uint32(st.Code()), + StatusMessage: st.Message(), + StatusDetails: detailsBytes, + }, + }, + } + if c.OnClientSide { + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT + } else { + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER + } + if c.PeerAddr != nil { + ret.Peer = addrToProto(c.PeerAddr) + } + return ret +} + +// Cancel configs the binary log entry to be a Cancel entry. +type Cancel struct { + OnClientSide bool +} + +func (c *Cancel) toProto() *binlogpb.GrpcLogEntry { + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CANCEL, + Payload: nil, + } + if c.OnClientSide { + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT + } else { + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER + } + return ret +} + +// metadataKeyOmit returns whether the metadata entry with this key should be +// omitted. +func metadataKeyOmit(key string) bool { + switch key { + case "lb-token", ":path", ":authority", "content-encoding", "content-type", "user-agent", "te": + return true + case "grpc-trace-bin": // grpc-trace-bin is special because it's visiable to users. + return false + } + return strings.HasPrefix(key, "grpc-") +} + +func mdToMetadataProto(md metadata.MD) *binlogpb.Metadata { + ret := &binlogpb.Metadata{} + for k, vv := range md { + if metadataKeyOmit(k) { + continue + } + for _, v := range vv { + ret.Entry = append(ret.Entry, + &binlogpb.MetadataEntry{ + Key: k, + Value: []byte(v), + }, + ) + } + } + return ret +} + +func addrToProto(addr net.Addr) *binlogpb.Address { + ret := &binlogpb.Address{} + switch a := addr.(type) { + case *net.TCPAddr: + if a.IP.To4() != nil { + ret.Type = binlogpb.Address_TYPE_IPV4 + } else if a.IP.To16() != nil { + ret.Type = binlogpb.Address_TYPE_IPV6 + } else { + ret.Type = binlogpb.Address_TYPE_UNKNOWN + // Do not set address and port fields. 
+ break + } + ret.Address = a.IP.String() + ret.IpPort = uint32(a.Port) + case *net.UnixAddr: + ret.Type = binlogpb.Address_TYPE_UNIX + ret.Address = a.String() + default: + ret.Type = binlogpb.Address_TYPE_UNKNOWN + } + return ret +} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/sink.go b/vendor/google.golang.org/grpc/internal/binarylog/sink.go new file mode 100644 index 00000000..264de387 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/binarylog/sink.go @@ -0,0 +1,170 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package binarylog + +import ( + "bufio" + "encoding/binary" + "io" + "sync" + "time" + + "github.com/golang/protobuf/proto" + binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" +) + +var ( + // DefaultSink is the sink where the logs will be written to. It's exported + // for the binarylog package to update. + DefaultSink Sink = &noopSink{} // TODO(blog): change this default (file in /tmp). +) + +// Sink writes log entry into the binary log sink. +// +// sink is a copy of the exported binarylog.Sink, to avoid circular dependency. +type Sink interface { + // Write will be called to write the log entry into the sink. + // + // It should be thread-safe so it can be called in parallel. + Write(*binlogpb.GrpcLogEntry) error + // Close will be called when the Sink is replaced by a new Sink. + Close() error +} + +type noopSink struct{} + +func (ns *noopSink) Write(*binlogpb.GrpcLogEntry) error { return nil } +func (ns *noopSink) Close() error { return nil } + +// newWriterSink creates a binary log sink with the given writer. +// +// Write() marshals the proto message and writes it to the given writer. Each +// message is prefixed with a 4 byte big endian unsigned integer as the length. +// +// No buffer is done, Close() doesn't try to close the writer. +func newWriterSink(w io.Writer) Sink { + return &writerSink{out: w} +} + +type writerSink struct { + out io.Writer +} + +func (ws *writerSink) Write(e *binlogpb.GrpcLogEntry) error { + b, err := proto.Marshal(e) + if err != nil { + grpclogLogger.Errorf("binary logging: failed to marshal proto message: %v", err) + return err + } + hdr := make([]byte, 4) + binary.BigEndian.PutUint32(hdr, uint32(len(b))) + if _, err := ws.out.Write(hdr); err != nil { + return err + } + if _, err := ws.out.Write(b); err != nil { + return err + } + return nil +} + +func (ws *writerSink) Close() error { return nil } + +type bufferedSink struct { + mu sync.Mutex + closer io.Closer + out Sink // out is built on buf. + buf *bufio.Writer // buf is kept for flush. + flusherStarted bool + + writeTicker *time.Ticker + done chan struct{} +} + +func (fs *bufferedSink) Write(e *binlogpb.GrpcLogEntry) error { + fs.mu.Lock() + defer fs.mu.Unlock() + if !fs.flusherStarted { + // Start the write loop when Write is called. 
+ fs.startFlushGoroutine() + fs.flusherStarted = true + } + if err := fs.out.Write(e); err != nil { + return err + } + return nil +} + +const ( + bufFlushDuration = 60 * time.Second +) + +func (fs *bufferedSink) startFlushGoroutine() { + fs.writeTicker = time.NewTicker(bufFlushDuration) + go func() { + for { + select { + case <-fs.done: + return + case <-fs.writeTicker.C: + } + fs.mu.Lock() + if err := fs.buf.Flush(); err != nil { + grpclogLogger.Warningf("failed to flush to Sink: %v", err) + } + fs.mu.Unlock() + } + }() +} + +func (fs *bufferedSink) Close() error { + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.writeTicker != nil { + fs.writeTicker.Stop() + } + close(fs.done) + if err := fs.buf.Flush(); err != nil { + grpclogLogger.Warningf("failed to flush to Sink: %v", err) + } + if err := fs.closer.Close(); err != nil { + grpclogLogger.Warningf("failed to close the underlying WriterCloser: %v", err) + } + if err := fs.out.Close(); err != nil { + grpclogLogger.Warningf("failed to close the Sink: %v", err) + } + return nil +} + +// NewBufferedSink creates a binary log sink with the given WriteCloser. +// +// Write() marshals the proto message and writes it to the given writer. Each +// message is prefixed with a 4 byte big endian unsigned integer as the length. +// +// Content is kept in a buffer, and is flushed every 60 seconds. +// +// Close closes the WriteCloser. +func NewBufferedSink(o io.WriteCloser) Sink { + bufW := bufio.NewWriter(o) + return &bufferedSink{ + closer: o, + out: newWriterSink(bufW), + buf: bufW, + done: make(chan struct{}), + } +} diff --git a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go new file mode 100644 index 00000000..11f91668 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go @@ -0,0 +1,116 @@ +/* + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package buffer provides an implementation of an unbounded buffer. +package buffer + +import ( + "errors" + "sync" +) + +// Unbounded is an implementation of an unbounded buffer which does not use +// extra goroutines. This is typically used for passing updates from one entity +// to another within gRPC. +// +// All methods on this type are thread-safe and don't block on anything except +// the underlying mutex used for synchronization. +// +// Unbounded supports values of any type to be stored in it by using a channel +// of `any`. This means that a call to Put() incurs an extra memory allocation, +// and also that users need a type assertion while reading. For performance +// critical code paths, using Unbounded is strongly discouraged and defining a +// new type specific implementation of this buffer is preferred. See +// internal/transport/transport.go for an example of this. +type Unbounded struct { + c chan any + closed bool + closing bool + mu sync.Mutex + backlog []any +} + +// NewUnbounded returns a new instance of Unbounded. 
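[Editor's aside — illustrative consumer-loop sketch, not part of the vendored file.] It exercises the Put/Get/Load/Close methods defined just below; handleUpdate stands in for whatever processes the dequeued value.

func consumeUpdates(buf *buffer.Unbounded, handleUpdate func(any)) {
	for v := range buf.Get() {
		// Per the Get/Load contract, ask for the next buffered value as soon
		// as one has been received; the channel is closed after Close() once
		// the backlog is drained, which ends the loop.
		buf.Load()
		handleUpdate(v)
	}
}

A producer side simply calls buf.Put(update) for each update and eventually buf.Close().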
+func NewUnbounded() *Unbounded { + return &Unbounded{c: make(chan any, 1)} +} + +var errBufferClosed = errors.New("Put called on closed buffer.Unbounded") + +// Put adds t to the unbounded buffer. +func (b *Unbounded) Put(t any) error { + b.mu.Lock() + defer b.mu.Unlock() + if b.closing { + return errBufferClosed + } + if len(b.backlog) == 0 { + select { + case b.c <- t: + return nil + default: + } + } + b.backlog = append(b.backlog, t) + return nil +} + +// Load sends the earliest buffered data, if any, onto the read channel returned +// by Get(). Users are expected to call this every time they successfully read a +// value from the read channel. +func (b *Unbounded) Load() { + b.mu.Lock() + defer b.mu.Unlock() + if len(b.backlog) > 0 { + select { + case b.c <- b.backlog[0]: + b.backlog[0] = nil + b.backlog = b.backlog[1:] + default: + } + } else if b.closing && !b.closed { + close(b.c) + } +} + +// Get returns a read channel on which values added to the buffer, via Put(), +// are sent on. +// +// Upon reading a value from this channel, users are expected to call Load() to +// send the next buffered value onto the channel if there is any. +// +// If the unbounded buffer is closed, the read channel returned by this method +// is closed after all data is drained. +func (b *Unbounded) Get() <-chan any { + return b.c +} + +// Close closes the unbounded buffer. No subsequent data may be Put(), and the +// channel returned from Get() will be closed after all the data is read and +// Load() is called for the final time. +func (b *Unbounded) Close() { + b.mu.Lock() + defer b.mu.Unlock() + if b.closing { + return + } + b.closing = true + if len(b.backlog) == 0 { + b.closed = true + close(b.c) + } +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go new file mode 100644 index 00000000..fc094f34 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -0,0 +1,763 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package channelz defines APIs for enabling channelz service, entry +// registration/deletion, and accessing channelz data. It also defines channelz +// metric struct formats. +// +// All APIs in this package are experimental. +package channelz + +import ( + "errors" + "sort" + "sync" + "sync/atomic" + "time" + + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" +) + +const ( + defaultMaxTraceEntry int32 = 30 +) + +var ( + // IDGen is the global channelz entity ID generator. It should not be used + // outside this package except by tests. + IDGen IDGenerator + + db dbWrapper + // EntryPerPage defines the number of channelz entries to be shown on a web page. + EntryPerPage = int64(50) + curState int32 + maxTraceEntry = defaultMaxTraceEntry +) + +// TurnOn turns on channelz data collection. 
+func TurnOn() { + if !IsOn() { + db.set(newChannelMap()) + IDGen.Reset() + atomic.StoreInt32(&curState, 1) + } +} + +func init() { + internal.ChannelzTurnOffForTesting = func() { + atomic.StoreInt32(&curState, 0) + } +} + +// IsOn returns whether channelz data collection is on. +func IsOn() bool { + return atomic.LoadInt32(&curState) == 1 +} + +// SetMaxTraceEntry sets maximum number of trace entry per entity (i.e. channel/subchannel). +// Setting it to 0 will disable channel tracing. +func SetMaxTraceEntry(i int32) { + atomic.StoreInt32(&maxTraceEntry, i) +} + +// ResetMaxTraceEntryToDefault resets the maximum number of trace entry per entity to default. +func ResetMaxTraceEntryToDefault() { + atomic.StoreInt32(&maxTraceEntry, defaultMaxTraceEntry) +} + +func getMaxTraceEntry() int { + i := atomic.LoadInt32(&maxTraceEntry) + return int(i) +} + +// dbWarpper wraps around a reference to internal channelz data storage, and +// provide synchronized functionality to set and get the reference. +type dbWrapper struct { + mu sync.RWMutex + DB *channelMap +} + +func (d *dbWrapper) set(db *channelMap) { + d.mu.Lock() + d.DB = db + d.mu.Unlock() +} + +func (d *dbWrapper) get() *channelMap { + d.mu.RLock() + defer d.mu.RUnlock() + return d.DB +} + +// GetTopChannels returns a slice of top channel's ChannelMetric, along with a +// boolean indicating whether there's more top channels to be queried for. +// +// The arg id specifies that only top channel with id at or above it will be included +// in the result. The returned slice is up to a length of the arg maxResults or +// EntryPerPage if maxResults is zero, and is sorted in ascending id order. +func GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) { + return db.get().GetTopChannels(id, maxResults) +} + +// GetServers returns a slice of server's ServerMetric, along with a +// boolean indicating whether there's more servers to be queried for. +// +// The arg id specifies that only server with id at or above it will be included +// in the result. The returned slice is up to a length of the arg maxResults or +// EntryPerPage if maxResults is zero, and is sorted in ascending id order. +func GetServers(id int64, maxResults int64) ([]*ServerMetric, bool) { + return db.get().GetServers(id, maxResults) +} + +// GetServerSockets returns a slice of server's (identified by id) normal socket's +// SocketMetric, along with a boolean indicating whether there's more sockets to +// be queried for. +// +// The arg startID specifies that only sockets with id at or above it will be +// included in the result. The returned slice is up to a length of the arg maxResults +// or EntryPerPage if maxResults is zero, and is sorted in ascending id order. +func GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) { + return db.get().GetServerSockets(id, startID, maxResults) +} + +// GetChannel returns the ChannelMetric for the channel (identified by id). +func GetChannel(id int64) *ChannelMetric { + return db.get().GetChannel(id) +} + +// GetSubChannel returns the SubChannelMetric for the subchannel (identified by id). +func GetSubChannel(id int64) *SubChannelMetric { + return db.get().GetSubChannel(id) +} + +// GetSocket returns the SocketInternalMetric for the socket (identified by id). +func GetSocket(id int64) *SocketMetric { + return db.get().GetSocket(id) +} + +// GetServer returns the ServerMetric for the server (identified by id). 
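[Editor's aside — paging sketch for the query API above, not part of the vendored file.] It assumes channelz.TurnOn has already been called and that this internal package is importable from the module doing the querying.

func listTopChannels() []*channelz.ChannelMetric {
	var all []*channelz.ChannelMetric
	id := int64(0)
	for {
		page, end := channelz.GetTopChannels(id, 0) // 0 => up to EntryPerPage results
		all = append(all, page...)
		if end || len(page) == 0 {
			break // the last registered top channel has been reached
		}
		id = page[len(page)-1].ID + 1 // resume just past the last id returned
	}
	return all
}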
+func GetServer(id int64) *ServerMetric { + return db.get().GetServer(id) +} + +// RegisterChannel registers the given channel c in the channelz database with +// ref as its reference name, and adds it to the child list of its parent +// (identified by pid). pid == nil means no parent. +// +// Returns a unique channelz identifier assigned to this channel. +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterChannel(c Channel, pid *Identifier, ref string) *Identifier { + id := IDGen.genID() + var parent int64 + isTopChannel := true + if pid != nil { + isTopChannel = false + parent = pid.Int() + } + + if !IsOn() { + return newIdentifer(RefChannel, id, pid) + } + + cn := &channel{ + refName: ref, + c: c, + subChans: make(map[int64]string), + nestedChans: make(map[int64]string), + id: id, + pid: parent, + trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, + } + db.get().addChannel(id, cn, isTopChannel, parent) + return newIdentifer(RefChannel, id, pid) +} + +// RegisterSubChannel registers the given subChannel c in the channelz database +// with ref as its reference name, and adds it to the child list of its parent +// (identified by pid). +// +// Returns a unique channelz identifier assigned to this subChannel. +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, error) { + if pid == nil { + return nil, errors.New("a SubChannel's parent id cannot be nil") + } + id := IDGen.genID() + if !IsOn() { + return newIdentifer(RefSubChannel, id, pid), nil + } + + sc := &subChannel{ + refName: ref, + c: c, + sockets: make(map[int64]string), + id: id, + pid: pid.Int(), + trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, + } + db.get().addSubChannel(id, sc, pid.Int()) + return newIdentifer(RefSubChannel, id, pid), nil +} + +// RegisterServer registers the given server s in channelz database. It returns +// the unique channelz tracking id assigned to this server. +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterServer(s Server, ref string) *Identifier { + id := IDGen.genID() + if !IsOn() { + return newIdentifer(RefServer, id, nil) + } + + svr := &server{ + refName: ref, + s: s, + sockets: make(map[int64]string), + listenSockets: make(map[int64]string), + id: id, + } + db.get().addServer(id, svr) + return newIdentifer(RefServer, id, nil) +} + +// RegisterListenSocket registers the given listen socket s in channelz database +// with ref as its reference name, and add it to the child list of its parent +// (identified by pid). It returns the unique channelz tracking id assigned to +// this listen socket. +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterListenSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) { + if pid == nil { + return nil, errors.New("a ListenSocket's parent id cannot be 0") + } + id := IDGen.genID() + if !IsOn() { + return newIdentifer(RefListenSocket, id, pid), nil + } + + ls := &listenSocket{refName: ref, s: s, id: id, pid: pid.Int()} + db.get().addListenSocket(id, ls, pid.Int()) + return newIdentifer(RefListenSocket, id, pid), nil +} + +// RegisterNormalSocket registers the given normal socket s in channelz database +// with ref as its reference name, and adds it to the child list of its parent +// (identified by pid). 
It returns the unique channelz tracking id assigned to +// this normal socket. +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterNormalSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) { + if pid == nil { + return nil, errors.New("a NormalSocket's parent id cannot be 0") + } + id := IDGen.genID() + if !IsOn() { + return newIdentifer(RefNormalSocket, id, pid), nil + } + + ns := &normalSocket{refName: ref, s: s, id: id, pid: pid.Int()} + db.get().addNormalSocket(id, ns, pid.Int()) + return newIdentifer(RefNormalSocket, id, pid), nil +} + +// RemoveEntry removes an entry with unique channelz tracking id to be id from +// channelz database. +// +// If channelz is not turned ON, this function is a no-op. +func RemoveEntry(id *Identifier) { + if !IsOn() { + return + } + db.get().removeEntry(id.Int()) +} + +// TraceEventDesc is what the caller of AddTraceEvent should provide to describe +// the event to be added to the channel trace. +// +// The Parent field is optional. It is used for an event that will be recorded +// in the entity's parent trace. +type TraceEventDesc struct { + Desc string + Severity Severity + Parent *TraceEventDesc +} + +// AddTraceEvent adds trace related to the entity with specified id, using the +// provided TraceEventDesc. +// +// If channelz is not turned ON, this will simply log the event descriptions. +func AddTraceEvent(l grpclog.DepthLoggerV2, id *Identifier, depth int, desc *TraceEventDesc) { + // Log only the trace description associated with the bottom most entity. + switch desc.Severity { + case CtUnknown, CtInfo: + l.InfoDepth(depth+1, withParens(id)+desc.Desc) + case CtWarning: + l.WarningDepth(depth+1, withParens(id)+desc.Desc) + case CtError: + l.ErrorDepth(depth+1, withParens(id)+desc.Desc) + } + + if getMaxTraceEntry() == 0 { + return + } + if IsOn() { + db.get().traceEvent(id.Int(), desc) + } +} + +// channelMap is the storage data structure for channelz. +// Methods of channelMap can be divided in two two categories with respect to locking. +// 1. Methods acquire the global lock. +// 2. Methods that can only be called when global lock is held. +// A second type of method need always to be called inside a first type of method. 
+type channelMap struct { + mu sync.RWMutex + topLevelChannels map[int64]struct{} + servers map[int64]*server + channels map[int64]*channel + subChannels map[int64]*subChannel + listenSockets map[int64]*listenSocket + normalSockets map[int64]*normalSocket +} + +func newChannelMap() *channelMap { + return &channelMap{ + topLevelChannels: make(map[int64]struct{}), + channels: make(map[int64]*channel), + listenSockets: make(map[int64]*listenSocket), + normalSockets: make(map[int64]*normalSocket), + servers: make(map[int64]*server), + subChannels: make(map[int64]*subChannel), + } +} + +func (c *channelMap) addServer(id int64, s *server) { + c.mu.Lock() + s.cm = c + c.servers[id] = s + c.mu.Unlock() +} + +func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64) { + c.mu.Lock() + cn.cm = c + cn.trace.cm = c + c.channels[id] = cn + if isTopChannel { + c.topLevelChannels[id] = struct{}{} + } else { + c.findEntry(pid).addChild(id, cn) + } + c.mu.Unlock() +} + +func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64) { + c.mu.Lock() + sc.cm = c + sc.trace.cm = c + c.subChannels[id] = sc + c.findEntry(pid).addChild(id, sc) + c.mu.Unlock() +} + +func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64) { + c.mu.Lock() + ls.cm = c + c.listenSockets[id] = ls + c.findEntry(pid).addChild(id, ls) + c.mu.Unlock() +} + +func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64) { + c.mu.Lock() + ns.cm = c + c.normalSockets[id] = ns + c.findEntry(pid).addChild(id, ns) + c.mu.Unlock() +} + +// removeEntry triggers the removal of an entry, which may not indeed delete the entry, if it has to +// wait on the deletion of its children and until no other entity's channel trace references it. +// It may lead to a chain of entry deletion. For example, deleting the last socket of a gracefully +// shutting down server will lead to the server being also deleted. +func (c *channelMap) removeEntry(id int64) { + c.mu.Lock() + c.findEntry(id).triggerDelete() + c.mu.Unlock() +} + +// c.mu must be held by the caller +func (c *channelMap) decrTraceRefCount(id int64) { + e := c.findEntry(id) + if v, ok := e.(tracedChannel); ok { + v.decrTraceRefCount() + e.deleteSelfIfReady() + } +} + +// c.mu must be held by the caller. +func (c *channelMap) findEntry(id int64) entry { + var v entry + var ok bool + if v, ok = c.channels[id]; ok { + return v + } + if v, ok = c.subChannels[id]; ok { + return v + } + if v, ok = c.servers[id]; ok { + return v + } + if v, ok = c.listenSockets[id]; ok { + return v + } + if v, ok = c.normalSockets[id]; ok { + return v + } + return &dummyEntry{idNotFound: id} +} + +// c.mu must be held by the caller +// deleteEntry simply deletes an entry from the channelMap. Before calling this +// method, caller must check this entry is ready to be deleted, i.e removeEntry() +// has been called on it, and no children still exist. +// Conditionals are ordered by the expected frequency of deletion of each entity +// type, in order to optimize performance. 
+func (c *channelMap) deleteEntry(id int64) { + var ok bool + if _, ok = c.normalSockets[id]; ok { + delete(c.normalSockets, id) + return + } + if _, ok = c.subChannels[id]; ok { + delete(c.subChannels, id) + return + } + if _, ok = c.channels[id]; ok { + delete(c.channels, id) + delete(c.topLevelChannels, id) + return + } + if _, ok = c.listenSockets[id]; ok { + delete(c.listenSockets, id) + return + } + if _, ok = c.servers[id]; ok { + delete(c.servers, id) + return + } +} + +func (c *channelMap) traceEvent(id int64, desc *TraceEventDesc) { + c.mu.Lock() + child := c.findEntry(id) + childTC, ok := child.(tracedChannel) + if !ok { + c.mu.Unlock() + return + } + childTC.getChannelTrace().append(&TraceEvent{Desc: desc.Desc, Severity: desc.Severity, Timestamp: time.Now()}) + if desc.Parent != nil { + parent := c.findEntry(child.getParentID()) + var chanType RefChannelType + switch child.(type) { + case *channel: + chanType = RefChannel + case *subChannel: + chanType = RefSubChannel + } + if parentTC, ok := parent.(tracedChannel); ok { + parentTC.getChannelTrace().append(&TraceEvent{ + Desc: desc.Parent.Desc, + Severity: desc.Parent.Severity, + Timestamp: time.Now(), + RefID: id, + RefName: childTC.getRefName(), + RefType: chanType, + }) + childTC.incrTraceRefCount() + } + } + c.mu.Unlock() +} + +type int64Slice []int64 + +func (s int64Slice) Len() int { return len(s) } +func (s int64Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s int64Slice) Less(i, j int) bool { return s[i] < s[j] } + +func copyMap(m map[int64]string) map[int64]string { + n := make(map[int64]string) + for k, v := range m { + n[k] = v + } + return n +} + +func min(a, b int64) int64 { + if a < b { + return a + } + return b +} + +func (c *channelMap) GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) { + if maxResults <= 0 { + maxResults = EntryPerPage + } + c.mu.RLock() + l := int64(len(c.topLevelChannels)) + ids := make([]int64, 0, l) + cns := make([]*channel, 0, min(l, maxResults)) + + for k := range c.topLevelChannels { + ids = append(ids, k) + } + sort.Sort(int64Slice(ids)) + idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) + count := int64(0) + var end bool + var t []*ChannelMetric + for i, v := range ids[idx:] { + if count == maxResults { + break + } + if cn, ok := c.channels[v]; ok { + cns = append(cns, cn) + t = append(t, &ChannelMetric{ + NestedChans: copyMap(cn.nestedChans), + SubChans: copyMap(cn.subChans), + }) + count++ + } + if i == len(ids[idx:])-1 { + end = true + break + } + } + c.mu.RUnlock() + if count == 0 { + end = true + } + + for i, cn := range cns { + t[i].ChannelData = cn.c.ChannelzMetric() + t[i].ID = cn.id + t[i].RefName = cn.refName + t[i].Trace = cn.trace.dumpData() + } + return t, end +} + +func (c *channelMap) GetServers(id, maxResults int64) ([]*ServerMetric, bool) { + if maxResults <= 0 { + maxResults = EntryPerPage + } + c.mu.RLock() + l := int64(len(c.servers)) + ids := make([]int64, 0, l) + ss := make([]*server, 0, min(l, maxResults)) + for k := range c.servers { + ids = append(ids, k) + } + sort.Sort(int64Slice(ids)) + idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) + count := int64(0) + var end bool + var s []*ServerMetric + for i, v := range ids[idx:] { + if count == maxResults { + break + } + if svr, ok := c.servers[v]; ok { + ss = append(ss, svr) + s = append(s, &ServerMetric{ + ListenSockets: copyMap(svr.listenSockets), + }) + count++ + } + if i == len(ids[idx:])-1 { + end = true + break + } + } + c.mu.RUnlock() 
+ if count == 0 { + end = true + } + + for i, svr := range ss { + s[i].ServerData = svr.s.ChannelzMetric() + s[i].ID = svr.id + s[i].RefName = svr.refName + } + return s, end +} + +func (c *channelMap) GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) { + if maxResults <= 0 { + maxResults = EntryPerPage + } + var svr *server + var ok bool + c.mu.RLock() + if svr, ok = c.servers[id]; !ok { + // server with id doesn't exist. + c.mu.RUnlock() + return nil, true + } + svrskts := svr.sockets + l := int64(len(svrskts)) + ids := make([]int64, 0, l) + sks := make([]*normalSocket, 0, min(l, maxResults)) + for k := range svrskts { + ids = append(ids, k) + } + sort.Sort(int64Slice(ids)) + idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= startID }) + count := int64(0) + var end bool + for i, v := range ids[idx:] { + if count == maxResults { + break + } + if ns, ok := c.normalSockets[v]; ok { + sks = append(sks, ns) + count++ + } + if i == len(ids[idx:])-1 { + end = true + break + } + } + c.mu.RUnlock() + if count == 0 { + end = true + } + s := make([]*SocketMetric, 0, len(sks)) + for _, ns := range sks { + sm := &SocketMetric{} + sm.SocketData = ns.s.ChannelzMetric() + sm.ID = ns.id + sm.RefName = ns.refName + s = append(s, sm) + } + return s, end +} + +func (c *channelMap) GetChannel(id int64) *ChannelMetric { + cm := &ChannelMetric{} + var cn *channel + var ok bool + c.mu.RLock() + if cn, ok = c.channels[id]; !ok { + // channel with id doesn't exist. + c.mu.RUnlock() + return nil + } + cm.NestedChans = copyMap(cn.nestedChans) + cm.SubChans = copyMap(cn.subChans) + // cn.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of cn.c when + // holding the lock to prevent potential data race. + chanCopy := cn.c + c.mu.RUnlock() + cm.ChannelData = chanCopy.ChannelzMetric() + cm.ID = cn.id + cm.RefName = cn.refName + cm.Trace = cn.trace.dumpData() + return cm +} + +func (c *channelMap) GetSubChannel(id int64) *SubChannelMetric { + cm := &SubChannelMetric{} + var sc *subChannel + var ok bool + c.mu.RLock() + if sc, ok = c.subChannels[id]; !ok { + // subchannel with id doesn't exist. + c.mu.RUnlock() + return nil + } + cm.Sockets = copyMap(sc.sockets) + // sc.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of sc.c when + // holding the lock to prevent potential data race. + chanCopy := sc.c + c.mu.RUnlock() + cm.ChannelData = chanCopy.ChannelzMetric() + cm.ID = sc.id + cm.RefName = sc.refName + cm.Trace = sc.trace.dumpData() + return cm +} + +func (c *channelMap) GetSocket(id int64) *SocketMetric { + sm := &SocketMetric{} + c.mu.RLock() + if ls, ok := c.listenSockets[id]; ok { + c.mu.RUnlock() + sm.SocketData = ls.s.ChannelzMetric() + sm.ID = ls.id + sm.RefName = ls.refName + return sm + } + if ns, ok := c.normalSockets[id]; ok { + c.mu.RUnlock() + sm.SocketData = ns.s.ChannelzMetric() + sm.ID = ns.id + sm.RefName = ns.refName + return sm + } + c.mu.RUnlock() + return nil +} + +func (c *channelMap) GetServer(id int64) *ServerMetric { + sm := &ServerMetric{} + var svr *server + var ok bool + c.mu.RLock() + if svr, ok = c.servers[id]; !ok { + c.mu.RUnlock() + return nil + } + sm.ListenSockets = copyMap(svr.listenSockets) + c.mu.RUnlock() + sm.ID = svr.id + sm.RefName = svr.refName + sm.ServerData = svr.s.ChannelzMetric() + return sm +} + +// IDGenerator is an incrementing atomic that tracks IDs for channelz entities. +type IDGenerator struct { + id int64 +} + +// Reset resets the generated ID back to zero. 
Should only be used at +// initialization or by tests sensitive to the ID number. +func (i *IDGenerator) Reset() { + atomic.StoreInt64(&i.id, 0) +} + +func (i *IDGenerator) genID() int64 { + return atomic.AddInt64(&i.id, 1) +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/id.go b/vendor/google.golang.org/grpc/internal/channelz/id.go new file mode 100644 index 00000000..c9a27acd --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/id.go @@ -0,0 +1,75 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import "fmt" + +// Identifier is an opaque identifier which uniquely identifies an entity in the +// channelz database. +type Identifier struct { + typ RefChannelType + id int64 + str string + pid *Identifier +} + +// Type returns the entity type corresponding to id. +func (id *Identifier) Type() RefChannelType { + return id.typ +} + +// Int returns the integer identifier corresponding to id. +func (id *Identifier) Int() int64 { + return id.id +} + +// String returns a string representation of the entity corresponding to id. +// +// This includes some information about the parent as well. Examples: +// Top-level channel: [Channel #channel-number] +// Nested channel: [Channel #parent-channel-number Channel #channel-number] +// Sub channel: [Channel #parent-channel SubChannel #subchannel-number] +func (id *Identifier) String() string { + return id.str +} + +// Equal returns true if other is the same as id. +func (id *Identifier) Equal(other *Identifier) bool { + if (id != nil) != (other != nil) { + return false + } + if id == nil && other == nil { + return true + } + return id.typ == other.typ && id.id == other.id && id.pid == other.pid +} + +// NewIdentifierForTesting returns a new opaque identifier to be used only for +// testing purposes. +func NewIdentifierForTesting(typ RefChannelType, id int64, pid *Identifier) *Identifier { + return newIdentifer(typ, id, pid) +} + +func newIdentifer(typ RefChannelType, id int64, pid *Identifier) *Identifier { + str := fmt.Sprintf("%s #%d", typ, id) + if pid != nil { + str = fmt.Sprintf("%s %s", pid, str) + } + return &Identifier{typ: typ, id: id, str: str, pid: pid} +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/logging.go b/vendor/google.golang.org/grpc/internal/channelz/logging.go new file mode 100644 index 00000000..f89e6f77 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/logging.go @@ -0,0 +1,79 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "fmt" + + "google.golang.org/grpc/grpclog" +) + +var logger = grpclog.Component("channelz") + +func withParens(id *Identifier) string { + return "[" + id.String() + "] " +} + +// Info logs and adds a trace event if channelz is on. +func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...any) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtInfo, + }) +} + +// Infof logs and adds a trace event if channelz is on. +func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprintf(format, args...), + Severity: CtInfo, + }) +} + +// Warning logs and adds a trace event if channelz is on. +func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...any) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtWarning, + }) +} + +// Warningf logs and adds a trace event if channelz is on. +func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprintf(format, args...), + Severity: CtWarning, + }) +} + +// Error logs and adds a trace event if channelz is on. +func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...any) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtError, + }) +} + +// Errorf logs and adds a trace event if channelz is on. +func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprintf(format, args...), + Severity: CtError, + }) +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go new file mode 100644 index 00000000..1d4020f5 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/types.go @@ -0,0 +1,727 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "net" + "sync" + "sync/atomic" + "time" + + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials" +) + +// entry represents a node in the channelz database. +type entry interface { + // addChild adds a child e, whose channelz id is id to child list + addChild(id int64, e entry) + // deleteChild deletes a child with channelz id to be id from child list + deleteChild(id int64) + // triggerDelete tries to delete self from channelz database. However, if child + // list is not empty, then deletion from the database is on hold until the last + // child is deleted from database. + triggerDelete() + // deleteSelfIfReady check whether triggerDelete() has been called before, and whether child + // list is now empty. If both conditions are met, then delete self from database. 
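Returning briefly to the logging helpers in logging.go above: each one couples a regular log line with a channelz trace event on the entity named by id. A small in-package sketch of how gRPC-internal code typically calls them (the function and its arguments are hypothetical):

// reportStateChange logs a connectivity change and records it as a trace event
// on the channel identified by id. Illustrative only; id is assumed to come
// from an earlier channelz registration.
func reportStateChange(id *Identifier, newState string) {
	Infof(logger, id, "Channel switched to new state: %q", newState)
	if newState == "TRANSIENT_FAILURE" {
		Warningf(logger, id, "Channel is now in %q", newState)
	}
}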
+ deleteSelfIfReady() + // getParentID returns parent ID of the entry. 0 value parent ID means no parent. + getParentID() int64 +} + +// dummyEntry is a fake entry to handle entry not found case. +type dummyEntry struct { + idNotFound int64 +} + +func (d *dummyEntry) addChild(id int64, e entry) { + // Note: It is possible for a normal program to reach here under race condition. + // For example, there could be a race between ClientConn.Close() info being propagated + // to addrConn and http2Client. ClientConn.Close() cancel the context and result + // in http2Client to error. The error info is then caught by transport monitor + // and before addrConn.tearDown() is called in side ClientConn.Close(). Therefore, + // the addrConn will create a new transport. And when registering the new transport in + // channelz, its parent addrConn could have already been torn down and deleted + // from channelz tracking, and thus reach the code here. + logger.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound) +} + +func (d *dummyEntry) deleteChild(id int64) { + // It is possible for a normal program to reach here under race condition. + // Refer to the example described in addChild(). + logger.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound) +} + +func (d *dummyEntry) triggerDelete() { + logger.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound) +} + +func (*dummyEntry) deleteSelfIfReady() { + // code should not reach here. deleteSelfIfReady is always called on an existing entry. +} + +func (*dummyEntry) getParentID() int64 { + return 0 +} + +// ChannelMetric defines the info channelz provides for a specific Channel, which +// includes ChannelInternalMetric and channelz-specific data, such as channelz id, +// child list, etc. +type ChannelMetric struct { + // ID is the channelz id of this channel. + ID int64 + // RefName is the human readable reference string of this channel. + RefName string + // ChannelData contains channel internal metric reported by the channel through + // ChannelzMetric(). + ChannelData *ChannelInternalMetric + // NestedChans tracks the nested channel type children of this channel in the format of + // a map from nested channel channelz id to corresponding reference string. + NestedChans map[int64]string + // SubChans tracks the subchannel type children of this channel in the format of a + // map from subchannel channelz id to corresponding reference string. + SubChans map[int64]string + // Sockets tracks the socket type children of this channel in the format of a map + // from socket channelz id to corresponding reference string. + // Note current grpc implementation doesn't allow channel having sockets directly, + // therefore, this is field is unused. + Sockets map[int64]string + // Trace contains the most recent traced events. + Trace *ChannelTrace +} + +// SubChannelMetric defines the info channelz provides for a specific SubChannel, +// which includes ChannelInternalMetric and channelz-specific data, such as +// channelz id, child list, etc. +type SubChannelMetric struct { + // ID is the channelz id of this subchannel. + ID int64 + // RefName is the human readable reference string of this subchannel. + RefName string + // ChannelData contains subchannel internal metric reported by the subchannel + // through ChannelzMetric(). 
+ ChannelData *ChannelInternalMetric + // NestedChans tracks the nested channel type children of this subchannel in the format of + // a map from nested channel channelz id to corresponding reference string. + // Note current grpc implementation doesn't allow subchannel to have nested channels + // as children, therefore, this field is unused. + NestedChans map[int64]string + // SubChans tracks the subchannel type children of this subchannel in the format of a + // map from subchannel channelz id to corresponding reference string. + // Note current grpc implementation doesn't allow subchannel to have subchannels + // as children, therefore, this field is unused. + SubChans map[int64]string + // Sockets tracks the socket type children of this subchannel in the format of a map + // from socket channelz id to corresponding reference string. + Sockets map[int64]string + // Trace contains the most recent traced events. + Trace *ChannelTrace +} + +// ChannelInternalMetric defines the struct that the implementor of Channel interface +// should return from ChannelzMetric(). +type ChannelInternalMetric struct { + // current connectivity state of the channel. + State connectivity.State + // The target this channel originally tried to connect to. May be absent + Target string + // The number of calls started on the channel. + CallsStarted int64 + // The number of calls that have completed with an OK status. + CallsSucceeded int64 + // The number of calls that have a completed with a non-OK status. + CallsFailed int64 + // The last time a call was started on the channel. + LastCallStartedTimestamp time.Time +} + +// ChannelTrace stores traced events on a channel/subchannel and related info. +type ChannelTrace struct { + // EventNum is the number of events that ever got traced (i.e. including those that have been deleted) + EventNum int64 + // CreationTime is the creation time of the trace. + CreationTime time.Time + // Events stores the most recent trace events (up to $maxTraceEntry, newer event will overwrite the + // oldest one) + Events []*TraceEvent +} + +// TraceEvent represent a single trace event +type TraceEvent struct { + // Desc is a simple description of the trace event. + Desc string + // Severity states the severity of this trace event. + Severity Severity + // Timestamp is the event time. + Timestamp time.Time + // RefID is the id of the entity that gets referenced in the event. RefID is 0 if no other entity is + // involved in this event. + // e.g. SubChannel (id: 4[]) Created. --> RefID = 4, RefName = "" (inside []) + RefID int64 + // RefName is the reference name for the entity that gets referenced in the event. + RefName string + // RefType indicates the referenced entity type, i.e Channel or SubChannel. + RefType RefChannelType +} + +// Channel is the interface that should be satisfied in order to be tracked by +// channelz as Channel or SubChannel. +type Channel interface { + ChannelzMetric() *ChannelInternalMetric +} + +type dummyChannel struct{} + +func (d *dummyChannel) ChannelzMetric() *ChannelInternalMetric { + return &ChannelInternalMetric{} +} + +type channel struct { + refName string + c Channel + closeCalled bool + nestedChans map[int64]string + subChans map[int64]string + id int64 + pid int64 + cm *channelMap + trace *channelTrace + // traceRefCount is the number of trace events that reference this channel. + // Non-zero traceRefCount means the trace of this channel cannot be deleted. 
+ traceRefCount int32 +} + +func (c *channel) addChild(id int64, e entry) { + switch v := e.(type) { + case *subChannel: + c.subChans[id] = v.refName + case *channel: + c.nestedChans[id] = v.refName + default: + logger.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e) + } +} + +func (c *channel) deleteChild(id int64) { + delete(c.subChans, id) + delete(c.nestedChans, id) + c.deleteSelfIfReady() +} + +func (c *channel) triggerDelete() { + c.closeCalled = true + c.deleteSelfIfReady() +} + +func (c *channel) getParentID() int64 { + return c.pid +} + +// deleteSelfFromTree tries to delete the channel from the channelz entry relation tree, which means +// deleting the channel reference from its parent's child list. +// +// In order for a channel to be deleted from the tree, it must meet the criteria that, removal of the +// corresponding grpc object has been invoked, and the channel does not have any children left. +// +// The returned boolean value indicates whether the channel has been successfully deleted from tree. +func (c *channel) deleteSelfFromTree() (deleted bool) { + if !c.closeCalled || len(c.subChans)+len(c.nestedChans) != 0 { + return false + } + // not top channel + if c.pid != 0 { + c.cm.findEntry(c.pid).deleteChild(c.id) + } + return true +} + +// deleteSelfFromMap checks whether it is valid to delete the channel from the map, which means +// deleting the channel from channelz's tracking entirely. Users can no longer use id to query the +// channel, and its memory will be garbage collected. +// +// The trace reference count of the channel must be 0 in order to be deleted from the map. This is +// specified in the channel tracing gRFC that as long as some other trace has reference to an entity, +// the trace of the referenced entity must not be deleted. In order to release the resource allocated +// by grpc, the reference to the grpc object is reset to a dummy object. +// +// deleteSelfFromMap must be called after deleteSelfFromTree returns true. +// +// It returns a bool to indicate whether the channel can be safely deleted from map. +func (c *channel) deleteSelfFromMap() (delete bool) { + if c.getTraceRefCount() != 0 { + c.c = &dummyChannel{} + return false + } + return true +} + +// deleteSelfIfReady tries to delete the channel itself from the channelz database. +// The delete process includes two steps: +// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its +// parent's child list. +// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id +// will return entry not found error. 
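Taken together, the two steps above mean a channel disappears completely only once three conditions hold: close has been requested, no child entries remain in the tree, and no trace event references it any more. An illustrative in-package restatement of that predicate (a hypothetical helper, not used by the real code):

// channelFullyDeletable reports whether deleteSelfIfReady below would remove c
// from both the relation tree and the id map.
func channelFullyDeletable(c *channel) bool {
	return c.closeCalled && // triggerDelete has been called
		len(c.subChans)+len(c.nestedChans) == 0 && // no children left in the tree
		c.getTraceRefCount() == 0 // no trace event still references this channel
}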
+func (c *channel) deleteSelfIfReady() { + if !c.deleteSelfFromTree() { + return + } + if !c.deleteSelfFromMap() { + return + } + c.cm.deleteEntry(c.id) + c.trace.clear() +} + +func (c *channel) getChannelTrace() *channelTrace { + return c.trace +} + +func (c *channel) incrTraceRefCount() { + atomic.AddInt32(&c.traceRefCount, 1) +} + +func (c *channel) decrTraceRefCount() { + atomic.AddInt32(&c.traceRefCount, -1) +} + +func (c *channel) getTraceRefCount() int { + i := atomic.LoadInt32(&c.traceRefCount) + return int(i) +} + +func (c *channel) getRefName() string { + return c.refName +} + +type subChannel struct { + refName string + c Channel + closeCalled bool + sockets map[int64]string + id int64 + pid int64 + cm *channelMap + trace *channelTrace + traceRefCount int32 +} + +func (sc *subChannel) addChild(id int64, e entry) { + if v, ok := e.(*normalSocket); ok { + sc.sockets[id] = v.refName + } else { + logger.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e) + } +} + +func (sc *subChannel) deleteChild(id int64) { + delete(sc.sockets, id) + sc.deleteSelfIfReady() +} + +func (sc *subChannel) triggerDelete() { + sc.closeCalled = true + sc.deleteSelfIfReady() +} + +func (sc *subChannel) getParentID() int64 { + return sc.pid +} + +// deleteSelfFromTree tries to delete the subchannel from the channelz entry relation tree, which +// means deleting the subchannel reference from its parent's child list. +// +// In order for a subchannel to be deleted from the tree, it must meet the criteria that, removal of +// the corresponding grpc object has been invoked, and the subchannel does not have any children left. +// +// The returned boolean value indicates whether the channel has been successfully deleted from tree. +func (sc *subChannel) deleteSelfFromTree() (deleted bool) { + if !sc.closeCalled || len(sc.sockets) != 0 { + return false + } + sc.cm.findEntry(sc.pid).deleteChild(sc.id) + return true +} + +// deleteSelfFromMap checks whether it is valid to delete the subchannel from the map, which means +// deleting the subchannel from channelz's tracking entirely. Users can no longer use id to query +// the subchannel, and its memory will be garbage collected. +// +// The trace reference count of the subchannel must be 0 in order to be deleted from the map. This is +// specified in the channel tracing gRFC that as long as some other trace has reference to an entity, +// the trace of the referenced entity must not be deleted. In order to release the resource allocated +// by grpc, the reference to the grpc object is reset to a dummy object. +// +// deleteSelfFromMap must be called after deleteSelfFromTree returns true. +// +// It returns a bool to indicate whether the channel can be safely deleted from map. +func (sc *subChannel) deleteSelfFromMap() (delete bool) { + if sc.getTraceRefCount() != 0 { + // free the grpc struct (i.e. addrConn) + sc.c = &dummyChannel{} + return false + } + return true +} + +// deleteSelfIfReady tries to delete the subchannel itself from the channelz database. +// The delete process includes two steps: +// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from +// its parent's child list. +// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup +// by id will return entry not found error. 
+func (sc *subChannel) deleteSelfIfReady() { + if !sc.deleteSelfFromTree() { + return + } + if !sc.deleteSelfFromMap() { + return + } + sc.cm.deleteEntry(sc.id) + sc.trace.clear() +} + +func (sc *subChannel) getChannelTrace() *channelTrace { + return sc.trace +} + +func (sc *subChannel) incrTraceRefCount() { + atomic.AddInt32(&sc.traceRefCount, 1) +} + +func (sc *subChannel) decrTraceRefCount() { + atomic.AddInt32(&sc.traceRefCount, -1) +} + +func (sc *subChannel) getTraceRefCount() int { + i := atomic.LoadInt32(&sc.traceRefCount) + return int(i) +} + +func (sc *subChannel) getRefName() string { + return sc.refName +} + +// SocketMetric defines the info channelz provides for a specific Socket, which +// includes SocketInternalMetric and channelz-specific data, such as channelz id, etc. +type SocketMetric struct { + // ID is the channelz id of this socket. + ID int64 + // RefName is the human readable reference string of this socket. + RefName string + // SocketData contains socket internal metric reported by the socket through + // ChannelzMetric(). + SocketData *SocketInternalMetric +} + +// SocketInternalMetric defines the struct that the implementor of Socket interface +// should return from ChannelzMetric(). +type SocketInternalMetric struct { + // The number of streams that have been started. + StreamsStarted int64 + // The number of streams that have ended successfully: + // On client side, receiving frame with eos bit set. + // On server side, sending frame with eos bit set. + StreamsSucceeded int64 + // The number of streams that have ended unsuccessfully: + // On client side, termination without receiving frame with eos bit set. + // On server side, termination without sending frame with eos bit set. + StreamsFailed int64 + // The number of messages successfully sent on this socket. + MessagesSent int64 + MessagesReceived int64 + // The number of keep alives sent. This is typically implemented with HTTP/2 + // ping messages. + KeepAlivesSent int64 + // The last time a stream was created by this endpoint. Usually unset for + // servers. + LastLocalStreamCreatedTimestamp time.Time + // The last time a stream was created by the remote endpoint. Usually unset + // for clients. + LastRemoteStreamCreatedTimestamp time.Time + // The last time a message was sent by this endpoint. + LastMessageSentTimestamp time.Time + // The last time a message was received by this endpoint. + LastMessageReceivedTimestamp time.Time + // The amount of window, granted to the local endpoint by the remote endpoint. + // This may be slightly out of date due to network latency. This does NOT + // include stream level or TCP level flow control info. + LocalFlowControlWindow int64 + // The amount of window, granted to the remote endpoint by the local endpoint. + // This may be slightly out of date due to network latency. This does NOT + // include stream level or TCP level flow control info. + RemoteFlowControlWindow int64 + // The locally bound address. + LocalAddr net.Addr + // The remote bound address. May be absent. + RemoteAddr net.Addr + // Optional, represents the name of the remote endpoint, if different than + // the original target name. + RemoteName string + SocketOptions *SocketOptionData + Security credentials.ChannelzSecurityValue +} + +// Socket is the interface that should be satisfied in order to be tracked by +// channelz as Socket. 
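For illustration, the Socket interface declared just below only asks for a point-in-time metric snapshot; a transport typically keeps its counters updated with atomics and copies them into a fresh SocketInternalMetric on every call. A minimal sketch of such an implementation (the type is hypothetical and not part of this patch):

// fakeSocket is an illustrative Socket implementation that reports only a
// couple of the available fields; real transports fill in far more.
type fakeSocket struct {
	streamsStarted int64 // bumped with atomic.AddInt64 when a stream starts
	localAddr      net.Addr
}

func (f *fakeSocket) ChannelzMetric() *SocketInternalMetric {
	return &SocketInternalMetric{
		StreamsStarted: atomic.LoadInt64(&f.streamsStarted),
		LocalAddr:      f.localAddr,
	}
}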
+type Socket interface { + ChannelzMetric() *SocketInternalMetric +} + +type listenSocket struct { + refName string + s Socket + id int64 + pid int64 + cm *channelMap +} + +func (ls *listenSocket) addChild(id int64, e entry) { + logger.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e) +} + +func (ls *listenSocket) deleteChild(id int64) { + logger.Errorf("cannot delete a child (id = %d) from a listen socket", id) +} + +func (ls *listenSocket) triggerDelete() { + ls.cm.deleteEntry(ls.id) + ls.cm.findEntry(ls.pid).deleteChild(ls.id) +} + +func (ls *listenSocket) deleteSelfIfReady() { + logger.Errorf("cannot call deleteSelfIfReady on a listen socket") +} + +func (ls *listenSocket) getParentID() int64 { + return ls.pid +} + +type normalSocket struct { + refName string + s Socket + id int64 + pid int64 + cm *channelMap +} + +func (ns *normalSocket) addChild(id int64, e entry) { + logger.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e) +} + +func (ns *normalSocket) deleteChild(id int64) { + logger.Errorf("cannot delete a child (id = %d) from a normal socket", id) +} + +func (ns *normalSocket) triggerDelete() { + ns.cm.deleteEntry(ns.id) + ns.cm.findEntry(ns.pid).deleteChild(ns.id) +} + +func (ns *normalSocket) deleteSelfIfReady() { + logger.Errorf("cannot call deleteSelfIfReady on a normal socket") +} + +func (ns *normalSocket) getParentID() int64 { + return ns.pid +} + +// ServerMetric defines the info channelz provides for a specific Server, which +// includes ServerInternalMetric and channelz-specific data, such as channelz id, +// child list, etc. +type ServerMetric struct { + // ID is the channelz id of this server. + ID int64 + // RefName is the human readable reference string of this server. + RefName string + // ServerData contains server internal metric reported by the server through + // ChannelzMetric(). + ServerData *ServerInternalMetric + // ListenSockets tracks the listener socket type children of this server in the + // format of a map from socket channelz id to corresponding reference string. + ListenSockets map[int64]string +} + +// ServerInternalMetric defines the struct that the implementor of Server interface +// should return from ChannelzMetric(). +type ServerInternalMetric struct { + // The number of incoming calls started on the server. + CallsStarted int64 + // The number of incoming calls that have completed with an OK status. + CallsSucceeded int64 + // The number of incoming calls that have a completed with a non-OK status. + CallsFailed int64 + // The last time a call was started on the server. + LastCallStartedTimestamp time.Time +} + +// Server is the interface to be satisfied in order to be tracked by channelz as +// Server. 
+type Server interface { + ChannelzMetric() *ServerInternalMetric +} + +type server struct { + refName string + s Server + closeCalled bool + sockets map[int64]string + listenSockets map[int64]string + id int64 + cm *channelMap +} + +func (s *server) addChild(id int64, e entry) { + switch v := e.(type) { + case *normalSocket: + s.sockets[id] = v.refName + case *listenSocket: + s.listenSockets[id] = v.refName + default: + logger.Errorf("cannot add a child (id = %d) of type %T to a server", id, e) + } +} + +func (s *server) deleteChild(id int64) { + delete(s.sockets, id) + delete(s.listenSockets, id) + s.deleteSelfIfReady() +} + +func (s *server) triggerDelete() { + s.closeCalled = true + s.deleteSelfIfReady() +} + +func (s *server) deleteSelfIfReady() { + if !s.closeCalled || len(s.sockets)+len(s.listenSockets) != 0 { + return + } + s.cm.deleteEntry(s.id) +} + +func (s *server) getParentID() int64 { + return 0 +} + +type tracedChannel interface { + getChannelTrace() *channelTrace + incrTraceRefCount() + decrTraceRefCount() + getRefName() string +} + +type channelTrace struct { + cm *channelMap + clearCalled bool + createdTime time.Time + eventCount int64 + mu sync.Mutex + events []*TraceEvent +} + +func (c *channelTrace) append(e *TraceEvent) { + c.mu.Lock() + if len(c.events) == getMaxTraceEntry() { + del := c.events[0] + c.events = c.events[1:] + if del.RefID != 0 { + // start recursive cleanup in a goroutine to not block the call originated from grpc. + go func() { + // need to acquire c.cm.mu lock to call the unlocked attemptCleanup func. + c.cm.mu.Lock() + c.cm.decrTraceRefCount(del.RefID) + c.cm.mu.Unlock() + }() + } + } + e.Timestamp = time.Now() + c.events = append(c.events, e) + c.eventCount++ + c.mu.Unlock() +} + +func (c *channelTrace) clear() { + if c.clearCalled { + return + } + c.clearCalled = true + c.mu.Lock() + for _, e := range c.events { + if e.RefID != 0 { + // caller should have already held the c.cm.mu lock. + c.cm.decrTraceRefCount(e.RefID) + } + } + c.mu.Unlock() +} + +// Severity is the severity level of a trace event. +// The canonical enumeration of all valid values is here: +// https://github.com/grpc/grpc-proto/blob/9b13d199cc0d4703c7ea26c9c330ba695866eb23/grpc/channelz/v1/channelz.proto#L126. +type Severity int + +const ( + // CtUnknown indicates unknown severity of a trace event. + CtUnknown Severity = iota + // CtInfo indicates info level severity of a trace event. + CtInfo + // CtWarning indicates warning level severity of a trace event. + CtWarning + // CtError indicates error level severity of a trace event. + CtError +) + +// RefChannelType is the type of the entity being referenced in a trace event. +type RefChannelType int + +const ( + // RefUnknown indicates an unknown entity type, the zero value for this type. + RefUnknown RefChannelType = iota + // RefChannel indicates the referenced entity is a Channel. + RefChannel + // RefSubChannel indicates the referenced entity is a SubChannel. + RefSubChannel + // RefServer indicates the referenced entity is a Server. + RefServer + // RefListenSocket indicates the referenced entity is a ListenSocket. + RefListenSocket + // RefNormalSocket indicates the referenced entity is a NormalSocket. 
+ RefNormalSocket +) + +var refChannelTypeToString = map[RefChannelType]string{ + RefUnknown: "Unknown", + RefChannel: "Channel", + RefSubChannel: "SubChannel", + RefServer: "Server", + RefListenSocket: "ListenSocket", + RefNormalSocket: "NormalSocket", +} + +func (r RefChannelType) String() string { + return refChannelTypeToString[r] +} + +func (c *channelTrace) dumpData() *ChannelTrace { + c.mu.Lock() + ct := &ChannelTrace{EventNum: c.eventCount, CreationTime: c.createdTime} + ct.Events = c.events[:len(c.events)] + c.mu.Unlock() + return ct +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go b/vendor/google.golang.org/grpc/internal/channelz/types_linux.go new file mode 100644 index 00000000..1b1c4cce --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/types_linux.go @@ -0,0 +1,51 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +// SocketOptionData defines the struct to hold socket option data, and related +// getter function to obtain info from fd. +type SocketOptionData struct { + Linger *unix.Linger + RecvTimeout *unix.Timeval + SendTimeout *unix.Timeval + TCPInfo *unix.TCPInfo +} + +// Getsockopt defines the function to get socket options requested by channelz. +// It is to be passed to syscall.RawConn.Control(). +func (s *SocketOptionData) Getsockopt(fd uintptr) { + if v, err := unix.GetsockoptLinger(int(fd), syscall.SOL_SOCKET, syscall.SO_LINGER); err == nil { + s.Linger = v + } + if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_RCVTIMEO); err == nil { + s.RecvTimeout = v + } + if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_SNDTIMEO); err == nil { + s.SendTimeout = v + } + if v, err := unix.GetsockoptTCPInfo(int(fd), syscall.SOL_TCP, syscall.TCP_INFO); err == nil { + s.TCPInfo = v + } +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go new file mode 100644 index 00000000..8b06eed1 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go @@ -0,0 +1,43 @@ +//go:build !linux +// +build !linux + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package channelz + +import ( + "sync" +) + +var once sync.Once + +// SocketOptionData defines the struct to hold socket option data, and related +// getter function to obtain info from fd. +// Windows OS doesn't support Socket Option +type SocketOptionData struct { +} + +// Getsockopt defines the function to get socket options requested by channelz. +// It is to be passed to syscall.RawConn.Control(). +// Windows OS doesn't support Socket Option +func (s *SocketOptionData) Getsockopt(fd uintptr) { + once.Do(func() { + logger.Warning("Channelz: socket options are not supported on non-linux environments") + }) +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go new file mode 100644 index 00000000..98288c3f --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go @@ -0,0 +1,37 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "syscall" +) + +// GetSocketOption gets the socket option info of the conn. +func GetSocketOption(socket any) *SocketOptionData { + c, ok := socket.(syscall.Conn) + if !ok { + return nil + } + data := &SocketOptionData{} + if rawConn, err := c.SyscallConn(); err == nil { + rawConn.Control(data.Getsockopt) + return data + } + return nil +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go new file mode 100644 index 00000000..b5568b22 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go @@ -0,0 +1,27 @@ +//go:build !linux +// +build !linux + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +// GetSocketOption gets the socket option info of the conn. +func GetSocketOption(c any) *SocketOptionData { + return nil +} diff --git a/vendor/google.golang.org/grpc/internal/credentials/credentials.go b/vendor/google.golang.org/grpc/internal/credentials/credentials.go new file mode 100644 index 00000000..9deee7f6 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/credentials/credentials.go @@ -0,0 +1,49 @@ +/* + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import ( + "context" +) + +// requestInfoKey is a struct to be used as the key to store RequestInfo in a +// context. +type requestInfoKey struct{} + +// NewRequestInfoContext creates a context with ri. +func NewRequestInfoContext(ctx context.Context, ri any) context.Context { + return context.WithValue(ctx, requestInfoKey{}, ri) +} + +// RequestInfoFromContext extracts the RequestInfo from ctx. +func RequestInfoFromContext(ctx context.Context) any { + return ctx.Value(requestInfoKey{}) +} + +// clientHandshakeInfoKey is a struct used as the key to store +// ClientHandshakeInfo in a context. +type clientHandshakeInfoKey struct{} + +// ClientHandshakeInfoFromContext extracts the ClientHandshakeInfo from ctx. +func ClientHandshakeInfoFromContext(ctx context.Context) any { + return ctx.Value(clientHandshakeInfoKey{}) +} + +// NewClientHandshakeInfoContext creates a context with chi. +func NewClientHandshakeInfoContext(ctx context.Context, chi any) context.Context { + return context.WithValue(ctx, clientHandshakeInfoKey{}, chi) +} diff --git a/vendor/google.golang.org/grpc/internal/credentials/spiffe.go b/vendor/google.golang.org/grpc/internal/credentials/spiffe.go new file mode 100644 index 00000000..25ade623 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/credentials/spiffe.go @@ -0,0 +1,75 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package credentials defines APIs for parsing SPIFFE ID. +// +// All APIs in this package are experimental. +package credentials + +import ( + "crypto/tls" + "crypto/x509" + "net/url" + + "google.golang.org/grpc/grpclog" +) + +var logger = grpclog.Component("credentials") + +// SPIFFEIDFromState parses the SPIFFE ID from State. If the SPIFFE ID format +// is invalid, return nil with warning. +func SPIFFEIDFromState(state tls.ConnectionState) *url.URL { + if len(state.PeerCertificates) == 0 || len(state.PeerCertificates[0].URIs) == 0 { + return nil + } + return SPIFFEIDFromCert(state.PeerCertificates[0]) +} + +// SPIFFEIDFromCert parses the SPIFFE ID from x509.Certificate. If the SPIFFE +// ID format is invalid, return nil with warning. +func SPIFFEIDFromCert(cert *x509.Certificate) *url.URL { + if cert == nil || cert.URIs == nil { + return nil + } + var spiffeID *url.URL + for _, uri := range cert.URIs { + if uri == nil || uri.Scheme != "spiffe" || uri.Opaque != "" || (uri.User != nil && uri.User.Username() != "") { + continue + } + // From this point, we assume the uri is intended for a SPIFFE ID. 
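For reference, a URI SAN that passes these checks has the shape spiffe://<trust-domain>/<workload-path>: the "spiffe" scheme, a non-empty host and path, no user info or opaque part, a single URI SAN on the certificate, and the length limits enforced below. A minimal in-package sketch (the certificate literal and helper name are hypothetical):

// exampleSPIFFEID builds a certificate carrying one well-formed SPIFFE URI SAN
// and parses it back; an empty path, a second URI SAN, a host over 255
// characters, or an ID over 2048 bytes would make SPIFFEIDFromCert return nil.
func exampleSPIFFEID() *url.URL {
	cert := &x509.Certificate{
		URIs: []*url.URL{{Scheme: "spiffe", Host: "example.org", Path: "/workload-1"}},
	}
	return SPIFFEIDFromCert(cert) // spiffe://example.org/workload-1
}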
+ if len(uri.String()) > 2048 { + logger.Warning("invalid SPIFFE ID: total ID length larger than 2048 bytes") + return nil + } + if len(uri.Host) == 0 || len(uri.Path) == 0 { + logger.Warning("invalid SPIFFE ID: domain or workload ID is empty") + return nil + } + if len(uri.Host) > 255 { + logger.Warning("invalid SPIFFE ID: domain length larger than 255 characters") + return nil + } + // A valid SPIFFE certificate can only have exactly one URI SAN field. + if len(cert.URIs) > 1 { + logger.Warning("invalid SPIFFE ID: multiple URI SANs") + return nil + } + spiffeID = uri + } + return spiffeID +} diff --git a/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go b/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go new file mode 100644 index 00000000..2919632d --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go @@ -0,0 +1,58 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package credentials + +import ( + "net" + "syscall" +) + +type sysConn = syscall.Conn + +// syscallConn keeps reference of rawConn to support syscall.Conn for channelz. +// SyscallConn() (the method in interface syscall.Conn) is explicitly +// implemented on this type, +// +// Interface syscall.Conn is implemented by most net.Conn implementations (e.g. +// TCPConn, UnixConn), but is not part of net.Conn interface. So wrapper conns +// that embed net.Conn don't implement syscall.Conn. (Side note: tls.Conn +// doesn't embed net.Conn, so even if syscall.Conn is part of net.Conn, it won't +// help here). +type syscallConn struct { + net.Conn + // sysConn is a type alias of syscall.Conn. It's necessary because the name + // `Conn` collides with `net.Conn`. + sysConn +} + +// WrapSyscallConn tries to wrap rawConn and newConn into a net.Conn that +// implements syscall.Conn. rawConn will be used to support syscall, and newConn +// will be used for read/write. +// +// This function returns newConn if rawConn doesn't implement syscall.Conn. +func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn { + sysConn, ok := rawConn.(syscall.Conn) + if !ok { + return newConn + } + return &syscallConn{ + Conn: newConn, + sysConn: sysConn, + } +} diff --git a/vendor/google.golang.org/grpc/internal/credentials/util.go b/vendor/google.golang.org/grpc/internal/credentials/util.go new file mode 100644 index 00000000..f792fd22 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/credentials/util.go @@ -0,0 +1,52 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package credentials + +import ( + "crypto/tls" +) + +const alpnProtoStrH2 = "h2" + +// AppendH2ToNextProtos appends h2 to next protos. +func AppendH2ToNextProtos(ps []string) []string { + for _, p := range ps { + if p == alpnProtoStrH2 { + return ps + } + } + ret := make([]string, 0, len(ps)+1) + ret = append(ret, ps...) + return append(ret, alpnProtoStrH2) +} + +// CloneTLSConfig returns a shallow clone of the exported +// fields of cfg, ignoring the unexported sync.Once, which +// contains a mutex and must not be copied. +// +// If cfg is nil, a new zero tls.Config is returned. +// +// TODO: inline this function if possible. +func CloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + + return cfg.Clone() +} diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go new file mode 100644 index 00000000..685a3cb4 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -0,0 +1,69 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package envconfig contains grpc settings configured by environment variables. +package envconfig + +import ( + "os" + "strconv" + "strings" +) + +var ( + // TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). + TXTErrIgnore = boolFromEnv("GRPC_GO_IGNORE_TXT_ERRORS", true) + // AdvertiseCompressors is set if registered compressor should be advertised + // ("GRPC_GO_ADVERTISE_COMPRESSORS" is not "false"). + AdvertiseCompressors = boolFromEnv("GRPC_GO_ADVERTISE_COMPRESSORS", true) + // RingHashCap indicates the maximum ring size which defaults to 4096 + // entries but may be overridden by setting the environment variable + // "GRPC_RING_HASH_CAP". This does not override the default bounds + // checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M). + RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024) + // LeastRequestLB is set if we should support the least_request_experimental + // LB policy, which can be enabled by setting the environment variable + // "GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST" to "true". + LeastRequestLB = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST", false) + // ALTSMaxConcurrentHandshakes is the maximum number of concurrent ALTS + // handshakes that can be performed. + ALTSMaxConcurrentHandshakes = uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100) +) + +func boolFromEnv(envVar string, def bool) bool { + if def { + // The default is true; return true unless the variable is "false". + return !strings.EqualFold(os.Getenv(envVar), "false") + } + // The default is false; return false unless the variable is "true". 
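In other words, these parsers fall back to their defaults unless the variable is set to an exact value: boolFromEnv only flips on the case-insensitive strings "true" or "false", and uint64FromEnv, defined just below, clamps parsed values into [min, max]. A small in-package sketch of the resulting values (the helper is an assumption, not part of the vendored file):

// exampleEnvParsing shows what the parsers above and below produce for a couple
// of inputs; illustrative only.
func exampleEnvParsing() (bool, uint64) {
	os.Setenv("GRPC_GO_ADVERTISE_COMPRESSORS", "no") // not "false", so the default true wins
	os.Setenv("GRPC_RING_HASH_CAP", "99999999")      // above max, clamped to 8*1024*1024
	adv := boolFromEnv("GRPC_GO_ADVERTISE_COMPRESSORS", true)
	ringCap := uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024)
	return adv, ringCap // true, 8388608
}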
+ return strings.EqualFold(os.Getenv(envVar), "true") +} + +func uint64FromEnv(envVar string, def, min, max uint64) uint64 { + v, err := strconv.ParseUint(os.Getenv(envVar), 10, 64) + if err != nil { + return def + } + if v < min { + return min + } + if v > max { + return max + } + return v +} diff --git a/vendor/google.golang.org/grpc/internal/envconfig/observability.go b/vendor/google.golang.org/grpc/internal/envconfig/observability.go new file mode 100644 index 00000000..dd314cfb --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/envconfig/observability.go @@ -0,0 +1,42 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package envconfig + +import "os" + +const ( + envObservabilityConfig = "GRPC_GCP_OBSERVABILITY_CONFIG" + envObservabilityConfigFile = "GRPC_GCP_OBSERVABILITY_CONFIG_FILE" +) + +var ( + // ObservabilityConfig is the json configuration for the gcp/observability + // package specified directly in the envObservabilityConfig env var. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + ObservabilityConfig = os.Getenv(envObservabilityConfig) + // ObservabilityConfigFile is the json configuration for the + // gcp/observability specified in a file with the location specified in + // envObservabilityConfigFile env var. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + ObservabilityConfigFile = os.Getenv(envObservabilityConfigFile) +) diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go new file mode 100644 index 00000000..29f234ac --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go @@ -0,0 +1,56 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package envconfig + +import ( + "os" +) + +const ( + // XDSBootstrapFileNameEnv is the env variable to set bootstrap file name. + // Do not use this and read from env directly. Its value is read and kept in + // variable XDSBootstrapFileName. + // + // When both bootstrap FileName and FileContent are set, FileName is used. + XDSBootstrapFileNameEnv = "GRPC_XDS_BOOTSTRAP" + // XDSBootstrapFileContentEnv is the env variable to set bootstrap file + // content. Do not use this and read from env directly. Its value is read + // and kept in variable XDSBootstrapFileContent. 
+ // + // When both bootstrap FileName and FileContent are set, FileName is used. + XDSBootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG" +) + +var ( + // XDSBootstrapFileName holds the name of the file which contains xDS + // bootstrap configuration. Users can specify the location of the bootstrap + // file by setting the environment variable "GRPC_XDS_BOOTSTRAP". + // + // When both bootstrap FileName and FileContent are set, FileName is used. + XDSBootstrapFileName = os.Getenv(XDSBootstrapFileNameEnv) + // XDSBootstrapFileContent holds the content of the xDS bootstrap + // configuration. Users can specify the bootstrap config by setting the + // environment variable "GRPC_XDS_BOOTSTRAP_CONFIG". + // + // When both bootstrap FileName and FileContent are set, FileName is used. + XDSBootstrapFileContent = os.Getenv(XDSBootstrapFileContentEnv) + + // C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing. + C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI") +) diff --git a/vendor/google.golang.org/grpc/internal/experimental.go b/vendor/google.golang.org/grpc/internal/experimental.go new file mode 100644 index 00000000..7f7044e1 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/experimental.go @@ -0,0 +1,28 @@ +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package internal + +var ( + // WithRecvBufferPool is implemented by the grpc package and returns a dial + // option to configure a shared buffer pool for a grpc.ClientConn. + WithRecvBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption + + // RecvBufferPool is implemented by the grpc package and returns a server + // option to configure a shared buffer pool for a grpc.Server. + RecvBufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption +) diff --git a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go new file mode 100644 index 00000000..bfc45102 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go @@ -0,0 +1,126 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpclog (internal) defines depth logging for grpc. +package grpclog + +import ( + "os" +) + +// Logger is the logger used for the non-depth log functions. +var Logger LoggerV2 + +// DepthLogger is the logger used for the depth log functions. 
+var DepthLogger DepthLoggerV2 + +// InfoDepth logs to the INFO log at the specified depth. +func InfoDepth(depth int, args ...any) { + if DepthLogger != nil { + DepthLogger.InfoDepth(depth, args...) + } else { + Logger.Infoln(args...) + } +} + +// WarningDepth logs to the WARNING log at the specified depth. +func WarningDepth(depth int, args ...any) { + if DepthLogger != nil { + DepthLogger.WarningDepth(depth, args...) + } else { + Logger.Warningln(args...) + } +} + +// ErrorDepth logs to the ERROR log at the specified depth. +func ErrorDepth(depth int, args ...any) { + if DepthLogger != nil { + DepthLogger.ErrorDepth(depth, args...) + } else { + Logger.Errorln(args...) + } +} + +// FatalDepth logs to the FATAL log at the specified depth. +func FatalDepth(depth int, args ...any) { + if DepthLogger != nil { + DepthLogger.FatalDepth(depth, args...) + } else { + Logger.Fatalln(args...) + } + os.Exit(1) +} + +// LoggerV2 does underlying logging work for grpclog. +// This is a copy of the LoggerV2 defined in the external grpclog package. It +// is defined here to avoid a circular dependency. +type LoggerV2 interface { + // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. + Info(args ...any) + // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. + Infoln(args ...any) + // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. + Infof(format string, args ...any) + // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. + Warning(args ...any) + // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. + Warningln(args ...any) + // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. + Warningf(format string, args ...any) + // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. + Error(args ...any) + // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + Errorln(args ...any) + // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + Errorf(format string, args ...any) + // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatal(args ...any) + // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalln(args ...any) + // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalf(format string, args ...any) + // V reports whether verbosity level l is at least the requested verbose level. + V(l int) bool +} + +// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements +// DepthLoggerV2, the below functions will be called with the appropriate stack +// depth set for trivial functions the logger may ignore. +// This is a copy of the DepthLoggerV2 defined in the external grpclog package. +// It is defined here to avoid a circular dependency. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type DepthLoggerV2 interface { + // InfoDepth logs to INFO log at the specified depth. 
Arguments are handled in the manner of fmt.Println. + InfoDepth(depth int, args ...any) + // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. + WarningDepth(depth int, args ...any) + // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. + ErrorDepth(depth int, args ...any) + // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. + FatalDepth(depth int, args ...any) +} diff --git a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go new file mode 100644 index 00000000..faa998de --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go @@ -0,0 +1,93 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclog + +import ( + "fmt" +) + +// PrefixLogger does logging with a prefix. +// +// Logging method on a nil logs without any prefix. +type PrefixLogger struct { + logger DepthLoggerV2 + prefix string +} + +// Infof does info logging. +func (pl *PrefixLogger) Infof(format string, args ...any) { + if pl != nil { + // Handle nil, so the tests can pass in a nil logger. + format = pl.prefix + format + pl.logger.InfoDepth(1, fmt.Sprintf(format, args...)) + return + } + InfoDepth(1, fmt.Sprintf(format, args...)) +} + +// Warningf does warning logging. +func (pl *PrefixLogger) Warningf(format string, args ...any) { + if pl != nil { + format = pl.prefix + format + pl.logger.WarningDepth(1, fmt.Sprintf(format, args...)) + return + } + WarningDepth(1, fmt.Sprintf(format, args...)) +} + +// Errorf does error logging. +func (pl *PrefixLogger) Errorf(format string, args ...any) { + if pl != nil { + format = pl.prefix + format + pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...)) + return + } + ErrorDepth(1, fmt.Sprintf(format, args...)) +} + +// Debugf does info logging at verbose level 2. +func (pl *PrefixLogger) Debugf(format string, args ...any) { + // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe + // rewrite PrefixLogger a little to ensure that we don't use the global + // `Logger` here, and instead use the `logger` field. + if !Logger.V(2) { + return + } + if pl != nil { + // Handle nil, so the tests can pass in a nil logger. + format = pl.prefix + format + pl.logger.InfoDepth(1, fmt.Sprintf(format, args...)) + return + } + InfoDepth(1, fmt.Sprintf(format, args...)) + +} + +// V reports whether verbosity level l is at least the requested verbose level. +func (pl *PrefixLogger) V(l int) bool { + // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe + // rewrite PrefixLogger a little to ensure that we don't use the global + // `Logger` here, and instead use the `logger` field. + return Logger.V(l) +} + +// NewPrefixLogger creates a prefix logger with the given prefix. 
+func NewPrefixLogger(logger DepthLoggerV2, prefix string) *PrefixLogger { + return &PrefixLogger{logger: logger, prefix: prefix} +} diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go new file mode 100644 index 00000000..aa97273e --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go @@ -0,0 +1,95 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpcrand implements math/rand functions in a concurrent-safe way +// with a global random source, independent of math/rand's global source. +package grpcrand + +import ( + "math/rand" + "sync" + "time" +) + +var ( + r = rand.New(rand.NewSource(time.Now().UnixNano())) + mu sync.Mutex +) + +// Int implements rand.Int on the grpcrand global source. +func Int() int { + mu.Lock() + defer mu.Unlock() + return r.Int() +} + +// Int63n implements rand.Int63n on the grpcrand global source. +func Int63n(n int64) int64 { + mu.Lock() + defer mu.Unlock() + return r.Int63n(n) +} + +// Intn implements rand.Intn on the grpcrand global source. +func Intn(n int) int { + mu.Lock() + defer mu.Unlock() + return r.Intn(n) +} + +// Int31n implements rand.Int31n on the grpcrand global source. +func Int31n(n int32) int32 { + mu.Lock() + defer mu.Unlock() + return r.Int31n(n) +} + +// Float64 implements rand.Float64 on the grpcrand global source. +func Float64() float64 { + mu.Lock() + defer mu.Unlock() + return r.Float64() +} + +// Uint64 implements rand.Uint64 on the grpcrand global source. +func Uint64() uint64 { + mu.Lock() + defer mu.Unlock() + return r.Uint64() +} + +// Uint32 implements rand.Uint32 on the grpcrand global source. +func Uint32() uint32 { + mu.Lock() + defer mu.Unlock() + return r.Uint32() +} + +// ExpFloat64 implements rand.ExpFloat64 on the grpcrand global source. +func ExpFloat64() float64 { + mu.Lock() + defer mu.Unlock() + return r.ExpFloat64() +} + +// Shuffle implements rand.Shuffle on the grpcrand global source. +var Shuffle = func(n int, f func(int, int)) { + mu.Lock() + defer mu.Unlock() + r.Shuffle(n, f) +} diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go new file mode 100644 index 00000000..f7f40a16 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go @@ -0,0 +1,100 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
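The grpcrand package above wraps a single mutex-guarded math/rand source. A minimal usage sketch, assuming code that lives inside the google.golang.org/grpc module (these internal packages are not importable from outside it):

package main

import (
	"fmt"

	"google.golang.org/grpc/internal/grpcrand"
)

func main() {
	// All helpers share one mutex-guarded source, so they are safe to call
	// concurrently without additional locking.
	fmt.Println(grpcrand.Intn(10))     // int in [0, 10)
	fmt.Println(grpcrand.Int63n(1000)) // int64 in [0, 1000)
	fmt.Println(grpcrand.Float64())    // float64 in [0.0, 1.0)

	s := []string{"a", "b", "c", "d"}
	grpcrand.Shuffle(len(s), func(i, j int) { s[i], s[j] = s[j], s[i] })
	fmt.Println(s)
}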
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcsync + +import ( + "context" + + "google.golang.org/grpc/internal/buffer" +) + +// CallbackSerializer provides a mechanism to schedule callbacks in a +// synchronized manner. It provides a FIFO guarantee on the order of execution +// of scheduled callbacks. New callbacks can be scheduled by invoking the +// Schedule() method. +// +// This type is safe for concurrent access. +type CallbackSerializer struct { + // done is closed once the serializer is shut down completely, i.e all + // scheduled callbacks are executed and the serializer has deallocated all + // its resources. + done chan struct{} + + callbacks *buffer.Unbounded +} + +// NewCallbackSerializer returns a new CallbackSerializer instance. The provided +// context will be passed to the scheduled callbacks. Users should cancel the +// provided context to shutdown the CallbackSerializer. It is guaranteed that no +// callbacks will be added once this context is canceled, and any pending un-run +// callbacks will be executed before the serializer is shut down. +func NewCallbackSerializer(ctx context.Context) *CallbackSerializer { + cs := &CallbackSerializer{ + done: make(chan struct{}), + callbacks: buffer.NewUnbounded(), + } + go cs.run(ctx) + return cs +} + +// Schedule adds a callback to be scheduled after existing callbacks are run. +// +// Callbacks are expected to honor the context when performing any blocking +// operations, and should return early when the context is canceled. +// +// Return value indicates if the callback was successfully added to the list of +// callbacks to be executed by the serializer. It is not possible to add +// callbacks once the context passed to NewCallbackSerializer is cancelled. +func (cs *CallbackSerializer) Schedule(f func(ctx context.Context)) bool { + return cs.callbacks.Put(f) == nil +} + +func (cs *CallbackSerializer) run(ctx context.Context) { + defer close(cs.done) + + // TODO: when Go 1.21 is the oldest supported version, this loop and Close + // can be replaced with: + // + // context.AfterFunc(ctx, cs.callbacks.Close) + for ctx.Err() == nil { + select { + case <-ctx.Done(): + // Do nothing here. Next iteration of the for loop will not happen, + // since ctx.Err() would be non-nil. + case cb := <-cs.callbacks.Get(): + cs.callbacks.Load() + cb.(func(context.Context))(ctx) + } + } + + // Close the buffer to prevent new callbacks from being added. + cs.callbacks.Close() + + // Run all pending callbacks. + for cb := range cs.callbacks.Get() { + cs.callbacks.Load() + cb.(func(context.Context))(ctx) + } +} + +// Done returns a channel that is closed after the context passed to +// NewCallbackSerializer is canceled and all callbacks have been executed. +func (cs *CallbackSerializer) Done() <-chan struct{} { + return cs.done +} diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/event.go b/vendor/google.golang.org/grpc/internal/grpcsync/event.go new file mode 100644 index 00000000..fbe697c3 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcsync/event.go @@ -0,0 +1,61 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
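A minimal sketch of how a CallbackSerializer is typically driven, again assuming code inside the google.golang.org/grpc module; it illustrates the FIFO ordering and the drain-on-cancel behavior documented above.

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/internal/grpcsync"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cs := grpcsync.NewCallbackSerializer(ctx)

	// Callbacks run one at a time, in the order they were scheduled.
	for i := 1; i <= 3; i++ {
		i := i
		cs.Schedule(func(context.Context) { fmt.Println("callback", i) })
	}

	// Cancelling the context stops new callbacks from being accepted; any
	// already-scheduled callbacks still run before Done is closed.
	cancel()
	<-cs.Done()
}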
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpcsync implements additional synchronization primitives built upon +// the sync package. +package grpcsync + +import ( + "sync" + "sync/atomic" +) + +// Event represents a one-time event that may occur in the future. +type Event struct { + fired int32 + c chan struct{} + o sync.Once +} + +// Fire causes e to complete. It is safe to call multiple times, and +// concurrently. It returns true iff this call to Fire caused the signaling +// channel returned by Done to close. +func (e *Event) Fire() bool { + ret := false + e.o.Do(func() { + atomic.StoreInt32(&e.fired, 1) + close(e.c) + ret = true + }) + return ret +} + +// Done returns a channel that will be closed when Fire is called. +func (e *Event) Done() <-chan struct{} { + return e.c +} + +// HasFired returns true if Fire has been called. +func (e *Event) HasFired() bool { + return atomic.LoadInt32(&e.fired) == 1 +} + +// NewEvent returns a new, ready-to-use Event. +func NewEvent() *Event { + return &Event{c: make(chan struct{})} +} diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go b/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go new file mode 100644 index 00000000..6635f7bc --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go @@ -0,0 +1,32 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcsync + +import ( + "sync" +) + +// OnceFunc returns a function wrapping f which ensures f is only executed +// once even if the returned function is executed multiple times. +func OnceFunc(f func()) func() { + var once sync.Once + return func() { + once.Do(f) + } +} diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go new file mode 100644 index 00000000..aef8cec1 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go @@ -0,0 +1,121 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
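A small sketch of Event and OnceFunc, assuming code inside the google.golang.org/grpc module: only the first Fire closes the Done channel, and OnceFunc collapses repeated calls into a single execution.

package main

import (
	"fmt"

	"google.golang.org/grpc/internal/grpcsync"
)

func main() {
	e := grpcsync.NewEvent()

	go func() {
		fmt.Println("first fire:", e.Fire())  // true: this call closed Done
		fmt.Println("second fire:", e.Fire()) // false: already fired
	}()

	<-e.Done() // unblocks once Fire has been called
	fmt.Println("has fired:", e.HasFired())

	f := grpcsync.OnceFunc(func() { fmt.Println("runs exactly once") })
	f()
	f() // no-op on subsequent calls
}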
+ * + */ + +package grpcsync + +import ( + "context" + "sync" +) + +// Subscriber represents an entity that is subscribed to messages published on +// a PubSub. It wraps the callback to be invoked by the PubSub when a new +// message is published. +type Subscriber interface { + // OnMessage is invoked when a new message is published. Implementations + // must not block in this method. + OnMessage(msg any) +} + +// PubSub is a simple one-to-many publish-subscribe system that supports +// messages of arbitrary type. It guarantees that messages are delivered in +// the same order in which they were published. +// +// Publisher invokes the Publish() method to publish new messages, while +// subscribers interested in receiving these messages register a callback +// via the Subscribe() method. +// +// Once a PubSub is stopped, no more messages can be published, but any pending +// published messages will be delivered to the subscribers. Done may be used +// to determine when all published messages have been delivered. +type PubSub struct { + cs *CallbackSerializer + + // Access to the below fields are guarded by this mutex. + mu sync.Mutex + msg any + subscribers map[Subscriber]bool +} + +// NewPubSub returns a new PubSub instance. Users should cancel the +// provided context to shutdown the PubSub. +func NewPubSub(ctx context.Context) *PubSub { + return &PubSub{ + cs: NewCallbackSerializer(ctx), + subscribers: map[Subscriber]bool{}, + } +} + +// Subscribe registers the provided Subscriber to the PubSub. +// +// If the PubSub contains a previously published message, the Subscriber's +// OnMessage() callback will be invoked asynchronously with the existing +// message to begin with, and subsequently for every newly published message. +// +// The caller is responsible for invoking the returned cancel function to +// unsubscribe itself from the PubSub. +func (ps *PubSub) Subscribe(sub Subscriber) (cancel func()) { + ps.mu.Lock() + defer ps.mu.Unlock() + + ps.subscribers[sub] = true + + if ps.msg != nil { + msg := ps.msg + ps.cs.Schedule(func(context.Context) { + ps.mu.Lock() + defer ps.mu.Unlock() + if !ps.subscribers[sub] { + return + } + sub.OnMessage(msg) + }) + } + + return func() { + ps.mu.Lock() + defer ps.mu.Unlock() + delete(ps.subscribers, sub) + } +} + +// Publish publishes the provided message to the PubSub, and invokes +// callbacks registered by subscribers asynchronously. +func (ps *PubSub) Publish(msg any) { + ps.mu.Lock() + defer ps.mu.Unlock() + + ps.msg = msg + for sub := range ps.subscribers { + s := sub + ps.cs.Schedule(func(context.Context) { + ps.mu.Lock() + defer ps.mu.Unlock() + if !ps.subscribers[s] { + return + } + s.OnMessage(msg) + }) + } +} + +// Done returns a channel that is closed after the context passed to NewPubSub +// is canceled and all updates have been sent to subscribers. +func (ps *PubSub) Done() <-chan struct{} { + return ps.cs.Done() +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go b/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go new file mode 100644 index 00000000..9f409096 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go @@ -0,0 +1,47 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
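A minimal sketch of PubSub, assuming code inside the google.golang.org/grpc module. The funcSubscriber adapter is not part of the package; it exists here only to satisfy the Subscriber interface.

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/internal/grpcsync"
)

// funcSubscriber adapts a plain function to the Subscriber interface.
type funcSubscriber func(msg any)

func (f funcSubscriber) OnMessage(msg any) { f(msg) }

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	ps := grpcsync.NewPubSub(ctx)

	unsub := ps.Subscribe(funcSubscriber(func(msg any) {
		fmt.Println("got:", msg) // delivered asynchronously, in publish order
	}))
	defer unsub()

	ps.Publish("state-1")
	ps.Publish("state-2")

	// Cancelling stops further publishing; pending deliveries complete before
	// Done is closed.
	cancel()
	<-ps.Done()
}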
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcutil + +import ( + "strings" + + "google.golang.org/grpc/internal/envconfig" +) + +// RegisteredCompressorNames holds names of the registered compressors. +var RegisteredCompressorNames []string + +// IsCompressorNameRegistered returns true when name is available in registry. +func IsCompressorNameRegistered(name string) bool { + for _, compressor := range RegisteredCompressorNames { + if compressor == name { + return true + } + } + return false +} + +// RegisteredCompressors returns a string of registered compressor names +// separated by comma. +func RegisteredCompressors() string { + if !envconfig.AdvertiseCompressors { + return "" + } + return strings.Join(RegisteredCompressorNames, ",") +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go b/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go new file mode 100644 index 00000000..b25b0bae --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go @@ -0,0 +1,63 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcutil + +import ( + "strconv" + "time" +) + +const maxTimeoutValue int64 = 100000000 - 1 + +// div does integer division and round-up the result. Note that this is +// equivalent to (d+r-1)/r but has less chance to overflow. +func div(d, r time.Duration) int64 { + if d%r > 0 { + return int64(d/r + 1) + } + return int64(d / r) +} + +// EncodeDuration encodes the duration to the format grpc-timeout header +// accepts. +// +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests +func EncodeDuration(t time.Duration) string { + // TODO: This is simplistic and not bandwidth efficient. Improve it. + if t <= 0 { + return "0n" + } + if d := div(t, time.Nanosecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "n" + } + if d := div(t, time.Microsecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "u" + } + if d := div(t, time.Millisecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "m" + } + if d := div(t, time.Second); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "S" + } + if d := div(t, time.Minute); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "M" + } + // Note that maxTimeoutValue * time.Hour > MaxInt64. 
+ return strconv.FormatInt(div(t, time.Hour), 10) + "H" +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go b/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go new file mode 100644 index 00000000..e2f948e8 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go @@ -0,0 +1,20 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpcutil provides utility functions used across the gRPC codebase. +package grpcutil diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go b/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go new file mode 100644 index 00000000..6f22bd89 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go @@ -0,0 +1,40 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcutil + +import ( + "context" + + "google.golang.org/grpc/metadata" +) + +type mdExtraKey struct{} + +// WithExtraMetadata creates a new context with incoming md attached. +func WithExtraMetadata(ctx context.Context, md metadata.MD) context.Context { + return context.WithValue(ctx, mdExtraKey{}, md) +} + +// ExtraMetadata returns the incoming metadata in ctx if it exists. The +// returned MD should not be modified. Writing to it may cause races. +// Modification should be made to copies of the returned MD. +func ExtraMetadata(ctx context.Context) (md metadata.MD, ok bool) { + md, ok = ctx.Value(mdExtraKey{}).(metadata.MD) + return +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/method.go b/vendor/google.golang.org/grpc/internal/grpcutil/method.go new file mode 100644 index 00000000..ec62b477 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/method.go @@ -0,0 +1,88 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
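EncodeDuration picks the smallest unit whose count still fits in the eight-digit grpc-timeout field, rounding up via div. A few concrete mappings, assuming code inside the google.golang.org/grpc module:

package main

import (
	"fmt"
	"time"

	"google.golang.org/grpc/internal/grpcutil"
)

func main() {
	fmt.Println(grpcutil.EncodeDuration(0))                // "0n": non-positive durations
	fmt.Println(grpcutil.EncodeDuration(time.Millisecond)) // "1000000n": still fits in nanoseconds
	fmt.Println(grpcutil.EncodeDuration(time.Second))      // "1000000u": nanoseconds would exceed 8 digits
	fmt.Println(grpcutil.EncodeDuration(time.Hour))        // "3600000m"
	fmt.Println(grpcutil.EncodeDuration(30 * time.Hour))   // "108000S"
}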
+ * + */ + +package grpcutil + +import ( + "errors" + "strings" +) + +// ParseMethod splits service and method from the input. It expects format +// "/service/method". +func ParseMethod(methodName string) (service, method string, _ error) { + if !strings.HasPrefix(methodName, "/") { + return "", "", errors.New("invalid method name: should start with /") + } + methodName = methodName[1:] + + pos := strings.LastIndex(methodName, "/") + if pos < 0 { + return "", "", errors.New("invalid method name: suffix /method is missing") + } + return methodName[:pos], methodName[pos+1:], nil +} + +// baseContentType is the base content-type for gRPC. This is a valid +// content-type on it's own, but can also include a content-subtype such as +// "proto" as a suffix after "+" or ";". See +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests +// for more details. +const baseContentType = "application/grpc" + +// ContentSubtype returns the content-subtype for the given content-type. The +// given content-type must be a valid content-type that starts with +// "application/grpc". A content-subtype will follow "application/grpc" after a +// "+" or ";". See +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +// +// If contentType is not a valid content-type for gRPC, the boolean +// will be false, otherwise true. If content-type == "application/grpc", +// "application/grpc+", or "application/grpc;", the boolean will be true, +// but no content-subtype will be returned. +// +// contentType is assumed to be lowercase already. +func ContentSubtype(contentType string) (string, bool) { + if contentType == baseContentType { + return "", true + } + if !strings.HasPrefix(contentType, baseContentType) { + return "", false + } + // guaranteed since != baseContentType and has baseContentType prefix + switch contentType[len(baseContentType)] { + case '+', ';': + // this will return true for "application/grpc+" or "application/grpc;" + // which the previous validContentType function tested to be valid, so we + // just say that no content-subtype is specified in this case + return contentType[len(baseContentType)+1:], true + default: + return "", false + } +} + +// ContentType builds full content type with the given sub-type. +// +// contentSubtype is assumed to be lowercase +func ContentType(contentSubtype string) string { + if contentSubtype == "" { + return baseContentType + } + return baseContentType + "+" + contentSubtype +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/regex.go b/vendor/google.golang.org/grpc/internal/grpcutil/regex.go new file mode 100644 index 00000000..7a092b2b --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/regex.go @@ -0,0 +1,31 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcutil + +import "regexp" + +// FullMatchWithRegex returns whether the full text matches the regex provided. 
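A short sketch of the method and content-type helpers defined above, assuming code inside the google.golang.org/grpc module:

package main

import (
	"fmt"

	"google.golang.org/grpc/internal/grpcutil"
)

func main() {
	service, method, err := grpcutil.ParseMethod("/helloworld.Greeter/SayHello")
	fmt.Println(service, method, err) // helloworld.Greeter SayHello <nil>

	_, _, err = grpcutil.ParseMethod("missing-leading-slash")
	fmt.Println(err) // invalid method name: should start with /

	sub, ok := grpcutil.ContentSubtype("application/grpc+proto")
	fmt.Println(sub, ok) // proto true

	sub, ok = grpcutil.ContentSubtype("text/html")
	fmt.Println(sub, ok) // "" false

	fmt.Println(grpcutil.ContentType("proto")) // application/grpc+proto
}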
+func FullMatchWithRegex(re *regexp.Regexp, text string) bool { + if len(text) == 0 { + return re.MatchString(text) + } + re.Longest() + rem := re.FindString(text) + return len(rem) == len(text) +} diff --git a/vendor/google.golang.org/grpc/internal/idle/idle.go b/vendor/google.golang.org/grpc/internal/idle/idle.go new file mode 100644 index 00000000..fe49cb74 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/idle/idle.go @@ -0,0 +1,278 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package idle contains a component for managing idleness (entering and exiting) +// based on RPC activity. +package idle + +import ( + "fmt" + "math" + "sync" + "sync/atomic" + "time" +) + +// For overriding in unit tests. +var timeAfterFunc = func(d time.Duration, f func()) *time.Timer { + return time.AfterFunc(d, f) +} + +// Enforcer is the functionality provided by grpc.ClientConn to enter +// and exit from idle mode. +type Enforcer interface { + ExitIdleMode() error + EnterIdleMode() +} + +// Manager implements idleness detection and calls the configured Enforcer to +// enter/exit idle mode when appropriate. Must be created by NewManager. +type Manager struct { + // State accessed atomically. + lastCallEndTime int64 // Unix timestamp in nanos; time when the most recent RPC completed. + activeCallsCount int32 // Count of active RPCs; -math.MaxInt32 means channel is idle or is trying to get there. + activeSinceLastTimerCheck int32 // Boolean; True if there was an RPC since the last timer callback. + closed int32 // Boolean; True when the manager is closed. + + // Can be accessed without atomics or mutex since these are set at creation + // time and read-only after that. + enforcer Enforcer // Functionality provided by grpc.ClientConn. + timeout time.Duration + + // idleMu is used to guarantee mutual exclusion in two scenarios: + // - Opposing intentions: + // - a: Idle timeout has fired and handleIdleTimeout() is trying to put + // the channel in idle mode because the channel has been inactive. + // - b: At the same time an RPC is made on the channel, and OnCallBegin() + // is trying to prevent the channel from going idle. + // - Competing intentions: + // - The channel is in idle mode and there are multiple RPCs starting at + // the same time, all trying to move the channel out of idle. Only one + // of them should succeed in doing so, while the other RPCs should + // piggyback on the first one and be successfully handled. + idleMu sync.RWMutex + actuallyIdle bool + timer *time.Timer +} + +// NewManager creates a new idleness manager implementation for the +// given idle timeout. It begins in idle mode. +func NewManager(enforcer Enforcer, timeout time.Duration) *Manager { + return &Manager{ + enforcer: enforcer, + timeout: timeout, + actuallyIdle: true, + activeCallsCount: -math.MaxInt32, + } +} + +// resetIdleTimerLocked resets the idle timer to the given duration. 
Called +// when exiting idle mode or when the timer fires and we need to reset it. +func (m *Manager) resetIdleTimerLocked(d time.Duration) { + if m.isClosed() || m.timeout == 0 || m.actuallyIdle { + return + } + + // It is safe to ignore the return value from Reset() because this method is + // only ever called from the timer callback or when exiting idle mode. + if m.timer != nil { + m.timer.Stop() + } + m.timer = timeAfterFunc(d, m.handleIdleTimeout) +} + +func (m *Manager) resetIdleTimer(d time.Duration) { + m.idleMu.Lock() + defer m.idleMu.Unlock() + m.resetIdleTimerLocked(d) +} + +// handleIdleTimeout is the timer callback that is invoked upon expiry of the +// configured idle timeout. The channel is considered inactive if there are no +// ongoing calls and no RPC activity since the last time the timer fired. +func (m *Manager) handleIdleTimeout() { + if m.isClosed() { + return + } + + if atomic.LoadInt32(&m.activeCallsCount) > 0 { + m.resetIdleTimer(m.timeout) + return + } + + // There has been activity on the channel since we last got here. Reset the + // timer and return. + if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 { + // Set the timer to fire after a duration of idle timeout, calculated + // from the time the most recent RPC completed. + atomic.StoreInt32(&m.activeSinceLastTimerCheck, 0) + m.resetIdleTimer(time.Duration(atomic.LoadInt64(&m.lastCallEndTime)-time.Now().UnixNano()) + m.timeout) + return + } + + // Now that we've checked that there has been no activity, attempt to enter + // idle mode, which is very likely to succeed. + if m.tryEnterIdleMode() { + // Successfully entered idle mode. No timer needed until we exit idle. + return + } + + // Failed to enter idle mode due to a concurrent RPC that kept the channel + // active, or because of an error from the channel. Undo the attempt to + // enter idle, and reset the timer to try again later. + m.resetIdleTimer(m.timeout) +} + +// tryEnterIdleMode instructs the channel to enter idle mode. But before +// that, it performs a last minute check to ensure that no new RPC has come in, +// making the channel active. +// +// Return value indicates whether or not the channel moved to idle mode. +// +// Holds idleMu which ensures mutual exclusion with exitIdleMode. +func (m *Manager) tryEnterIdleMode() bool { + // Setting the activeCallsCount to -math.MaxInt32 indicates to OnCallBegin() + // that the channel is either in idle mode or is trying to get there. + if !atomic.CompareAndSwapInt32(&m.activeCallsCount, 0, -math.MaxInt32) { + // This CAS operation can fail if an RPC started after we checked for + // activity in the timer handler, or one was ongoing from before the + // last time the timer fired, or if a test is attempting to enter idle + // mode without checking. In all cases, abort going into idle mode. + return false + } + // N.B. if we fail to enter idle mode after this, we must re-add + // math.MaxInt32 to m.activeCallsCount. + + m.idleMu.Lock() + defer m.idleMu.Unlock() + + if atomic.LoadInt32(&m.activeCallsCount) != -math.MaxInt32 { + // We raced and lost to a new RPC. Very rare, but stop entering idle. + atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) + return false + } + if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 { + // A very short RPC could have come in (and also finished) after we + // checked for calls count and activity in handleIdleTimeout(), but + // before the CAS operation. So, we need to check for activity again. 
+ atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) + return false + } + + // No new RPCs have come in since we set the active calls count value to + // -math.MaxInt32. And since we have the lock, it is safe to enter idle mode + // unconditionally now. + m.enforcer.EnterIdleMode() + m.actuallyIdle = true + return true +} + +func (m *Manager) EnterIdleModeForTesting() { + m.tryEnterIdleMode() +} + +// OnCallBegin is invoked at the start of every RPC. +func (m *Manager) OnCallBegin() error { + if m.isClosed() { + return nil + } + + if atomic.AddInt32(&m.activeCallsCount, 1) > 0 { + // Channel is not idle now. Set the activity bit and allow the call. + atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1) + return nil + } + + // Channel is either in idle mode or is in the process of moving to idle + // mode. Attempt to exit idle mode to allow this RPC. + if err := m.ExitIdleMode(); err != nil { + // Undo the increment to calls count, and return an error causing the + // RPC to fail. + atomic.AddInt32(&m.activeCallsCount, -1) + return err + } + + atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1) + return nil +} + +// ExitIdleMode instructs m to call the enforcer's ExitIdleMode and update m's +// internal state. +func (m *Manager) ExitIdleMode() error { + // Holds idleMu which ensures mutual exclusion with tryEnterIdleMode. + m.idleMu.Lock() + defer m.idleMu.Unlock() + + if m.isClosed() || !m.actuallyIdle { + // This can happen in three scenarios: + // - handleIdleTimeout() set the calls count to -math.MaxInt32 and called + // tryEnterIdleMode(). But before the latter could grab the lock, an RPC + // came in and OnCallBegin() noticed that the calls count is negative. + // - Channel is in idle mode, and multiple new RPCs come in at the same + // time, all of them notice a negative calls count in OnCallBegin and get + // here. The first one to get the lock would got the channel to exit idle. + // - Channel is not in idle mode, and the user calls Connect which calls + // m.ExitIdleMode. + // + // In any case, there is nothing to do here. + return nil + } + + if err := m.enforcer.ExitIdleMode(); err != nil { + return fmt.Errorf("failed to exit idle mode: %w", err) + } + + // Undo the idle entry process. This also respects any new RPC attempts. + atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) + m.actuallyIdle = false + + // Start a new timer to fire after the configured idle timeout. + m.resetIdleTimerLocked(m.timeout) + return nil +} + +// OnCallEnd is invoked at the end of every RPC. +func (m *Manager) OnCallEnd() { + if m.isClosed() { + return + } + + // Record the time at which the most recent call finished. + atomic.StoreInt64(&m.lastCallEndTime, time.Now().UnixNano()) + + // Decrement the active calls count. This count can temporarily go negative + // when the timer callback is in the process of moving the channel to idle + // mode, but one or more RPCs come in and complete before the timer callback + // can get done with the process of moving to idle mode. 
+ atomic.AddInt32(&m.activeCallsCount, -1) +} + +func (m *Manager) isClosed() bool { + return atomic.LoadInt32(&m.closed) == 1 +} + +func (m *Manager) Close() { + atomic.StoreInt32(&m.closed, 1) + + m.idleMu.Lock() + if m.timer != nil { + m.timer.Stop() + m.timer = nil + } + m.idleMu.Unlock() +} diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go new file mode 100644 index 00000000..6c7ea6a5 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -0,0 +1,226 @@ +/* + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal contains gRPC-internal code, to avoid polluting +// the godoc of the top-level grpc package. It must not import any grpc +// symbols to avoid circular dependencies. +package internal + +import ( + "context" + "time" + + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/serviceconfig" +) + +var ( + // WithHealthCheckFunc is set by dialoptions.go + WithHealthCheckFunc any // func (HealthChecker) DialOption + // HealthCheckFunc is used to provide client-side LB channel health checking + HealthCheckFunc HealthChecker + // BalancerUnregister is exported by package balancer to unregister a balancer. + BalancerUnregister func(name string) + // KeepaliveMinPingTime is the minimum ping interval. This must be 10s by + // default, but tests may wish to set it lower for convenience. + KeepaliveMinPingTime = 10 * time.Second + // KeepaliveMinServerPingTime is the minimum ping interval for servers. + // This must be 1s by default, but tests may wish to set it lower for + // convenience. + KeepaliveMinServerPingTime = time.Second + // ParseServiceConfig parses a JSON representation of the service config. + ParseServiceConfig any // func(string) *serviceconfig.ParseResult + // EqualServiceConfigForTesting is for testing service config generation and + // parsing. Both a and b should be returned by ParseServiceConfig. + // This function compares the config without rawJSON stripped, in case the + // there's difference in white space. + EqualServiceConfigForTesting func(a, b serviceconfig.Config) bool + // GetCertificateProviderBuilder returns the registered builder for the + // given name. This is set by package certprovider for use from xDS + // bootstrap code while parsing certificate provider configs in the + // bootstrap file. + GetCertificateProviderBuilder any // func(string) certprovider.Builder + // GetXDSHandshakeInfoForTesting returns a pointer to the xds.HandshakeInfo + // stored in the passed in attributes. This is set by + // credentials/xds/xds.go. + GetXDSHandshakeInfoForTesting any // func (*attributes.Attributes) *unsafe.Pointer + // GetServerCredentials returns the transport credentials configured on a + // gRPC server. An xDS-enabled server needs to know what type of credentials + // is configured on the underlying gRPC server. This is set by server.go. 
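A minimal sketch of driving the idleness Manager with a no-op Enforcer, assuming code inside the google.golang.org/grpc module. noopEnforcer is not part of the package; a real channel (grpc.ClientConn) supplies its own implementation.

package main

import (
	"fmt"
	"time"

	"google.golang.org/grpc/internal/idle"
)

type noopEnforcer struct{}

func (noopEnforcer) ExitIdleMode() error { fmt.Println("exit idle"); return nil }
func (noopEnforcer) EnterIdleMode()      { fmt.Println("enter idle") }

func main() {
	// The manager starts in idle mode; the first RPC forces it out.
	m := idle.NewManager(noopEnforcer{}, 500*time.Millisecond)

	if err := m.OnCallBegin(); err != nil { // exits idle mode, starts the idle timer
		panic(err)
	}
	m.OnCallEnd()

	// With no further activity, the idle timeout eventually fires and the
	// manager calls EnterIdleMode on the enforcer again.
	time.Sleep(time.Second)
	m.Close()
}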
+ GetServerCredentials any // func (*grpc.Server) credentials.TransportCredentials + // CanonicalString returns the canonical string of the code defined here: + // https://github.com/grpc/grpc/blob/master/doc/statuscodes.md. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + CanonicalString any // func (codes.Code) string + // IsRegisteredMethod returns whether the passed in method is registered as + // a method on the server. + IsRegisteredMethod any // func(*grpc.Server, string) bool + // ServerFromContext returns the server from the context. + ServerFromContext any // func(context.Context) *grpc.Server + // AddGlobalServerOptions adds an array of ServerOption that will be + // effective globally for newly created servers. The priority will be: 1. + // user-provided; 2. this method; 3. default values. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + AddGlobalServerOptions any // func(opt ...ServerOption) + // ClearGlobalServerOptions clears the array of extra ServerOption. This + // method is useful in testing and benchmarking. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + ClearGlobalServerOptions func() + // AddGlobalDialOptions adds an array of DialOption that will be effective + // globally for newly created client channels. The priority will be: 1. + // user-provided; 2. this method; 3. default values. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + AddGlobalDialOptions any // func(opt ...DialOption) + // DisableGlobalDialOptions returns a DialOption that prevents the + // ClientConn from applying the global DialOptions (set via + // AddGlobalDialOptions). + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + DisableGlobalDialOptions any // func() grpc.DialOption + // ClearGlobalDialOptions clears the array of extra DialOption. This + // method is useful in testing and benchmarking. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + ClearGlobalDialOptions func() + // JoinDialOptions combines the dial options passed as arguments into a + // single dial option. + JoinDialOptions any // func(...grpc.DialOption) grpc.DialOption + // JoinServerOptions combines the server options passed as arguments into a + // single server option. + JoinServerOptions any // func(...grpc.ServerOption) grpc.ServerOption + + // WithBinaryLogger returns a DialOption that specifies the binary logger + // for a ClientConn. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + WithBinaryLogger any // func(binarylog.Logger) grpc.DialOption + // BinaryLogger returns a ServerOption that can set the binary logger for a + // server. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + BinaryLogger any // func(binarylog.Logger) grpc.ServerOption + + // SubscribeToConnectivityStateChanges adds a grpcsync.Subscriber to a provided grpc.ClientConn + SubscribeToConnectivityStateChanges any // func(*grpc.ClientConn, grpcsync.Subscriber) + + // NewXDSResolverWithConfigForTesting creates a new xds resolver builder using + // the provided xds bootstrap config instead of the global configuration from + // the supported environment variables. 
The resolver.Builder is meant to be + // used in conjunction with the grpc.WithResolvers DialOption. + // + // Testing Only + // + // This function should ONLY be used for testing and may not work with some + // other features, including the CSDS service. + NewXDSResolverWithConfigForTesting any // func([]byte) (resolver.Builder, error) + + // RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster + // Specifier Plugin for testing purposes, regardless of the XDSRLS environment + // variable. + // + // TODO: Remove this function once the RLS env var is removed. + RegisterRLSClusterSpecifierPluginForTesting func() + + // UnregisterRLSClusterSpecifierPluginForTesting unregisters the RLS Cluster + // Specifier Plugin for testing purposes. This is needed because there is no way + // to unregister the RLS Cluster Specifier Plugin after registering it solely + // for testing purposes using RegisterRLSClusterSpecifierPluginForTesting(). + // + // TODO: Remove this function once the RLS env var is removed. + UnregisterRLSClusterSpecifierPluginForTesting func() + + // RegisterRBACHTTPFilterForTesting registers the RBAC HTTP Filter for testing + // purposes, regardless of the RBAC environment variable. + // + // TODO: Remove this function once the RBAC env var is removed. + RegisterRBACHTTPFilterForTesting func() + + // UnregisterRBACHTTPFilterForTesting unregisters the RBAC HTTP Filter for + // testing purposes. This is needed because there is no way to unregister the + // HTTP Filter after registering it solely for testing purposes using + // RegisterRBACHTTPFilterForTesting(). + // + // TODO: Remove this function once the RBAC env var is removed. + UnregisterRBACHTTPFilterForTesting func() + + // ORCAAllowAnyMinReportingInterval is for examples/orca use ONLY. + ORCAAllowAnyMinReportingInterval any // func(so *orca.ServiceOptions) + + // GRPCResolverSchemeExtraMetadata determines when gRPC will add extra + // metadata to RPCs. + GRPCResolverSchemeExtraMetadata string = "xds" + + // EnterIdleModeForTesting gets the ClientConn to enter IDLE mode. + EnterIdleModeForTesting any // func(*grpc.ClientConn) + + // ExitIdleModeForTesting gets the ClientConn to exit IDLE mode. + ExitIdleModeForTesting any // func(*grpc.ClientConn) error + + ChannelzTurnOffForTesting func() + + // TriggerXDSResourceNameNotFoundForTesting triggers the resource-not-found + // error for a given resource type and name. This is usually triggered when + // the associated watch timer fires. For testing purposes, having this + // function makes events more predictable than relying on timer events. + TriggerXDSResourceNameNotFoundForTesting any // func(func(xdsresource.Type, string), string, string) error + + // TriggerXDSResourceNotFoundClient invokes the testing xDS Client singleton + // to invoke resource not found for a resource type name and resource name. + TriggerXDSResourceNameNotFoundClient any // func(string, string) error + + // FromOutgoingContextRaw returns the un-merged, intermediary contents of metadata.rawMD. + FromOutgoingContextRaw any // func(context.Context) (metadata.MD, [][]string, bool) +) + +// HealthChecker defines the signature of the client-side LB channel health checking function. +// +// The implementation is expected to create a health checking RPC stream by +// calling newStream(), watch for the health status of serviceName, and report +// it's health back by calling setConnectivityState(). 
+// +// The health checking protocol is defined at: +// https://github.com/grpc/grpc/blob/master/doc/health-checking.md +type HealthChecker func(ctx context.Context, newStream func(string) (any, error), setConnectivityState func(connectivity.State, error), serviceName string) error + +const ( + // CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode. + CredsBundleModeFallback = "fallback" + // CredsBundleModeBalancer switches GoogleDefaultCreds to grpclb balancer + // mode. + CredsBundleModeBalancer = "balancer" + // CredsBundleModeBackendFromBalancer switches GoogleDefaultCreds to mode + // that supports backend returned by grpclb balancer. + CredsBundleModeBackendFromBalancer = "backend-from-balancer" +) + +// RLSLoadBalancingPolicyName is the name of the RLS LB policy. +// +// It currently has an experimental suffix which would be removed once +// end-to-end testing of the policy is completed. +const RLSLoadBalancingPolicyName = "rls_experimental" diff --git a/vendor/google.golang.org/grpc/internal/metadata/metadata.go b/vendor/google.golang.org/grpc/internal/metadata/metadata.go new file mode 100644 index 00000000..900bfb71 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/metadata/metadata.go @@ -0,0 +1,132 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package metadata contains functions to set and get metadata from addresses. +// +// This package is experimental. +package metadata + +import ( + "fmt" + "strings" + + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/resolver" +) + +type mdKeyType string + +const mdKey = mdKeyType("grpc.internal.address.metadata") + +type mdValue metadata.MD + +func (m mdValue) Equal(o any) bool { + om, ok := o.(mdValue) + if !ok { + return false + } + if len(m) != len(om) { + return false + } + for k, v := range m { + ov := om[k] + if len(ov) != len(v) { + return false + } + for i, ve := range v { + if ov[i] != ve { + return false + } + } + } + return true +} + +// Get returns the metadata of addr. +func Get(addr resolver.Address) metadata.MD { + attrs := addr.Attributes + if attrs == nil { + return nil + } + md, _ := attrs.Value(mdKey).(mdValue) + return metadata.MD(md) +} + +// Set sets (overrides) the metadata in addr. +// +// When a SubConn is created with this address, the RPCs sent on it will all +// have this metadata. +func Set(addr resolver.Address, md metadata.MD) resolver.Address { + addr.Attributes = addr.Attributes.WithValue(mdKey, mdValue(md)) + return addr +} + +// Validate validates every pair in md with ValidatePair. 
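A small sketch of attaching per-address metadata with Set and reading it back with Get, assuming code inside the google.golang.org/grpc module and that attributes.WithValue accepts a nil receiver (the case in current gRPC versions, though not shown in this patch):

package main

import (
	"fmt"

	imetadata "google.golang.org/grpc/internal/metadata"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/resolver"
)

func main() {
	addr := resolver.Address{Addr: "10.0.0.1:50051"}

	// Set returns a copy of the address with the MD stored in its attributes;
	// RPCs sent on a SubConn created from this address carry that metadata.
	addr = imetadata.Set(addr, metadata.Pairs("cluster", "demo"))

	fmt.Println(imetadata.Get(addr)) // map[cluster:[demo]]
}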
+func Validate(md metadata.MD) error { + for k, vals := range md { + if err := ValidatePair(k, vals...); err != nil { + return err + } + } + return nil +} + +// hasNotPrintable return true if msg contains any characters which are not in %x20-%x7E +func hasNotPrintable(msg string) bool { + // for i that saving a conversion if not using for range + for i := 0; i < len(msg); i++ { + if msg[i] < 0x20 || msg[i] > 0x7E { + return true + } + } + return false +} + +// ValidatePair validate a key-value pair with the following rules (the pseudo-header will be skipped) : +// +// - key must contain one or more characters. +// - the characters in the key must be contained in [0-9 a-z _ - .]. +// - if the key ends with a "-bin" suffix, no validation of the corresponding value is performed. +// - the characters in the every value must be printable (in [%x20-%x7E]). +func ValidatePair(key string, vals ...string) error { + // key should not be empty + if key == "" { + return fmt.Errorf("there is an empty key in the header") + } + // pseudo-header will be ignored + if key[0] == ':' { + return nil + } + // check key, for i that saving a conversion if not using for range + for i := 0; i < len(key); i++ { + r := key[i] + if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.' && r != '-' && r != '_' { + return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", key) + } + } + if strings.HasSuffix(key, "-bin") { + return nil + } + // check value + for _, val := range vals { + if hasNotPrintable(val) { + return fmt.Errorf("header key %q contains value with non-printable ASCII characters", key) + } + } + return nil +} diff --git a/vendor/google.golang.org/grpc/internal/pretty/pretty.go b/vendor/google.golang.org/grpc/internal/pretty/pretty.go new file mode 100644 index 00000000..70331913 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/pretty/pretty.go @@ -0,0 +1,82 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package pretty defines helper functions to pretty-print structs for logging. +package pretty + +import ( + "bytes" + "encoding/json" + "fmt" + + "github.com/golang/protobuf/jsonpb" + protov1 "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/encoding/protojson" + protov2 "google.golang.org/protobuf/proto" +) + +const jsonIndent = " " + +// ToJSON marshals the input into a json string. +// +// If marshal fails, it falls back to fmt.Sprintf("%+v"). +func ToJSON(e any) string { + switch ee := e.(type) { + case protov1.Message: + mm := jsonpb.Marshaler{Indent: jsonIndent} + ret, err := mm.MarshalToString(ee) + if err != nil { + // This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2 + // messages are not imported, and this will fail because the message + // is not found. 
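A short sketch of the validation rules above, assuming code inside the google.golang.org/grpc module: lowercase keys with printable ASCII values pass, keys with uppercase characters fail, and "-bin" keys skip value validation.

package main

import (
	"fmt"

	imetadata "google.golang.org/grpc/internal/metadata"
	"google.golang.org/grpc/metadata"
)

func main() {
	fmt.Println(imetadata.ValidatePair("x-request-id", "abc123")) // <nil>: lowercase key, printable value

	fmt.Println(imetadata.ValidatePair("X-Request-ID", "abc123")) // error: uppercase characters in the key

	// Keys ending in "-bin" skip value validation, so raw bytes are accepted.
	fmt.Println(imetadata.ValidatePair("trace-bin", string([]byte{0x00, 0x01}))) // <nil>

	// Validate applies the same checks to every pair in an MD.
	md := metadata.Pairs("x-ok", "yes", "x-bad", "\x7f\xff")
	fmt.Println(imetadata.Validate(md)) // error: non-printable characters in a value
}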
+ return fmt.Sprintf("%+v", ee) + } + return ret + case protov2.Message: + mm := protojson.MarshalOptions{ + Multiline: true, + Indent: jsonIndent, + } + ret, err := mm.Marshal(ee) + if err != nil { + // This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2 + // messages are not imported, and this will fail because the message + // is not found. + return fmt.Sprintf("%+v", ee) + } + return string(ret) + default: + ret, err := json.MarshalIndent(ee, "", jsonIndent) + if err != nil { + return fmt.Sprintf("%+v", ee) + } + return string(ret) + } +} + +// FormatJSON formats the input json bytes with indentation. +// +// If Indent fails, it returns the unchanged input as string. +func FormatJSON(b []byte) string { + var out bytes.Buffer + err := json.Indent(&out, b, "", jsonIndent) + if err != nil { + return string(b) + } + return out.String() +} diff --git a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go new file mode 100644 index 00000000..f0603871 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go @@ -0,0 +1,167 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package resolver provides internal resolver-related functionality. +package resolver + +import ( + "context" + "sync" + + "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/resolver" +) + +// ConfigSelector controls what configuration to use for every RPC. +type ConfigSelector interface { + // Selects the configuration for the RPC, or terminates it using the error. + // This error will be converted by the gRPC library to a status error with + // code UNKNOWN if it is not returned as a status error. + SelectConfig(RPCInfo) (*RPCConfig, error) +} + +// RPCInfo contains RPC information needed by a ConfigSelector. +type RPCInfo struct { + // Context is the user's context for the RPC and contains headers and + // application timeout. It is passed for interception purposes and for + // efficiency reasons. SelectConfig should not be blocking. + Context context.Context + Method string // i.e. "/Service/Method" +} + +// RPCConfig describes the configuration to use for each RPC. +type RPCConfig struct { + // The context to use for the remainder of the RPC; can pass info to LB + // policy or affect timeout or metadata. + Context context.Context + MethodConfig serviceconfig.MethodConfig // configuration to use for this RPC + OnCommitted func() // Called when the RPC has been committed (retries no longer possible) + Interceptor ClientInterceptor +} + +// ClientStream is the same as grpc.ClientStream, but defined here for circular +// dependency reasons. +type ClientStream interface { + // Header returns the header metadata received from the server if there + // is any. It blocks if the metadata is not ready to read. 
+ Header() (metadata.MD, error) + // Trailer returns the trailer metadata from the server, if there is any. + // It must only be called after stream.CloseAndRecv has returned, or + // stream.Recv has returned a non-nil error (including io.EOF). + Trailer() metadata.MD + // CloseSend closes the send direction of the stream. It closes the stream + // when non-nil error is met. It is also not safe to call CloseSend + // concurrently with SendMsg. + CloseSend() error + // Context returns the context for this stream. + // + // It should not be called until after Header or RecvMsg has returned. Once + // called, subsequent client-side retries are disabled. + Context() context.Context + // SendMsg is generally called by generated code. On error, SendMsg aborts + // the stream. If the error was generated by the client, the status is + // returned directly; otherwise, io.EOF is returned and the status of + // the stream may be discovered using RecvMsg. + // + // SendMsg blocks until: + // - There is sufficient flow control to schedule m with the transport, or + // - The stream is done, or + // - The stream breaks. + // + // SendMsg does not wait until the message is received by the server. An + // untimely stream closure may result in lost messages. To ensure delivery, + // users should ensure the RPC completed successfully using RecvMsg. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not safe + // to call SendMsg on the same stream in different goroutines. It is also + // not safe to call CloseSend concurrently with SendMsg. + SendMsg(m any) error + // RecvMsg blocks until it receives a message into m or the stream is + // done. It returns io.EOF when the stream completes successfully. On + // any other error, the stream is aborted and the error contains the RPC + // status. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not + // safe to call RecvMsg on the same stream in different goroutines. + RecvMsg(m any) error +} + +// ClientInterceptor is an interceptor for gRPC client streams. +type ClientInterceptor interface { + // NewStream produces a ClientStream for an RPC which may optionally use + // the provided function to produce a stream for delegation. Note: + // RPCInfo.Context should not be used (will be nil). + // + // done is invoked when the RPC is finished using its connection, or could + // not be assigned a connection. RPC operations may still occur on + // ClientStream after done is called, since the interceptor is invoked by + // application-layer operations. done must never be nil when called. + NewStream(ctx context.Context, ri RPCInfo, done func(), newStream func(ctx context.Context, done func()) (ClientStream, error)) (ClientStream, error) +} + +// ServerInterceptor is an interceptor for incoming RPC's on gRPC server side. +type ServerInterceptor interface { + // AllowRPC checks if an incoming RPC is allowed to proceed based on + // information about connection RPC was received on, and HTTP Headers. This + // information will be piped into context. + AllowRPC(ctx context.Context) error // TODO: Make this a real interceptor for filters such as rate limiting. +} + +type csKeyType string + +const csKey = csKeyType("grpc.internal.resolver.configSelector") + +// SetConfigSelector sets the config selector in state and returns the new +// state. 
+func SetConfigSelector(state resolver.State, cs ConfigSelector) resolver.State { + state.Attributes = state.Attributes.WithValue(csKey, cs) + return state +} + +// GetConfigSelector retrieves the config selector from state, if present, and +// returns it or nil if absent. +func GetConfigSelector(state resolver.State) ConfigSelector { + cs, _ := state.Attributes.Value(csKey).(ConfigSelector) + return cs +} + +// SafeConfigSelector allows for safe switching of ConfigSelector +// implementations such that previous values are guaranteed to not be in use +// when UpdateConfigSelector returns. +type SafeConfigSelector struct { + mu sync.RWMutex + cs ConfigSelector +} + +// UpdateConfigSelector swaps to the provided ConfigSelector and blocks until +// all uses of the previous ConfigSelector have completed. +func (scs *SafeConfigSelector) UpdateConfigSelector(cs ConfigSelector) { + scs.mu.Lock() + defer scs.mu.Unlock() + scs.cs = cs +} + +// SelectConfig defers to the current ConfigSelector in scs. +func (scs *SafeConfigSelector) SelectConfig(r RPCInfo) (*RPCConfig, error) { + scs.mu.RLock() + defer scs.mu.RUnlock() + return scs.cs.SelectConfig(r) +} diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go new file mode 100644 index 00000000..b66dcb21 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -0,0 +1,441 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package dns implements a dns resolver to be installed as the default resolver +// in grpc. +package dns + +import ( + "context" + "encoding/json" + "fmt" + "net" + "os" + "strconv" + "strings" + "sync" + "time" + + grpclbstate "google.golang.org/grpc/balancer/grpclb/state" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/resolver/dns/internal" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +// EnableSRVLookups controls whether the DNS resolver attempts to fetch gRPCLB +// addresses from SRV records. Must not be changed after init time. +var EnableSRVLookups = false + +var logger = grpclog.Component("dns") + +func init() { + resolver.Register(NewBuilder()) + internal.TimeAfterFunc = time.After + internal.NewNetResolver = newNetResolver + internal.AddressDialer = addressDialer +} + +const ( + defaultPort = "443" + defaultDNSSvrPort = "53" + golang = "GO" + // txtPrefix is the prefix string to be prepended to the host name for txt + // record lookup. + txtPrefix = "_grpc_config." + // In DNS, service config is encoded in a TXT record via the mechanism + // described in RFC-1464 using the attribute name grpc_config. 
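As a quick illustration of the TXT-record convention described above, here is a sketch, independent of this vendored code, that queries `_grpc_config.<host>` with Go's pure resolver and strips the `grpc_config=` attribute. The host name is a placeholder and the error handling is minimal.

package main

import (
	"context"
	"fmt"
	"net"
	"strings"
	"time"
)

func main() {
	// Hypothetical service host; the DNS resolver above prepends "_grpc_config." to it.
	const host = "example.com"

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Use the pure-Go resolver, as the builder above does for custom authorities.
	r := &net.Resolver{PreferGo: true}
	txts, err := r.LookupTXT(ctx, "_grpc_config."+host)
	if err != nil {
		fmt.Println("TXT lookup failed:", err)
		return
	}

	// TXT strings are concatenated before checking for the attribute prefix.
	joined := strings.Join(txts, "")
	if !strings.HasPrefix(joined, "grpc_config=") {
		fmt.Println("no grpc_config attribute; treated as having no service config")
		return
	}
	fmt.Println("service config JSON:", strings.TrimPrefix(joined, "grpc_config="))
}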
+ txtAttribute = "grpc_config=" +) + +var addressDialer = func(address string) func(context.Context, string, string) (net.Conn, error) { + return func(ctx context.Context, network, _ string) (net.Conn, error) { + var dialer net.Dialer + return dialer.DialContext(ctx, network, address) + } +} + +var newNetResolver = func(authority string) (internal.NetResolver, error) { + if authority == "" { + return net.DefaultResolver, nil + } + + host, port, err := parseTarget(authority, defaultDNSSvrPort) + if err != nil { + return nil, err + } + + authorityWithPort := net.JoinHostPort(host, port) + + return &net.Resolver{ + PreferGo: true, + Dial: internal.AddressDialer(authorityWithPort), + }, nil +} + +// NewBuilder creates a dnsBuilder which is used to factory DNS resolvers. +func NewBuilder() resolver.Builder { + return &dnsBuilder{} +} + +type dnsBuilder struct{} + +// Build creates and starts a DNS resolver that watches the name resolution of +// the target. +func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + host, port, err := parseTarget(target.Endpoint(), defaultPort) + if err != nil { + return nil, err + } + + // IP address. + if ipAddr, ok := formatIP(host); ok { + addr := []resolver.Address{{Addr: ipAddr + ":" + port}} + cc.UpdateState(resolver.State{Addresses: addr}) + return deadResolver{}, nil + } + + // DNS address (non-IP). + ctx, cancel := context.WithCancel(context.Background()) + d := &dnsResolver{ + host: host, + port: port, + ctx: ctx, + cancel: cancel, + cc: cc, + rn: make(chan struct{}, 1), + disableServiceConfig: opts.DisableServiceConfig, + } + + d.resolver, err = internal.NewNetResolver(target.URL.Host) + if err != nil { + return nil, err + } + + d.wg.Add(1) + go d.watcher() + return d, nil +} + +// Scheme returns the naming scheme of this resolver builder, which is "dns". +func (b *dnsBuilder) Scheme() string { + return "dns" +} + +// deadResolver is a resolver that does nothing. +type deadResolver struct{} + +func (deadResolver) ResolveNow(resolver.ResolveNowOptions) {} + +func (deadResolver) Close() {} + +// dnsResolver watches for the name resolution update for a non-IP target. +type dnsResolver struct { + host string + port string + resolver internal.NetResolver + ctx context.Context + cancel context.CancelFunc + cc resolver.ClientConn + // rn channel is used by ResolveNow() to force an immediate resolution of the + // target. + rn chan struct{} + // wg is used to enforce Close() to return after the watcher() goroutine has + // finished. Otherwise, data race will be possible. [Race Example] in + // dns_resolver_test we replace the real lookup functions with mocked ones to + // facilitate testing. If Close() doesn't wait for watcher() goroutine + // finishes, race detector sometimes will warns lookup (READ the lookup + // function pointers) inside watcher() goroutine has data race with + // replaceNetFunc (WRITE the lookup function pointers). + wg sync.WaitGroup + disableServiceConfig bool +} + +// ResolveNow invoke an immediate resolution of the target that this +// dnsResolver watches. +func (d *dnsResolver) ResolveNow(resolver.ResolveNowOptions) { + select { + case d.rn <- struct{}{}: + default: + } +} + +// Close closes the dnsResolver. +func (d *dnsResolver) Close() { + d.cancel() + d.wg.Wait() +} + +func (d *dnsResolver) watcher() { + defer d.wg.Done() + backoffIndex := 1 + for { + state, err := d.lookup() + if err != nil { + // Report error to the underlying grpc.ClientConn. 
+ d.cc.ReportError(err) + } else { + err = d.cc.UpdateState(*state) + } + + var waitTime time.Duration + if err == nil { + // Success resolving, wait for the next ResolveNow. However, also wait 30 + // seconds at the very least to prevent constantly re-resolving. + backoffIndex = 1 + waitTime = internal.MinResolutionRate + select { + case <-d.ctx.Done(): + return + case <-d.rn: + } + } else { + // Poll on an error found in DNS Resolver or an error received from + // ClientConn. + waitTime = backoff.DefaultExponential.Backoff(backoffIndex) + backoffIndex++ + } + select { + case <-d.ctx.Done(): + return + case <-internal.TimeAfterFunc(waitTime): + } + } +} + +func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) { + if !EnableSRVLookups { + return nil, nil + } + var newAddrs []resolver.Address + _, srvs, err := d.resolver.LookupSRV(d.ctx, "grpclb", "tcp", d.host) + if err != nil { + err = handleDNSError(err, "SRV") // may become nil + return nil, err + } + for _, s := range srvs { + lbAddrs, err := d.resolver.LookupHost(d.ctx, s.Target) + if err != nil { + err = handleDNSError(err, "A") // may become nil + if err == nil { + // If there are other SRV records, look them up and ignore this + // one that does not exist. + continue + } + return nil, err + } + for _, a := range lbAddrs { + ip, ok := formatIP(a) + if !ok { + return nil, fmt.Errorf("dns: error parsing A record IP address %v", a) + } + addr := ip + ":" + strconv.Itoa(int(s.Port)) + newAddrs = append(newAddrs, resolver.Address{Addr: addr, ServerName: s.Target}) + } + } + return newAddrs, nil +} + +func handleDNSError(err error, lookupType string) error { + dnsErr, ok := err.(*net.DNSError) + if ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary { + // Timeouts and temporary errors should be communicated to gRPC to + // attempt another DNS query (with backoff). Other errors should be + // suppressed (they may represent the absence of a TXT record). + return nil + } + if err != nil { + err = fmt.Errorf("dns: %v record lookup error: %v", lookupType, err) + logger.Info(err) + } + return err +} + +func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult { + ss, err := d.resolver.LookupTXT(d.ctx, txtPrefix+d.host) + if err != nil { + if envconfig.TXTErrIgnore { + return nil + } + if err = handleDNSError(err, "TXT"); err != nil { + return &serviceconfig.ParseResult{Err: err} + } + return nil + } + var res string + for _, s := range ss { + res += s + } + + // TXT record must have "grpc_config=" attribute in order to be used as + // service config. + if !strings.HasPrefix(res, txtAttribute) { + logger.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute) + // This is not an error; it is the equivalent of not having a service + // config. 
+ return nil + } + sc := canaryingSC(strings.TrimPrefix(res, txtAttribute)) + return d.cc.ParseServiceConfig(sc) +} + +func (d *dnsResolver) lookupHost() ([]resolver.Address, error) { + addrs, err := d.resolver.LookupHost(d.ctx, d.host) + if err != nil { + err = handleDNSError(err, "A") + return nil, err + } + newAddrs := make([]resolver.Address, 0, len(addrs)) + for _, a := range addrs { + ip, ok := formatIP(a) + if !ok { + return nil, fmt.Errorf("dns: error parsing A record IP address %v", a) + } + addr := ip + ":" + d.port + newAddrs = append(newAddrs, resolver.Address{Addr: addr}) + } + return newAddrs, nil +} + +func (d *dnsResolver) lookup() (*resolver.State, error) { + srv, srvErr := d.lookupSRV() + addrs, hostErr := d.lookupHost() + if hostErr != nil && (srvErr != nil || len(srv) == 0) { + return nil, hostErr + } + + state := resolver.State{Addresses: addrs} + if len(srv) > 0 { + state = grpclbstate.Set(state, &grpclbstate.State{BalancerAddresses: srv}) + } + if !d.disableServiceConfig { + state.ServiceConfig = d.lookupTXT() + } + return &state, nil +} + +// formatIP returns ok = false if addr is not a valid textual representation of +// an IP address. If addr is an IPv4 address, return the addr and ok = true. +// If addr is an IPv6 address, return the addr enclosed in square brackets and +// ok = true. +func formatIP(addr string) (addrIP string, ok bool) { + ip := net.ParseIP(addr) + if ip == nil { + return "", false + } + if ip.To4() != nil { + return addr, true + } + return "[" + addr + "]", true +} + +// parseTarget takes the user input target string and default port, returns +// formatted host and port info. If target doesn't specify a port, set the port +// to be the defaultPort. If target is in IPv6 format and host-name is enclosed +// in square brackets, brackets are stripped when setting the host. +// examples: +// target: "www.google.com" defaultPort: "443" returns host: "www.google.com", port: "443" +// target: "ipv4-host:80" defaultPort: "443" returns host: "ipv4-host", port: "80" +// target: "[ipv6-host]" defaultPort: "443" returns host: "ipv6-host", port: "443" +// target: ":80" defaultPort: "443" returns host: "localhost", port: "80" +func parseTarget(target, defaultPort string) (host, port string, err error) { + if target == "" { + return "", "", internal.ErrMissingAddr + } + if ip := net.ParseIP(target); ip != nil { + // target is an IPv4 or IPv6(without brackets) address + return target, defaultPort, nil + } + if host, port, err = net.SplitHostPort(target); err == nil { + if port == "" { + // If the port field is empty (target ends with colon), e.g. "[::1]:", + // this is an error. + return "", "", internal.ErrEndsWithColon + } + // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port + if host == "" { + // Keep consistent with net.Dial(): If the host is empty, as in ":80", + // the local system is assumed. 
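The parseTarget doc comment above spells out the expected host/port splits. The following self-contained approximation (not the vendored function itself, and with simplified error messages) can be run to confirm those examples.

package main

import (
	"fmt"
	"net"
)

// splitTarget approximates the parseTarget behavior documented above.
func splitTarget(target, defaultPort string) (host, port string, err error) {
	if target == "" {
		return "", "", fmt.Errorf("missing address")
	}
	if ip := net.ParseIP(target); ip != nil {
		// Bare IPv4 or IPv6 address: use the default port.
		return target, defaultPort, nil
	}
	if host, port, err = net.SplitHostPort(target); err == nil {
		if port == "" {
			return "", "", fmt.Errorf("missing port after port-separator colon")
		}
		if host == "" {
			host = "localhost" // ":80" means the local system
		}
		return host, port, nil
	}
	if host, port, err = net.SplitHostPort(target + ":" + defaultPort); err == nil {
		return host, port, nil // target had no port
	}
	return "", "", fmt.Errorf("invalid target %q: %v", target, err)
}

func main() {
	for _, t := range []string{"www.google.com", "ipv4-host:80", "[ipv6-host]", ":80"} {
		h, p, err := splitTarget(t, "443")
		fmt.Printf("%-16q -> host=%q port=%q err=%v\n", t, h, p, err)
	}
}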
+ host = "localhost" + } + return host, port, nil + } + if host, port, err = net.SplitHostPort(target + ":" + defaultPort); err == nil { + // target doesn't have port + return host, port, nil + } + return "", "", fmt.Errorf("invalid target address %v, error info: %v", target, err) +} + +type rawChoice struct { + ClientLanguage *[]string `json:"clientLanguage,omitempty"` + Percentage *int `json:"percentage,omitempty"` + ClientHostName *[]string `json:"clientHostName,omitempty"` + ServiceConfig *json.RawMessage `json:"serviceConfig,omitempty"` +} + +func containsString(a *[]string, b string) bool { + if a == nil { + return true + } + for _, c := range *a { + if c == b { + return true + } + } + return false +} + +func chosenByPercentage(a *int) bool { + if a == nil { + return true + } + return grpcrand.Intn(100)+1 <= *a +} + +func canaryingSC(js string) string { + if js == "" { + return "" + } + var rcs []rawChoice + err := json.Unmarshal([]byte(js), &rcs) + if err != nil { + logger.Warningf("dns: error parsing service config json: %v", err) + return "" + } + cliHostname, err := os.Hostname() + if err != nil { + logger.Warningf("dns: error getting client hostname: %v", err) + return "" + } + var sc string + for _, c := range rcs { + if !containsString(c.ClientLanguage, golang) || + !chosenByPercentage(c.Percentage) || + !containsString(c.ClientHostName, cliHostname) || + c.ServiceConfig == nil { + continue + } + sc = string(*c.ServiceConfig) + break + } + return sc +} diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go b/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go new file mode 100644 index 00000000..c7fc557d --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go @@ -0,0 +1,70 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal contains functionality internal to the dns resolver package. +package internal + +import ( + "context" + "errors" + "net" + "time" +) + +// NetResolver groups the methods on net.Resolver that are used by the DNS +// resolver implementation. This allows the default net.Resolver instance to be +// overidden from tests. +type NetResolver interface { + LookupHost(ctx context.Context, host string) (addrs []string, err error) + LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error) + LookupTXT(ctx context.Context, name string) (txts []string, err error) +} + +var ( + // ErrMissingAddr is the error returned when building a DNS resolver when + // the provided target name is empty. + ErrMissingAddr = errors.New("dns resolver: missing address") + + // ErrEndsWithColon is the error returned when building a DNS resolver when + // the provided target name ends with a colon that is supposed to be the + // separator between host and port. E.g. 
"::" is a valid address as it is + // an IPv6 address (host only) and "[::]:" is invalid as it ends with a + // colon as the host and port separator + ErrEndsWithColon = errors.New("dns resolver: missing port after port-separator colon") +) + +// The following vars are overridden from tests. +var ( + // MinResolutionRate is the minimum rate at which re-resolutions are + // allowed. This helps to prevent excessive re-resolution. + MinResolutionRate = 30 * time.Second + + // TimeAfterFunc is used by the DNS resolver to wait for the given duration + // to elapse. In non-test code, this is implemented by time.After. In test + // code, this can be used to control the amount of time the resolver is + // blocked waiting for the duration to elapse. + TimeAfterFunc func(time.Duration) <-chan time.Time + + // NewNetResolver returns the net.Resolver instance for the given target. + NewNetResolver func(string) (NetResolver, error) + + // AddressDialer is the dialer used to dial the DNS server. It accepts the + // Host portion of the URL corresponding to the user's dial target and + // returns a dial function. + AddressDialer func(address string) func(context.Context, string, string) (net.Conn, error) +) diff --git a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go new file mode 100644 index 00000000..afac5657 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go @@ -0,0 +1,64 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package passthrough implements a pass-through resolver. It sends the target +// name without scheme back to gRPC as resolved address. 
+package passthrough + +import ( + "errors" + + "google.golang.org/grpc/resolver" +) + +const scheme = "passthrough" + +type passthroughBuilder struct{} + +func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + if target.Endpoint() == "" && opts.Dialer == nil { + return nil, errors.New("passthrough: received empty target in Build()") + } + r := &passthroughResolver{ + target: target, + cc: cc, + } + r.start() + return r, nil +} + +func (*passthroughBuilder) Scheme() string { + return scheme +} + +type passthroughResolver struct { + target resolver.Target + cc resolver.ClientConn +} + +func (r *passthroughResolver) start() { + r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint()}}}) +} + +func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOptions) {} + +func (*passthroughResolver) Close() {} + +func init() { + resolver.Register(&passthroughBuilder{}) +} diff --git a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go new file mode 100644 index 00000000..27cd81af --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go @@ -0,0 +1,78 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package unix implements a resolver for unix targets. +package unix + +import ( + "fmt" + + "google.golang.org/grpc/internal/transport/networktype" + "google.golang.org/grpc/resolver" +) + +const unixScheme = "unix" +const unixAbstractScheme = "unix-abstract" + +type builder struct { + scheme string +} + +func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) { + if target.URL.Host != "" { + return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.URL.Host) + } + + // gRPC was parsing the dial target manually before PR #4817, and we + // switched to using url.Parse() in that PR. To avoid breaking existing + // resolver implementations we ended up stripping the leading "/" from the + // endpoint. This obviously does not work for the "unix" scheme. Hence we + // end up using the parsed URL instead. + endpoint := target.URL.Path + if endpoint == "" { + endpoint = target.URL.Opaque + } + addr := resolver.Address{Addr: endpoint} + if b.scheme == unixAbstractScheme { + // We can not prepend \0 as c++ gRPC does, as in Golang '@' is used to signify we do + // not want trailing \0 in address. 
+ addr.Addr = "@" + addr.Addr + } + cc.UpdateState(resolver.State{Addresses: []resolver.Address{networktype.Set(addr, "unix")}}) + return &nopResolver{}, nil +} + +func (b *builder) Scheme() string { + return b.scheme +} + +func (b *builder) OverrideAuthority(resolver.Target) string { + return "localhost" +} + +type nopResolver struct { +} + +func (*nopResolver) ResolveNow(resolver.ResolveNowOptions) {} + +func (*nopResolver) Close() {} + +func init() { + resolver.Register(&builder{scheme: unixScheme}) + resolver.Register(&builder{scheme: unixAbstractScheme}) +} diff --git a/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go b/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go new file mode 100644 index 00000000..11d82afc --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go @@ -0,0 +1,130 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package serviceconfig + +import ( + "encoding/json" + "fmt" + "math" + "strconv" + "strings" + "time" +) + +// Duration defines JSON marshal and unmarshal methods to conform to the +// protobuf JSON spec defined [here]. +// +// [here]: https://protobuf.dev/reference/protobuf/google.protobuf/#duration +type Duration time.Duration + +func (d Duration) String() string { + return fmt.Sprint(time.Duration(d)) +} + +// MarshalJSON converts from d to a JSON string output. +func (d Duration) MarshalJSON() ([]byte, error) { + ns := time.Duration(d).Nanoseconds() + sec := ns / int64(time.Second) + ns = ns % int64(time.Second) + + var sign string + if sec < 0 || ns < 0 { + sign, sec, ns = "-", -1*sec, -1*ns + } + + // Generated output always contains 0, 3, 6, or 9 fractional digits, + // depending on required precision. + str := fmt.Sprintf("%s%d.%09d", sign, sec, ns) + str = strings.TrimSuffix(str, "000") + str = strings.TrimSuffix(str, "000") + str = strings.TrimSuffix(str, ".000") + return []byte(fmt.Sprintf("\"%ss\"", str)), nil +} + +// UnmarshalJSON unmarshals b as a duration JSON string into d. +func (d *Duration) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + if !strings.HasSuffix(s, "s") { + return fmt.Errorf("malformed duration %q: missing seconds unit", s) + } + neg := false + if s[0] == '-' { + neg = true + s = s[1:] + } + ss := strings.SplitN(s[:len(s)-1], ".", 3) + if len(ss) > 2 { + return fmt.Errorf("malformed duration %q: too many decimals", s) + } + // hasDigits is set if either the whole or fractional part of the number is + // present, since both are optional but one is required. + hasDigits := false + var sec, ns int64 + if len(ss[0]) > 0 { + var err error + if sec, err = strconv.ParseInt(ss[0], 10, 64); err != nil { + return fmt.Errorf("malformed duration %q: %v", s, err) + } + // Maximum seconds value per the durationpb spec. 
+ const maxProtoSeconds = 315_576_000_000 + if sec > maxProtoSeconds { + return fmt.Errorf("out of range: %q", s) + } + hasDigits = true + } + if len(ss) == 2 && len(ss[1]) > 0 { + if len(ss[1]) > 9 { + return fmt.Errorf("malformed duration %q: too many digits after decimal", s) + } + var err error + if ns, err = strconv.ParseInt(ss[1], 10, 64); err != nil { + return fmt.Errorf("malformed duration %q: %v", s, err) + } + for i := 9; i > len(ss[1]); i-- { + ns *= 10 + } + hasDigits = true + } + if !hasDigits { + return fmt.Errorf("malformed duration %q: contains no numbers", s) + } + + if neg { + sec *= -1 + ns *= -1 + } + + // Maximum/minimum seconds/nanoseconds representable by Go's time.Duration. + const maxSeconds = math.MaxInt64 / int64(time.Second) + const maxNanosAtMaxSeconds = math.MaxInt64 % int64(time.Second) + const minSeconds = math.MinInt64 / int64(time.Second) + const minNanosAtMinSeconds = math.MinInt64 % int64(time.Second) + + if sec > maxSeconds || (sec == maxSeconds && ns >= maxNanosAtMaxSeconds) { + *d = Duration(math.MaxInt64) + } else if sec < minSeconds || (sec == minSeconds && ns <= minNanosAtMinSeconds) { + *d = Duration(math.MinInt64) + } else { + *d = Duration(sec*int64(time.Second) + ns) + } + return nil +} diff --git a/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go new file mode 100644 index 00000000..51e733e4 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go @@ -0,0 +1,180 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package serviceconfig contains utility functions to parse service config. +package serviceconfig + +import ( + "encoding/json" + "fmt" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + externalserviceconfig "google.golang.org/grpc/serviceconfig" +) + +var logger = grpclog.Component("core") + +// BalancerConfig wraps the name and config associated with one load balancing +// policy. It corresponds to a single entry of the loadBalancingConfig field +// from ServiceConfig. +// +// It implements the json.Unmarshaler interface. +// +// https://github.com/grpc/grpc-proto/blob/54713b1e8bc6ed2d4f25fb4dff527842150b91b2/grpc/service_config/service_config.proto#L247 +type BalancerConfig struct { + Name string + Config externalserviceconfig.LoadBalancingConfig +} + +type intermediateBalancerConfig []map[string]json.RawMessage + +// MarshalJSON implements the json.Marshaler interface. +// +// It marshals the balancer and config into a length-1 slice +// ([]map[string]config). +func (bc *BalancerConfig) MarshalJSON() ([]byte, error) { + if bc.Config == nil { + // If config is nil, return empty config `{}`. 
+ return []byte(fmt.Sprintf(`[{%q: %v}]`, bc.Name, "{}")), nil + } + c, err := json.Marshal(bc.Config) + if err != nil { + return nil, err + } + return []byte(fmt.Sprintf(`[{%q: %s}]`, bc.Name, c)), nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +// +// ServiceConfig contains a list of loadBalancingConfigs, each with a name and +// config. This method iterates through that list in order, and stops at the +// first policy that is supported. +// - If the config for the first supported policy is invalid, the whole service +// config is invalid. +// - If the list doesn't contain any supported policy, the whole service config +// is invalid. +func (bc *BalancerConfig) UnmarshalJSON(b []byte) error { + var ir intermediateBalancerConfig + err := json.Unmarshal(b, &ir) + if err != nil { + return err + } + + var names []string + for i, lbcfg := range ir { + if len(lbcfg) != 1 { + return fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg) + } + + var ( + name string + jsonCfg json.RawMessage + ) + // Get the key:value pair from the map. We have already made sure that + // the map contains a single entry. + for name, jsonCfg = range lbcfg { + } + + names = append(names, name) + builder := balancer.Get(name) + if builder == nil { + // If the balancer is not registered, move on to the next config. + // This is not an error. + continue + } + bc.Name = name + + parser, ok := builder.(balancer.ConfigParser) + if !ok { + if string(jsonCfg) != "{}" { + logger.Warningf("non-empty balancer configuration %q, but balancer does not implement ParseConfig", string(jsonCfg)) + } + // Stop at this, though the builder doesn't support parsing config. + return nil + } + + cfg, err := parser.ParseConfig(jsonCfg) + if err != nil { + return fmt.Errorf("error parsing loadBalancingConfig for policy %q: %v", name, err) + } + bc.Config = cfg + return nil + } + // This is reached when the for loop iterates over all entries, but didn't + // return. This means we had a loadBalancingConfig slice but did not + // encounter a registered policy. The config is considered invalid in this + // case. + return fmt.Errorf("invalid loadBalancingConfig: no supported policies found in %v", names) +} + +// MethodConfig defines the configuration recommended by the service providers for a +// particular method. +type MethodConfig struct { + // WaitForReady indicates whether RPCs sent to this method should wait until + // the connection is ready by default (!failfast). The value specified via the + // gRPC client API will override the value set here. + WaitForReady *bool + // Timeout is the default timeout for RPCs sent to this method. The actual + // deadline used will be the minimum of the value specified here and the value + // set by the application via the gRPC client API. If either one is not set, + // then the other will be used. If neither is set, then the RPC has no deadline. + Timeout *time.Duration + // MaxReqSize is the maximum allowed payload size for an individual request in a + // stream (client->server) in bytes. The size which is measured is the serialized + // payload after per-message compression (but before stream compression) in bytes. + // The actual value used is the minimum of the value specified here and the value set + // by the application via the gRPC client API. If either one is not set, then the other + // will be used. If neither is set, then the built-in default is used. 
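For context, this is the kind of service-config JSON that BalancerConfig and MethodConfig above decode, supplied here through the public grpc.WithDefaultServiceConfig option. The target, service name, and numbers are placeholders.

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

// One loadBalancingConfig entry (first supported policy wins) and one method config.
// "round_robin" takes no configuration, hence the empty object.
const serviceConfig = `{
  "loadBalancingConfig": [ {"round_robin": {}} ],
  "methodConfig": [{
    "name": [{"service": "example.Echo"}],
    "waitForReady": true,
    "timeout": "1.5s",
    "retryPolicy": {
      "maxAttempts": 3,
      "initialBackoff": "0.1s",
      "maxBackoff": "1s",
      "backoffMultiplier": 2,
      "retryableStatusCodes": ["UNAVAILABLE"]
    }
  }]
}`

func main() {
	conn, err := grpc.Dial(
		"dns:///example.internal:50051", // placeholder target
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(serviceConfig),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}

In recent grpc-go versions the default service config is parsed at dial time, so malformed JSON or an unsupported policy list surfaces as a Dial error rather than failing later on an RPC.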
+ MaxReqSize *int + // MaxRespSize is the maximum allowed payload size for an individual response in a + // stream (server->client) in bytes. + MaxRespSize *int + // RetryPolicy configures retry options for the method. + RetryPolicy *RetryPolicy +} + +// RetryPolicy defines the go-native version of the retry policy defined by the +// service config here: +// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config +type RetryPolicy struct { + // MaxAttempts is the maximum number of attempts, including the original RPC. + // + // This field is required and must be two or greater. + MaxAttempts int + + // Exponential backoff parameters. The initial retry attempt will occur at + // random(0, initialBackoff). In general, the nth attempt will occur at + // random(0, + // min(initialBackoff*backoffMultiplier**(n-1), maxBackoff)). + // + // These fields are required and must be greater than zero. + InitialBackoff time.Duration + MaxBackoff time.Duration + BackoffMultiplier float64 + + // The set of status codes which may be retried. + // + // Status codes are specified as strings, e.g., "UNAVAILABLE". + // + // This field is required and must be non-empty. + // Note: a set is used to store this for easy lookup. + RetryableStatusCodes map[codes.Code]bool +} diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go new file mode 100644 index 00000000..03ef2fed --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/status/status.go @@ -0,0 +1,204 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package status implements errors returned by gRPC. These errors are +// serialized and transmitted on the wire between server and client, and allow +// for additional data to be transmitted via the Details field in the status +// proto. gRPC service handlers should return an error created by this +// package, and gRPC clients should expect a corresponding error to be +// returned from the RPC call. +// +// This package upholds the invariants that a non-nil error may not +// contain an OK code, and an OK code must result in a nil error. +package status + +import ( + "errors" + "fmt" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + spb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc/codes" +) + +// Status represents an RPC status code, message, and details. It is immutable +// and should be created with New, Newf, or FromProto. +type Status struct { + s *spb.Status +} + +// NewWithProto returns a new status including details from statusProto. This +// is meant to be used by the gRPC library only. +func NewWithProto(code codes.Code, message string, statusProto []string) *Status { + if len(statusProto) != 1 { + // No grpc-status-details bin header, or multiple; just ignore. 
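This internal package backs the public google.golang.org/grpc/status API. A brief sketch of the usual round trip, creating a status error on one side and inspecting it on the other; the code, message, and id are arbitrary.

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func lookupUser(id string) error {
	// A server handler would return this; the code and message travel on the wire.
	return status.Errorf(codes.NotFound, "user %q not found", id)
}

func main() {
	err := lookupUser("42")

	// Client side: recover the *status.Status from the error.
	st, ok := status.FromError(err)
	fmt.Println(ok)           // true
	fmt.Println(st.Code())    // NotFound
	fmt.Println(st.Message()) // user "42" not found

	// status.Code is a shortcut when only the code matters.
	fmt.Println(status.Code(err) == codes.NotFound) // true
}

A plain, non-status error passed to status.FromError comes back with codes.Unknown and ok set to false, which is why handlers are encouraged to build errors with this package in the first place.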
+ return &Status{s: &spb.Status{Code: int32(code), Message: message}} + } + st := &spb.Status{} + if err := proto.Unmarshal([]byte(statusProto[0]), st); err != nil { + // Probably not a google.rpc.Status proto; do not provide details. + return &Status{s: &spb.Status{Code: int32(code), Message: message}} + } + if st.Code == int32(code) { + // The codes match between the grpc-status header and the + // grpc-status-details-bin header; use the full details proto. + return &Status{s: st} + } + return &Status{ + s: &spb.Status{ + Code: int32(codes.Internal), + Message: fmt.Sprintf( + "grpc-status-details-bin mismatch: grpc-status=%v, grpc-message=%q, grpc-status-details-bin=%+v", + code, message, st, + ), + }, + } +} + +// New returns a Status representing c and msg. +func New(c codes.Code, msg string) *Status { + return &Status{s: &spb.Status{Code: int32(c), Message: msg}} +} + +// Newf returns New(c, fmt.Sprintf(format, a...)). +func Newf(c codes.Code, format string, a ...any) *Status { + return New(c, fmt.Sprintf(format, a...)) +} + +// FromProto returns a Status representing s. +func FromProto(s *spb.Status) *Status { + return &Status{s: proto.Clone(s).(*spb.Status)} +} + +// Err returns an error representing c and msg. If c is OK, returns nil. +func Err(c codes.Code, msg string) error { + return New(c, msg).Err() +} + +// Errorf returns Error(c, fmt.Sprintf(format, a...)). +func Errorf(c codes.Code, format string, a ...any) error { + return Err(c, fmt.Sprintf(format, a...)) +} + +// Code returns the status code contained in s. +func (s *Status) Code() codes.Code { + if s == nil || s.s == nil { + return codes.OK + } + return codes.Code(s.s.Code) +} + +// Message returns the message contained in s. +func (s *Status) Message() string { + if s == nil || s.s == nil { + return "" + } + return s.s.Message +} + +// Proto returns s's status as an spb.Status proto message. +func (s *Status) Proto() *spb.Status { + if s == nil { + return nil + } + return proto.Clone(s.s).(*spb.Status) +} + +// Err returns an immutable error representing s; returns nil if s.Code() is OK. +func (s *Status) Err() error { + if s.Code() == codes.OK { + return nil + } + return &Error{s: s} +} + +// WithDetails returns a new status with the provided details messages appended to the status. +// If any errors are encountered, it returns nil and the first error encountered. +func (s *Status) WithDetails(details ...proto.Message) (*Status, error) { + if s.Code() == codes.OK { + return nil, errors.New("no error details for status with code OK") + } + // s.Code() != OK implies that s.Proto() != nil. + p := s.Proto() + for _, detail := range details { + any, err := ptypes.MarshalAny(detail) + if err != nil { + return nil, err + } + p.Details = append(p.Details, any) + } + return &Status{s: p}, nil +} + +// Details returns a slice of details messages attached to the status. +// If a detail cannot be decoded, the error is returned in place of the detail. +func (s *Status) Details() []any { + if s == nil || s.s == nil { + return nil + } + details := make([]any, 0, len(s.s.Details)) + for _, any := range s.s.Details { + detail := &ptypes.DynamicAny{} + if err := ptypes.UnmarshalAny(any, detail); err != nil { + details = append(details, err) + continue + } + details = append(details, detail.Message) + } + return details +} + +func (s *Status) String() string { + return fmt.Sprintf("rpc error: code = %s desc = %s", s.Code(), s.Message()) +} + +// Error wraps a pointer of a status proto. 
It implements error and Status, +// and a nil *Error should never be returned by this package. +type Error struct { + s *Status +} + +func (e *Error) Error() string { + return e.s.String() +} + +// GRPCStatus returns the Status represented by se. +func (e *Error) GRPCStatus() *Status { + return e.s +} + +// Is implements future error.Is functionality. +// A Error is equivalent if the code and message are identical. +func (e *Error) Is(target error) bool { + tse, ok := target.(*Error) + if !ok { + return false + } + return proto.Equal(e.s.s, tse.s.s) +} + +// IsRestrictedControlPlaneCode returns whether the status includes a code +// restricted for control plane usage as defined by gRFC A54. +func IsRestrictedControlPlaneCode(s *Status) bool { + switch s.Code() { + case codes.InvalidArgument, codes.NotFound, codes.AlreadyExists, codes.FailedPrecondition, codes.Aborted, codes.OutOfRange, codes.DataLoss: + return true + } + return false +} diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go new file mode 100644 index 00000000..b3a72276 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go @@ -0,0 +1,112 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package syscall provides functionalities that grpc uses to get low-level operating system +// stats/info. +package syscall + +import ( + "fmt" + "net" + "syscall" + "time" + + "golang.org/x/sys/unix" + "google.golang.org/grpc/grpclog" +) + +var logger = grpclog.Component("core") + +// GetCPUTime returns the how much CPU time has passed since the start of this process. +func GetCPUTime() int64 { + var ts unix.Timespec + if err := unix.ClockGettime(unix.CLOCK_PROCESS_CPUTIME_ID, &ts); err != nil { + logger.Fatal(err) + } + return ts.Nano() +} + +// Rusage is an alias for syscall.Rusage under linux environment. +type Rusage = syscall.Rusage + +// GetRusage returns the resource usage of current process. +func GetRusage() *Rusage { + rusage := new(Rusage) + syscall.Getrusage(syscall.RUSAGE_SELF, rusage) + return rusage +} + +// CPUTimeDiff returns the differences of user CPU time and system CPU time used +// between two Rusage structs. +func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) { + var ( + utimeDiffs = latest.Utime.Sec - first.Utime.Sec + utimeDiffus = latest.Utime.Usec - first.Utime.Usec + stimeDiffs = latest.Stime.Sec - first.Stime.Sec + stimeDiffus = latest.Stime.Usec - first.Stime.Usec + ) + + uTimeElapsed := float64(utimeDiffs) + float64(utimeDiffus)*1.0e-6 + sTimeElapsed := float64(stimeDiffs) + float64(stimeDiffus)*1.0e-6 + + return uTimeElapsed, sTimeElapsed +} + +// SetTCPUserTimeout sets the TCP user timeout on a connection's socket +func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error { + tcpconn, ok := conn.(*net.TCPConn) + if !ok { + // not a TCP connection. 
exit early + return nil + } + rawConn, err := tcpconn.SyscallConn() + if err != nil { + return fmt.Errorf("error getting raw connection: %v", err) + } + err = rawConn.Control(func(fd uintptr) { + err = syscall.SetsockoptInt(int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT, int(timeout/time.Millisecond)) + }) + if err != nil { + return fmt.Errorf("error setting option on socket: %v", err) + } + + return nil +} + +// GetTCPUserTimeout gets the TCP user timeout on a connection's socket +func GetTCPUserTimeout(conn net.Conn) (opt int, err error) { + tcpconn, ok := conn.(*net.TCPConn) + if !ok { + err = fmt.Errorf("conn is not *net.TCPConn. got %T", conn) + return + } + rawConn, err := tcpconn.SyscallConn() + if err != nil { + err = fmt.Errorf("error getting raw connection: %v", err) + return + } + err = rawConn.Control(func(fd uintptr) { + opt, err = syscall.GetsockoptInt(int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT) + }) + if err != nil { + err = fmt.Errorf("error getting option on socket: %v", err) + return + } + + return +} diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go new file mode 100644 index 00000000..999f52cd --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go @@ -0,0 +1,77 @@ +//go:build !linux +// +build !linux + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package syscall provides functionalities that grpc uses to get low-level +// operating system stats/info. +package syscall + +import ( + "net" + "sync" + "time" + + "google.golang.org/grpc/grpclog" +) + +var once sync.Once +var logger = grpclog.Component("core") + +func log() { + once.Do(func() { + logger.Info("CPU time info is unavailable on non-linux environments.") + }) +} + +// GetCPUTime returns the how much CPU time has passed since the start of this +// process. It always returns 0 under non-linux environments. +func GetCPUTime() int64 { + log() + return 0 +} + +// Rusage is an empty struct under non-linux environments. +type Rusage struct{} + +// GetRusage is a no-op function under non-linux environments. +func GetRusage() *Rusage { + log() + return nil +} + +// CPUTimeDiff returns the differences of user CPU time and system CPU time used +// between two Rusage structs. It a no-op function for non-linux environments. +func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) { + log() + return 0, 0 +} + +// SetTCPUserTimeout is a no-op function under non-linux environments. +func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error { + log() + return nil +} + +// GetTCPUserTimeout is a no-op function under non-linux environments. 
+// A negative return value indicates the operation is not supported +func GetTCPUserTimeout(conn net.Conn) (int, error) { + log() + return -1, nil +} diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go new file mode 100644 index 00000000..4f347edd --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go @@ -0,0 +1,29 @@ +//go:build !unix && !windows + +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package internal + +import ( + "net" +) + +// NetDialerWithTCPKeepalive returns a vanilla net.Dialer on non-unix platforms. +func NetDialerWithTCPKeepalive() *net.Dialer { + return &net.Dialer{} +} diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go new file mode 100644 index 00000000..078137b7 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go @@ -0,0 +1,54 @@ +//go:build unix + +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package internal + +import ( + "net" + "syscall" + "time" + + "golang.org/x/sys/unix" +) + +// NetDialerWithTCPKeepalive returns a net.Dialer that enables TCP keepalives on +// the underlying connection with OS default values for keepalive parameters. +// +// TODO: Once https://github.com/golang/go/issues/62254 lands, and the +// appropriate Go version becomes less than our least supported Go version, we +// should look into using the new API to make things more straightforward. +func NetDialerWithTCPKeepalive() *net.Dialer { + return &net.Dialer{ + // Setting a negative value here prevents the Go stdlib from overriding + // the values of TCP keepalive time and interval. It also prevents the + // Go stdlib from enabling TCP keepalives by default. + KeepAlive: time.Duration(-1), + // This method is called after the underlying network socket is created, + // but before dialing the socket (or calling its connect() method). The + // combination of unconditionally enabling TCP keepalives here, and + // disabling the overriding of TCP keepalive parameters by setting the + // KeepAlive field to a negative value above, results in OS defaults for + // the TCP keealive interval and time parameters. 
+ Control: func(_, _ string, c syscall.RawConn) error { + return c.Control(func(fd uintptr) { + unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1) + }) + }, + } +} diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go new file mode 100644 index 00000000..fd7d43a8 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go @@ -0,0 +1,54 @@ +//go:build windows + +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package internal + +import ( + "net" + "syscall" + "time" + + "golang.org/x/sys/windows" +) + +// NetDialerWithTCPKeepalive returns a net.Dialer that enables TCP keepalives on +// the underlying connection with OS default values for keepalive parameters. +// +// TODO: Once https://github.com/golang/go/issues/62254 lands, and the +// appropriate Go version becomes less than our least supported Go version, we +// should look into using the new API to make things more straightforward. +func NetDialerWithTCPKeepalive() *net.Dialer { + return &net.Dialer{ + // Setting a negative value here prevents the Go stdlib from overriding + // the values of TCP keepalive time and interval. It also prevents the + // Go stdlib from enabling TCP keepalives by default. + KeepAlive: time.Duration(-1), + // This method is called after the underlying network socket is created, + // but before dialing the socket (or calling its connect() method). The + // combination of unconditionally enabling TCP keepalives here, and + // disabling the overriding of TCP keepalive parameters by setting the + // KeepAlive field to a negative value above, results in OS defaults for + // the TCP keealive interval and time parameters. + Control: func(_, _ string, c syscall.RawConn) error { + return c.Control(func(fd uintptr) { + windows.SetsockoptInt(windows.Handle(fd), windows.SOL_SOCKET, windows.SO_KEEPALIVE, 1) + }) + }, + } +} diff --git a/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go b/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go new file mode 100644 index 00000000..070680ed --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go @@ -0,0 +1,141 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package transport + +import ( + "sync" + "time" +) + +const ( + // bdpLimit is the maximum value the flow control windows will be increased + // to. TCP typically limits this to 4MB, but some systems go up to 16MB. + // Since this is only a limit, it is safe to make it optimistic. + bdpLimit = (1 << 20) * 16 + // alpha is a constant factor used to keep a moving average + // of RTTs. + alpha = 0.9 + // If the current bdp sample is greater than or equal to + // our beta * our estimated bdp and the current bandwidth + // sample is the maximum bandwidth observed so far, we + // increase our bbp estimate by a factor of gamma. + beta = 0.66 + // To put our bdp to be smaller than or equal to twice the real BDP, + // we should multiply our current sample with 4/3, however to round things out + // we use 2 as the multiplication factor. + gamma = 2 +) + +// Adding arbitrary data to ping so that its ack can be identified. +// Easter-egg: what does the ping message say? +var bdpPing = &ping{data: [8]byte{2, 4, 16, 16, 9, 14, 7, 7}} + +type bdpEstimator struct { + // sentAt is the time when the ping was sent. + sentAt time.Time + + mu sync.Mutex + // bdp is the current bdp estimate. + bdp uint32 + // sample is the number of bytes received in one measurement cycle. + sample uint32 + // bwMax is the maximum bandwidth noted so far (bytes/sec). + bwMax float64 + // bool to keep track of the beginning of a new measurement cycle. + isSent bool + // Callback to update the window sizes. + updateFlowControl func(n uint32) + // sampleCount is the number of samples taken so far. + sampleCount uint64 + // round trip time (seconds) + rtt float64 +} + +// timesnap registers the time bdp ping was sent out so that +// network rtt can be calculated when its ack is received. +// It is called (by controller) when the bdpPing is +// being written on the wire. +func (b *bdpEstimator) timesnap(d [8]byte) { + if bdpPing.data != d { + return + } + b.sentAt = time.Now() +} + +// add adds bytes to the current sample for calculating bdp. +// It returns true only if a ping must be sent. This can be used +// by the caller (handleData) to make decision about batching +// a window update with it. +func (b *bdpEstimator) add(n uint32) bool { + b.mu.Lock() + defer b.mu.Unlock() + if b.bdp == bdpLimit { + return false + } + if !b.isSent { + b.isSent = true + b.sample = n + b.sentAt = time.Time{} + b.sampleCount++ + return true + } + b.sample += n + return false +} + +// calculate is called when an ack for a bdp ping is received. +// Here we calculate the current bdp and bandwidth sample and +// decide if the flow control windows should go up. +func (b *bdpEstimator) calculate(d [8]byte) { + // Check if the ping acked for was the bdp ping. + if bdpPing.data != d { + return + } + b.mu.Lock() + rttSample := time.Since(b.sentAt).Seconds() + if b.sampleCount < 10 { + // Bootstrap rtt with an average of first 10 rtt samples. + b.rtt += (rttSample - b.rtt) / float64(b.sampleCount) + } else { + // Heed to the recent past more. + b.rtt += (rttSample - b.rtt) * float64(alpha) + } + b.isSent = false + // The number of bytes accumulated so far in the sample is smaller + // than or equal to 1.5 times the real BDP on a saturated connection. 
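To make the estimator's arithmetic concrete, the sketch below replays one measurement cycle with the same constants on plain numbers: the steady-state RTT EWMA (the bootstrap averaging of the first 10 samples is omitted), the bandwidth sample derived from the 1.5 factor described above, and the beta/gamma growth rule. Function names and the sample values are illustrative.

package main

import "fmt"

const (
    alpha    = 0.9          // EWMA weight for recent RTT samples
    beta     = 0.66         // grow the estimate once a sample reaches 2/3 of it
    gamma    = 2            // growth factor applied to the triggering sample
    bdpLimit = 16 * (1 << 20)
)

// nextBDP applies one cycle: an RTT EWMA update, a bandwidth sample, and the
// conditional BDP growth described in the surrounding comments.
func nextBDP(bdp uint32, rtt, rttSample float64, sample uint32, bwMax float64) (uint32, float64, float64) {
    rtt += (rttSample - rtt) * alpha // weigh the recent past more
    // The accumulated sample is at most ~1.5x the real BDP, so dividing by
    // 1.5*rtt gives a conservative bandwidth estimate in bytes per second.
    bw := float64(sample) / (rtt * 1.5)
    if bw > bwMax {
        bwMax = bw
    }
    if float64(sample) >= beta*float64(bdp) && bw == bwMax && bdp != bdpLimit {
        bdp = uint32(gamma * float64(sample))
        if bdp > bdpLimit {
            bdp = bdpLimit
        }
    }
    return bdp, rtt, bwMax
}

func main() {
    // Hypothetical cycle: 64 KiB current estimate, 48 KiB sampled, 20 ms RTT.
    bdp, rtt, bwMax := nextBDP(64*1024, 0.025, 0.020, 48*1024, 0)
    fmt.Printf("bdp=%d rtt=%.4fs bwMax=%.0f B/s\n", bdp, rtt, bwMax)
}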
+ bwCurrent := float64(b.sample) / (b.rtt * float64(1.5)) + if bwCurrent > b.bwMax { + b.bwMax = bwCurrent + } + // If the current sample (which is smaller than or equal to the 1.5 times the real BDP) is + // greater than or equal to 2/3rd our perceived bdp AND this is the maximum bandwidth seen so far, we + // should update our perception of the network BDP. + if float64(b.sample) >= beta*float64(b.bdp) && bwCurrent == b.bwMax && b.bdp != bdpLimit { + sampleFloat := float64(b.sample) + b.bdp = uint32(gamma * sampleFloat) + if b.bdp > bdpLimit { + b.bdp = bdpLimit + } + bdp := b.bdp + b.mu.Unlock() + b.updateFlowControl(bdp) + return + } + b.mu.Unlock() +} diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go new file mode 100644 index 00000000..b330cced --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -0,0 +1,1007 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "bytes" + "errors" + "fmt" + "net" + "runtime" + "strconv" + "sync" + "sync/atomic" + + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/status" +) + +var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) { + e.SetMaxDynamicTableSizeLimit(v) +} + +type itemNode struct { + it any + next *itemNode +} + +type itemList struct { + head *itemNode + tail *itemNode +} + +func (il *itemList) enqueue(i any) { + n := &itemNode{it: i} + if il.tail == nil { + il.head, il.tail = n, n + return + } + il.tail.next = n + il.tail = n +} + +// peek returns the first item in the list without removing it from the +// list. +func (il *itemList) peek() any { + return il.head.it +} + +func (il *itemList) dequeue() any { + if il.head == nil { + return nil + } + i := il.head.it + il.head = il.head.next + if il.head == nil { + il.tail = nil + } + return i +} + +func (il *itemList) dequeueAll() *itemNode { + h := il.head + il.head, il.tail = nil, nil + return h +} + +func (il *itemList) isEmpty() bool { + return il.head == nil +} + +// The following defines various control items which could flow through +// the control buffer of transport. They represent different aspects of +// control tasks, e.g., flow control, settings, streaming resetting, etc. + +// maxQueuedTransportResponseFrames is the most queued "transport response" +// frames we will buffer before preventing new reads from occurring on the +// transport. These are control frames sent in response to client requests, +// such as RST_STREAM due to bad headers or settings acks. +const maxQueuedTransportResponseFrames = 50 + +type cbItem interface { + isTransportResponseFrame() bool +} + +// registerStream is used to register an incoming stream with loopy writer. 
+type registerStream struct { + streamID uint32 + wq *writeQuota +} + +func (*registerStream) isTransportResponseFrame() bool { return false } + +// headerFrame is also used to register stream on the client-side. +type headerFrame struct { + streamID uint32 + hf []hpack.HeaderField + endStream bool // Valid on server side. + initStream func(uint32) error // Used only on the client side. + onWrite func() + wq *writeQuota // write quota for the stream created. + cleanup *cleanupStream // Valid on the server side. + onOrphaned func(error) // Valid on client-side +} + +func (h *headerFrame) isTransportResponseFrame() bool { + return h.cleanup != nil && h.cleanup.rst // Results in a RST_STREAM +} + +type cleanupStream struct { + streamID uint32 + rst bool + rstCode http2.ErrCode + onWrite func() +} + +func (c *cleanupStream) isTransportResponseFrame() bool { return c.rst } // Results in a RST_STREAM + +type earlyAbortStream struct { + httpStatus uint32 + streamID uint32 + contentSubtype string + status *status.Status + rst bool +} + +func (*earlyAbortStream) isTransportResponseFrame() bool { return false } + +type dataFrame struct { + streamID uint32 + endStream bool + h []byte + d []byte + // onEachWrite is called every time + // a part of d is written out. + onEachWrite func() +} + +func (*dataFrame) isTransportResponseFrame() bool { return false } + +type incomingWindowUpdate struct { + streamID uint32 + increment uint32 +} + +func (*incomingWindowUpdate) isTransportResponseFrame() bool { return false } + +type outgoingWindowUpdate struct { + streamID uint32 + increment uint32 +} + +func (*outgoingWindowUpdate) isTransportResponseFrame() bool { + return false // window updates are throttled by thresholds +} + +type incomingSettings struct { + ss []http2.Setting +} + +func (*incomingSettings) isTransportResponseFrame() bool { return true } // Results in a settings ACK + +type outgoingSettings struct { + ss []http2.Setting +} + +func (*outgoingSettings) isTransportResponseFrame() bool { return false } + +type incomingGoAway struct { +} + +func (*incomingGoAway) isTransportResponseFrame() bool { return false } + +type goAway struct { + code http2.ErrCode + debugData []byte + headsUp bool + closeConn error // if set, loopyWriter will exit, resulting in conn closure +} + +func (*goAway) isTransportResponseFrame() bool { return false } + +type ping struct { + ack bool + data [8]byte +} + +func (*ping) isTransportResponseFrame() bool { return true } + +type outFlowControlSizeRequest struct { + resp chan uint32 +} + +func (*outFlowControlSizeRequest) isTransportResponseFrame() bool { return false } + +// closeConnection is an instruction to tell the loopy writer to flush the +// framer and exit, which will cause the transport's connection to be closed +// (by the client or server). The transport itself will close after the reader +// encounters the EOF caused by the connection closure. 
+type closeConnection struct{} + +func (closeConnection) isTransportResponseFrame() bool { return false } + +type outStreamState int + +const ( + active outStreamState = iota + empty + waitingOnStreamQuota +) + +type outStream struct { + id uint32 + state outStreamState + itl *itemList + bytesOutStanding int + wq *writeQuota + + next *outStream + prev *outStream +} + +func (s *outStream) deleteSelf() { + if s.prev != nil { + s.prev.next = s.next + } + if s.next != nil { + s.next.prev = s.prev + } + s.next, s.prev = nil, nil +} + +type outStreamList struct { + // Following are sentinel objects that mark the + // beginning and end of the list. They do not + // contain any item lists. All valid objects are + // inserted in between them. + // This is needed so that an outStream object can + // deleteSelf() in O(1) time without knowing which + // list it belongs to. + head *outStream + tail *outStream +} + +func newOutStreamList() *outStreamList { + head, tail := new(outStream), new(outStream) + head.next = tail + tail.prev = head + return &outStreamList{ + head: head, + tail: tail, + } +} + +func (l *outStreamList) enqueue(s *outStream) { + e := l.tail.prev + e.next = s + s.prev = e + s.next = l.tail + l.tail.prev = s +} + +// remove from the beginning of the list. +func (l *outStreamList) dequeue() *outStream { + b := l.head.next + if b == l.tail { + return nil + } + b.deleteSelf() + return b +} + +// controlBuffer is a way to pass information to loopy. +// Information is passed as specific struct types called control frames. +// A control frame not only represents data, messages or headers to be sent out +// but can also be used to instruct loopy to update its internal state. +// It shouldn't be confused with an HTTP2 frame, although some of the control frames +// like dataFrame and headerFrame do go out on wire as HTTP2 frames. +type controlBuffer struct { + ch chan struct{} + done <-chan struct{} + mu sync.Mutex + consumerWaiting bool + list *itemList + err error + + // transportResponseFrames counts the number of queued items that represent + // the response of an action initiated by the peer. trfChan is created + // when transportResponseFrames >= maxQueuedTransportResponseFrames and is + // closed and nilled when transportResponseFrames drops below the + // threshold. Both fields are protected by mu. + transportResponseFrames int + trfChan atomic.Value // chan struct{} +} + +func newControlBuffer(done <-chan struct{}) *controlBuffer { + return &controlBuffer{ + ch: make(chan struct{}, 1), + list: &itemList{}, + done: done, + } +} + +// throttle blocks if there are too many incomingSettings/cleanupStreams in the +// controlbuf. +func (c *controlBuffer) throttle() { + ch, _ := c.trfChan.Load().(chan struct{}) + if ch != nil { + select { + case <-ch: + case <-c.done: + } + } +} + +func (c *controlBuffer) put(it cbItem) error { + _, err := c.executeAndPut(nil, it) + return err +} + +func (c *controlBuffer) executeAndPut(f func(it any) bool, it cbItem) (bool, error) { + var wakeUp bool + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return false, c.err + } + if f != nil { + if !f(it) { // f wasn't successful + c.mu.Unlock() + return false, nil + } + } + if c.consumerWaiting { + wakeUp = true + c.consumerWaiting = false + } + c.list.enqueue(it) + if it.isTransportResponseFrame() { + c.transportResponseFrames++ + if c.transportResponseFrames == maxQueuedTransportResponseFrames { + // We are adding the frame that puts us over the threshold; create + // a throttling channel. 
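The trfChan mechanism above boils down to a counter plus a channel that exists only while the backlog of transport-response frames sits at the threshold; producers block on the channel in throttle and are released once the backlog drains. A simplified, standalone stand-in for that pattern (it drops the atomic.Value and the done channel of the real controlBuffer) might look like this:

package main

import (
    "fmt"
    "sync"
)

const maxQueued = 50 // stand-in for maxQueuedTransportResponseFrames

// gate blocks producers while too many response frames are queued.
type gate struct {
    mu      sync.Mutex
    queued  int
    blockCh chan struct{} // non-nil only while at the threshold
}

// add records one queued response frame; the frame that reaches the
// threshold installs the channel that wait will block on.
func (g *gate) add() {
    g.mu.Lock()
    defer g.mu.Unlock()
    g.queued++
    if g.queued == maxQueued {
        g.blockCh = make(chan struct{})
    }
}

// remove records that one queued frame was written out; dropping below the
// threshold closes the channel so blocked producers resume.
func (g *gate) remove() {
    g.mu.Lock()
    defer g.mu.Unlock()
    if g.queued == maxQueued && g.blockCh != nil {
        close(g.blockCh)
        g.blockCh = nil
    }
    g.queued--
}

// wait blocks while the backlog is at the threshold.
func (g *gate) wait() {
    g.mu.Lock()
    ch := g.blockCh
    g.mu.Unlock()
    if ch != nil {
        <-ch
    }
}

func main() {
    g := &gate{}
    for i := 0; i < maxQueued; i++ {
        g.add()
    }
    done := make(chan struct{})
    go func() { g.wait(); close(done) }()
    g.remove() // dropping below the threshold unblocks the waiter
    <-done
    fmt.Println("unblocked")
}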
+ c.trfChan.Store(make(chan struct{})) + } + } + c.mu.Unlock() + if wakeUp { + select { + case c.ch <- struct{}{}: + default: + } + } + return true, nil +} + +// Note argument f should never be nil. +func (c *controlBuffer) execute(f func(it any) bool, it any) (bool, error) { + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return false, c.err + } + if !f(it) { // f wasn't successful + c.mu.Unlock() + return false, nil + } + c.mu.Unlock() + return true, nil +} + +func (c *controlBuffer) get(block bool) (any, error) { + for { + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return nil, c.err + } + if !c.list.isEmpty() { + h := c.list.dequeue().(cbItem) + if h.isTransportResponseFrame() { + if c.transportResponseFrames == maxQueuedTransportResponseFrames { + // We are removing the frame that put us over the + // threshold; close and clear the throttling channel. + ch := c.trfChan.Load().(chan struct{}) + close(ch) + c.trfChan.Store((chan struct{})(nil)) + } + c.transportResponseFrames-- + } + c.mu.Unlock() + return h, nil + } + if !block { + c.mu.Unlock() + return nil, nil + } + c.consumerWaiting = true + c.mu.Unlock() + select { + case <-c.ch: + case <-c.done: + return nil, errors.New("transport closed by client") + } + } +} + +func (c *controlBuffer) finish() { + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return + } + c.err = ErrConnClosing + // There may be headers for streams in the control buffer. + // These streams need to be cleaned out since the transport + // is still not aware of these yet. + for head := c.list.dequeueAll(); head != nil; head = head.next { + hdr, ok := head.it.(*headerFrame) + if !ok { + continue + } + if hdr.onOrphaned != nil { // It will be nil on the server-side. + hdr.onOrphaned(ErrConnClosing) + } + } + // In case throttle() is currently in flight, it needs to be unblocked. + // Otherwise, the transport may not close, since the transport is closed by + // the reader encountering the connection error. + ch, _ := c.trfChan.Load().(chan struct{}) + if ch != nil { + close(ch) + } + c.trfChan.Store((chan struct{})(nil)) + c.mu.Unlock() +} + +type side int + +const ( + clientSide side = iota + serverSide +) + +// Loopy receives frames from the control buffer. +// Each frame is handled individually; most of the work done by loopy goes +// into handling data frames. Loopy maintains a queue of active streams, and each +// stream maintains a queue of data frames; as loopy receives data frames +// it gets added to the queue of the relevant stream. +// Loopy goes over this list of active streams by processing one node every iteration, +// thereby closely resemebling to a round-robin scheduling over all streams. While +// processing a stream, loopy writes out data bytes from this stream capped by the min +// of http2MaxFrameLen, connection-level flow control and stream-level flow control. +type loopyWriter struct { + side side + cbuf *controlBuffer + sendQuota uint32 + oiws uint32 // outbound initial window size. + // estdStreams is map of all established streams that are not cleaned-up yet. + // On client-side, this is all streams whose headers were sent out. + // On server-side, this is all streams whose headers were received. + estdStreams map[uint32]*outStream // Established streams. + // activeStreams is a linked-list of all streams that have data to send and some + // stream-level flow control quota. + // Each of these streams internally have a list of data items(and perhaps trailers + // on the server-side) to be sent out. 
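The scheduling sketched in the comments above, one active stream serviced per iteration and re-queued only while it still has data and quota, can be illustrated with ordinary slices. The toy loop below shows only the round-robin shape; it ignores connection-level quota and the per-stream item lists that the real loopy writer maintains, and all names are illustrative.

package main

import "fmt"

// stream is a toy stand-in: pending bytes plus a per-stream quota.
type stream struct {
    id      int
    pending int
    quota   int
}

const frameLen = 16 * 1024 // stand-in for http2MaxFrameLen

// writeRound services the head of the active queue, writes at most
// min(frameLen, stream quota, pending) bytes, and re-queues the stream only
// if it still has both data and quota, which is the round-robin shape.
func writeRound(active []*stream) []*stream {
    if len(active) == 0 {
        return active
    }
    s := active[0]
    active = active[1:]
    n := frameLen
    if s.quota < n {
        n = s.quota
    }
    if s.pending < n {
        n = s.pending
    }
    s.pending -= n
    s.quota -= n
    fmt.Printf("stream %d: wrote %d bytes (pending=%d quota=%d)\n", s.id, n, s.pending, s.quota)
    if s.pending > 0 && s.quota > 0 {
        active = append(active, s) // back of the line
    }
    return active
}

func main() {
    active := []*stream{
        {id: 1, pending: 40 * 1024, quota: 64 * 1024},
        {id: 3, pending: 20 * 1024, quota: 8 * 1024},
    }
    for len(active) > 0 {
        active = writeRound(active)
    }
}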
+ activeStreams *outStreamList + framer *framer + hBuf *bytes.Buffer // The buffer for HPACK encoding. + hEnc *hpack.Encoder // HPACK encoder. + bdpEst *bdpEstimator + draining bool + conn net.Conn + logger *grpclog.PrefixLogger + + // Side-specific handlers + ssGoAwayHandler func(*goAway) (bool, error) +} + +func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger) *loopyWriter { + var buf bytes.Buffer + l := &loopyWriter{ + side: s, + cbuf: cbuf, + sendQuota: defaultWindowSize, + oiws: defaultWindowSize, + estdStreams: make(map[uint32]*outStream), + activeStreams: newOutStreamList(), + framer: fr, + hBuf: &buf, + hEnc: hpack.NewEncoder(&buf), + bdpEst: bdpEst, + conn: conn, + logger: logger, + } + return l +} + +const minBatchSize = 1000 + +// run should be run in a separate goroutine. +// It reads control frames from controlBuf and processes them by: +// 1. Updating loopy's internal state, or/and +// 2. Writing out HTTP2 frames on the wire. +// +// Loopy keeps all active streams with data to send in a linked-list. +// All streams in the activeStreams linked-list must have both: +// 1. Data to send, and +// 2. Stream level flow control quota available. +// +// In each iteration of run loop, other than processing the incoming control +// frame, loopy calls processData, which processes one node from the +// activeStreams linked-list. This results in writing of HTTP2 frames into an +// underlying write buffer. When there's no more control frames to read from +// controlBuf, loopy flushes the write buffer. As an optimization, to increase +// the batch size for each flush, loopy yields the processor, once if the batch +// size is too low to give stream goroutines a chance to fill it up. +// +// Upon exiting, if the error causing the exit is not an I/O error, run() +// flushes and closes the underlying connection. Otherwise, the connection is +// left open to allow the I/O error to be encountered by the reader instead. +func (l *loopyWriter) run() (err error) { + defer func() { + if l.logger.V(logLevel) { + l.logger.Infof("loopyWriter exiting with error: %v", err) + } + if !isIOError(err) { + l.framer.writer.Flush() + l.conn.Close() + } + l.cbuf.finish() + }() + for { + it, err := l.cbuf.get(true) + if err != nil { + return err + } + if err = l.handle(it); err != nil { + return err + } + if _, err = l.processData(); err != nil { + return err + } + gosched := true + hasdata: + for { + it, err := l.cbuf.get(false) + if err != nil { + return err + } + if it != nil { + if err = l.handle(it); err != nil { + return err + } + if _, err = l.processData(); err != nil { + return err + } + continue hasdata + } + isEmpty, err := l.processData() + if err != nil { + return err + } + if !isEmpty { + continue hasdata + } + if gosched { + gosched = false + if l.framer.writer.offset < minBatchSize { + runtime.Gosched() + continue hasdata + } + } + l.framer.writer.Flush() + break hasdata + } + } +} + +func (l *loopyWriter) outgoingWindowUpdateHandler(w *outgoingWindowUpdate) error { + return l.framer.fr.WriteWindowUpdate(w.streamID, w.increment) +} + +func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) { + // Otherwise update the quota. + if w.streamID == 0 { + l.sendQuota += w.increment + return + } + // Find the stream and update it. 
+ if str, ok := l.estdStreams[w.streamID]; ok { + str.bytesOutStanding -= int(w.increment) + if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota > 0 && str.state == waitingOnStreamQuota { + str.state = active + l.activeStreams.enqueue(str) + return + } + } +} + +func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error { + return l.framer.fr.WriteSettings(s.ss...) +} + +func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error { + l.applySettings(s.ss) + return l.framer.fr.WriteSettingsAck() +} + +func (l *loopyWriter) registerStreamHandler(h *registerStream) { + str := &outStream{ + id: h.streamID, + state: empty, + itl: &itemList{}, + wq: h.wq, + } + l.estdStreams[h.streamID] = str +} + +func (l *loopyWriter) headerHandler(h *headerFrame) error { + if l.side == serverSide { + str, ok := l.estdStreams[h.streamID] + if !ok { + if l.logger.V(logLevel) { + l.logger.Infof("Unrecognized streamID %d in loopyWriter", h.streamID) + } + return nil + } + // Case 1.A: Server is responding back with headers. + if !h.endStream { + return l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite) + } + // else: Case 1.B: Server wants to close stream. + + if str.state != empty { // either active or waiting on stream quota. + // add it str's list of items. + str.itl.enqueue(h) + return nil + } + if err := l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite); err != nil { + return err + } + return l.cleanupStreamHandler(h.cleanup) + } + // Case 2: Client wants to originate stream. + str := &outStream{ + id: h.streamID, + state: empty, + itl: &itemList{}, + wq: h.wq, + } + return l.originateStream(str, h) +} + +func (l *loopyWriter) originateStream(str *outStream, hdr *headerFrame) error { + // l.draining is set when handling GoAway. In which case, we want to avoid + // creating new streams. + if l.draining { + // TODO: provide a better error with the reason we are in draining. + hdr.onOrphaned(errStreamDrain) + return nil + } + if err := hdr.initStream(str.id); err != nil { + return err + } + if err := l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil { + return err + } + l.estdStreams[str.id] = str + return nil +} + +func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.HeaderField, onWrite func()) error { + if onWrite != nil { + onWrite() + } + l.hBuf.Reset() + for _, f := range hf { + if err := l.hEnc.WriteField(f); err != nil { + if l.logger.V(logLevel) { + l.logger.Warningf("Encountered error while encoding headers: %v", err) + } + } + } + var ( + err error + endHeaders, first bool + ) + first = true + for !endHeaders { + size := l.hBuf.Len() + if size > http2MaxFrameLen { + size = http2MaxFrameLen + } else { + endHeaders = true + } + if first { + first = false + err = l.framer.fr.WriteHeaders(http2.HeadersFrameParam{ + StreamID: streamID, + BlockFragment: l.hBuf.Next(size), + EndStream: endStream, + EndHeaders: endHeaders, + }) + } else { + err = l.framer.fr.WriteContinuation( + streamID, + endHeaders, + l.hBuf.Next(size), + ) + } + if err != nil { + return err + } + } + return nil +} + +func (l *loopyWriter) preprocessData(df *dataFrame) { + str, ok := l.estdStreams[df.streamID] + if !ok { + return + } + // If we got data for a stream it means that + // stream was originated and the headers were sent out. 
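The header-block splitting in writeHeader above follows the usual HTTP/2 rule: the first chunk goes out as a HEADERS frame, any remainder as CONTINUATION frames, and END_HEADERS is set only on the last chunk. A minimal, framer-independent rendering of that chunking is shown below; the type and frame labels are illustrative.

package main

import "fmt"

const maxFrameLen = 16 * 1024 // stand-in for http2MaxFrameLen

// headerChunk describes one wire frame carved out of an HPACK header block.
type headerChunk struct {
    kind       string // "HEADERS" for the first chunk, "CONTINUATION" after
    endHeaders bool   // set only on the final chunk
    payload    []byte
}

// splitHeaderBlock slices an encoded header block into frame-sized chunks,
// mirroring the first/endHeaders bookkeeping used by writeHeader above.
func splitHeaderBlock(block []byte) []headerChunk {
    var chunks []headerChunk
    first := true
    for endHeaders := false; !endHeaders; {
        size := len(block)
        if size > maxFrameLen {
            size = maxFrameLen
        } else {
            endHeaders = true
        }
        kind := "CONTINUATION"
        if first {
            kind = "HEADERS"
            first = false
        }
        chunks = append(chunks, headerChunk{kind: kind, endHeaders: endHeaders, payload: block[:size]})
        block = block[size:]
    }
    return chunks
}

func main() {
    block := make([]byte, 40*1024) // a header block larger than one frame
    for _, c := range splitHeaderBlock(block) {
        fmt.Printf("%-12s end_headers=%v len=%d\n", c.kind, c.endHeaders, len(c.payload))
    }
}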
+ str.itl.enqueue(df) + if str.state == empty { + str.state = active + l.activeStreams.enqueue(str) + } +} + +func (l *loopyWriter) pingHandler(p *ping) error { + if !p.ack { + l.bdpEst.timesnap(p.data) + } + return l.framer.fr.WritePing(p.ack, p.data) + +} + +func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) { + o.resp <- l.sendQuota +} + +func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { + c.onWrite() + if str, ok := l.estdStreams[c.streamID]; ok { + // On the server side it could be a trailers-only response or + // a RST_STREAM before stream initialization thus the stream might + // not be established yet. + delete(l.estdStreams, c.streamID) + str.deleteSelf() + } + if c.rst { // If RST_STREAM needs to be sent. + if err := l.framer.fr.WriteRSTStream(c.streamID, c.rstCode); err != nil { + return err + } + } + if l.draining && len(l.estdStreams) == 0 { + // Flush and close the connection; we are done with it. + return errors.New("finished processing active streams while in draining mode") + } + return nil +} + +func (l *loopyWriter) earlyAbortStreamHandler(eas *earlyAbortStream) error { + if l.side == clientSide { + return errors.New("earlyAbortStream not handled on client") + } + // In case the caller forgets to set the http status, default to 200. + if eas.httpStatus == 0 { + eas.httpStatus = 200 + } + headerFields := []hpack.HeaderField{ + {Name: ":status", Value: strconv.Itoa(int(eas.httpStatus))}, + {Name: "content-type", Value: grpcutil.ContentType(eas.contentSubtype)}, + {Name: "grpc-status", Value: strconv.Itoa(int(eas.status.Code()))}, + {Name: "grpc-message", Value: encodeGrpcMessage(eas.status.Message())}, + } + + if err := l.writeHeader(eas.streamID, true, headerFields, nil); err != nil { + return err + } + if eas.rst { + if err := l.framer.fr.WriteRSTStream(eas.streamID, http2.ErrCodeNo); err != nil { + return err + } + } + return nil +} + +func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error { + if l.side == clientSide { + l.draining = true + if len(l.estdStreams) == 0 { + // Flush and close the connection; we are done with it. + return errors.New("received GOAWAY with no active streams") + } + } + return nil +} + +func (l *loopyWriter) goAwayHandler(g *goAway) error { + // Handling of outgoing GoAway is very specific to side. + if l.ssGoAwayHandler != nil { + draining, err := l.ssGoAwayHandler(g) + if err != nil { + return err + } + l.draining = draining + } + return nil +} + +func (l *loopyWriter) handle(i any) error { + switch i := i.(type) { + case *incomingWindowUpdate: + l.incomingWindowUpdateHandler(i) + case *outgoingWindowUpdate: + return l.outgoingWindowUpdateHandler(i) + case *incomingSettings: + return l.incomingSettingsHandler(i) + case *outgoingSettings: + return l.outgoingSettingsHandler(i) + case *headerFrame: + return l.headerHandler(i) + case *registerStream: + l.registerStreamHandler(i) + case *cleanupStream: + return l.cleanupStreamHandler(i) + case *earlyAbortStream: + return l.earlyAbortStreamHandler(i) + case *incomingGoAway: + return l.incomingGoAwayHandler(i) + case *dataFrame: + l.preprocessData(i) + case *ping: + return l.pingHandler(i) + case *goAway: + return l.goAwayHandler(i) + case *outFlowControlSizeRequest: + l.outFlowControlSizeRequestHandler(i) + case closeConnection: + // Just return a non-I/O error and run() will flush and close the + // connection. 
+ return ErrConnClosing + default: + return fmt.Errorf("transport: unknown control message type %T", i) + } + return nil +} + +func (l *loopyWriter) applySettings(ss []http2.Setting) { + for _, s := range ss { + switch s.ID { + case http2.SettingInitialWindowSize: + o := l.oiws + l.oiws = s.Val + if o < l.oiws { + // If the new limit is greater make all depleted streams active. + for _, stream := range l.estdStreams { + if stream.state == waitingOnStreamQuota { + stream.state = active + l.activeStreams.enqueue(stream) + } + } + } + case http2.SettingHeaderTableSize: + updateHeaderTblSize(l.hEnc, s.Val) + } + } +} + +// processData removes the first stream from active streams, writes out at most 16KB +// of its data and then puts it at the end of activeStreams if there's still more data +// to be sent and stream has some stream-level flow control. +func (l *loopyWriter) processData() (bool, error) { + if l.sendQuota == 0 { + return true, nil + } + str := l.activeStreams.dequeue() // Remove the first stream. + if str == nil { + return true, nil + } + dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item this stream. + // A data item is represented by a dataFrame, since it later translates into + // multiple HTTP2 data frames. + // Every dataFrame has two buffers; h that keeps grpc-message header and d that is actual data. + // As an optimization to keep wire traffic low, data from d is copied to h to make as big as the + // maximum possible HTTP2 frame size. + + if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame + // Client sends out empty data frame with endStream = true + if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil { + return false, err + } + str.itl.dequeue() // remove the empty data item from stream + if str.itl.isEmpty() { + str.state = empty + } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers. + if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil { + return false, err + } + if err := l.cleanupStreamHandler(trailer.cleanup); err != nil { + return false, err + } + } else { + l.activeStreams.enqueue(str) + } + return false, nil + } + var ( + buf []byte + ) + // Figure out the maximum size we can send + maxSize := http2MaxFrameLen + if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control. + str.state = waitingOnStreamQuota + return false, nil + } else if maxSize > strQuota { + maxSize = strQuota + } + if maxSize > int(l.sendQuota) { // connection-level flow control. + maxSize = int(l.sendQuota) + } + // Compute how much of the header and data we can send within quota and max frame length + hSize := min(maxSize, len(dataItem.h)) + dSize := min(maxSize-hSize, len(dataItem.d)) + if hSize != 0 { + if dSize == 0 { + buf = dataItem.h + } else { + // We can add some data to grpc message header to distribute bytes more equally across frames. + // Copy on the stack to avoid generating garbage + var localBuf [http2MaxFrameLen]byte + copy(localBuf[:hSize], dataItem.h) + copy(localBuf[hSize:], dataItem.d[:dSize]) + buf = localBuf[:hSize+dSize] + } + } else { + buf = dataItem.d + } + + size := hSize + dSize + + // Now that outgoing flow controls are checked we can replenish str's write quota + str.wq.replenish(size) + var endStream bool + // If this is the last data message on this stream and all of it can be written in this iteration. 
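The sizing step in processData can be read as three caps followed by a split: the write is bounded by the HTTP/2 max frame length, the stream's remaining quota, and the connection-level send quota, and then as many grpc-message header bytes as possible go out, topped up with data bytes. A standalone rendering of just that arithmetic follows; the function and variable names are illustrative, and the early return taken when the stream quota is exhausted is omitted.

package main

import "fmt"

const http2MaxFrameLen = 16 * 1024

func minInt(a, b int) int {
    if a < b {
        return a
    }
    return b
}

// frameSizes returns how many grpc-message header bytes (hSize) and payload
// bytes (dSize) fit into one DATA frame, given the stream and connection
// quotas, mirroring the caps applied in processData above.
func frameSizes(hLen, dLen, streamQuota, connQuota int) (hSize, dSize int) {
    maxSize := http2MaxFrameLen
    maxSize = minInt(maxSize, streamQuota) // stream-level flow control
    maxSize = minInt(maxSize, connQuota)   // connection-level flow control
    hSize = minInt(maxSize, hLen)
    dSize = minInt(maxSize-hSize, dLen)
    return hSize, dSize
}

func main() {
    // A 5-byte grpc message header plus 20 KiB of data, with plenty of
    // connection quota but only 12 KiB of stream quota left.
    h, d := frameSizes(5, 20*1024, 12*1024, 64*1024)
    fmt.Printf("header bytes=%d data bytes=%d frame=%d\n", h, d, h+d)
}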
+ if dataItem.endStream && len(dataItem.h)+len(dataItem.d) <= size { + endStream = true + } + if dataItem.onEachWrite != nil { + dataItem.onEachWrite() + } + if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil { + return false, err + } + str.bytesOutStanding += size + l.sendQuota -= uint32(size) + dataItem.h = dataItem.h[hSize:] + dataItem.d = dataItem.d[dSize:] + + if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out. + str.itl.dequeue() + } + if str.itl.isEmpty() { + str.state = empty + } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // The next item is trailers. + if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil { + return false, err + } + if err := l.cleanupStreamHandler(trailer.cleanup); err != nil { + return false, err + } + } else if int(l.oiws)-str.bytesOutStanding <= 0 { // Ran out of stream quota. + str.state = waitingOnStreamQuota + } else { // Otherwise add it back to the list of active streams. + l.activeStreams.enqueue(str) + } + return false, nil +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} diff --git a/vendor/google.golang.org/grpc/internal/transport/defaults.go b/vendor/google.golang.org/grpc/internal/transport/defaults.go new file mode 100644 index 00000000..bc8ee074 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/defaults.go @@ -0,0 +1,55 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "math" + "time" +) + +const ( + // The default value of flow control window size in HTTP2 spec. + defaultWindowSize = 65535 + // The initial window size for flow control. + initialWindowSize = defaultWindowSize // for an RPC + infinity = time.Duration(math.MaxInt64) + defaultClientKeepaliveTime = infinity + defaultClientKeepaliveTimeout = 20 * time.Second + defaultMaxStreamsClient = 100 + defaultMaxConnectionIdle = infinity + defaultMaxConnectionAge = infinity + defaultMaxConnectionAgeGrace = infinity + defaultServerKeepaliveTime = 2 * time.Hour + defaultServerKeepaliveTimeout = 20 * time.Second + defaultKeepalivePolicyMinTime = 5 * time.Minute + // max window limit set by HTTP2 Specs. + maxWindowSize = math.MaxInt32 + // defaultWriteQuota is the default value for number of data + // bytes that each stream can schedule before some of it being + // flushed out. + defaultWriteQuota = 64 * 1024 + defaultClientMaxHeaderListSize = uint32(16 << 20) + defaultServerMaxHeaderListSize = uint32(16 << 20) +) + +// MaxStreamID is the upper bound for the stream ID before the current +// transport gracefully closes and new transport is created for subsequent RPCs. +// This is set to 75% of 2^31-1. Streams are identified with an unsigned 31-bit +// integer. It's exported so that tests can override it. 
+var MaxStreamID = uint32(math.MaxInt32 * 3 / 4) diff --git a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go new file mode 100644 index 00000000..97198c51 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go @@ -0,0 +1,215 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "fmt" + "math" + "sync" + "sync/atomic" +) + +// writeQuota is a soft limit on the amount of data a stream can +// schedule before some of it is written out. +type writeQuota struct { + quota int32 + // get waits on read from when quota goes less than or equal to zero. + // replenish writes on it when quota goes positive again. + ch chan struct{} + // done is triggered in error case. + done <-chan struct{} + // replenish is called by loopyWriter to give quota back to. + // It is implemented as a field so that it can be updated + // by tests. + replenish func(n int) +} + +func newWriteQuota(sz int32, done <-chan struct{}) *writeQuota { + w := &writeQuota{ + quota: sz, + ch: make(chan struct{}, 1), + done: done, + } + w.replenish = w.realReplenish + return w +} + +func (w *writeQuota) get(sz int32) error { + for { + if atomic.LoadInt32(&w.quota) > 0 { + atomic.AddInt32(&w.quota, -sz) + return nil + } + select { + case <-w.ch: + continue + case <-w.done: + return errStreamDone + } + } +} + +func (w *writeQuota) realReplenish(n int) { + sz := int32(n) + a := atomic.AddInt32(&w.quota, sz) + b := a - sz + if b <= 0 && a > 0 { + select { + case w.ch <- struct{}{}: + default: + } + } +} + +type trInFlow struct { + limit uint32 + unacked uint32 + effectiveWindowSize uint32 +} + +func (f *trInFlow) newLimit(n uint32) uint32 { + d := n - f.limit + f.limit = n + f.updateEffectiveWindowSize() + return d +} + +func (f *trInFlow) onData(n uint32) uint32 { + f.unacked += n + if f.unacked >= f.limit/4 { + w := f.unacked + f.unacked = 0 + f.updateEffectiveWindowSize() + return w + } + f.updateEffectiveWindowSize() + return 0 +} + +func (f *trInFlow) reset() uint32 { + w := f.unacked + f.unacked = 0 + f.updateEffectiveWindowSize() + return w +} + +func (f *trInFlow) updateEffectiveWindowSize() { + atomic.StoreUint32(&f.effectiveWindowSize, f.limit-f.unacked) +} + +func (f *trInFlow) getSize() uint32 { + return atomic.LoadUint32(&f.effectiveWindowSize) +} + +// TODO(mmukhi): Simplify this code. +// inFlow deals with inbound flow control +type inFlow struct { + mu sync.Mutex + // The inbound flow control limit for pending data. + limit uint32 + // pendingData is the overall data which have been received but not been + // consumed by applications. + pendingData uint32 + // The amount of data the application has consumed but grpc has not sent + // window update for them. Used to reduce window update frequency. 
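Both trInFlow.onData and, further down, inFlow.onRead batch acknowledgements with the same heuristic: consumed bytes accumulate and a WINDOW_UPDATE increment is only released once at least a quarter of the window is pending. A rough illustration of just that accounting, with illustrative names and values:

package main

import "fmt"

// updateBatcher accumulates consumed bytes and releases a window update only
// once a quarter of the window is pending, the threshold used above.
type updateBatcher struct {
    limit   uint32
    pending uint32
}

// consume records n consumed bytes and returns the increment to put into a
// WINDOW_UPDATE, or 0 if the update should still be held back.
func (b *updateBatcher) consume(n uint32) uint32 {
    b.pending += n
    if b.pending >= b.limit/4 {
        w := b.pending
        b.pending = 0
        return w
    }
    return 0
}

func main() {
    b := &updateBatcher{limit: 64 * 1024}
    for _, n := range []uint32{4096, 4096, 4096, 8192} {
        if w := b.consume(n); w != 0 {
            fmt.Printf("send WINDOW_UPDATE increment=%d\n", w)
        } else {
            fmt.Printf("hold (%d of %d pending)\n", b.pending, b.limit/4)
        }
    }
}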
+ pendingUpdate uint32 + // delta is the extra window update given by receiver when an application + // is reading data bigger in size than the inFlow limit. + delta uint32 +} + +// newLimit updates the inflow window to a new value n. +// It assumes that n is always greater than the old limit. +func (f *inFlow) newLimit(n uint32) { + f.mu.Lock() + f.limit = n + f.mu.Unlock() +} + +func (f *inFlow) maybeAdjust(n uint32) uint32 { + if n > uint32(math.MaxInt32) { + n = uint32(math.MaxInt32) + } + f.mu.Lock() + defer f.mu.Unlock() + // estSenderQuota is the receiver's view of the maximum number of bytes the sender + // can send without a window update. + estSenderQuota := int32(f.limit - (f.pendingData + f.pendingUpdate)) + // estUntransmittedData is the maximum number of bytes the sends might not have put + // on the wire yet. A value of 0 or less means that we have already received all or + // more bytes than the application is requesting to read. + estUntransmittedData := int32(n - f.pendingData) // Casting into int32 since it could be negative. + // This implies that unless we send a window update, the sender won't be able to send all the bytes + // for this message. Therefore we must send an update over the limit since there's an active read + // request from the application. + if estUntransmittedData > estSenderQuota { + // Sender's window shouldn't go more than 2^31 - 1 as specified in the HTTP spec. + if f.limit+n > maxWindowSize { + f.delta = maxWindowSize - f.limit + } else { + // Send a window update for the whole message and not just the difference between + // estUntransmittedData and estSenderQuota. This will be helpful in case the message + // is padded; We will fallback on the current available window(at least a 1/4th of the limit). + f.delta = n + } + return f.delta + } + return 0 +} + +// onData is invoked when some data frame is received. It updates pendingData. +func (f *inFlow) onData(n uint32) error { + f.mu.Lock() + f.pendingData += n + if f.pendingData+f.pendingUpdate > f.limit+f.delta { + limit := f.limit + rcvd := f.pendingData + f.pendingUpdate + f.mu.Unlock() + return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", rcvd, limit) + } + f.mu.Unlock() + return nil +} + +// onRead is invoked when the application reads the data. It returns the window size +// to be sent to the peer. +func (f *inFlow) onRead(n uint32) uint32 { + f.mu.Lock() + if f.pendingData == 0 { + f.mu.Unlock() + return 0 + } + f.pendingData -= n + if n > f.delta { + n -= f.delta + f.delta = 0 + } else { + f.delta -= n + n = 0 + } + f.pendingUpdate += n + if f.pendingUpdate >= f.limit/4 { + wu := f.pendingUpdate + f.pendingUpdate = 0 + f.mu.Unlock() + return wu + } + f.mu.Unlock() + return 0 +} diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go new file mode 100644 index 00000000..a9d70e2a --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -0,0 +1,488 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// This file is the implementation of a gRPC server using HTTP/2 which +// uses the standard Go http2 Server implementation (via the +// http.Handler interface), rather than speaking low-level HTTP/2 +// frames itself. It is the implementation of *grpc.Server.ServeHTTP. + +package transport + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "net" + "net/http" + "strings" + "sync" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/http2" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +// NewServerHandlerTransport returns a ServerTransport handling gRPC from +// inside an http.Handler, or writes an HTTP error to w and returns an error. +// It requires that the http Server supports HTTP/2. +func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) { + if r.ProtoMajor != 2 { + msg := "gRPC requires HTTP/2" + http.Error(w, msg, http.StatusBadRequest) + return nil, errors.New(msg) + } + if r.Method != "POST" { + msg := fmt.Sprintf("invalid gRPC request method %q", r.Method) + http.Error(w, msg, http.StatusBadRequest) + return nil, errors.New(msg) + } + contentType := r.Header.Get("Content-Type") + // TODO: do we assume contentType is lowercase? we did before + contentSubtype, validContentType := grpcutil.ContentSubtype(contentType) + if !validContentType { + msg := fmt.Sprintf("invalid gRPC request content-type %q", contentType) + http.Error(w, msg, http.StatusUnsupportedMediaType) + return nil, errors.New(msg) + } + if _, ok := w.(http.Flusher); !ok { + msg := "gRPC requires a ResponseWriter supporting http.Flusher" + http.Error(w, msg, http.StatusInternalServerError) + return nil, errors.New(msg) + } + + var localAddr net.Addr + if la := r.Context().Value(http.LocalAddrContextKey); la != nil { + localAddr, _ = la.(net.Addr) + } + var authInfo credentials.AuthInfo + if r.TLS != nil { + authInfo = credentials.TLSInfo{State: *r.TLS, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}} + } + p := peer.Peer{ + Addr: strAddr(r.RemoteAddr), + LocalAddr: localAddr, + AuthInfo: authInfo, + } + st := &serverHandlerTransport{ + rw: w, + req: r, + closedCh: make(chan struct{}), + writes: make(chan func()), + peer: p, + contentType: contentType, + contentSubtype: contentSubtype, + stats: stats, + } + st.logger = prefixLoggerForServerHandlerTransport(st) + + if v := r.Header.Get("grpc-timeout"); v != "" { + to, err := decodeTimeout(v) + if err != nil { + msg := fmt.Sprintf("malformed grpc-timeout: %v", err) + http.Error(w, msg, http.StatusBadRequest) + return nil, status.Error(codes.Internal, msg) + } + st.timeoutSet = true + st.timeout = to + } + + metakv := []string{"content-type", contentType} + if r.Host != "" { + metakv = append(metakv, ":authority", r.Host) + } + for k, vv := range r.Header { + k = strings.ToLower(k) + if isReservedHeader(k) && !isWhitelistedHeader(k) { + continue + } + for _, v := range vv { + v, err := decodeMetadataHeader(k, v) + if err != nil { + msg := fmt.Sprintf("malformed binary metadata %q in header %q: %v", v, k, err) + http.Error(w, msg, http.StatusBadRequest) + return nil, 
status.Error(codes.Internal, msg) + } + metakv = append(metakv, k, v) + } + } + st.headerMD = metadata.Pairs(metakv...) + + return st, nil +} + +// serverHandlerTransport is an implementation of ServerTransport +// which replies to exactly one gRPC request (exactly one HTTP request), +// using the net/http.Handler interface. This http.Handler is guaranteed +// at this point to be speaking over HTTP/2, so it's able to speak valid +// gRPC. +type serverHandlerTransport struct { + rw http.ResponseWriter + req *http.Request + timeoutSet bool + timeout time.Duration + + headerMD metadata.MD + + peer peer.Peer + + closeOnce sync.Once + closedCh chan struct{} // closed on Close + + // writes is a channel of code to run serialized in the + // ServeHTTP (HandleStreams) goroutine. The channel is closed + // when WriteStatus is called. + writes chan func() + + // block concurrent WriteStatus calls + // e.g. grpc/(*serverStream).SendMsg/RecvMsg + writeStatusMu sync.Mutex + + // we just mirror the request content-type + contentType string + // we store both contentType and contentSubtype so we don't keep recreating them + // TODO make sure this is consistent across handler_server and http2_server + contentSubtype string + + stats []stats.Handler + logger *grpclog.PrefixLogger +} + +func (ht *serverHandlerTransport) Close(err error) { + ht.closeOnce.Do(func() { + if ht.logger.V(logLevel) { + ht.logger.Infof("Closing: %v", err) + } + close(ht.closedCh) + }) +} + +func (ht *serverHandlerTransport) Peer() *peer.Peer { + return &peer.Peer{ + Addr: ht.peer.Addr, + LocalAddr: ht.peer.LocalAddr, + AuthInfo: ht.peer.AuthInfo, + } +} + +// strAddr is a net.Addr backed by either a TCP "ip:port" string, or +// the empty string if unknown. +type strAddr string + +func (a strAddr) Network() string { + if a != "" { + // Per the documentation on net/http.Request.RemoteAddr, if this is + // set, it's set to the IP:port of the peer (hence, TCP): + // https://golang.org/pkg/net/http/#Request + // + // If we want to support Unix sockets later, we can + // add our own grpc-specific convention within the + // grpc codebase to set RemoteAddr to a different + // format, or probably better: we can attach it to the + // context and use that from serverHandlerTransport.RemoteAddr. + return "tcp" + } + return "" +} + +func (a strAddr) String() string { return string(a) } + +// do runs fn in the ServeHTTP goroutine. +func (ht *serverHandlerTransport) do(fn func()) error { + select { + case <-ht.closedCh: + return ErrConnClosing + case ht.writes <- fn: + return nil + } +} + +func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) error { + ht.writeStatusMu.Lock() + defer ht.writeStatusMu.Unlock() + + headersWritten := s.updateHeaderSent() + err := ht.do(func() { + if !headersWritten { + ht.writePendingHeaders(s) + } + + // And flush, in case no header or body has been sent yet. + // This forces a separation of headers and trailers if this is the + // first call (for example, in end2end tests's TestNoService). + ht.rw.(http.Flusher).Flush() + + h := ht.rw.Header() + h.Set("Grpc-Status", fmt.Sprintf("%d", st.Code())) + if m := st.Message(); m != "" { + h.Set("Grpc-Message", encodeGrpcMessage(m)) + } + + s.hdrMu.Lock() + if p := st.Proto(); p != nil && len(p.Details) > 0 { + delete(s.trailer, grpcStatusDetailsBinHeader) + stBytes, err := proto.Marshal(p) + if err != nil { + // TODO: return error instead, when callers are able to handle it. 
+ panic(err) + } + + h.Set(grpcStatusDetailsBinHeader, encodeBinHeader(stBytes)) + } + + if len(s.trailer) > 0 { + for k, vv := range s.trailer { + // Clients don't tolerate reading restricted headers after some non restricted ones were sent. + if isReservedHeader(k) { + continue + } + for _, v := range vv { + // http2 ResponseWriter mechanism to send undeclared Trailers after + // the headers have possibly been written. + h.Add(http2.TrailerPrefix+k, encodeMetadataHeader(k, v)) + } + } + } + s.hdrMu.Unlock() + }) + + if err == nil { // transport has not been closed + // Note: The trailer fields are compressed with hpack after this call returns. + // No WireLength field is set here. + for _, sh := range ht.stats { + sh.HandleRPC(s.Context(), &stats.OutTrailer{ + Trailer: s.trailer.Copy(), + }) + } + } + ht.Close(errors.New("finished writing status")) + return err +} + +// writePendingHeaders sets common and custom headers on the first +// write call (Write, WriteHeader, or WriteStatus) +func (ht *serverHandlerTransport) writePendingHeaders(s *Stream) { + ht.writeCommonHeaders(s) + ht.writeCustomHeaders(s) +} + +// writeCommonHeaders sets common headers on the first write +// call (Write, WriteHeader, or WriteStatus). +func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { + h := ht.rw.Header() + h["Date"] = nil // suppress Date to make tests happy; TODO: restore + h.Set("Content-Type", ht.contentType) + + // Predeclare trailers we'll set later in WriteStatus (after the body). + // This is a SHOULD in the HTTP RFC, and the way you add (known) + // Trailers per the net/http.ResponseWriter contract. + // See https://golang.org/pkg/net/http/#ResponseWriter + // and https://golang.org/pkg/net/http/#example_ResponseWriter_trailers + h.Add("Trailer", "Grpc-Status") + h.Add("Trailer", "Grpc-Message") + h.Add("Trailer", "Grpc-Status-Details-Bin") + + if s.sendCompress != "" { + h.Set("Grpc-Encoding", s.sendCompress) + } +} + +// writeCustomHeaders sets custom headers set on the stream via SetHeader +// on the first write call (Write, WriteHeader, or WriteStatus) +func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) { + h := ht.rw.Header() + + s.hdrMu.Lock() + for k, vv := range s.header { + if isReservedHeader(k) { + continue + } + for _, v := range vv { + h.Add(k, encodeMetadataHeader(k, v)) + } + } + + s.hdrMu.Unlock() +} + +func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { + headersWritten := s.updateHeaderSent() + return ht.do(func() { + if !headersWritten { + ht.writePendingHeaders(s) + } + ht.rw.Write(hdr) + ht.rw.Write(data) + ht.rw.(http.Flusher).Flush() + }) +} + +func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { + if err := s.SetHeader(md); err != nil { + return err + } + + headersWritten := s.updateHeaderSent() + err := ht.do(func() { + if !headersWritten { + ht.writePendingHeaders(s) + } + + ht.rw.WriteHeader(200) + ht.rw.(http.Flusher).Flush() + }) + + if err == nil { + for _, sh := range ht.stats { + // Note: The header fields are compressed with hpack after this call returns. + // No WireLength field is set here. + sh.HandleRPC(s.Context(), &stats.OutHeader{ + Header: md.Copy(), + Compression: s.sendCompress, + }) + } + } + return err +} + +func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*Stream)) { + // With this transport type there will be exactly 1 stream: this HTTP request. 
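The trailer handling above relies on two standard net/http mechanisms: trailers predeclared via the Trailer header before any body bytes are written (as writeCommonHeaders does for Grpc-Status and friends), and undeclared trailers added later through the http2.TrailerPrefix convention. A minimal handler showing the predeclared form is sketched below; the route, port, and trailer name are placeholders.

package main

import (
    "fmt"
    "log"
    "net/http"
)

func main() {
    http.HandleFunc("/demo", func(w http.ResponseWriter, r *http.Request) {
        // Predeclare the trailer before writing the body, the same way
        // writeCommonHeaders predeclares Grpc-Status and Grpc-Message.
        w.Header().Add("Trailer", "Demo-Status")
        w.WriteHeader(http.StatusOK)
        fmt.Fprintln(w, "body goes out first")
        // Setting the predeclared header after the body turns it into a
        // trailer on the wire.
        w.Header().Set("Demo-Status", "0")
    })
    log.Fatal(http.ListenAndServe("localhost:8080", nil))
}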
+ var cancel context.CancelFunc + if ht.timeoutSet { + ctx, cancel = context.WithTimeout(ctx, ht.timeout) + } else { + ctx, cancel = context.WithCancel(ctx) + } + + // requestOver is closed when the status has been written via WriteStatus. + requestOver := make(chan struct{}) + go func() { + select { + case <-requestOver: + case <-ht.closedCh: + case <-ht.req.Context().Done(): + } + cancel() + ht.Close(errors.New("request is done processing")) + }() + + ctx = metadata.NewIncomingContext(ctx, ht.headerMD) + req := ht.req + s := &Stream{ + id: 0, // irrelevant + ctx: ctx, + requestRead: func(int) {}, + cancel: cancel, + buf: newRecvBuffer(), + st: ht, + method: req.URL.Path, + recvCompress: req.Header.Get("grpc-encoding"), + contentSubtype: ht.contentSubtype, + headerWireLength: 0, // won't have access to header wire length until golang/go#18997. + } + s.trReader = &transportReader{ + reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}}, + windowHandler: func(int) {}, + } + + // readerDone is closed when the Body.Read-ing goroutine exits. + readerDone := make(chan struct{}) + go func() { + defer close(readerDone) + + // TODO: minimize garbage, optimize recvBuffer code/ownership + const readSize = 8196 + for buf := make([]byte, readSize); ; { + n, err := req.Body.Read(buf) + if n > 0 { + s.buf.put(recvMsg{buffer: bytes.NewBuffer(buf[:n:n])}) + buf = buf[n:] + } + if err != nil { + s.buf.put(recvMsg{err: mapRecvMsgError(err)}) + return + } + if len(buf) == 0 { + buf = make([]byte, readSize) + } + } + }() + + // startStream is provided by the *grpc.Server's serveStreams. + // It starts a goroutine serving s and exits immediately. + // The goroutine that is started is the one that then calls + // into ht, calling WriteHeader, Write, WriteStatus, Close, etc. + startStream(s) + + ht.runStream() + close(requestOver) + + // Wait for reading goroutine to finish. + req.Body.Close() + <-readerDone +} + +func (ht *serverHandlerTransport) runStream() { + for { + select { + case fn := <-ht.writes: + fn() + case <-ht.closedCh: + return + } + } +} + +func (ht *serverHandlerTransport) IncrMsgSent() {} + +func (ht *serverHandlerTransport) IncrMsgRecv() {} + +func (ht *serverHandlerTransport) Drain(debugData string) { + panic("Drain() is not implemented") +} + +// mapRecvMsgError returns the non-nil err into the appropriate +// error value as expected by callers of *grpc.parser.recvMsg. +// In particular, in can only be: +// - io.EOF +// - io.ErrUnexpectedEOF +// - of type transport.ConnectionError +// - an error from the status package +func mapRecvMsgError(err error) error { + if err == io.EOF || err == io.ErrUnexpectedEOF { + return err + } + if se, ok := err.(http2.StreamError); ok { + if code, ok := http2ErrConvTab[se.Code]; ok { + return status.Error(code, se.Error()) + } + } + if strings.Contains(err.Error(), "body closed by handler") { + return status.Error(codes.Canceled, err.Error()) + } + return connectionErrorf(true, err, err.Error()) +} diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go new file mode 100644 index 00000000..c33ac596 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -0,0 +1,1790 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "context" + "fmt" + "io" + "math" + "net" + "net/http" + "path/filepath" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/channelz" + icredentials "google.golang.org/grpc/internal/credentials" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/grpcutil" + imetadata "google.golang.org/grpc/internal/metadata" + istatus "google.golang.org/grpc/internal/status" + isyscall "google.golang.org/grpc/internal/syscall" + "google.golang.org/grpc/internal/transport/networktype" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +// clientConnectionCounter counts the number of connections a client has +// initiated (equal to the number of http2Clients created). Must be accessed +// atomically. +var clientConnectionCounter uint64 + +var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool)) + +// http2Client implements the ClientTransport interface with HTTP2. +type http2Client struct { + lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. + ctx context.Context + cancel context.CancelFunc + ctxDone <-chan struct{} // Cache the ctx.Done() chan. + userAgent string + // address contains the resolver returned address for this transport. + // If the `ServerName` field is set, it takes precedence over `CallHdr.Host` + // passed to `NewStream`, when determining the :authority header. + address resolver.Address + md metadata.MD + conn net.Conn // underlying communication channel + loopy *loopyWriter + remoteAddr net.Addr + localAddr net.Addr + authInfo credentials.AuthInfo // auth info about the connection + + readerDone chan struct{} // sync point to enable testing. + writerDone chan struct{} // sync point to enable testing. + // goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor) + // that the server sent GoAway on this transport. + goAway chan struct{} + + framer *framer + // controlBuf delivers all the control related tasks (e.g., window + // updates, reset streams, and various settings) to the controller. + // Do not access controlBuf with mu held. + controlBuf *controlBuffer + fc *trInFlow + // The scheme used: https if TLS is on, http otherwise. 
+ scheme string + + isSecure bool + + perRPCCreds []credentials.PerRPCCredentials + + kp keepalive.ClientParameters + keepaliveEnabled bool + + statsHandlers []stats.Handler + + initialWindowSize int32 + + // configured by peer through SETTINGS_MAX_HEADER_LIST_SIZE + maxSendHeaderListSize *uint32 + + bdpEst *bdpEstimator + + maxConcurrentStreams uint32 + streamQuota int64 + streamsQuotaAvailable chan struct{} + waitingStreams uint32 + nextID uint32 + registeredCompressors string + + // Do not access controlBuf with mu held. + mu sync.Mutex // guard the following variables + state transportState + activeStreams map[uint32]*Stream + // prevGoAway ID records the Last-Stream-ID in the previous GOAway frame. + prevGoAwayID uint32 + // goAwayReason records the http2.ErrCode and debug data received with the + // GoAway frame. + goAwayReason GoAwayReason + // goAwayDebugMessage contains a detailed human readable string about a + // GoAway frame, useful for error messages. + goAwayDebugMessage string + // A condition variable used to signal when the keepalive goroutine should + // go dormant. The condition for dormancy is based on the number of active + // streams and the `PermitWithoutStream` keepalive client parameter. And + // since the number of active streams is guarded by the above mutex, we use + // the same for this condition variable as well. + kpDormancyCond *sync.Cond + // A boolean to track whether the keepalive goroutine is dormant or not. + // This is checked before attempting to signal the above condition + // variable. + kpDormant bool + + // Fields below are for channelz metric collection. + channelzID *channelz.Identifier + czData *channelzData + + onClose func(GoAwayReason) + + bufferPool *bufferPool + + connectionID uint64 + logger *grpclog.PrefixLogger +} + +func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr resolver.Address, useProxy bool, grpcUA string) (net.Conn, error) { + address := addr.Addr + networkType, ok := networktype.Get(addr) + if fn != nil { + // Special handling for unix scheme with custom dialer. Back in the day, + // we did not have a unix resolver and therefore targets with a unix + // scheme would end up using the passthrough resolver. So, user's used a + // custom dialer in this case and expected the original dial target to + // be passed to the custom dialer. Now, we have a unix resolver. But if + // a custom dialer is specified, we want to retain the old behavior in + // terms of the address being passed to the custom dialer. + if networkType == "unix" && !strings.HasPrefix(address, "\x00") { + // Supported unix targets are either "unix://absolute-path" or + // "unix:relative-path". + if filepath.IsAbs(address) { + return fn(ctx, "unix://"+address) + } + return fn(ctx, "unix:"+address) + } + return fn(ctx, address) + } + if !ok { + networkType, address = parseDialTarget(address) + } + if networkType == "tcp" && useProxy { + return proxyDial(ctx, address, grpcUA) + } + return internal.NetDialerWithTCPKeepalive().DialContext(ctx, networkType, address) +} + +func isTemporary(err error) bool { + switch err := err.(type) { + case interface { + Temporary() bool + }: + return err.Temporary() + case interface { + Timeout() bool + }: + // Timeouts may be resolved upon retry, and are thus treated as + // temporary. + return err.Timeout() + } + return true +} + +// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 +// and starts to receive messages on it. Non-nil error returns if construction +// fails. 
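+// It dials the target address, runs the optional transport-credentials
+// handshake, starts the reader and keepalive goroutines, writes the HTTP/2
+// client preface plus the initial SETTINGS frame, and finally launches the
+// loopy writer that drains controlBuf onto the wire.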
+func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (_ *http2Client, err error) { + scheme := "http" + ctx, cancel := context.WithCancel(ctx) + defer func() { + if err != nil { + cancel() + } + }() + + // gRPC, resolver, balancer etc. can specify arbitrary data in the + // Attributes field of resolver.Address, which is shoved into connectCtx + // and passed to the dialer and credential handshaker. This makes it possible for + // address specific arbitrary data to reach custom dialers and credential handshakers. + connectCtx = icredentials.NewClientHandshakeInfoContext(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes}) + + conn, err := dial(connectCtx, opts.Dialer, addr, opts.UseProxy, opts.UserAgent) + if err != nil { + if opts.FailOnNonTempDialError { + return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err) + } + return nil, connectionErrorf(true, err, "transport: Error while dialing: %v", err) + } + + // Any further errors will close the underlying connection + defer func(conn net.Conn) { + if err != nil { + conn.Close() + } + }(conn) + + // The following defer and goroutine monitor the connectCtx for cancelation + // and deadline. On context expiration, the connection is hard closed and + // this function will naturally fail as a result. Otherwise, the defer + // waits for the goroutine to exit to prevent the context from being + // monitored (and to prevent the connection from ever being closed) after + // returning from this function. + ctxMonitorDone := grpcsync.NewEvent() + newClientCtx, newClientDone := context.WithCancel(connectCtx) + defer func() { + newClientDone() // Awaken the goroutine below if connectCtx hasn't expired. + <-ctxMonitorDone.Done() // Wait for the goroutine below to exit. + }() + go func(conn net.Conn) { + defer ctxMonitorDone.Fire() // Signal this goroutine has exited. + <-newClientCtx.Done() // Block until connectCtx expires or the defer above executes. + if err := connectCtx.Err(); err != nil { + // connectCtx expired before exiting the function. Hard close the connection. + if logger.V(logLevel) { + logger.Infof("Aborting due to connect deadline expiring: %v", err) + } + conn.Close() + } + }(conn) + + kp := opts.KeepaliveParams + // Validate keepalive parameters. 
+ if kp.Time == 0 { + kp.Time = defaultClientKeepaliveTime + } + if kp.Timeout == 0 { + kp.Timeout = defaultClientKeepaliveTimeout + } + keepaliveEnabled := false + if kp.Time != infinity { + if err = isyscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { + return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) + } + keepaliveEnabled = true + } + var ( + isSecure bool + authInfo credentials.AuthInfo + ) + transportCreds := opts.TransportCredentials + perRPCCreds := opts.PerRPCCredentials + + if b := opts.CredsBundle; b != nil { + if t := b.TransportCredentials(); t != nil { + transportCreds = t + } + if t := b.PerRPCCredentials(); t != nil { + perRPCCreds = append(perRPCCreds, t) + } + } + if transportCreds != nil { + conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, conn) + if err != nil { + return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err) + } + for _, cd := range perRPCCreds { + if cd.RequireTransportSecurity() { + if ci, ok := authInfo.(interface { + GetCommonAuthInfo() credentials.CommonAuthInfo + }); ok { + secLevel := ci.GetCommonAuthInfo().SecurityLevel + if secLevel != credentials.InvalidSecurityLevel && secLevel < credentials.PrivacyAndIntegrity { + return nil, connectionErrorf(true, nil, "transport: cannot send secure credentials on an insecure connection") + } + } + } + } + isSecure = true + if transportCreds.Info().SecurityProtocol == "tls" { + scheme = "https" + } + } + dynamicWindow := true + icwz := int32(initialWindowSize) + if opts.InitialConnWindowSize >= defaultWindowSize { + icwz = opts.InitialConnWindowSize + dynamicWindow = false + } + writeBufSize := opts.WriteBufferSize + readBufSize := opts.ReadBufferSize + maxHeaderListSize := defaultClientMaxHeaderListSize + if opts.MaxHeaderListSize != nil { + maxHeaderListSize = *opts.MaxHeaderListSize + } + t := &http2Client{ + ctx: ctx, + ctxDone: ctx.Done(), // Cache Done chan. + cancel: cancel, + userAgent: opts.UserAgent, + registeredCompressors: grpcutil.RegisteredCompressors(), + address: addr, + conn: conn, + remoteAddr: conn.RemoteAddr(), + localAddr: conn.LocalAddr(), + authInfo: authInfo, + readerDone: make(chan struct{}), + writerDone: make(chan struct{}), + goAway: make(chan struct{}), + framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize), + fc: &trInFlow{limit: uint32(icwz)}, + scheme: scheme, + activeStreams: make(map[uint32]*Stream), + isSecure: isSecure, + perRPCCreds: perRPCCreds, + kp: kp, + statsHandlers: opts.StatsHandlers, + initialWindowSize: initialWindowSize, + nextID: 1, + maxConcurrentStreams: defaultMaxStreamsClient, + streamQuota: defaultMaxStreamsClient, + streamsQuotaAvailable: make(chan struct{}, 1), + czData: new(channelzData), + keepaliveEnabled: keepaliveEnabled, + bufferPool: newBufferPool(), + onClose: onClose, + } + t.logger = prefixLoggerForClientTransport(t) + // Add peer information to the http2client context. 
+ t.ctx = peer.NewContext(t.ctx, t.getPeer()) + + if md, ok := addr.Metadata.(*metadata.MD); ok { + t.md = *md + } else if md := imetadata.Get(addr); md != nil { + t.md = md + } + t.controlBuf = newControlBuffer(t.ctxDone) + if opts.InitialWindowSize >= defaultWindowSize { + t.initialWindowSize = opts.InitialWindowSize + dynamicWindow = false + } + if dynamicWindow { + t.bdpEst = &bdpEstimator{ + bdp: initialWindowSize, + updateFlowControl: t.updateFlowControl, + } + } + for _, sh := range t.statsHandlers { + t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{ + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + }) + connBegin := &stats.ConnBegin{ + Client: true, + } + sh.HandleConn(t.ctx, connBegin) + } + t.channelzID, err = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr)) + if err != nil { + return nil, err + } + if t.keepaliveEnabled { + t.kpDormancyCond = sync.NewCond(&t.mu) + go t.keepalive() + } + + // Start the reader goroutine for incoming messages. Each transport has a + // dedicated goroutine which reads HTTP2 frames from the network. Then it + // dispatches the frame to the corresponding stream entity. When the + // server preface is received, readerErrCh is closed. If an error occurs + // first, an error is pushed to the channel. This must be checked before + // returning from this function. + readerErrCh := make(chan error, 1) + go t.reader(readerErrCh) + defer func() { + if err == nil { + err = <-readerErrCh + } + if err != nil { + t.Close(err) + } + }() + + // Send connection preface to server. + n, err := t.conn.Write(clientPreface) + if err != nil { + err = connectionErrorf(true, err, "transport: failed to write client preface: %v", err) + return nil, err + } + if n != len(clientPreface) { + err = connectionErrorf(true, nil, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) + return nil, err + } + var ss []http2.Setting + + if t.initialWindowSize != defaultWindowSize { + ss = append(ss, http2.Setting{ + ID: http2.SettingInitialWindowSize, + Val: uint32(t.initialWindowSize), + }) + } + if opts.MaxHeaderListSize != nil { + ss = append(ss, http2.Setting{ + ID: http2.SettingMaxHeaderListSize, + Val: *opts.MaxHeaderListSize, + }) + } + err = t.framer.fr.WriteSettings(ss...) + if err != nil { + err = connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err) + return nil, err + } + // Adjust the connection flow control window if needed. + if delta := uint32(icwz - defaultWindowSize); delta > 0 { + if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil { + err = connectionErrorf(true, err, "transport: failed to write window update: %v", err) + return nil, err + } + } + + t.connectionID = atomic.AddUint64(&clientConnectionCounter, 1) + + if err := t.framer.writer.Flush(); err != nil { + return nil, err + } + go func() { + t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger) + t.loopy.run() + close(t.writerDone) + }() + return t, nil +} + +func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { + // TODO(zhaoq): Handle uint32 overflow of Stream.id. 
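+	// Each stream owns a fresh receive buffer; requestRead and the
+	// transportReader's windowHandler below feed application reads back into
+	// per-stream flow control through adjustWindow and updateWindow.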
+ s := &Stream{ + ct: t, + done: make(chan struct{}), + method: callHdr.Method, + sendCompress: callHdr.SendCompress, + buf: newRecvBuffer(), + headerChan: make(chan struct{}), + contentSubtype: callHdr.ContentSubtype, + doneFunc: callHdr.DoneFunc, + } + s.wq = newWriteQuota(defaultWriteQuota, s.done) + s.requestRead = func(n int) { + t.adjustWindow(s, uint32(n)) + } + // The client side stream context should have exactly the same life cycle with the user provided context. + // That means, s.ctx should be read-only. And s.ctx is done iff ctx is done. + // So we use the original context here instead of creating a copy. + s.ctx = ctx + s.trReader = &transportReader{ + reader: &recvBufferReader{ + ctx: s.ctx, + ctxDone: s.ctx.Done(), + recv: s.buf, + closeStream: func(err error) { + t.CloseStream(s, err) + }, + freeBuffer: t.bufferPool.put, + }, + windowHandler: func(n int) { + t.updateWindow(s, uint32(n)) + }, + } + return s +} + +func (t *http2Client) getPeer() *peer.Peer { + return &peer.Peer{ + Addr: t.remoteAddr, + AuthInfo: t.authInfo, // Can be nil + LocalAddr: t.localAddr, + } +} + +func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) ([]hpack.HeaderField, error) { + aud := t.createAudience(callHdr) + ri := credentials.RequestInfo{ + Method: callHdr.Method, + AuthInfo: t.authInfo, + } + ctxWithRequestInfo := icredentials.NewRequestInfoContext(ctx, ri) + authData, err := t.getTrAuthData(ctxWithRequestInfo, aud) + if err != nil { + return nil, err + } + callAuthData, err := t.getCallAuthData(ctxWithRequestInfo, aud, callHdr) + if err != nil { + return nil, err + } + // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields + // first and create a slice of that exact size. + // Make the slice of certain predictable size to reduce allocations made by append. + hfLen := 7 // :method, :scheme, :path, :authority, content-type, user-agent, te + hfLen += len(authData) + len(callAuthData) + headerFields := make([]hpack.HeaderField, 0, hfLen) + headerFields = append(headerFields, hpack.HeaderField{Name: ":method", Value: "POST"}) + headerFields = append(headerFields, hpack.HeaderField{Name: ":scheme", Value: t.scheme}) + headerFields = append(headerFields, hpack.HeaderField{Name: ":path", Value: callHdr.Method}) + headerFields = append(headerFields, hpack.HeaderField{Name: ":authority", Value: callHdr.Host}) + headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(callHdr.ContentSubtype)}) + headerFields = append(headerFields, hpack.HeaderField{Name: "user-agent", Value: t.userAgent}) + headerFields = append(headerFields, hpack.HeaderField{Name: "te", Value: "trailers"}) + if callHdr.PreviousAttempts > 0 { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-previous-rpc-attempts", Value: strconv.Itoa(callHdr.PreviousAttempts)}) + } + + registeredCompressors := t.registeredCompressors + if callHdr.SendCompress != "" { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress}) + // Include the outgoing compressor name when compressor is not registered + // via encoding.RegisterCompressor. This is possible when client uses + // WithCompressor dial option. 
+ if !grpcutil.IsCompressorNameRegistered(callHdr.SendCompress) { + if registeredCompressors != "" { + registeredCompressors += "," + } + registeredCompressors += callHdr.SendCompress + } + } + + if registeredCompressors != "" { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: registeredCompressors}) + } + if dl, ok := ctx.Deadline(); ok { + // Send out timeout regardless its value. The server can detect timeout context by itself. + // TODO(mmukhi): Perhaps this field should be updated when actually writing out to the wire. + timeout := time.Until(dl) + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: grpcutil.EncodeDuration(timeout)}) + } + for k, v := range authData { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + for k, v := range callAuthData { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + if b := stats.OutgoingTags(ctx); b != nil { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-tags-bin", Value: encodeBinHeader(b)}) + } + if b := stats.OutgoingTrace(ctx); b != nil { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)}) + } + + if md, added, ok := metadataFromOutgoingContextRaw(ctx); ok { + var k string + for k, vv := range md { + // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. + if isReservedHeader(k) { + continue + } + for _, v := range vv { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + } + for _, vv := range added { + for i, v := range vv { + if i%2 == 0 { + k = strings.ToLower(v) + continue + } + // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. + if isReservedHeader(k) { + continue + } + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + } + } + for k, vv := range t.md { + if isReservedHeader(k) { + continue + } + for _, v := range vv { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + } + return headerFields, nil +} + +func (t *http2Client) createAudience(callHdr *CallHdr) string { + // Create an audience string only if needed. + if len(t.perRPCCreds) == 0 && callHdr.Creds == nil { + return "" + } + // Construct URI required to get auth request metadata. + // Omit port if it is the default one. + host := strings.TrimSuffix(callHdr.Host, ":443") + pos := strings.LastIndex(callHdr.Method, "/") + if pos == -1 { + pos = len(callHdr.Method) + } + return "https://" + host + callHdr.Method[:pos] +} + +func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[string]string, error) { + if len(t.perRPCCreds) == 0 { + return nil, nil + } + authData := map[string]string{} + for _, c := range t.perRPCCreds { + data, err := c.GetRequestMetadata(ctx, audience) + if err != nil { + if st, ok := status.FromError(err); ok { + // Restrict the code to the list allowed by gRFC A54. + if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err) + } + return nil, err + } + + return nil, status.Errorf(codes.Unauthenticated, "transport: per-RPC creds failed due to error: %v", err) + } + for k, v := range data { + // Capital header names are illegal in HTTP/2. 
+ k = strings.ToLower(k) + authData[k] = v + } + } + return authData, nil +} + +func (t *http2Client) getCallAuthData(ctx context.Context, audience string, callHdr *CallHdr) (map[string]string, error) { + var callAuthData map[string]string + // Check if credentials.PerRPCCredentials were provided via call options. + // Note: if these credentials are provided both via dial options and call + // options, then both sets of credentials will be applied. + if callCreds := callHdr.Creds; callCreds != nil { + if callCreds.RequireTransportSecurity() { + ri, _ := credentials.RequestInfoFromContext(ctx) + if !t.isSecure || credentials.CheckSecurityLevel(ri.AuthInfo, credentials.PrivacyAndIntegrity) != nil { + return nil, status.Error(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection") + } + } + data, err := callCreds.GetRequestMetadata(ctx, audience) + if err != nil { + if st, ok := status.FromError(err); ok { + // Restrict the code to the list allowed by gRFC A54. + if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err) + } + return nil, err + } + return nil, status.Errorf(codes.Internal, "transport: per-RPC creds failed due to error: %v", err) + } + callAuthData = make(map[string]string, len(data)) + for k, v := range data { + // Capital header names are illegal in HTTP/2 + k = strings.ToLower(k) + callAuthData[k] = v + } + } + return callAuthData, nil +} + +// NewStreamError wraps an error and reports additional information. Typically +// NewStream errors result in transparent retry, as they mean nothing went onto +// the wire. However, there are two notable exceptions: +// +// 1. If the stream headers violate the max header list size allowed by the +// server. It's possible this could succeed on another transport, even if +// it's unlikely, but do not transparently retry. +// 2. If the credentials errored when requesting their headers. In this case, +// it's possible a retry can fix the problem, but indefinitely transparently +// retrying is not appropriate as it is likely the credentials, if they can +// eventually succeed, would need I/O to do so. +type NewStreamError struct { + Err error + + AllowTransparentRetry bool +} + +func (e NewStreamError) Error() string { + return e.Err.Error() +} + +// NewStream creates a stream and registers it into the transport as "active" +// streams. All non-nil errors returned will be *NewStreamError. +func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) { + ctx = peer.NewContext(ctx, t.getPeer()) + + // ServerName field of the resolver returned address takes precedence over + // Host field of CallHdr to determine the :authority header. This is because, + // the ServerName field takes precedence for server authentication during + // TLS handshake, and the :authority header should match the value used + // for server authentication. + if t.address.ServerName != "" { + newCallHdr := *callHdr + newCallHdr.Host = t.address.ServerName + callHdr = &newCallHdr + } + + headerFields, err := t.createHeaderFields(ctx, callHdr) + if err != nil { + return nil, &NewStreamError{Err: err, AllowTransparentRetry: false} + } + s := t.newStream(ctx, callHdr) + cleanup := func(err error) { + if s.swapState(streamDone) == streamDone { + // If it was already done, return. + return + } + // The stream was unprocessed by the server. 
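+		// Marking it unprocessed lets the retry layer transparently retry the
+		// RPC on another transport, since the request never reached the server.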
+ atomic.StoreUint32(&s.unprocessed, 1) + s.write(recvMsg{err: err}) + close(s.done) + // If headerChan isn't closed, then close it. + if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { + close(s.headerChan) + } + } + hdr := &headerFrame{ + hf: headerFields, + endStream: false, + initStream: func(id uint32) error { + t.mu.Lock() + // TODO: handle transport closure in loopy instead and remove this + // initStream is never called when transport is draining. + if t.state == closing { + t.mu.Unlock() + cleanup(ErrConnClosing) + return ErrConnClosing + } + if channelz.IsOn() { + atomic.AddInt64(&t.czData.streamsStarted, 1) + atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano()) + } + // If the keepalive goroutine has gone dormant, wake it up. + if t.kpDormant { + t.kpDormancyCond.Signal() + } + t.mu.Unlock() + return nil + }, + onOrphaned: cleanup, + wq: s.wq, + } + firstTry := true + var ch chan struct{} + transportDrainRequired := false + checkForStreamQuota := func(it any) bool { + if t.streamQuota <= 0 { // Can go negative if server decreases it. + if firstTry { + t.waitingStreams++ + } + ch = t.streamsQuotaAvailable + return false + } + if !firstTry { + t.waitingStreams-- + } + t.streamQuota-- + h := it.(*headerFrame) + h.streamID = t.nextID + t.nextID += 2 + + // Drain client transport if nextID > MaxStreamID which signals gRPC that + // the connection is closed and a new one must be created for subsequent RPCs. + transportDrainRequired = t.nextID > MaxStreamID + + s.id = h.streamID + s.fc = &inFlow{limit: uint32(t.initialWindowSize)} + t.mu.Lock() + if t.state == draining || t.activeStreams == nil { // Can be niled from Close(). + t.mu.Unlock() + return false // Don't create a stream if the transport is already closed. + } + t.activeStreams[s.id] = s + t.mu.Unlock() + if t.streamQuota > 0 && t.waitingStreams > 0 { + select { + case t.streamsQuotaAvailable <- struct{}{}: + default: + } + } + return true + } + var hdrListSizeErr error + checkForHeaderListSize := func(it any) bool { + if t.maxSendHeaderListSize == nil { + return true + } + hdrFrame := it.(*headerFrame) + var sz int64 + for _, f := range hdrFrame.hf { + if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) { + hdrListSizeErr = status.Errorf(codes.Internal, "header list size to send violates the maximum size (%d bytes) set by server", *t.maxSendHeaderListSize) + return false + } + } + return true + } + for { + success, err := t.controlBuf.executeAndPut(func(it any) bool { + return checkForHeaderListSize(it) && checkForStreamQuota(it) + }, hdr) + if err != nil { + // Connection closed. + return nil, &NewStreamError{Err: err, AllowTransparentRetry: true} + } + if success { + break + } + if hdrListSizeErr != nil { + return nil, &NewStreamError{Err: hdrListSizeErr} + } + firstTry = false + select { + case <-ch: + case <-ctx.Done(): + return nil, &NewStreamError{Err: ContextErr(ctx.Err())} + case <-t.goAway: + return nil, &NewStreamError{Err: errStreamDrain, AllowTransparentRetry: true} + case <-t.ctx.Done(): + return nil, &NewStreamError{Err: ErrConnClosing, AllowTransparentRetry: true} + } + } + if len(t.statsHandlers) != 0 { + header, ok := metadata.FromOutgoingContext(ctx) + if ok { + header.Set("user-agent", t.userAgent) + } else { + header = metadata.Pairs("user-agent", t.userAgent) + } + for _, sh := range t.statsHandlers { + // Note: The header fields are compressed with hpack after this call returns. + // No WireLength field is set here. 
+ // Note: Creating a new stats object to prevent pollution. + outHeader := &stats.OutHeader{ + Client: true, + FullMethod: callHdr.Method, + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + Compression: callHdr.SendCompress, + Header: header, + } + sh.HandleRPC(s.ctx, outHeader) + } + } + if transportDrainRequired { + if t.logger.V(logLevel) { + t.logger.Infof("Draining transport: t.nextID > MaxStreamID") + } + t.GracefulClose() + } + return s, nil +} + +// CloseStream clears the footprint of a stream when the stream is not needed any more. +// This must not be executed in reader's goroutine. +func (t *http2Client) CloseStream(s *Stream, err error) { + var ( + rst bool + rstCode http2.ErrCode + ) + if err != nil { + rst = true + rstCode = http2.ErrCodeCancel + } + t.closeStream(s, err, rst, rstCode, status.Convert(err), nil, false) +} + +func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) { + // Set stream status to done. + if s.swapState(streamDone) == streamDone { + // If it was already done, return. If multiple closeStream calls + // happen simultaneously, wait for the first to finish. + <-s.done + return + } + // status and trailers can be updated here without any synchronization because the stream goroutine will + // only read it after it sees an io.EOF error from read or write and we'll write those errors + // only after updating this. + s.status = st + if len(mdata) > 0 { + s.trailer = mdata + } + if err != nil { + // This will unblock reads eventually. + s.write(recvMsg{err: err}) + } + // If headerChan isn't closed, then close it. + if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { + s.noHeaders = true + close(s.headerChan) + } + cleanup := &cleanupStream{ + streamID: s.id, + onWrite: func() { + t.mu.Lock() + if t.activeStreams != nil { + delete(t.activeStreams, s.id) + } + t.mu.Unlock() + if channelz.IsOn() { + if eosReceived { + atomic.AddInt64(&t.czData.streamsSucceeded, 1) + } else { + atomic.AddInt64(&t.czData.streamsFailed, 1) + } + } + }, + rst: rst, + rstCode: rstCode, + } + addBackStreamQuota := func(any) bool { + t.streamQuota++ + if t.streamQuota > 0 && t.waitingStreams > 0 { + select { + case t.streamsQuotaAvailable <- struct{}{}: + default: + } + } + return true + } + t.controlBuf.executeAndPut(addBackStreamQuota, cleanup) + // This will unblock write. + close(s.done) + if s.doneFunc != nil { + s.doneFunc() + } +} + +// Close kicks off the shutdown process of the transport. This should be called +// only once on a transport. Once it is called, the transport should not be +// accessed any more. +func (t *http2Client) Close(err error) { + t.mu.Lock() + // Make sure we only close once. + if t.state == closing { + t.mu.Unlock() + return + } + if t.logger.V(logLevel) { + t.logger.Infof("Closing: %v", err) + } + // Call t.onClose ASAP to prevent the client from attempting to create new + // streams. + if t.state != draining { + t.onClose(GoAwayInvalid) + } + t.state = closing + streams := t.activeStreams + t.activeStreams = nil + if t.kpDormant { + // If the keepalive goroutine is blocked on this condition variable, we + // should unblock it so that the goroutine eventually exits. 
+ t.kpDormancyCond.Signal() + } + t.mu.Unlock() + t.controlBuf.finish() + t.cancel() + t.conn.Close() + channelz.RemoveEntry(t.channelzID) + // Append info about previous goaways if there were any, since this may be important + // for understanding the root cause for this connection to be closed. + _, goAwayDebugMessage := t.GetGoAwayReason() + + var st *status.Status + if len(goAwayDebugMessage) > 0 { + st = status.Newf(codes.Unavailable, "closing transport due to: %v, received prior goaway: %v", err, goAwayDebugMessage) + err = st.Err() + } else { + st = status.New(codes.Unavailable, err.Error()) + } + + // Notify all active streams. + for _, s := range streams { + t.closeStream(s, err, false, http2.ErrCodeNo, st, nil, false) + } + for _, sh := range t.statsHandlers { + connEnd := &stats.ConnEnd{ + Client: true, + } + sh.HandleConn(t.ctx, connEnd) + } +} + +// GracefulClose sets the state to draining, which prevents new streams from +// being created and causes the transport to be closed when the last active +// stream is closed. If there are no active streams, the transport is closed +// immediately. This does nothing if the transport is already draining or +// closing. +func (t *http2Client) GracefulClose() { + t.mu.Lock() + // Make sure we move to draining only from active. + if t.state == draining || t.state == closing { + t.mu.Unlock() + return + } + if t.logger.V(logLevel) { + t.logger.Infof("GracefulClose called") + } + t.onClose(GoAwayInvalid) + t.state = draining + active := len(t.activeStreams) + t.mu.Unlock() + if active == 0 { + t.Close(connectionErrorf(true, nil, "no active streams left to process while draining")) + return + } + t.controlBuf.put(&incomingGoAway{}) +} + +// Write formats the data into HTTP2 data frame(s) and sends it out. The caller +// should proceed only if Write returns nil. +func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { + if opts.Last { + // If it's the last message, update stream state. + if !s.compareAndSwapState(streamActive, streamWriteDone) { + return errStreamDone + } + } else if s.getState() != streamActive { + return errStreamDone + } + df := &dataFrame{ + streamID: s.id, + endStream: opts.Last, + h: hdr, + d: data, + } + if hdr != nil || data != nil { // If it's not an empty data frame, check quota. + if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { + return err + } + } + return t.controlBuf.put(df) +} + +func (t *http2Client) getStream(f http2.Frame) *Stream { + t.mu.Lock() + s := t.activeStreams[f.Header().StreamID] + t.mu.Unlock() + return s +} + +// adjustWindow sends out extra window update over the initial window size +// of stream if the application is requesting data larger in size than +// the window. +func (t *http2Client) adjustWindow(s *Stream, n uint32) { + if w := s.fc.maybeAdjust(n); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) + } +} + +// updateWindow adjusts the inbound quota for the stream. +// Window updates will be sent out when the cumulative quota +// exceeds the corresponding threshold. +func (t *http2Client) updateWindow(s *Stream, n uint32) { + if w := s.fc.onRead(n); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) + } +} + +// updateFlowControl updates the incoming flow control windows +// for the transport and the stream based on the current bdp +// estimation. 
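+// It raises the limit on every active stream, sends a connection-level
+// WINDOW_UPDATE for the transport, and advertises the new per-stream window
+// to the server via SETTINGS_INITIAL_WINDOW_SIZE.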
+func (t *http2Client) updateFlowControl(n uint32) { + updateIWS := func(any) bool { + t.initialWindowSize = int32(n) + t.mu.Lock() + for _, s := range t.activeStreams { + s.fc.newLimit(n) + } + t.mu.Unlock() + return true + } + t.controlBuf.executeAndPut(updateIWS, &outgoingWindowUpdate{streamID: 0, increment: t.fc.newLimit(n)}) + t.controlBuf.put(&outgoingSettings{ + ss: []http2.Setting{ + { + ID: http2.SettingInitialWindowSize, + Val: n, + }, + }, + }) +} + +func (t *http2Client) handleData(f *http2.DataFrame) { + size := f.Header().Length + var sendBDPPing bool + if t.bdpEst != nil { + sendBDPPing = t.bdpEst.add(size) + } + // Decouple connection's flow control from application's read. + // An update on connection's flow control should not depend on + // whether user application has read the data or not. Such a + // restriction is already imposed on the stream's flow control, + // and therefore the sender will be blocked anyways. + // Decoupling the connection flow control will prevent other + // active(fast) streams from starving in presence of slow or + // inactive streams. + // + if w := t.fc.onData(size); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) + } + if sendBDPPing { + // Avoid excessive ping detection (e.g. in an L7 proxy) + // by sending a window update prior to the BDP ping. + + if w := t.fc.reset(); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) + } + + t.controlBuf.put(bdpPing) + } + // Select the right stream to dispatch. + s := t.getStream(f) + if s == nil { + return + } + if size > 0 { + if err := s.fc.onData(size); err != nil { + t.closeStream(s, io.EOF, true, http2.ErrCodeFlowControl, status.New(codes.Internal, err.Error()), nil, false) + return + } + if f.Header().Flags.Has(http2.FlagDataPadded) { + if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{s.id, w}) + } + } + // TODO(bradfitz, zhaoq): A copy is required here because there is no + // guarantee f.Data() is consumed before the arrival of next frame. + // Can this copy be eliminated? + if len(f.Data()) > 0 { + buffer := t.bufferPool.get() + buffer.Reset() + buffer.Write(f.Data()) + s.write(recvMsg{buffer: buffer}) + } + } + // The server has closed the stream without sending trailers. Record that + // the read direction is closed, and set the status appropriately. + if f.StreamEnded() { + t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.New(codes.Internal, "server closed the stream without sending trailers"), nil, true) + } +} + +func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { + s := t.getStream(f) + if s == nil { + return + } + if f.ErrCode == http2.ErrCodeRefusedStream { + // The stream was unprocessed by the server. + atomic.StoreUint32(&s.unprocessed, 1) + } + statusCode, ok := http2ErrConvTab[f.ErrCode] + if !ok { + if t.logger.V(logLevel) { + t.logger.Infof("Received a RST_STREAM frame with code %q, but found no mapped gRPC status", f.ErrCode) + } + statusCode = codes.Unknown + } + if statusCode == codes.Canceled { + if d, ok := s.ctx.Deadline(); ok && !d.After(time.Now()) { + // Our deadline was already exceeded, and that was likely the cause + // of this cancelation. Alter the status code accordingly. 
+ statusCode = codes.DeadlineExceeded + } + } + t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %v", f.ErrCode), nil, false) +} + +func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) { + if f.IsAck() { + return + } + var maxStreams *uint32 + var ss []http2.Setting + var updateFuncs []func() + f.ForeachSetting(func(s http2.Setting) error { + switch s.ID { + case http2.SettingMaxConcurrentStreams: + maxStreams = new(uint32) + *maxStreams = s.Val + case http2.SettingMaxHeaderListSize: + updateFuncs = append(updateFuncs, func() { + t.maxSendHeaderListSize = new(uint32) + *t.maxSendHeaderListSize = s.Val + }) + default: + ss = append(ss, s) + } + return nil + }) + if isFirst && maxStreams == nil { + maxStreams = new(uint32) + *maxStreams = math.MaxUint32 + } + sf := &incomingSettings{ + ss: ss, + } + if maxStreams != nil { + updateStreamQuota := func() { + delta := int64(*maxStreams) - int64(t.maxConcurrentStreams) + t.maxConcurrentStreams = *maxStreams + t.streamQuota += delta + if delta > 0 && t.waitingStreams > 0 { + close(t.streamsQuotaAvailable) // wake all of them up. + t.streamsQuotaAvailable = make(chan struct{}, 1) + } + } + updateFuncs = append(updateFuncs, updateStreamQuota) + } + t.controlBuf.executeAndPut(func(any) bool { + for _, f := range updateFuncs { + f() + } + return true + }, sf) +} + +func (t *http2Client) handlePing(f *http2.PingFrame) { + if f.IsAck() { + // Maybe it's a BDP ping. + if t.bdpEst != nil { + t.bdpEst.calculate(f.Data) + } + return + } + pingAck := &ping{ack: true} + copy(pingAck.data[:], f.Data[:]) + t.controlBuf.put(pingAck) +} + +func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { + t.mu.Lock() + if t.state == closing { + t.mu.Unlock() + return + } + if f.ErrCode == http2.ErrCodeEnhanceYourCalm && string(f.DebugData()) == "too_many_pings" { + // When a client receives a GOAWAY with error code ENHANCE_YOUR_CALM and debug + // data equal to ASCII "too_many_pings", it should log the occurrence at a log level that is + // enabled by default and double the configure KEEPALIVE_TIME used for new connections + // on that channel. + logger.Errorf("Client received GoAway with error code ENHANCE_YOUR_CALM and debug data equal to ASCII \"too_many_pings\".") + } + id := f.LastStreamID + if id > 0 && id%2 == 0 { + t.mu.Unlock() + t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered numbered stream id: %v", id)) + return + } + // A client can receive multiple GoAways from the server (see + // https://github.com/grpc/grpc-go/issues/1387). The idea is that the first + // GoAway will be sent with an ID of MaxInt32 and the second GoAway will be + // sent after an RTT delay with the ID of the last stream the server will + // process. + // + // Therefore, when we get the first GoAway we don't necessarily close any + // streams. While in case of second GoAway we close all streams created after + // the GoAwayId. This way streams that were in-flight while the GoAway from + // server was being sent don't get killed. + select { + case <-t.goAway: // t.goAway has been closed (i.e.,multiple GoAways). + // If there are multiple GoAways the first one should always have an ID greater than the following ones. 
+ if id > t.prevGoAwayID { + t.mu.Unlock() + t.Close(connectionErrorf(true, nil, "received goaway with stream id: %v, which exceeds stream id of previous goaway: %v", id, t.prevGoAwayID)) + return + } + default: + t.setGoAwayReason(f) + close(t.goAway) + defer t.controlBuf.put(&incomingGoAway{}) // Defer as t.mu is currently held. + // Notify the clientconn about the GOAWAY before we set the state to + // draining, to allow the client to stop attempting to create streams + // before disallowing new streams on this connection. + if t.state != draining { + t.onClose(t.goAwayReason) + t.state = draining + } + } + // All streams with IDs greater than the GoAwayId + // and smaller than the previous GoAway ID should be killed. + upperLimit := t.prevGoAwayID + if upperLimit == 0 { // This is the first GoAway Frame. + upperLimit = math.MaxUint32 // Kill all streams after the GoAway ID. + } + + t.prevGoAwayID = id + if len(t.activeStreams) == 0 { + t.mu.Unlock() + t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams")) + return + } + + streamsToClose := make([]*Stream, 0) + for streamID, stream := range t.activeStreams { + if streamID > id && streamID <= upperLimit { + // The stream was unprocessed by the server. + atomic.StoreUint32(&stream.unprocessed, 1) + streamsToClose = append(streamsToClose, stream) + } + } + t.mu.Unlock() + // Called outside t.mu because closeStream can take controlBuf's mu, which + // could induce deadlock and is not allowed. + for _, stream := range streamsToClose { + t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false) + } +} + +// setGoAwayReason sets the value of t.goAwayReason based +// on the GoAway frame received. +// It expects a lock on transport's mutex to be held by +// the caller. +func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) { + t.goAwayReason = GoAwayNoReason + switch f.ErrCode { + case http2.ErrCodeEnhanceYourCalm: + if string(f.DebugData()) == "too_many_pings" { + t.goAwayReason = GoAwayTooManyPings + } + } + if len(f.DebugData()) == 0 { + t.goAwayDebugMessage = fmt.Sprintf("code: %s", f.ErrCode) + } else { + t.goAwayDebugMessage = fmt.Sprintf("code: %s, debug data: %q", f.ErrCode, string(f.DebugData())) + } +} + +func (t *http2Client) GetGoAwayReason() (GoAwayReason, string) { + t.mu.Lock() + defer t.mu.Unlock() + return t.goAwayReason, t.goAwayDebugMessage +} + +func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) { + t.controlBuf.put(&incomingWindowUpdate{ + streamID: f.Header().StreamID, + increment: f.Increment, + }) +} + +// operateHeaders takes action on the decoded headers. +func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { + s := t.getStream(frame) + if s == nil { + return + } + endStream := frame.StreamEnded() + atomic.StoreUint32(&s.bytesReceived, 1) + initialHeader := atomic.LoadUint32(&s.headerChanClosed) == 0 + + if !initialHeader && !endStream { + // As specified by gRPC over HTTP2, a HEADERS frame (and associated CONTINUATION frames) can only appear at the start or end of a stream. Therefore, second HEADERS frame must have EOS bit set. + st := status.New(codes.Internal, "a HEADERS frame cannot appear in the middle of a stream") + t.closeStream(s, st.Err(), true, http2.ErrCodeProtocol, st, nil, false) + return + } + + // frame.Truncated is set to true when framer detects that the current header + // list size hits MaxHeaderListSize limit. 
+ if frame.Truncated { + se := status.New(codes.Internal, "peer header list size exceeded limit") + t.closeStream(s, se.Err(), true, http2.ErrCodeFrameSize, se, nil, endStream) + return + } + + var ( + // If a gRPC Response-Headers has already been received, then it means + // that the peer is speaking gRPC and we are in gRPC mode. + isGRPC = !initialHeader + mdata = make(map[string][]string) + contentTypeErr = "malformed header: missing HTTP content-type" + grpcMessage string + recvCompress string + httpStatusCode *int + httpStatusErr string + rawStatusCode = codes.Unknown + // headerError is set if an error is encountered while parsing the headers + headerError string + ) + + if initialHeader { + httpStatusErr = "malformed header: missing HTTP status" + } + + for _, hf := range frame.Fields { + switch hf.Name { + case "content-type": + if _, validContentType := grpcutil.ContentSubtype(hf.Value); !validContentType { + contentTypeErr = fmt.Sprintf("transport: received unexpected content-type %q", hf.Value) + break + } + contentTypeErr = "" + mdata[hf.Name] = append(mdata[hf.Name], hf.Value) + isGRPC = true + case "grpc-encoding": + recvCompress = hf.Value + case "grpc-status": + code, err := strconv.ParseInt(hf.Value, 10, 32) + if err != nil { + se := status.New(codes.Internal, fmt.Sprintf("transport: malformed grpc-status: %v", err)) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + rawStatusCode = codes.Code(uint32(code)) + case "grpc-message": + grpcMessage = decodeGrpcMessage(hf.Value) + case ":status": + if hf.Value == "200" { + httpStatusErr = "" + statusCode := 200 + httpStatusCode = &statusCode + break + } + + c, err := strconv.ParseInt(hf.Value, 10, 32) + if err != nil { + se := status.New(codes.Internal, fmt.Sprintf("transport: malformed http-status: %v", err)) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + statusCode := int(c) + httpStatusCode = &statusCode + + httpStatusErr = fmt.Sprintf( + "unexpected HTTP status code received from server: %d (%s)", + statusCode, + http.StatusText(statusCode), + ) + default: + if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) { + break + } + v, err := decodeMetadataHeader(hf.Name, hf.Value) + if err != nil { + headerError = fmt.Sprintf("transport: malformed %s: %v", hf.Name, err) + logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err) + break + } + mdata[hf.Name] = append(mdata[hf.Name], v) + } + } + + if !isGRPC || httpStatusErr != "" { + var code = codes.Internal // when header does not include HTTP status, return INTERNAL + + if httpStatusCode != nil { + var ok bool + code, ok = HTTPStatusConvTab[*httpStatusCode] + if !ok { + code = codes.Unknown + } + } + var errs []string + if httpStatusErr != "" { + errs = append(errs, httpStatusErr) + } + if contentTypeErr != "" { + errs = append(errs, contentTypeErr) + } + // Verify the HTTP response is a 200. + se := status.New(code, strings.Join(errs, "; ")) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + + if headerError != "" { + se := status.New(codes.Internal, headerError) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + + // For headers, set them in s.header and close headerChan. For trailers or + // trailers-only, closeStream will set the trailers and close headerChan as + // needed. 
+ if !endStream { + // If headerChan hasn't been closed yet (expected, given we checked it + // above, but something else could have potentially closed the whole + // stream). + if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { + s.headerValid = true + // These values can be set without any synchronization because + // stream goroutine will read it only after seeing a closed + // headerChan which we'll close after setting this. + s.recvCompress = recvCompress + if len(mdata) > 0 { + s.header = mdata + } + close(s.headerChan) + } + } + + for _, sh := range t.statsHandlers { + if !endStream { + inHeader := &stats.InHeader{ + Client: true, + WireLength: int(frame.Header().Length), + Header: metadata.MD(mdata).Copy(), + Compression: s.recvCompress, + } + sh.HandleRPC(s.ctx, inHeader) + } else { + inTrailer := &stats.InTrailer{ + Client: true, + WireLength: int(frame.Header().Length), + Trailer: metadata.MD(mdata).Copy(), + } + sh.HandleRPC(s.ctx, inTrailer) + } + } + + if !endStream { + return + } + + status := istatus.NewWithProto(rawStatusCode, grpcMessage, mdata[grpcStatusDetailsBinHeader]) + + // If client received END_STREAM from server while stream was still active, + // send RST_STREAM. + rstStream := s.getState() == streamActive + t.closeStream(s, io.EOF, rstStream, http2.ErrCodeNo, status, mdata, true) +} + +// readServerPreface reads and handles the initial settings frame from the +// server. +func (t *http2Client) readServerPreface() error { + frame, err := t.framer.fr.ReadFrame() + if err != nil { + return connectionErrorf(true, err, "error reading server preface: %v", err) + } + sf, ok := frame.(*http2.SettingsFrame) + if !ok { + return connectionErrorf(true, nil, "initial http2 frame from server is not a settings frame: %T", frame) + } + t.handleSettings(sf, true) + return nil +} + +// reader verifies the server preface and reads all subsequent data from +// network connection. If the server preface is not read successfully, an +// error is pushed to errCh; otherwise errCh is closed with no error. +func (t *http2Client) reader(errCh chan<- error) { + defer close(t.readerDone) + + if err := t.readServerPreface(); err != nil { + errCh <- err + return + } + close(errCh) + if t.keepaliveEnabled { + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + } + + // loop to keep reading incoming messages on this transport. + for { + t.controlBuf.throttle() + frame, err := t.framer.fr.ReadFrame() + if t.keepaliveEnabled { + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + } + if err != nil { + // Abort an active stream if the http2.Framer returns a + // http2.StreamError. This can happen only if the server's response + // is malformed http2. + if se, ok := err.(http2.StreamError); ok { + t.mu.Lock() + s := t.activeStreams[se.StreamID] + t.mu.Unlock() + if s != nil { + // use error detail to provide better err message + code := http2ErrConvTab[se.Code] + errorDetail := t.framer.fr.ErrorDetail() + var msg string + if errorDetail != nil { + msg = errorDetail.Error() + } else { + msg = "received invalid frame" + } + t.closeStream(s, status.Error(code, msg), true, http2.ErrCodeProtocol, status.New(code, msg), nil, false) + } + continue + } else { + // Transport error. 
+ t.Close(connectionErrorf(true, err, "error reading from server: %v", err)) + return + } + } + switch frame := frame.(type) { + case *http2.MetaHeadersFrame: + t.operateHeaders(frame) + case *http2.DataFrame: + t.handleData(frame) + case *http2.RSTStreamFrame: + t.handleRSTStream(frame) + case *http2.SettingsFrame: + t.handleSettings(frame, false) + case *http2.PingFrame: + t.handlePing(frame) + case *http2.GoAwayFrame: + t.handleGoAway(frame) + case *http2.WindowUpdateFrame: + t.handleWindowUpdate(frame) + default: + if logger.V(logLevel) { + logger.Errorf("transport: http2Client.reader got unhandled frame type %v.", frame) + } + } + } +} + +func minTime(a, b time.Duration) time.Duration { + if a < b { + return a + } + return b +} + +// keepalive running in a separate goroutine makes sure the connection is alive by sending pings. +func (t *http2Client) keepalive() { + p := &ping{data: [8]byte{}} + // True iff a ping has been sent, and no data has been received since then. + outstandingPing := false + // Amount of time remaining before which we should receive an ACK for the + // last sent ping. + timeoutLeft := time.Duration(0) + // Records the last value of t.lastRead before we go block on the timer. + // This is required to check for read activity since then. + prevNano := time.Now().UnixNano() + timer := time.NewTimer(t.kp.Time) + for { + select { + case <-timer.C: + lastRead := atomic.LoadInt64(&t.lastRead) + if lastRead > prevNano { + // There has been read activity since the last time we were here. + outstandingPing = false + // Next timer should fire at kp.Time seconds from lastRead time. + timer.Reset(time.Duration(lastRead) + t.kp.Time - time.Duration(time.Now().UnixNano())) + prevNano = lastRead + continue + } + if outstandingPing && timeoutLeft <= 0 { + t.Close(connectionErrorf(true, nil, "keepalive ping failed to receive ACK within timeout")) + return + } + t.mu.Lock() + if t.state == closing { + // If the transport is closing, we should exit from the + // keepalive goroutine here. If not, we could have a race + // between the call to Signal() from Close() and the call to + // Wait() here, whereby the keepalive goroutine ends up + // blocking on the condition variable which will never be + // signalled again. + t.mu.Unlock() + return + } + if len(t.activeStreams) < 1 && !t.kp.PermitWithoutStream { + // If a ping was sent out previously (because there were active + // streams at that point) which wasn't acked and its timeout + // hadn't fired, but we got here and are about to go dormant, + // we should make sure that we unconditionally send a ping once + // we awaken. + outstandingPing = false + t.kpDormant = true + t.kpDormancyCond.Wait() + } + t.kpDormant = false + t.mu.Unlock() + + // We get here either because we were dormant and a new stream was + // created which unblocked the Wait() call, or because the + // keepalive timer expired. In both cases, we need to send a ping. + if !outstandingPing { + if channelz.IsOn() { + atomic.AddInt64(&t.czData.kpCount, 1) + } + t.controlBuf.put(p) + timeoutLeft = t.kp.Timeout + outstandingPing = true + } + // The amount of time to sleep here is the minimum of kp.Time and + // timeoutLeft. This will ensure that we wait only for kp.Time + // before sending out the next ping (for cases where the ping is + // acked). 
+ sleepDuration := minTime(t.kp.Time, timeoutLeft) + timeoutLeft -= sleepDuration + timer.Reset(sleepDuration) + case <-t.ctx.Done(): + if !timer.Stop() { + <-timer.C + } + return + } + } +} + +func (t *http2Client) Error() <-chan struct{} { + return t.ctx.Done() +} + +func (t *http2Client) GoAway() <-chan struct{} { + return t.goAway +} + +func (t *http2Client) ChannelzMetric() *channelz.SocketInternalMetric { + s := channelz.SocketInternalMetric{ + StreamsStarted: atomic.LoadInt64(&t.czData.streamsStarted), + StreamsSucceeded: atomic.LoadInt64(&t.czData.streamsSucceeded), + StreamsFailed: atomic.LoadInt64(&t.czData.streamsFailed), + MessagesSent: atomic.LoadInt64(&t.czData.msgSent), + MessagesReceived: atomic.LoadInt64(&t.czData.msgRecv), + KeepAlivesSent: atomic.LoadInt64(&t.czData.kpCount), + LastLocalStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)), + LastMessageSentTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)), + LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)), + LocalFlowControlWindow: int64(t.fc.getSize()), + SocketOptions: channelz.GetSocketOption(t.conn), + LocalAddr: t.localAddr, + RemoteAddr: t.remoteAddr, + // RemoteName : + } + if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok { + s.Security = au.GetSecurityValue() + } + s.RemoteFlowControlWindow = t.getOutFlowWindow() + return &s +} + +func (t *http2Client) RemoteAddr() net.Addr { return t.remoteAddr } + +func (t *http2Client) IncrMsgSent() { + atomic.AddInt64(&t.czData.msgSent, 1) + atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano()) +} + +func (t *http2Client) IncrMsgRecv() { + atomic.AddInt64(&t.czData.msgRecv, 1) + atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano()) +} + +func (t *http2Client) getOutFlowWindow() int64 { + resp := make(chan uint32, 1) + timer := time.NewTimer(time.Second) + defer timer.Stop() + t.controlBuf.put(&outFlowControlSizeRequest{resp}) + select { + case sz := <-resp: + return int64(sz) + case <-t.ctxDone: + return -1 + case <-timer.C: + return -2 + } +} + +func (t *http2Client) stateForTesting() transportState { + t.mu.Lock() + defer t.mu.Unlock() + return t.state +} diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go new file mode 100644 index 00000000..f6bac0e8 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -0,0 +1,1446 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package transport + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "math" + "net" + "net/http" + "strconv" + "sync" + "sync/atomic" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/internal/syscall" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + "google.golang.org/grpc/tap" +) + +var ( + // ErrIllegalHeaderWrite indicates that setting header is illegal because of + // the stream's state. + ErrIllegalHeaderWrite = status.Error(codes.Internal, "transport: SendHeader called multiple times") + // ErrHeaderListSizeLimitViolation indicates that the header list size is larger + // than the limit set by peer. + ErrHeaderListSizeLimitViolation = status.Error(codes.Internal, "transport: trying to send header list size larger than the limit set by peer") +) + +// serverConnectionCounter counts the number of connections a server has seen +// (equal to the number of http2Servers created). Must be accessed atomically. +var serverConnectionCounter uint64 + +// http2Server implements the ServerTransport interface with HTTP2. +type http2Server struct { + lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. + done chan struct{} + conn net.Conn + loopy *loopyWriter + readerDone chan struct{} // sync point to enable testing. + loopyWriterDone chan struct{} + peer peer.Peer + inTapHandle tap.ServerInHandle + framer *framer + // The max number of concurrent streams. + maxStreams uint32 + // controlBuf delivers all the control related tasks (e.g., window + // updates, reset streams, and various settings) to the controller. + controlBuf *controlBuffer + fc *trInFlow + stats []stats.Handler + // Keepalive and max-age parameters for the server. + kp keepalive.ServerParameters + // Keepalive enforcement policy. + kep keepalive.EnforcementPolicy + // The time instance last ping was received. + lastPingAt time.Time + // Number of times the client has violated keepalive ping policy so far. + pingStrikes uint8 + // Flag to signify that number of ping strikes should be reset to 0. + // This is set whenever data or header frames are sent. + // 1 means yes. + resetPingStrikes uint32 // Accessed atomically. + initialWindowSize int32 + bdpEst *bdpEstimator + maxSendHeaderListSize *uint32 + + mu sync.Mutex // guard the following + + // drainEvent is initialized when Drain() is called the first time. After + // which the server writes out the first GoAway(with ID 2^31-1) frame. Then + // an independent goroutine will be launched to later send the second + // GoAway. During this time we don't want to write another first GoAway(with + // ID 2^31 -1) frame. Thus call to Drain() will be a no-op if drainEvent is + // already initialized since draining is already underway. + drainEvent *grpcsync.Event + state transportState + activeStreams map[uint32]*Stream + // idle is the time instant when the connection went idle. + // This is either the beginning of the connection or when the number of + // RPCs go down to 0. 
+ // When the connection is busy, this value is set to 0. + idle time.Time + + // Fields below are for channelz metric collection. + channelzID *channelz.Identifier + czData *channelzData + bufferPool *bufferPool + + connectionID uint64 + + // maxStreamMu guards the maximum stream ID + // This lock may not be taken if mu is already held. + maxStreamMu sync.Mutex + maxStreamID uint32 // max stream ID ever seen + + logger *grpclog.PrefixLogger +} + +// NewServerTransport creates a http2 transport with conn and configuration +// options from config. +// +// It returns a non-nil transport and a nil error on success. On failure, it +// returns a nil transport and a non-nil error. For a special case where the +// underlying conn gets closed before the client preface could be read, it +// returns a nil transport and a nil error. +func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) { + var authInfo credentials.AuthInfo + rawConn := conn + if config.Credentials != nil { + var err error + conn, authInfo, err = config.Credentials.ServerHandshake(rawConn) + if err != nil { + // ErrConnDispatched means that the connection was dispatched away + // from gRPC; those connections should be left open. io.EOF means + // the connection was closed before handshaking completed, which can + // happen naturally from probers. Return these errors directly. + if err == credentials.ErrConnDispatched || err == io.EOF { + return nil, err + } + return nil, connectionErrorf(false, err, "ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) + } + } + writeBufSize := config.WriteBufferSize + readBufSize := config.ReadBufferSize + maxHeaderListSize := defaultServerMaxHeaderListSize + if config.MaxHeaderListSize != nil { + maxHeaderListSize = *config.MaxHeaderListSize + } + framer := newFramer(conn, writeBufSize, readBufSize, config.SharedWriteBuffer, maxHeaderListSize) + // Send initial settings as connection preface to client. + isettings := []http2.Setting{{ + ID: http2.SettingMaxFrameSize, + Val: http2MaxFrameLen, + }} + if config.MaxStreams != math.MaxUint32 { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingMaxConcurrentStreams, + Val: config.MaxStreams, + }) + } + dynamicWindow := true + iwz := int32(initialWindowSize) + if config.InitialWindowSize >= defaultWindowSize { + iwz = config.InitialWindowSize + dynamicWindow = false + } + icwz := int32(initialWindowSize) + if config.InitialConnWindowSize >= defaultWindowSize { + icwz = config.InitialConnWindowSize + dynamicWindow = false + } + if iwz != defaultWindowSize { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingInitialWindowSize, + Val: uint32(iwz)}) + } + if config.MaxHeaderListSize != nil { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingMaxHeaderListSize, + Val: *config.MaxHeaderListSize, + }) + } + if config.HeaderTableSize != nil { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingHeaderTableSize, + Val: *config.HeaderTableSize, + }) + } + if err := framer.fr.WriteSettings(isettings...); err != nil { + return nil, connectionErrorf(false, err, "transport: %v", err) + } + // Adjust the connection flow control window if needed. 
+ if delta := uint32(icwz - defaultWindowSize); delta > 0 { + if err := framer.fr.WriteWindowUpdate(0, delta); err != nil { + return nil, connectionErrorf(false, err, "transport: %v", err) + } + } + kp := config.KeepaliveParams + if kp.MaxConnectionIdle == 0 { + kp.MaxConnectionIdle = defaultMaxConnectionIdle + } + if kp.MaxConnectionAge == 0 { + kp.MaxConnectionAge = defaultMaxConnectionAge + } + // Add a jitter to MaxConnectionAge. + kp.MaxConnectionAge += getJitter(kp.MaxConnectionAge) + if kp.MaxConnectionAgeGrace == 0 { + kp.MaxConnectionAgeGrace = defaultMaxConnectionAgeGrace + } + if kp.Time == 0 { + kp.Time = defaultServerKeepaliveTime + } + if kp.Timeout == 0 { + kp.Timeout = defaultServerKeepaliveTimeout + } + if kp.Time != infinity { + if err = syscall.SetTCPUserTimeout(rawConn, kp.Timeout); err != nil { + return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) + } + } + kep := config.KeepalivePolicy + if kep.MinTime == 0 { + kep.MinTime = defaultKeepalivePolicyMinTime + } + + done := make(chan struct{}) + peer := peer.Peer{ + Addr: conn.RemoteAddr(), + LocalAddr: conn.LocalAddr(), + AuthInfo: authInfo, + } + t := &http2Server{ + done: done, + conn: conn, + peer: peer, + framer: framer, + readerDone: make(chan struct{}), + loopyWriterDone: make(chan struct{}), + maxStreams: config.MaxStreams, + inTapHandle: config.InTapHandle, + fc: &trInFlow{limit: uint32(icwz)}, + state: reachable, + activeStreams: make(map[uint32]*Stream), + stats: config.StatsHandlers, + kp: kp, + idle: time.Now(), + kep: kep, + initialWindowSize: iwz, + czData: new(channelzData), + bufferPool: newBufferPool(), + } + t.logger = prefixLoggerForServerTransport(t) + + t.controlBuf = newControlBuffer(t.done) + if dynamicWindow { + t.bdpEst = &bdpEstimator{ + bdp: initialWindowSize, + updateFlowControl: t.updateFlowControl, + } + } + t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.peer.Addr, t.peer.LocalAddr)) + if err != nil { + return nil, err + } + + t.connectionID = atomic.AddUint64(&serverConnectionCounter, 1) + t.framer.writer.Flush() + + defer func() { + if err != nil { + t.Close(err) + } + }() + + // Check the validity of client preface. + preface := make([]byte, len(clientPreface)) + if _, err := io.ReadFull(t.conn, preface); err != nil { + // In deployments where a gRPC server runs behind a cloud load balancer + // which performs regular TCP level health checks, the connection is + // closed immediately by the latter. Returning io.EOF here allows the + // grpc server implementation to recognize this scenario and suppress + // logging to reduce spam. 
+ if err == io.EOF { + return nil, io.EOF + } + return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to receive the preface from client: %v", err) + } + if !bytes.Equal(preface, clientPreface) { + return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams received bogus greeting from client: %q", preface) + } + + frame, err := t.framer.fr.ReadFrame() + if err == io.EOF || err == io.ErrUnexpectedEOF { + return nil, err + } + if err != nil { + return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to read initial settings frame: %v", err) + } + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + sf, ok := frame.(*http2.SettingsFrame) + if !ok { + return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams saw invalid preface type %T from client", frame) + } + t.handleSettings(sf) + + go func() { + t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger) + t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler + t.loopy.run() + close(t.loopyWriterDone) + }() + go t.keepalive() + return t, nil +} + +// operateHeaders takes action on the decoded headers. Returns an error if fatal +// error encountered and transport needs to close, otherwise returns nil. +func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeadersFrame, handle func(*Stream)) error { + // Acquire max stream ID lock for entire duration + t.maxStreamMu.Lock() + defer t.maxStreamMu.Unlock() + + streamID := frame.Header().StreamID + + // frame.Truncated is set to true when framer detects that the current header + // list size hits MaxHeaderListSize limit. + if frame.Truncated { + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: http2.ErrCodeFrameSize, + onWrite: func() {}, + }) + return nil + } + + if streamID%2 != 1 || streamID <= t.maxStreamID { + // illegal gRPC stream id. + return fmt.Errorf("received an illegal stream id: %v. 
headers frame: %+v", streamID, frame) + } + t.maxStreamID = streamID + + buf := newRecvBuffer() + s := &Stream{ + id: streamID, + st: t, + buf: buf, + fc: &inFlow{limit: uint32(t.initialWindowSize)}, + headerWireLength: int(frame.Header().Length), + } + var ( + // if false, content-type was missing or invalid + isGRPC = false + contentType = "" + mdata = make(metadata.MD, len(frame.Fields)) + httpMethod string + // these are set if an error is encountered while parsing the headers + protocolError bool + headerError *status.Status + + timeoutSet bool + timeout time.Duration + ) + + for _, hf := range frame.Fields { + switch hf.Name { + case "content-type": + contentSubtype, validContentType := grpcutil.ContentSubtype(hf.Value) + if !validContentType { + contentType = hf.Value + break + } + mdata[hf.Name] = append(mdata[hf.Name], hf.Value) + s.contentSubtype = contentSubtype + isGRPC = true + + case "grpc-accept-encoding": + mdata[hf.Name] = append(mdata[hf.Name], hf.Value) + if hf.Value == "" { + continue + } + compressors := hf.Value + if s.clientAdvertisedCompressors != "" { + compressors = s.clientAdvertisedCompressors + "," + compressors + } + s.clientAdvertisedCompressors = compressors + case "grpc-encoding": + s.recvCompress = hf.Value + case ":method": + httpMethod = hf.Value + case ":path": + s.method = hf.Value + case "grpc-timeout": + timeoutSet = true + var err error + if timeout, err = decodeTimeout(hf.Value); err != nil { + headerError = status.Newf(codes.Internal, "malformed grpc-timeout: %v", err) + } + // "Transports must consider requests containing the Connection header + // as malformed." - A41 + case "connection": + if t.logger.V(logLevel) { + t.logger.Infof("Received a HEADERS frame with a :connection header which makes the request malformed, as per the HTTP/2 spec") + } + protocolError = true + default: + if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) { + break + } + v, err := decodeMetadataHeader(hf.Name, hf.Value) + if err != nil { + headerError = status.Newf(codes.Internal, "malformed binary metadata %q in header %q: %v", hf.Value, hf.Name, err) + t.logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err) + break + } + mdata[hf.Name] = append(mdata[hf.Name], v) + } + } + + // "If multiple Host headers or multiple :authority headers are present, the + // request must be rejected with an HTTP status code 400 as required by Host + // validation in RFC 7230 §5.4, gRPC status code INTERNAL, or RST_STREAM + // with HTTP/2 error code PROTOCOL_ERROR." - A41. Since this is a HTTP/2 + // error, this takes precedence over a client not speaking gRPC. 
+ if len(mdata[":authority"]) > 1 || len(mdata["host"]) > 1 { + errMsg := fmt.Sprintf("num values of :authority: %v, num values of host: %v, both must only have 1 value as per HTTP/2 spec", len(mdata[":authority"]), len(mdata["host"])) + if t.logger.V(logLevel) { + t.logger.Infof("Aborting the stream early: %v", errMsg) + } + t.controlBuf.put(&earlyAbortStream{ + httpStatus: http.StatusBadRequest, + streamID: streamID, + contentSubtype: s.contentSubtype, + status: status.New(codes.Internal, errMsg), + rst: !frame.StreamEnded(), + }) + return nil + } + + if protocolError { + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: http2.ErrCodeProtocol, + onWrite: func() {}, + }) + return nil + } + if !isGRPC { + t.controlBuf.put(&earlyAbortStream{ + httpStatus: http.StatusUnsupportedMediaType, + streamID: streamID, + contentSubtype: s.contentSubtype, + status: status.Newf(codes.InvalidArgument, "invalid gRPC request content-type %q", contentType), + rst: !frame.StreamEnded(), + }) + return nil + } + if headerError != nil { + t.controlBuf.put(&earlyAbortStream{ + httpStatus: http.StatusBadRequest, + streamID: streamID, + contentSubtype: s.contentSubtype, + status: headerError, + rst: !frame.StreamEnded(), + }) + return nil + } + + // "If :authority is missing, Host must be renamed to :authority." - A41 + if len(mdata[":authority"]) == 0 { + // No-op if host isn't present, no eventual :authority header is a valid + // RPC. + if host, ok := mdata["host"]; ok { + mdata[":authority"] = host + delete(mdata, "host") + } + } else { + // "If :authority is present, Host must be discarded" - A41 + delete(mdata, "host") + } + + if frame.StreamEnded() { + // s is just created by the caller. No lock needed. + s.state = streamReadDone + } + if timeoutSet { + s.ctx, s.cancel = context.WithTimeout(ctx, timeout) + } else { + s.ctx, s.cancel = context.WithCancel(ctx) + } + + // Attach the received metadata to the context. 
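Aside (illustrative, not part of the vendored file): the header fields parsed above are collected into a metadata.MD and, in the code that follows, attached to the stream's context. A minimal sketch of where that metadata surfaces for application code, using the public metadata and interceptor APIs; the interceptor name and listen address are arbitrary examples.

package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

// mdLogger is a hypothetical unary interceptor that reads the metadata the
// transport attached to the incoming context.
func mdLogger(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	if md, ok := metadata.FromIncomingContext(ctx); ok {
		log.Printf("%s called with metadata: %v", info.FullMethod, md)
	}
	return handler(ctx, req)
}

func main() {
	lis, err := net.Listen("tcp", "localhost:50051")
	if err != nil {
		log.Fatal(err)
	}
	srv := grpc.NewServer(grpc.UnaryInterceptor(mdLogger))
	// Service registration omitted; the interceptor alone shows the flow.
	if err := srv.Serve(lis); err != nil {
		log.Fatal(err)
	}
}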
+ if len(mdata) > 0 { + s.ctx = metadata.NewIncomingContext(s.ctx, mdata) + if statsTags := mdata["grpc-tags-bin"]; len(statsTags) > 0 { + s.ctx = stats.SetIncomingTags(s.ctx, []byte(statsTags[len(statsTags)-1])) + } + if statsTrace := mdata["grpc-trace-bin"]; len(statsTrace) > 0 { + s.ctx = stats.SetIncomingTrace(s.ctx, []byte(statsTrace[len(statsTrace)-1])) + } + } + t.mu.Lock() + if t.state != reachable { + t.mu.Unlock() + s.cancel() + return nil + } + if uint32(len(t.activeStreams)) >= t.maxStreams { + t.mu.Unlock() + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: http2.ErrCodeRefusedStream, + onWrite: func() {}, + }) + s.cancel() + return nil + } + if httpMethod != http.MethodPost { + t.mu.Unlock() + errMsg := fmt.Sprintf("Received a HEADERS frame with :method %q which should be POST", httpMethod) + if t.logger.V(logLevel) { + t.logger.Infof("Aborting the stream early: %v", errMsg) + } + t.controlBuf.put(&earlyAbortStream{ + httpStatus: 405, + streamID: streamID, + contentSubtype: s.contentSubtype, + status: status.New(codes.Internal, errMsg), + rst: !frame.StreamEnded(), + }) + s.cancel() + return nil + } + if t.inTapHandle != nil { + var err error + if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method, Header: mdata}); err != nil { + t.mu.Unlock() + if t.logger.V(logLevel) { + t.logger.Infof("Aborting the stream early due to InTapHandle failure: %v", err) + } + stat, ok := status.FromError(err) + if !ok { + stat = status.New(codes.PermissionDenied, err.Error()) + } + t.controlBuf.put(&earlyAbortStream{ + httpStatus: 200, + streamID: s.id, + contentSubtype: s.contentSubtype, + status: stat, + rst: !frame.StreamEnded(), + }) + return nil + } + } + t.activeStreams[streamID] = s + if len(t.activeStreams) == 1 { + t.idle = time.Time{} + } + t.mu.Unlock() + if channelz.IsOn() { + atomic.AddInt64(&t.czData.streamsStarted, 1) + atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano()) + } + s.requestRead = func(n int) { + t.adjustWindow(s, uint32(n)) + } + s.ctxDone = s.ctx.Done() + s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone) + s.trReader = &transportReader{ + reader: &recvBufferReader{ + ctx: s.ctx, + ctxDone: s.ctxDone, + recv: s.buf, + freeBuffer: t.bufferPool.put, + }, + windowHandler: func(n int) { + t.updateWindow(s, uint32(n)) + }, + } + // Register the stream with loopy. + t.controlBuf.put(®isterStream{ + streamID: s.id, + wq: s.wq, + }) + handle(s) + return nil +} + +// HandleStreams receives incoming streams using the given handler. This is +// typically run in a separate goroutine. +// traceCtx attaches trace to ctx and returns the new context. 
+func (t *http2Server) HandleStreams(ctx context.Context, handle func(*Stream)) { + defer func() { + <-t.loopyWriterDone + close(t.readerDone) + }() + for { + t.controlBuf.throttle() + frame, err := t.framer.fr.ReadFrame() + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + if err != nil { + if se, ok := err.(http2.StreamError); ok { + if t.logger.V(logLevel) { + t.logger.Warningf("Encountered http2.StreamError: %v", se) + } + t.mu.Lock() + s := t.activeStreams[se.StreamID] + t.mu.Unlock() + if s != nil { + t.closeStream(s, true, se.Code, false) + } else { + t.controlBuf.put(&cleanupStream{ + streamID: se.StreamID, + rst: true, + rstCode: se.Code, + onWrite: func() {}, + }) + } + continue + } + if err == io.EOF || err == io.ErrUnexpectedEOF { + t.Close(err) + return + } + t.Close(err) + return + } + switch frame := frame.(type) { + case *http2.MetaHeadersFrame: + if err := t.operateHeaders(ctx, frame, handle); err != nil { + t.Close(err) + break + } + case *http2.DataFrame: + t.handleData(frame) + case *http2.RSTStreamFrame: + t.handleRSTStream(frame) + case *http2.SettingsFrame: + t.handleSettings(frame) + case *http2.PingFrame: + t.handlePing(frame) + case *http2.WindowUpdateFrame: + t.handleWindowUpdate(frame) + case *http2.GoAwayFrame: + // TODO: Handle GoAway from the client appropriately. + default: + if t.logger.V(logLevel) { + t.logger.Infof("Received unsupported frame type %T", frame) + } + } + } +} + +func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) { + t.mu.Lock() + defer t.mu.Unlock() + if t.activeStreams == nil { + // The transport is closing. + return nil, false + } + s, ok := t.activeStreams[f.Header().StreamID] + if !ok { + // The stream is already done. + return nil, false + } + return s, true +} + +// adjustWindow sends out extra window update over the initial window size +// of stream if the application is requesting data larger in size than +// the window. +func (t *http2Server) adjustWindow(s *Stream, n uint32) { + if w := s.fc.maybeAdjust(n); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) + } + +} + +// updateWindow adjusts the inbound quota for the stream and the transport. +// Window updates will deliver to the controller for sending when +// the cumulative quota exceeds the corresponding threshold. +func (t *http2Server) updateWindow(s *Stream, n uint32) { + if w := s.fc.onRead(n); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, + increment: w, + }) + } +} + +// updateFlowControl updates the incoming flow control windows +// for the transport and the stream based on the current bdp +// estimation. +func (t *http2Server) updateFlowControl(n uint32) { + t.mu.Lock() + for _, s := range t.activeStreams { + s.fc.newLimit(n) + } + t.initialWindowSize = int32(n) + t.mu.Unlock() + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: t.fc.newLimit(n), + }) + t.controlBuf.put(&outgoingSettings{ + ss: []http2.Setting{ + { + ID: http2.SettingInitialWindowSize, + Val: n, + }, + }, + }) + +} + +func (t *http2Server) handleData(f *http2.DataFrame) { + size := f.Header().Length + var sendBDPPing bool + if t.bdpEst != nil { + sendBDPPing = t.bdpEst.add(size) + } + // Decouple connection's flow control from application's read. + // An update on connection's flow control should not depend on + // whether user application has read the data or not. Such a + // restriction is already imposed on the stream's flow control, + // and therefore the sender will be blocked anyways. 
+ // Decoupling the connection flow control will prevent other + // active(fast) streams from starving in presence of slow or + // inactive streams. + if w := t.fc.onData(size); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) + } + if sendBDPPing { + // Avoid excessive ping detection (e.g. in an L7 proxy) + // by sending a window update prior to the BDP ping. + if w := t.fc.reset(); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) + } + t.controlBuf.put(bdpPing) + } + // Select the right stream to dispatch. + s, ok := t.getStream(f) + if !ok { + return + } + if s.getState() == streamReadDone { + t.closeStream(s, true, http2.ErrCodeStreamClosed, false) + return + } + if size > 0 { + if err := s.fc.onData(size); err != nil { + t.closeStream(s, true, http2.ErrCodeFlowControl, false) + return + } + if f.Header().Flags.Has(http2.FlagDataPadded) { + if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{s.id, w}) + } + } + // TODO(bradfitz, zhaoq): A copy is required here because there is no + // guarantee f.Data() is consumed before the arrival of next frame. + // Can this copy be eliminated? + if len(f.Data()) > 0 { + buffer := t.bufferPool.get() + buffer.Reset() + buffer.Write(f.Data()) + s.write(recvMsg{buffer: buffer}) + } + } + if f.StreamEnded() { + // Received the end of stream from the client. + s.compareAndSwapState(streamActive, streamReadDone) + s.write(recvMsg{err: io.EOF}) + } +} + +func (t *http2Server) handleRSTStream(f *http2.RSTStreamFrame) { + // If the stream is not deleted from the transport's active streams map, then do a regular close stream. + if s, ok := t.getStream(f); ok { + t.closeStream(s, false, 0, false) + return + } + // If the stream is already deleted from the active streams map, then put a cleanupStream item into controlbuf to delete the stream from loopy writer's established streams map. + t.controlBuf.put(&cleanupStream{ + streamID: f.Header().StreamID, + rst: false, + rstCode: 0, + onWrite: func() {}, + }) +} + +func (t *http2Server) handleSettings(f *http2.SettingsFrame) { + if f.IsAck() { + return + } + var ss []http2.Setting + var updateFuncs []func() + f.ForeachSetting(func(s http2.Setting) error { + switch s.ID { + case http2.SettingMaxHeaderListSize: + updateFuncs = append(updateFuncs, func() { + t.maxSendHeaderListSize = new(uint32) + *t.maxSendHeaderListSize = s.Val + }) + default: + ss = append(ss, s) + } + return nil + }) + t.controlBuf.executeAndPut(func(any) bool { + for _, f := range updateFuncs { + f() + } + return true + }, &incomingSettings{ + ss: ss, + }) +} + +const ( + maxPingStrikes = 2 + defaultPingTimeout = 2 * time.Hour +) + +func (t *http2Server) handlePing(f *http2.PingFrame) { + if f.IsAck() { + if f.Data == goAwayPing.data && t.drainEvent != nil { + t.drainEvent.Fire() + return + } + // Maybe it's a BDP ping. + if t.bdpEst != nil { + t.bdpEst.calculate(f.Data) + } + return + } + pingAck := &ping{ack: true} + copy(pingAck.data[:], f.Data[:]) + t.controlBuf.put(pingAck) + + now := time.Now() + defer func() { + t.lastPingAt = now + }() + // A reset ping strikes means that we don't need to check for policy + // violation for this ping and the pingStrikes counter should be set + // to 0. 
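Aside (illustrative, not part of the vendored file; the strike accounting itself continues right below): this is the server side of the keepalive enforcement contract, where clients that ping more often than kep.MinTime, or ping without active streams when that is not permitted, eventually receive a GOAWAY with "too_many_pings". A sketch of client-side settings that stay within such a policy; the address and durations are examples only.

package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/keepalive"
)

func main() {
	// Keep the client ping interval at or above the server's
	// EnforcementPolicy.MinTime to avoid accumulating ping strikes.
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithKeepaliveParams(keepalive.ClientParameters{
			Time:                5 * time.Minute,  // ping after 5m of inactivity
			Timeout:             20 * time.Second, // wait 20s for the ping ack
			PermitWithoutStream: false,            // no pings while idle
		}),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}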
+ if atomic.CompareAndSwapUint32(&t.resetPingStrikes, 1, 0) { + t.pingStrikes = 0 + return + } + t.mu.Lock() + ns := len(t.activeStreams) + t.mu.Unlock() + if ns < 1 && !t.kep.PermitWithoutStream { + // Keepalive shouldn't be active thus, this new ping should + // have come after at least defaultPingTimeout. + if t.lastPingAt.Add(defaultPingTimeout).After(now) { + t.pingStrikes++ + } + } else { + // Check if keepalive policy is respected. + if t.lastPingAt.Add(t.kep.MinTime).After(now) { + t.pingStrikes++ + } + } + + if t.pingStrikes > maxPingStrikes { + // Send goaway and close the connection. + t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: errors.New("got too many pings from the client")}) + } +} + +func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) { + t.controlBuf.put(&incomingWindowUpdate{ + streamID: f.Header().StreamID, + increment: f.Increment, + }) +} + +func appendHeaderFieldsFromMD(headerFields []hpack.HeaderField, md metadata.MD) []hpack.HeaderField { + for k, vv := range md { + if isReservedHeader(k) { + // Clients don't tolerate reading restricted headers after some non restricted ones were sent. + continue + } + for _, v := range vv { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + } + return headerFields +} + +func (t *http2Server) checkForHeaderListSize(it any) bool { + if t.maxSendHeaderListSize == nil { + return true + } + hdrFrame := it.(*headerFrame) + var sz int64 + for _, f := range hdrFrame.hf { + if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) { + if t.logger.V(logLevel) { + t.logger.Infof("Header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize) + } + return false + } + } + return true +} + +func (t *http2Server) streamContextErr(s *Stream) error { + select { + case <-t.done: + return ErrConnClosing + default: + } + return ContextErr(s.ctx.Err()) +} + +// WriteHeader sends the header metadata md back to the client. +func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { + s.hdrMu.Lock() + defer s.hdrMu.Unlock() + if s.getState() == streamDone { + return t.streamContextErr(s) + } + + if s.updateHeaderSent() { + return ErrIllegalHeaderWrite + } + + if md.Len() > 0 { + if s.header.Len() > 0 { + s.header = metadata.Join(s.header, md) + } else { + s.header = md + } + } + if err := t.writeHeaderLocked(s); err != nil { + switch e := err.(type) { + case ConnectionError: + return status.Error(codes.Unavailable, e.Desc) + default: + return status.Convert(err).Err() + } + } + return nil +} + +func (t *http2Server) setResetPingStrikes() { + atomic.StoreUint32(&t.resetPingStrikes, 1) +} + +func (t *http2Server) writeHeaderLocked(s *Stream) error { + // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields + // first and create a slice of that exact size. + headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else. 
+ headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"}) + headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(s.contentSubtype)}) + if s.sendCompress != "" { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress}) + } + headerFields = appendHeaderFieldsFromMD(headerFields, s.header) + success, err := t.controlBuf.executeAndPut(t.checkForHeaderListSize, &headerFrame{ + streamID: s.id, + hf: headerFields, + endStream: false, + onWrite: t.setResetPingStrikes, + }) + if !success { + if err != nil { + return err + } + t.closeStream(s, true, http2.ErrCodeInternal, false) + return ErrHeaderListSizeLimitViolation + } + for _, sh := range t.stats { + // Note: Headers are compressed with hpack after this call returns. + // No WireLength field is set here. + outHeader := &stats.OutHeader{ + Header: s.header.Copy(), + Compression: s.sendCompress, + } + sh.HandleRPC(s.Context(), outHeader) + } + return nil +} + +// WriteStatus sends stream status to the client and terminates the stream. +// There is no further I/O operations being able to perform on this stream. +// TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early +// OK is adopted. +func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { + s.hdrMu.Lock() + defer s.hdrMu.Unlock() + + if s.getState() == streamDone { + return nil + } + + // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields + // first and create a slice of that exact size. + headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else. + if !s.updateHeaderSent() { // No headers have been sent. + if len(s.header) > 0 { // Send a separate header frame. + if err := t.writeHeaderLocked(s); err != nil { + return err + } + } else { // Send a trailer only response. + headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"}) + headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(s.contentSubtype)}) + } + } + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))}) + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())}) + + if p := st.Proto(); p != nil && len(p.Details) > 0 { + // Do not use the user's grpc-status-details-bin (if present) if we are + // even attempting to set our own. + delete(s.trailer, grpcStatusDetailsBinHeader) + stBytes, err := proto.Marshal(p) + if err != nil { + // TODO: return error instead, when callers are able to handle it. + t.logger.Errorf("Failed to marshal rpc status: %s, error: %v", pretty.ToJSON(p), err) + } else { + headerFields = append(headerFields, hpack.HeaderField{Name: grpcStatusDetailsBinHeader, Value: encodeBinHeader(stBytes)}) + } + } + + // Attach the trailer metadata. + headerFields = appendHeaderFieldsFromMD(headerFields, s.trailer) + trailingHeader := &headerFrame{ + streamID: s.id, + hf: headerFields, + endStream: true, + onWrite: t.setResetPingStrikes, + } + + success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader) + if !success { + if err != nil { + return err + } + t.closeStream(s, true, http2.ErrCodeInternal, false) + return ErrHeaderListSizeLimitViolation + } + // Send a RST_STREAM after the trailers if the client has not already half-closed. 
+ rst := s.getState() == streamActive + t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true) + for _, sh := range t.stats { + // Note: The trailer fields are compressed with hpack after this call returns. + // No WireLength field is set here. + sh.HandleRPC(s.Context(), &stats.OutTrailer{ + Trailer: s.trailer.Copy(), + }) + } + return nil +} + +// Write converts the data into HTTP2 data frame and sends it out. Non-nil error +// is returns if it fails (e.g., framing error, transport error). +func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { + if !s.isHeaderSent() { // Headers haven't been written yet. + if err := t.WriteHeader(s, nil); err != nil { + return err + } + } else { + // Writing headers checks for this condition. + if s.getState() == streamDone { + return t.streamContextErr(s) + } + } + df := &dataFrame{ + streamID: s.id, + h: hdr, + d: data, + onEachWrite: t.setResetPingStrikes, + } + if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { + return t.streamContextErr(s) + } + return t.controlBuf.put(df) +} + +// keepalive running in a separate goroutine does the following: +// 1. Gracefully closes an idle connection after a duration of keepalive.MaxConnectionIdle. +// 2. Gracefully closes any connection after a duration of keepalive.MaxConnectionAge. +// 3. Forcibly closes a connection after an additive period of keepalive.MaxConnectionAgeGrace over keepalive.MaxConnectionAge. +// 4. Makes sure a connection is alive by sending pings with a frequency of keepalive.Time and closes a non-responsive connection +// after an additional duration of keepalive.Timeout. +func (t *http2Server) keepalive() { + p := &ping{} + // True iff a ping has been sent, and no data has been received since then. + outstandingPing := false + // Amount of time remaining before which we should receive an ACK for the + // last sent ping. + kpTimeoutLeft := time.Duration(0) + // Records the last value of t.lastRead before we go block on the timer. + // This is required to check for read activity since then. + prevNano := time.Now().UnixNano() + // Initialize the different timers to their default values. + idleTimer := time.NewTimer(t.kp.MaxConnectionIdle) + ageTimer := time.NewTimer(t.kp.MaxConnectionAge) + kpTimer := time.NewTimer(t.kp.Time) + defer func() { + // We need to drain the underlying channel in these timers after a call + // to Stop(), only if we are interested in resetting them. Clearly we + // are not interested in resetting them here. + idleTimer.Stop() + ageTimer.Stop() + kpTimer.Stop() + }() + + for { + select { + case <-idleTimer.C: + t.mu.Lock() + idle := t.idle + if idle.IsZero() { // The connection is non-idle. + t.mu.Unlock() + idleTimer.Reset(t.kp.MaxConnectionIdle) + continue + } + val := t.kp.MaxConnectionIdle - time.Since(idle) + t.mu.Unlock() + if val <= 0 { + // The connection has been idle for a duration of keepalive.MaxConnectionIdle or more. + // Gracefully close the connection. + t.Drain("max_idle") + return + } + idleTimer.Reset(val) + case <-ageTimer.C: + t.Drain("max_age") + ageTimer.Reset(t.kp.MaxConnectionAgeGrace) + select { + case <-ageTimer.C: + // Close the connection after grace period. 
+ if t.logger.V(logLevel) { + t.logger.Infof("Closing server transport due to maximum connection age") + } + t.controlBuf.put(closeConnection{}) + case <-t.done: + } + return + case <-kpTimer.C: + lastRead := atomic.LoadInt64(&t.lastRead) + if lastRead > prevNano { + // There has been read activity since the last time we were + // here. Setup the timer to fire at kp.Time seconds from + // lastRead time and continue. + outstandingPing = false + kpTimer.Reset(time.Duration(lastRead) + t.kp.Time - time.Duration(time.Now().UnixNano())) + prevNano = lastRead + continue + } + if outstandingPing && kpTimeoutLeft <= 0 { + t.Close(fmt.Errorf("keepalive ping not acked within timeout %s", t.kp.Time)) + return + } + if !outstandingPing { + if channelz.IsOn() { + atomic.AddInt64(&t.czData.kpCount, 1) + } + t.controlBuf.put(p) + kpTimeoutLeft = t.kp.Timeout + outstandingPing = true + } + // The amount of time to sleep here is the minimum of kp.Time and + // timeoutLeft. This will ensure that we wait only for kp.Time + // before sending out the next ping (for cases where the ping is + // acked). + sleepDuration := minTime(t.kp.Time, kpTimeoutLeft) + kpTimeoutLeft -= sleepDuration + kpTimer.Reset(sleepDuration) + case <-t.done: + return + } + } +} + +// Close starts shutting down the http2Server transport. +// TODO(zhaoq): Now the destruction is not blocked on any pending streams. This +// could cause some resource issue. Revisit this later. +func (t *http2Server) Close(err error) { + t.mu.Lock() + if t.state == closing { + t.mu.Unlock() + return + } + if t.logger.V(logLevel) { + t.logger.Infof("Closing: %v", err) + } + t.state = closing + streams := t.activeStreams + t.activeStreams = nil + t.mu.Unlock() + t.controlBuf.finish() + close(t.done) + if err := t.conn.Close(); err != nil && t.logger.V(logLevel) { + t.logger.Infof("Error closing underlying net.Conn during Close: %v", err) + } + channelz.RemoveEntry(t.channelzID) + // Cancel all active streams. + for _, s := range streams { + s.cancel() + } +} + +// deleteStream deletes the stream s from transport's active streams. +func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { + + t.mu.Lock() + if _, ok := t.activeStreams[s.id]; ok { + delete(t.activeStreams, s.id) + if len(t.activeStreams) == 0 { + t.idle = time.Now() + } + } + t.mu.Unlock() + + if channelz.IsOn() { + if eosReceived { + atomic.AddInt64(&t.czData.streamsSucceeded, 1) + } else { + atomic.AddInt64(&t.czData.streamsFailed, 1) + } + } +} + +// finishStream closes the stream and puts the trailing headerFrame into controlbuf. +func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) { + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), cancel needs to be + // called to interrupt the potential blocking on other goroutines. + s.cancel() + + oldState := s.swapState(streamDone) + if oldState == streamDone { + // If the stream was already done, return. + return + } + + hdr.cleanup = &cleanupStream{ + streamID: s.id, + rst: rst, + rstCode: rstCode, + onWrite: func() { + t.deleteStream(s, eosReceived) + }, + } + t.controlBuf.put(hdr) +} + +// closeStream clears the footprint of a stream when the stream is not needed any more. 
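Aside (illustrative, not part of the vendored file; closeStream, documented just above, continues below): the kp and kep values driving the keepalive timers come from the application when it constructs the server. A sketch of that configuration, with arbitrary example durations.

package main

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func newServer() *grpc.Server {
	return grpc.NewServer(
		// Feeds http2Server.kp: idle/age limits and the ping interval and
		// timeout used by the keepalive() loop.
		grpc.KeepaliveParams(keepalive.ServerParameters{
			MaxConnectionIdle:     15 * time.Minute,
			MaxConnectionAge:      30 * time.Minute,
			MaxConnectionAgeGrace: 10 * time.Second,
			Time:                  2 * time.Hour,
			Timeout:               20 * time.Second,
		}),
		// Feeds http2Server.kep: the ping-strike policy enforced in handlePing.
		grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
			MinTime:             5 * time.Minute,
			PermitWithoutStream: false,
		}),
	)
}

func main() { _ = newServer() }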
+func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) { + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), cancel needs to be + // called to interrupt the potential blocking on other goroutines. + s.cancel() + + s.swapState(streamDone) + t.deleteStream(s, eosReceived) + + t.controlBuf.put(&cleanupStream{ + streamID: s.id, + rst: rst, + rstCode: rstCode, + onWrite: func() {}, + }) +} + +func (t *http2Server) Drain(debugData string) { + t.mu.Lock() + defer t.mu.Unlock() + if t.drainEvent != nil { + return + } + t.drainEvent = grpcsync.NewEvent() + t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte(debugData), headsUp: true}) +} + +var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}} + +// Handles outgoing GoAway and returns true if loopy needs to put itself +// in draining mode. +func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { + t.maxStreamMu.Lock() + t.mu.Lock() + if t.state == closing { // TODO(mmukhi): This seems unnecessary. + t.mu.Unlock() + t.maxStreamMu.Unlock() + // The transport is closing. + return false, ErrConnClosing + } + if !g.headsUp { + // Stop accepting more streams now. + t.state = draining + sid := t.maxStreamID + retErr := g.closeConn + if len(t.activeStreams) == 0 { + retErr = errors.New("second GOAWAY written and no active streams left to process") + } + t.mu.Unlock() + t.maxStreamMu.Unlock() + if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil { + return false, err + } + if retErr != nil { + return false, retErr + } + return true, nil + } + t.mu.Unlock() + t.maxStreamMu.Unlock() + // For a graceful close, send out a GoAway with stream ID of MaxUInt32, + // Follow that with a ping and wait for the ack to come back or a timer + // to expire. During this time accept new streams since they might have + // originated before the GoAway reaches the client. + // After getting the ack or timer expiration send out another GoAway this + // time with an ID of the max stream server intends to process. 
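Aside (illustrative, not part of the vendored file): the two-step GOAWAY described above is what a client observes when the application shuts the server down gracefully. A minimal sketch of the application-level trigger, assuming a SIGTERM handler; the signal choice and listen address are examples only.

package main

import (
	"log"
	"net"
	"os"
	"os/signal"
	"syscall"

	"google.golang.org/grpc"
)

func main() {
	lis, err := net.Listen("tcp", "localhost:50051")
	if err != nil {
		log.Fatal(err)
	}
	srv := grpc.NewServer()

	sig := make(chan os.Signal, 1)
	signal.Notify(sig, syscall.SIGTERM)
	go func() {
		<-sig
		// Drains every server transport: first GOAWAY (MaxUint32) plus a ping,
		// then the final GOAWAY once the ack arrives or the timer fires.
		srv.GracefulStop()
	}()

	if err := srv.Serve(lis); err != nil {
		log.Fatal(err)
	}
}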
+ if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, g.debugData); err != nil { + return false, err + } + if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil { + return false, err + } + go func() { + timer := time.NewTimer(time.Minute) + defer timer.Stop() + select { + case <-t.drainEvent.Done(): + case <-timer.C: + case <-t.done: + return + } + t.controlBuf.put(&goAway{code: g.code, debugData: g.debugData}) + }() + return false, nil +} + +func (t *http2Server) ChannelzMetric() *channelz.SocketInternalMetric { + s := channelz.SocketInternalMetric{ + StreamsStarted: atomic.LoadInt64(&t.czData.streamsStarted), + StreamsSucceeded: atomic.LoadInt64(&t.czData.streamsSucceeded), + StreamsFailed: atomic.LoadInt64(&t.czData.streamsFailed), + MessagesSent: atomic.LoadInt64(&t.czData.msgSent), + MessagesReceived: atomic.LoadInt64(&t.czData.msgRecv), + KeepAlivesSent: atomic.LoadInt64(&t.czData.kpCount), + LastRemoteStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)), + LastMessageSentTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)), + LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)), + LocalFlowControlWindow: int64(t.fc.getSize()), + SocketOptions: channelz.GetSocketOption(t.conn), + LocalAddr: t.peer.LocalAddr, + RemoteAddr: t.peer.Addr, + // RemoteName : + } + if au, ok := t.peer.AuthInfo.(credentials.ChannelzSecurityInfo); ok { + s.Security = au.GetSecurityValue() + } + s.RemoteFlowControlWindow = t.getOutFlowWindow() + return &s +} + +func (t *http2Server) IncrMsgSent() { + atomic.AddInt64(&t.czData.msgSent, 1) + atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano()) +} + +func (t *http2Server) IncrMsgRecv() { + atomic.AddInt64(&t.czData.msgRecv, 1) + atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano()) +} + +func (t *http2Server) getOutFlowWindow() int64 { + resp := make(chan uint32, 1) + timer := time.NewTimer(time.Second) + defer timer.Stop() + t.controlBuf.put(&outFlowControlSizeRequest{resp}) + select { + case sz := <-resp: + return int64(sz) + case <-t.done: + return -1 + case <-timer.C: + return -2 + } +} + +// Peer returns the peer of the transport. +func (t *http2Server) Peer() *peer.Peer { + return &peer.Peer{ + Addr: t.peer.Addr, + LocalAddr: t.peer.LocalAddr, + AuthInfo: t.peer.AuthInfo, // Can be nil + } +} + +func getJitter(v time.Duration) time.Duration { + if v == infinity { + return 0 + } + // Generate a jitter between +/- 10% of the value. + r := int64(v / 10) + j := grpcrand.Int63n(2*r) - r + return time.Duration(j) +} + +type connectionKey struct{} + +// GetConnection gets the connection from the context. +func GetConnection(ctx context.Context) net.Conn { + conn, _ := ctx.Value(connectionKey{}).(net.Conn) + return conn +} + +// SetConnection adds the connection to the context to be able to get +// information about the destination ip and port for an incoming RPC. This also +// allows any unary or streaming interceptors to see the connection. +func SetConnection(ctx context.Context, conn net.Conn) context.Context { + return context.WithValue(ctx, connectionKey{}, conn) +} diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go new file mode 100644 index 00000000..dc29d590 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -0,0 +1,465 @@ +/* + * + * Copyright 2014 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "bufio" + "encoding/base64" + "errors" + "fmt" + "io" + "math" + "net" + "net/http" + "net/url" + "strconv" + "strings" + "sync" + "time" + "unicode/utf8" + + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/codes" +) + +const ( + // http2MaxFrameLen specifies the max length of a HTTP2 frame. + http2MaxFrameLen = 16384 // 16KB frame + // https://httpwg.org/specs/rfc7540.html#SettingValues + http2InitHeaderTableSize = 4096 +) + +var ( + clientPreface = []byte(http2.ClientPreface) + http2ErrConvTab = map[http2.ErrCode]codes.Code{ + http2.ErrCodeNo: codes.Internal, + http2.ErrCodeProtocol: codes.Internal, + http2.ErrCodeInternal: codes.Internal, + http2.ErrCodeFlowControl: codes.ResourceExhausted, + http2.ErrCodeSettingsTimeout: codes.Internal, + http2.ErrCodeStreamClosed: codes.Internal, + http2.ErrCodeFrameSize: codes.Internal, + http2.ErrCodeRefusedStream: codes.Unavailable, + http2.ErrCodeCancel: codes.Canceled, + http2.ErrCodeCompression: codes.Internal, + http2.ErrCodeConnect: codes.Internal, + http2.ErrCodeEnhanceYourCalm: codes.ResourceExhausted, + http2.ErrCodeInadequateSecurity: codes.PermissionDenied, + http2.ErrCodeHTTP11Required: codes.Internal, + } + // HTTPStatusConvTab is the HTTP status code to gRPC error code conversion table. + HTTPStatusConvTab = map[int]codes.Code{ + // 400 Bad Request - INTERNAL. + http.StatusBadRequest: codes.Internal, + // 401 Unauthorized - UNAUTHENTICATED. + http.StatusUnauthorized: codes.Unauthenticated, + // 403 Forbidden - PERMISSION_DENIED. + http.StatusForbidden: codes.PermissionDenied, + // 404 Not Found - UNIMPLEMENTED. + http.StatusNotFound: codes.Unimplemented, + // 429 Too Many Requests - UNAVAILABLE. + http.StatusTooManyRequests: codes.Unavailable, + // 502 Bad Gateway - UNAVAILABLE. + http.StatusBadGateway: codes.Unavailable, + // 503 Service Unavailable - UNAVAILABLE. + http.StatusServiceUnavailable: codes.Unavailable, + // 504 Gateway timeout - UNAVAILABLE. + http.StatusGatewayTimeout: codes.Unavailable, + } +) + +var grpcStatusDetailsBinHeader = "grpc-status-details-bin" + +// isReservedHeader checks whether hdr belongs to HTTP2 headers +// reserved by gRPC protocol. Any other headers are classified as the +// user-specified metadata. +func isReservedHeader(hdr string) bool { + if hdr != "" && hdr[0] == ':' { + return true + } + switch hdr { + case "content-type", + "user-agent", + "grpc-message-type", + "grpc-encoding", + "grpc-message", + "grpc-status", + "grpc-timeout", + // Intentionally exclude grpc-previous-rpc-attempts and + // grpc-retry-pushback-ms, which are "reserved", but their API + // intentionally works via metadata. + "te": + return true + default: + return false + } +} + +// isWhitelistedHeader checks whether hdr should be propagated into metadata +// visible to users, even though it is classified as "reserved", above. 
+func isWhitelistedHeader(hdr string) bool { + switch hdr { + case ":authority", "user-agent": + return true + default: + return false + } +} + +const binHdrSuffix = "-bin" + +func encodeBinHeader(v []byte) string { + return base64.RawStdEncoding.EncodeToString(v) +} + +func decodeBinHeader(v string) ([]byte, error) { + if len(v)%4 == 0 { + // Input was padded, or padding was not necessary. + return base64.StdEncoding.DecodeString(v) + } + return base64.RawStdEncoding.DecodeString(v) +} + +func encodeMetadataHeader(k, v string) string { + if strings.HasSuffix(k, binHdrSuffix) { + return encodeBinHeader(([]byte)(v)) + } + return v +} + +func decodeMetadataHeader(k, v string) (string, error) { + if strings.HasSuffix(k, binHdrSuffix) { + b, err := decodeBinHeader(v) + return string(b), err + } + return v, nil +} + +type timeoutUnit uint8 + +const ( + hour timeoutUnit = 'H' + minute timeoutUnit = 'M' + second timeoutUnit = 'S' + millisecond timeoutUnit = 'm' + microsecond timeoutUnit = 'u' + nanosecond timeoutUnit = 'n' +) + +func timeoutUnitToDuration(u timeoutUnit) (d time.Duration, ok bool) { + switch u { + case hour: + return time.Hour, true + case minute: + return time.Minute, true + case second: + return time.Second, true + case millisecond: + return time.Millisecond, true + case microsecond: + return time.Microsecond, true + case nanosecond: + return time.Nanosecond, true + default: + } + return +} + +func decodeTimeout(s string) (time.Duration, error) { + size := len(s) + if size < 2 { + return 0, fmt.Errorf("transport: timeout string is too short: %q", s) + } + if size > 9 { + // Spec allows for 8 digits plus the unit. + return 0, fmt.Errorf("transport: timeout string is too long: %q", s) + } + unit := timeoutUnit(s[size-1]) + d, ok := timeoutUnitToDuration(unit) + if !ok { + return 0, fmt.Errorf("transport: timeout unit is not recognized: %q", s) + } + t, err := strconv.ParseInt(s[:size-1], 10, 64) + if err != nil { + return 0, err + } + const maxHours = math.MaxInt64 / int64(time.Hour) + if d == time.Hour && t > maxHours { + // This timeout would overflow math.MaxInt64; clamp it. + return time.Duration(math.MaxInt64), nil + } + return d * time.Duration(t), nil +} + +const ( + spaceByte = ' ' + tildeByte = '~' + percentByte = '%' +) + +// encodeGrpcMessage is used to encode status code in header field +// "grpc-message". It does percent encoding and also replaces invalid utf-8 +// characters with Unicode replacement character. +// +// It checks to see if each individual byte in msg is an allowable byte, and +// then either percent encoding or passing it through. When percent encoding, +// the byte is converted into hexadecimal notation with a '%' prepended. +func encodeGrpcMessage(msg string) string { + if msg == "" { + return "" + } + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + c := msg[i] + if !(c >= spaceByte && c <= tildeByte && c != percentByte) { + return encodeGrpcMessageUnchecked(msg) + } + } + return msg +} + +func encodeGrpcMessageUnchecked(msg string) string { + var sb strings.Builder + for len(msg) > 0 { + r, size := utf8.DecodeRuneInString(msg) + for _, b := range []byte(string(r)) { + if size > 1 { + // If size > 1, r is not ascii. Always do percent encoding. + fmt.Fprintf(&sb, "%%%02X", b) + continue + } + + // The for loop is necessary even if size == 1. r could be + // utf8.RuneError. + // + // fmt.Sprintf("%%%02X", utf8.RuneError) gives "%FFFD". 
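Aside (illustrative, not part of the vendored file): decodeTimeout above accepts the grpc-timeout header format, at most eight ASCII digits followed by a single unit character (H, M, S, m, u, n). A standalone sketch of that format, written independently of the unexported helpers in this file and omitting the overflow clamping done above.

package main

import (
	"fmt"
	"strconv"
	"time"
)

// Unit characters defined by the gRPC wire format for grpc-timeout.
var units = map[byte]time.Duration{
	'H': time.Hour, 'M': time.Minute, 'S': time.Second,
	'm': time.Millisecond, 'u': time.Microsecond, 'n': time.Nanosecond,
}

func parseGRPCTimeout(s string) (time.Duration, error) {
	if len(s) < 2 || len(s) > 9 {
		return 0, fmt.Errorf("grpc-timeout %q has invalid length", s)
	}
	unit, ok := units[s[len(s)-1]]
	if !ok {
		return 0, fmt.Errorf("grpc-timeout %q has unknown unit", s)
	}
	n, err := strconv.ParseInt(s[:len(s)-1], 10, 64)
	if err != nil {
		return 0, err
	}
	return time.Duration(n) * unit, nil
}

func main() {
	for _, s := range []string{"250m", "30S", "1H"} {
		d, err := parseGRPCTimeout(s)
		fmt.Println(s, d, err) // 250m 250ms <nil>, 30S 30s <nil>, 1H 1h0m0s <nil>
	}
}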
+ if b >= spaceByte && b <= tildeByte && b != percentByte { + sb.WriteByte(b) + } else { + fmt.Fprintf(&sb, "%%%02X", b) + } + } + msg = msg[size:] + } + return sb.String() +} + +// decodeGrpcMessage decodes the msg encoded by encodeGrpcMessage. +func decodeGrpcMessage(msg string) string { + if msg == "" { + return "" + } + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + if msg[i] == percentByte && i+2 < lenMsg { + return decodeGrpcMessageUnchecked(msg) + } + } + return msg +} + +func decodeGrpcMessageUnchecked(msg string) string { + var sb strings.Builder + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + c := msg[i] + if c == percentByte && i+2 < lenMsg { + parsed, err := strconv.ParseUint(msg[i+1:i+3], 16, 8) + if err != nil { + sb.WriteByte(c) + } else { + sb.WriteByte(byte(parsed)) + i += 2 + } + } else { + sb.WriteByte(c) + } + } + return sb.String() +} + +type bufWriter struct { + pool *sync.Pool + buf []byte + offset int + batchSize int + conn net.Conn + err error +} + +func newBufWriter(conn net.Conn, batchSize int, pool *sync.Pool) *bufWriter { + w := &bufWriter{ + batchSize: batchSize, + conn: conn, + pool: pool, + } + // this indicates that we should use non shared buf + if pool == nil { + w.buf = make([]byte, batchSize) + } + return w +} + +func (w *bufWriter) Write(b []byte) (n int, err error) { + if w.err != nil { + return 0, w.err + } + if w.batchSize == 0 { // Buffer has been disabled. + n, err = w.conn.Write(b) + return n, toIOError(err) + } + if w.buf == nil { + b := w.pool.Get().(*[]byte) + w.buf = *b + } + for len(b) > 0 { + nn := copy(w.buf[w.offset:], b) + b = b[nn:] + w.offset += nn + n += nn + if w.offset >= w.batchSize { + err = w.flushKeepBuffer() + } + } + return n, err +} + +func (w *bufWriter) Flush() error { + err := w.flushKeepBuffer() + // Only release the buffer if we are in a "shared" mode + if w.buf != nil && w.pool != nil { + b := w.buf + w.pool.Put(&b) + w.buf = nil + } + return err +} + +func (w *bufWriter) flushKeepBuffer() error { + if w.err != nil { + return w.err + } + if w.offset == 0 { + return nil + } + _, w.err = w.conn.Write(w.buf[:w.offset]) + w.err = toIOError(w.err) + w.offset = 0 + return w.err +} + +type ioError struct { + error +} + +func (i ioError) Unwrap() error { + return i.error +} + +func isIOError(err error) bool { + return errors.As(err, &ioError{}) +} + +func toIOError(err error) error { + if err == nil { + return nil + } + return ioError{error: err} +} + +type framer struct { + writer *bufWriter + fr *http2.Framer +} + +var writeBufferPoolMap map[int]*sync.Pool = make(map[int]*sync.Pool) +var writeBufferMutex sync.Mutex + +func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32) *framer { + if writeBufferSize < 0 { + writeBufferSize = 0 + } + var r io.Reader = conn + if readBufferSize > 0 { + r = bufio.NewReaderSize(r, readBufferSize) + } + var pool *sync.Pool + if sharedWriteBuffer { + pool = getWriteBufferPool(writeBufferSize) + } + w := newBufWriter(conn, writeBufferSize, pool) + f := &framer{ + writer: w, + fr: http2.NewFramer(w, r), + } + f.fr.SetMaxReadFrameSize(http2MaxFrameLen) + // Opt-in to Frame reuse API on framer to reduce garbage. + // Frames aren't safe to read from after a subsequent call to ReadFrame. 
+ f.fr.SetReuseFrames() + f.fr.MaxHeaderListSize = maxHeaderListSize + f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil) + return f +} + +func getWriteBufferPool(writeBufferSize int) *sync.Pool { + writeBufferMutex.Lock() + defer writeBufferMutex.Unlock() + size := writeBufferSize * 2 + pool, ok := writeBufferPoolMap[size] + if ok { + return pool + } + pool = &sync.Pool{ + New: func() any { + b := make([]byte, size) + return &b + }, + } + writeBufferPoolMap[size] = pool + return pool +} + +// parseDialTarget returns the network and address to pass to dialer. +func parseDialTarget(target string) (string, string) { + net := "tcp" + m1 := strings.Index(target, ":") + m2 := strings.Index(target, ":/") + // handle unix:addr which will fail with url.Parse + if m1 >= 0 && m2 < 0 { + if n := target[0:m1]; n == "unix" { + return n, target[m1+1:] + } + } + if m2 >= 0 { + t, err := url.Parse(target) + if err != nil { + return net, target + } + scheme := t.Scheme + addr := t.Path + if scheme == "unix" { + if addr == "" { + addr = t.Host + } + return scheme, addr + } + } + return net, target +} diff --git a/vendor/google.golang.org/grpc/internal/transport/logging.go b/vendor/google.golang.org/grpc/internal/transport/logging.go new file mode 100644 index 00000000..42ed2b07 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/logging.go @@ -0,0 +1,40 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "fmt" + + "google.golang.org/grpc/grpclog" + internalgrpclog "google.golang.org/grpc/internal/grpclog" +) + +var logger = grpclog.Component("transport") + +func prefixLoggerForServerTransport(p *http2Server) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[server-transport %p] ", p)) +} + +func prefixLoggerForServerHandlerTransport(p *serverHandlerTransport) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[server-handler-transport %p] ", p)) +} + +func prefixLoggerForClientTransport(p *http2Client) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[client-transport %p] ", p)) +} diff --git a/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go b/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go new file mode 100644 index 00000000..c11b5278 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go @@ -0,0 +1,46 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package networktype declares the network type to be used in the default +// dialer. Attribute of a resolver.Address. +package networktype + +import ( + "google.golang.org/grpc/resolver" +) + +// keyType is the key to use for storing State in Attributes. +type keyType string + +const key = keyType("grpc.internal.transport.networktype") + +// Set returns a copy of the provided address with attributes containing networkType. +func Set(address resolver.Address, networkType string) resolver.Address { + address.Attributes = address.Attributes.WithValue(key, networkType) + return address +} + +// Get returns the network type in the resolver.Address and true, or "", false +// if not present. +func Get(address resolver.Address) (string, bool) { + v := address.Attributes.Value(key) + if v == nil { + return "", false + } + return v.(string), true +} diff --git a/vendor/google.golang.org/grpc/internal/transport/proxy.go b/vendor/google.golang.org/grpc/internal/transport/proxy.go new file mode 100644 index 00000000..24fa1032 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/proxy.go @@ -0,0 +1,144 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "bufio" + "context" + "encoding/base64" + "fmt" + "io" + "net" + "net/http" + "net/http/httputil" + "net/url" + + "google.golang.org/grpc/internal" +) + +const proxyAuthHeaderKey = "Proxy-Authorization" + +var ( + // The following variable will be overwritten in the tests. + httpProxyFromEnvironment = http.ProxyFromEnvironment +) + +func mapAddress(address string) (*url.URL, error) { + req := &http.Request{ + URL: &url.URL{ + Scheme: "https", + Host: address, + }, + } + url, err := httpProxyFromEnvironment(req) + if err != nil { + return nil, err + } + return url, nil +} + +// To read a response from a net.Conn, http.ReadResponse() takes a bufio.Reader. +// It's possible that this reader reads more than what's need for the response and stores +// those bytes in the buffer. +// bufConn wraps the original net.Conn and the bufio.Reader to make sure we don't lose the +// bytes in the buffer. 
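Aside (illustrative, not part of the vendored file): mapAddress above asks http.ProxyFromEnvironment whether the dial target should go through an HTTP CONNECT proxy, so the feature is driven entirely by the standard proxy environment variables. A rough sketch of the client side, with a hypothetical proxy URL and target; note that the environment is normally exported before the process starts, since the standard library caches it on first use.

package main

import (
	"context"
	"log"
	"os"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Normally exported in the environment before startup, e.g.
	//   HTTPS_PROXY=http://user:pass@proxy.example.com:3128
	os.Setenv("HTTPS_PROXY", "http://user:pass@proxy.example.com:3128")

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	// The transport performs the CONNECT handshake before the HTTP/2 handshake.
	conn, err := grpc.DialContext(ctx, "backend.example.com:443",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithBlock(),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}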
+type bufConn struct { + net.Conn + r io.Reader +} + +func (c *bufConn) Read(b []byte) (int, error) { + return c.r.Read(b) +} + +func basicAuth(username, password string) string { + auth := username + ":" + password + return base64.StdEncoding.EncodeToString([]byte(auth)) +} + +func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr string, proxyURL *url.URL, grpcUA string) (_ net.Conn, err error) { + defer func() { + if err != nil { + conn.Close() + } + }() + + req := &http.Request{ + Method: http.MethodConnect, + URL: &url.URL{Host: backendAddr}, + Header: map[string][]string{"User-Agent": {grpcUA}}, + } + if t := proxyURL.User; t != nil { + u := t.Username() + p, _ := t.Password() + req.Header.Add(proxyAuthHeaderKey, "Basic "+basicAuth(u, p)) + } + + if err := sendHTTPRequest(ctx, req, conn); err != nil { + return nil, fmt.Errorf("failed to write the HTTP request: %v", err) + } + + r := bufio.NewReader(conn) + resp, err := http.ReadResponse(r, req) + if err != nil { + return nil, fmt.Errorf("reading server HTTP response: %v", err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + dump, err := httputil.DumpResponse(resp, true) + if err != nil { + return nil, fmt.Errorf("failed to do connect handshake, status code: %s", resp.Status) + } + return nil, fmt.Errorf("failed to do connect handshake, response: %q", dump) + } + + return &bufConn{Conn: conn, r: r}, nil +} + +// proxyDial dials, connecting to a proxy first if necessary. Checks if a proxy +// is necessary, dials, does the HTTP CONNECT handshake, and returns the +// connection. +func proxyDial(ctx context.Context, addr string, grpcUA string) (net.Conn, error) { + newAddr := addr + proxyURL, err := mapAddress(addr) + if err != nil { + return nil, err + } + if proxyURL != nil { + newAddr = proxyURL.Host + } + + conn, err := internal.NetDialerWithTCPKeepalive().DialContext(ctx, "tcp", newAddr) + if err != nil { + return nil, err + } + if proxyURL == nil { + // proxy is disabled if proxyURL is nil. + return conn, err + } + return doHTTPConnectHandshake(ctx, conn, addr, proxyURL, grpcUA) +} + +func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error { + req = req.WithContext(ctx) + if err := req.Write(conn); err != nil { + return fmt.Errorf("failed to write the HTTP request: %v", err) + } + return nil +} diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go new file mode 100644 index 00000000..b7b8fec1 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -0,0 +1,851 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package transport defines and implements message oriented communication +// channel to complete various transactions (e.g., an RPC). It is meant for +// grpc-internal usage and is not intended to be imported directly by users. 
+package transport + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "net" + "sync" + "sync/atomic" + "time" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + "google.golang.org/grpc/tap" +) + +const logLevel = 2 + +type bufferPool struct { + pool sync.Pool +} + +func newBufferPool() *bufferPool { + return &bufferPool{ + pool: sync.Pool{ + New: func() any { + return new(bytes.Buffer) + }, + }, + } +} + +func (p *bufferPool) get() *bytes.Buffer { + return p.pool.Get().(*bytes.Buffer) +} + +func (p *bufferPool) put(b *bytes.Buffer) { + p.pool.Put(b) +} + +// recvMsg represents the received msg from the transport. All transport +// protocol specific info has been removed. +type recvMsg struct { + buffer *bytes.Buffer + // nil: received some data + // io.EOF: stream is completed. data is nil. + // other non-nil error: transport failure. data is nil. + err error +} + +// recvBuffer is an unbounded channel of recvMsg structs. +// +// Note: recvBuffer differs from buffer.Unbounded only in the fact that it +// holds a channel of recvMsg structs instead of objects implementing "item" +// interface. recvBuffer is written to much more often and using strict recvMsg +// structs helps avoid allocation in "recvBuffer.put" +type recvBuffer struct { + c chan recvMsg + mu sync.Mutex + backlog []recvMsg + err error +} + +func newRecvBuffer() *recvBuffer { + b := &recvBuffer{ + c: make(chan recvMsg, 1), + } + return b +} + +func (b *recvBuffer) put(r recvMsg) { + b.mu.Lock() + if b.err != nil { + b.mu.Unlock() + // An error had occurred earlier, don't accept more + // data or errors. + return + } + b.err = r.err + if len(b.backlog) == 0 { + select { + case b.c <- r: + b.mu.Unlock() + return + default: + } + } + b.backlog = append(b.backlog, r) + b.mu.Unlock() +} + +func (b *recvBuffer) load() { + b.mu.Lock() + if len(b.backlog) > 0 { + select { + case b.c <- b.backlog[0]: + b.backlog[0] = recvMsg{} + b.backlog = b.backlog[1:] + default: + } + } + b.mu.Unlock() +} + +// get returns the channel that receives a recvMsg in the buffer. +// +// Upon receipt of a recvMsg, the caller should call load to send another +// recvMsg onto the channel if there is any. +func (b *recvBuffer) get() <-chan recvMsg { + return b.c +} + +// recvBufferReader implements io.Reader interface to read the data from +// recvBuffer. +type recvBufferReader struct { + closeStream func(error) // Closes the client transport stream with the given error and nil trailer metadata. + ctx context.Context + ctxDone <-chan struct{} // cache of ctx.Done() (for performance). + recv *recvBuffer + last *bytes.Buffer // Stores the remaining data in the previous calls. + err error + freeBuffer func(*bytes.Buffer) +} + +// Read reads the next len(p) bytes from last. If last is drained, it tries to +// read additional data from recv. It blocks if there no additional data available +// in recv. If Read returns any non-nil error, it will continue to return that error. +func (r *recvBufferReader) Read(p []byte) (n int, err error) { + if r.err != nil { + return 0, r.err + } + if r.last != nil { + // Read remaining data left in last call. 
+ copied, _ := r.last.Read(p) + if r.last.Len() == 0 { + r.freeBuffer(r.last) + r.last = nil + } + return copied, nil + } + if r.closeStream != nil { + n, r.err = r.readClient(p) + } else { + n, r.err = r.read(p) + } + return n, r.err +} + +func (r *recvBufferReader) read(p []byte) (n int, err error) { + select { + case <-r.ctxDone: + return 0, ContextErr(r.ctx.Err()) + case m := <-r.recv.get(): + return r.readAdditional(m, p) + } +} + +func (r *recvBufferReader) readClient(p []byte) (n int, err error) { + // If the context is canceled, then closes the stream with nil metadata. + // closeStream writes its error parameter to r.recv as a recvMsg. + // r.readAdditional acts on that message and returns the necessary error. + select { + case <-r.ctxDone: + // Note that this adds the ctx error to the end of recv buffer, and + // reads from the head. This will delay the error until recv buffer is + // empty, thus will delay ctx cancellation in Recv(). + // + // It's done this way to fix a race between ctx cancel and trailer. The + // race was, stream.Recv() may return ctx error if ctxDone wins the + // race, but stream.Trailer() may return a non-nil md because the stream + // was not marked as done when trailer is received. This closeStream + // call will mark stream as done, thus fix the race. + // + // TODO: delaying ctx error seems like a unnecessary side effect. What + // we really want is to mark the stream as done, and return ctx error + // faster. + r.closeStream(ContextErr(r.ctx.Err())) + m := <-r.recv.get() + return r.readAdditional(m, p) + case m := <-r.recv.get(): + return r.readAdditional(m, p) + } +} + +func (r *recvBufferReader) readAdditional(m recvMsg, p []byte) (n int, err error) { + r.recv.load() + if m.err != nil { + return 0, m.err + } + copied, _ := m.buffer.Read(p) + if m.buffer.Len() == 0 { + r.freeBuffer(m.buffer) + r.last = nil + } else { + r.last = m.buffer + } + return copied, nil +} + +type streamState uint32 + +const ( + streamActive streamState = iota + streamWriteDone // EndStream sent + streamReadDone // EndStream received + streamDone // the entire stream is finished. +) + +// Stream represents an RPC in the transport layer. +type Stream struct { + id uint32 + st ServerTransport // nil for client side Stream + ct *http2Client // nil for server side Stream + ctx context.Context // the associated context of the stream + cancel context.CancelFunc // always nil for client side Stream + done chan struct{} // closed at the end of stream to unblock writers. On the client side. + doneFunc func() // invoked at the end of stream on client side. + ctxDone <-chan struct{} // same as done chan but for server side. Cache of ctx.Done() (for performance) + method string // the associated RPC method of the stream + recvCompress string + sendCompress string + buf *recvBuffer + trReader io.Reader + fc *inFlow + wq *writeQuota + + // Holds compressor names passed in grpc-accept-encoding metadata from the + // client. This is empty for the client side stream. + clientAdvertisedCompressors string + // Callback to state application's intentions to read data. This + // is used to adjust flow control, if needed. + requestRead func(int) + + headerChan chan struct{} // closed to indicate the end of header metadata. + headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times. + // headerValid indicates whether a valid header was received. Only + // meaningful after headerChan is closed (always call waitOnHeader() before + // reading its value). 
Not valid on server side. + headerValid bool + headerWireLength int // Only set on server side. + + // hdrMu protects header and trailer metadata on the server-side. + hdrMu sync.Mutex + // On client side, header keeps the received header metadata. + // + // On server side, header keeps the header set by SetHeader(). The complete + // header will merged into this after t.WriteHeader() is called. + header metadata.MD + trailer metadata.MD // the key-value map of trailer metadata. + + noHeaders bool // set if the client never received headers (set only after the stream is done). + + // On the server-side, headerSent is atomically set to 1 when the headers are sent out. + headerSent uint32 + + state streamState + + // On client-side it is the status error received from the server. + // On server-side it is unused. + status *status.Status + + bytesReceived uint32 // indicates whether any bytes have been received on this stream + unprocessed uint32 // set if the server sends a refused stream or GOAWAY including this stream + + // contentSubtype is the content-subtype for requests. + // this must be lowercase or the behavior is undefined. + contentSubtype string +} + +// isHeaderSent is only valid on the server-side. +func (s *Stream) isHeaderSent() bool { + return atomic.LoadUint32(&s.headerSent) == 1 +} + +// updateHeaderSent updates headerSent and returns true +// if it was alreay set. It is valid only on server-side. +func (s *Stream) updateHeaderSent() bool { + return atomic.SwapUint32(&s.headerSent, 1) == 1 +} + +func (s *Stream) swapState(st streamState) streamState { + return streamState(atomic.SwapUint32((*uint32)(&s.state), uint32(st))) +} + +func (s *Stream) compareAndSwapState(oldState, newState streamState) bool { + return atomic.CompareAndSwapUint32((*uint32)(&s.state), uint32(oldState), uint32(newState)) +} + +func (s *Stream) getState() streamState { + return streamState(atomic.LoadUint32((*uint32)(&s.state))) +} + +func (s *Stream) waitOnHeader() { + if s.headerChan == nil { + // On the server headerChan is always nil since a stream originates + // only after having received headers. + return + } + select { + case <-s.ctx.Done(): + // Close the stream to prevent headers/trailers from changing after + // this function returns. + s.ct.CloseStream(s, ContextErr(s.ctx.Err())) + // headerChan could possibly not be closed yet if closeStream raced + // with operateHeaders; wait until it is closed explicitly here. + <-s.headerChan + case <-s.headerChan: + } +} + +// RecvCompress returns the compression algorithm applied to the inbound +// message. It is empty string if there is no compression applied. +func (s *Stream) RecvCompress() string { + s.waitOnHeader() + return s.recvCompress +} + +// SetSendCompress sets the compression algorithm to the stream. +func (s *Stream) SetSendCompress(name string) error { + if s.isHeaderSent() || s.getState() == streamDone { + return errors.New("transport: set send compressor called after headers sent or stream done") + } + + s.sendCompress = name + return nil +} + +// SendCompress returns the send compressor name. +func (s *Stream) SendCompress() string { + return s.sendCompress +} + +// ClientAdvertisedCompressors returns the compressor names advertised by the +// client via grpc-accept-encoding header. +func (s *Stream) ClientAdvertisedCompressors() string { + return s.clientAdvertisedCompressors +} + +// Done returns a channel which is closed when it receives the final status +// from the server. 
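+//
+// A minimal illustrative sketch (caller-side names such as ctx are assumed,
+// not taken from this file) of waiting for the final status:
+//
+//	select {
+//	case <-s.Done():
+//		// s.Status() is safe to read once Done is closed.
+//	case <-ctx.Done():
+//		// caller gave up waiting
+//	}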
+func (s *Stream) Done() <-chan struct{} { + return s.done +} + +// Header returns the header metadata of the stream. +// +// On client side, it acquires the key-value pairs of header metadata once it is +// available. It blocks until i) the metadata is ready or ii) there is no header +// metadata or iii) the stream is canceled/expired. +// +// On server side, it returns the out header after t.WriteHeader is called. It +// does not block and must not be called until after WriteHeader. +func (s *Stream) Header() (metadata.MD, error) { + if s.headerChan == nil { + // On server side, return the header in stream. It will be the out + // header after t.WriteHeader is called. + return s.header.Copy(), nil + } + s.waitOnHeader() + + if !s.headerValid || s.noHeaders { + return nil, s.status.Err() + } + + return s.header.Copy(), nil +} + +// TrailersOnly blocks until a header or trailers-only frame is received and +// then returns true if the stream was trailers-only. If the stream ends +// before headers are received, returns true, nil. Client-side only. +func (s *Stream) TrailersOnly() bool { + s.waitOnHeader() + return s.noHeaders +} + +// Trailer returns the cached trailer metedata. Note that if it is not called +// after the entire stream is done, it could return an empty MD. Client +// side only. +// It can be safely read only after stream has ended that is either read +// or write have returned io.EOF. +func (s *Stream) Trailer() metadata.MD { + c := s.trailer.Copy() + return c +} + +// ContentSubtype returns the content-subtype for a request. For example, a +// content-subtype of "proto" will result in a content-type of +// "application/grpc+proto". This will always be lowercase. See +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +func (s *Stream) ContentSubtype() string { + return s.contentSubtype +} + +// Context returns the context of the stream. +func (s *Stream) Context() context.Context { + return s.ctx +} + +// SetContext sets the context of the stream. This will be deleted once the +// stats handler callouts all move to gRPC layer. +func (s *Stream) SetContext(ctx context.Context) { + s.ctx = ctx +} + +// Method returns the method for the stream. +func (s *Stream) Method() string { + return s.method +} + +// Status returns the status received from the server. +// Status can be read safely only after the stream has ended, +// that is, after Done() is closed. +func (s *Stream) Status() *status.Status { + return s.status +} + +// HeaderWireLength returns the size of the headers of the stream as received +// from the wire. Valid only on the server. +func (s *Stream) HeaderWireLength() int { + return s.headerWireLength +} + +// SetHeader sets the header metadata. This can be called multiple times. +// Server side only. +// This should not be called in parallel to other data writes. +func (s *Stream) SetHeader(md metadata.MD) error { + if md.Len() == 0 { + return nil + } + if s.isHeaderSent() || s.getState() == streamDone { + return ErrIllegalHeaderWrite + } + s.hdrMu.Lock() + s.header = metadata.Join(s.header, md) + s.hdrMu.Unlock() + return nil +} + +// SendHeader sends the given header metadata. The given metadata is +// combined with any metadata set by previous calls to SetHeader and +// then written to the transport stream. +func (s *Stream) SendHeader(md metadata.MD) error { + return s.st.WriteHeader(s, md) +} + +// SetTrailer sets the trailer metadata which will be sent with the RPC status +// by the server. 
This can be called multiple times. Server side only. +// This should not be called parallel to other data writes. +func (s *Stream) SetTrailer(md metadata.MD) error { + if md.Len() == 0 { + return nil + } + if s.getState() == streamDone { + return ErrIllegalHeaderWrite + } + s.hdrMu.Lock() + s.trailer = metadata.Join(s.trailer, md) + s.hdrMu.Unlock() + return nil +} + +func (s *Stream) write(m recvMsg) { + s.buf.put(m) +} + +// Read reads all p bytes from the wire for this stream. +func (s *Stream) Read(p []byte) (n int, err error) { + // Don't request a read if there was an error earlier + if er := s.trReader.(*transportReader).er; er != nil { + return 0, er + } + s.requestRead(len(p)) + return io.ReadFull(s.trReader, p) +} + +// tranportReader reads all the data available for this Stream from the transport and +// passes them into the decoder, which converts them into a gRPC message stream. +// The error is io.EOF when the stream is done or another non-nil error if +// the stream broke. +type transportReader struct { + reader io.Reader + // The handler to control the window update procedure for both this + // particular stream and the associated transport. + windowHandler func(int) + er error +} + +func (t *transportReader) Read(p []byte) (n int, err error) { + n, err = t.reader.Read(p) + if err != nil { + t.er = err + return + } + t.windowHandler(n) + return +} + +// BytesReceived indicates whether any bytes have been received on this stream. +func (s *Stream) BytesReceived() bool { + return atomic.LoadUint32(&s.bytesReceived) == 1 +} + +// Unprocessed indicates whether the server did not process this stream -- +// i.e. it sent a refused stream or GOAWAY including this stream ID. +func (s *Stream) Unprocessed() bool { + return atomic.LoadUint32(&s.unprocessed) == 1 +} + +// GoString is implemented by Stream so context.String() won't +// race when printing %#v. +func (s *Stream) GoString() string { + return fmt.Sprintf("", s, s.method) +} + +// state of transport +type transportState int + +const ( + reachable transportState = iota + closing + draining +) + +// ServerConfig consists of all the configurations to establish a server transport. +type ServerConfig struct { + MaxStreams uint32 + ConnectionTimeout time.Duration + Credentials credentials.TransportCredentials + InTapHandle tap.ServerInHandle + StatsHandlers []stats.Handler + KeepaliveParams keepalive.ServerParameters + KeepalivePolicy keepalive.EnforcementPolicy + InitialWindowSize int32 + InitialConnWindowSize int32 + WriteBufferSize int + ReadBufferSize int + SharedWriteBuffer bool + ChannelzParentID *channelz.Identifier + MaxHeaderListSize *uint32 + HeaderTableSize *uint32 +} + +// ConnectOptions covers all relevant options for communicating with the server. +type ConnectOptions struct { + // UserAgent is the application user agent. + UserAgent string + // Dialer specifies how to dial a network address. + Dialer func(context.Context, string) (net.Conn, error) + // FailOnNonTempDialError specifies if gRPC fails on non-temporary dial errors. + FailOnNonTempDialError bool + // PerRPCCredentials stores the PerRPCCredentials required to issue RPCs. + PerRPCCredentials []credentials.PerRPCCredentials + // TransportCredentials stores the Authenticator required to setup a client + // connection. Only one of TransportCredentials and CredsBundle is non-nil. + TransportCredentials credentials.TransportCredentials + // CredsBundle is the credentials bundle to be used. Only one of + // TransportCredentials and CredsBundle is non-nil. 
+ CredsBundle credentials.Bundle + // KeepaliveParams stores the keepalive parameters. + KeepaliveParams keepalive.ClientParameters + // StatsHandlers stores the handler for stats. + StatsHandlers []stats.Handler + // InitialWindowSize sets the initial window size for a stream. + InitialWindowSize int32 + // InitialConnWindowSize sets the initial window size for a connection. + InitialConnWindowSize int32 + // WriteBufferSize sets the size of write buffer which in turn determines how much data can be batched before it's written on the wire. + WriteBufferSize int + // ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall. + ReadBufferSize int + // SharedWriteBuffer indicates whether connections should reuse write buffer + SharedWriteBuffer bool + // ChannelzParentID sets the addrConn id which initiate the creation of this client transport. + ChannelzParentID *channelz.Identifier + // MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received. + MaxHeaderListSize *uint32 + // UseProxy specifies if a proxy should be used. + UseProxy bool +} + +// NewClientTransport establishes the transport with the required ConnectOptions +// and returns it to the caller. +func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (ClientTransport, error) { + return newHTTP2Client(connectCtx, ctx, addr, opts, onClose) +} + +// Options provides additional hints and information for message +// transmission. +type Options struct { + // Last indicates whether this write is the last piece for + // this stream. + Last bool +} + +// CallHdr carries the information of a particular RPC. +type CallHdr struct { + // Host specifies the peer's host. + Host string + + // Method specifies the operation to perform. + Method string + + // SendCompress specifies the compression algorithm applied on + // outbound message. + SendCompress string + + // Creds specifies credentials.PerRPCCredentials for a call. + Creds credentials.PerRPCCredentials + + // ContentSubtype specifies the content-subtype for a request. For example, a + // content-subtype of "proto" will result in a content-type of + // "application/grpc+proto". The value of ContentSubtype must be all + // lowercase, otherwise the behavior is undefined. See + // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests + // for more details. + ContentSubtype string + + PreviousAttempts int // value of grpc-previous-rpc-attempts header to set + + DoneFunc func() // called when the stream is finished +} + +// ClientTransport is the common interface for all gRPC client-side transport +// implementations. +type ClientTransport interface { + // Close tears down this transport. Once it returns, the transport + // should not be accessed any more. The caller must make sure this + // is called only once. + Close(err error) + + // GracefulClose starts to tear down the transport: the transport will stop + // accepting new RPCs and NewStream will return error. Once all streams are + // finished, the transport will close. + // + // It does not block. + GracefulClose() + + // Write sends the data for the given stream. A nil stream indicates + // the write is to be performed on the transport as a whole. + Write(s *Stream, hdr []byte, data []byte, opts *Options) error + + // NewStream creates a Stream for an RPC. 
+ NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) + + // CloseStream clears the footprint of a stream when the stream is + // not needed any more. The err indicates the error incurred when + // CloseStream is called. Must be called when a stream is finished + // unless the associated transport is closing. + CloseStream(stream *Stream, err error) + + // Error returns a channel that is closed when some I/O error + // happens. Typically the caller should have a goroutine to monitor + // this in order to take action (e.g., close the current transport + // and create a new one) in error case. It should not return nil + // once the transport is initiated. + Error() <-chan struct{} + + // GoAway returns a channel that is closed when ClientTransport + // receives the draining signal from the server (e.g., GOAWAY frame in + // HTTP/2). + GoAway() <-chan struct{} + + // GetGoAwayReason returns the reason why GoAway frame was received, along + // with a human readable string with debug info. + GetGoAwayReason() (GoAwayReason, string) + + // RemoteAddr returns the remote network address. + RemoteAddr() net.Addr + + // IncrMsgSent increments the number of message sent through this transport. + IncrMsgSent() + + // IncrMsgRecv increments the number of message received through this transport. + IncrMsgRecv() +} + +// ServerTransport is the common interface for all gRPC server-side transport +// implementations. +// +// Methods may be called concurrently from multiple goroutines, but +// Write methods for a given Stream will be called serially. +type ServerTransport interface { + // HandleStreams receives incoming streams using the given handler. + HandleStreams(context.Context, func(*Stream)) + + // WriteHeader sends the header metadata for the given stream. + // WriteHeader may not be called on all streams. + WriteHeader(s *Stream, md metadata.MD) error + + // Write sends the data for the given stream. + // Write may not be called on all streams. + Write(s *Stream, hdr []byte, data []byte, opts *Options) error + + // WriteStatus sends the status of a stream to the client. WriteStatus is + // the final call made on a stream and always occurs. + WriteStatus(s *Stream, st *status.Status) error + + // Close tears down the transport. Once it is called, the transport + // should not be accessed any more. All the pending streams and their + // handlers will be terminated asynchronously. + Close(err error) + + // Peer returns the peer of the server transport. + Peer() *peer.Peer + + // Drain notifies the client this ServerTransport stops accepting new RPCs. + Drain(debugData string) + + // IncrMsgSent increments the number of message sent through this transport. + IncrMsgSent() + + // IncrMsgRecv increments the number of message received through this transport. + IncrMsgRecv() +} + +// connectionErrorf creates an ConnectionError with the specified error description. +func connectionErrorf(temp bool, e error, format string, a ...any) ConnectionError { + return ConnectionError{ + Desc: fmt.Sprintf(format, a...), + temp: temp, + err: e, + } +} + +// ConnectionError is an error that results in the termination of the +// entire connection and the retry of all the active streams. +type ConnectionError struct { + Desc string + temp bool + err error +} + +func (e ConnectionError) Error() string { + return fmt.Sprintf("connection error: desc = %q", e.Desc) +} + +// Temporary indicates if this connection error is temporary or fatal. 
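+//
+// A minimal illustrative sketch (assumed in-package usage, not quoted from
+// gRPC itself) of branching on it:
+//
+//	if ce, ok := err.(ConnectionError); ok && ce.Temporary() {
+//		// transient connection error: retrying may succeed
+//	}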
+func (e ConnectionError) Temporary() bool { + return e.temp +} + +// Origin returns the original error of this connection error. +func (e ConnectionError) Origin() error { + // Never return nil error here. + // If the original error is nil, return itself. + if e.err == nil { + return e + } + return e.err +} + +// Unwrap returns the original error of this connection error or nil when the +// origin is nil. +func (e ConnectionError) Unwrap() error { + return e.err +} + +var ( + // ErrConnClosing indicates that the transport is closing. + ErrConnClosing = connectionErrorf(true, nil, "transport is closing") + // errStreamDrain indicates that the stream is rejected because the + // connection is draining. This could be caused by goaway or balancer + // removing the address. + errStreamDrain = status.Error(codes.Unavailable, "the connection is draining") + // errStreamDone is returned from write at the client side to indiacte application + // layer of an error. + errStreamDone = errors.New("the stream is done") + // StatusGoAway indicates that the server sent a GOAWAY that included this + // stream's ID in unprocessed RPCs. + statusGoAway = status.New(codes.Unavailable, "the stream is rejected because server is draining the connection") +) + +// GoAwayReason contains the reason for the GoAway frame received. +type GoAwayReason uint8 + +const ( + // GoAwayInvalid indicates that no GoAway frame is received. + GoAwayInvalid GoAwayReason = 0 + // GoAwayNoReason is the default value when GoAway frame is received. + GoAwayNoReason GoAwayReason = 1 + // GoAwayTooManyPings indicates that a GoAway frame with + // ErrCodeEnhanceYourCalm was received and that the debug data said + // "too_many_pings". + GoAwayTooManyPings GoAwayReason = 2 +) + +// channelzData is used to store channelz related data for http2Client and http2Server. +// These fields cannot be embedded in the original structs (e.g. http2Client), since to do atomic +// operation on int64 variable on 32-bit machine, user is responsible to enforce memory alignment. +// Here, by grouping those int64 fields inside a struct, we are enforcing the alignment. +type channelzData struct { + kpCount int64 + // The number of streams that have started, including already finished ones. + streamsStarted int64 + // Client side: The number of streams that have ended successfully by receiving + // EoS bit set frame from server. + // Server side: The number of streams that have ended successfully by sending + // frame with EoS bit set. + streamsSucceeded int64 + streamsFailed int64 + // lastStreamCreatedTime stores the timestamp that the last stream gets created. It is of int64 type + // instead of time.Time since it's more costly to atomically update time.Time variable than int64 + // variable. The same goes for lastMsgSentTime and lastMsgRecvTime. + lastStreamCreatedTime int64 + msgSent int64 + msgRecv int64 + lastMsgSentTime int64 + lastMsgRecvTime int64 +} + +// ContextErr converts the error from context package into a status error. 
+func ContextErr(err error) error { + switch err { + case context.DeadlineExceeded: + return status.Error(codes.DeadlineExceeded, err.Error()) + case context.Canceled: + return status.Error(codes.Canceled, err.Error()) + } + return status.Errorf(codes.Internal, "Unexpected error from context packet: %v", err) +} diff --git a/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go b/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go new file mode 100644 index 00000000..e8b49277 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go @@ -0,0 +1,40 @@ +/* + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package internal + +import ( + "google.golang.org/grpc/attributes" + "google.golang.org/grpc/resolver" +) + +// handshakeClusterNameKey is the type used as the key to store cluster name in +// the Attributes field of resolver.Address. +type handshakeClusterNameKey struct{} + +// SetXDSHandshakeClusterName returns a copy of addr in which the Attributes field +// is updated with the cluster name. +func SetXDSHandshakeClusterName(addr resolver.Address, clusterName string) resolver.Address { + addr.Attributes = addr.Attributes.WithValue(handshakeClusterNameKey{}, clusterName) + return addr +} + +// GetXDSHandshakeClusterName returns cluster name stored in attr. +func GetXDSHandshakeClusterName(attr *attributes.Attributes) (string, bool) { + v := attr.Value(handshakeClusterNameKey{}) + name, ok := v.(string) + return name, ok +} diff --git a/vendor/google.golang.org/grpc/keepalive/keepalive.go b/vendor/google.golang.org/grpc/keepalive/keepalive.go new file mode 100644 index 00000000..34d31b5e --- /dev/null +++ b/vendor/google.golang.org/grpc/keepalive/keepalive.go @@ -0,0 +1,85 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package keepalive defines configurable parameters for point-to-point +// healthcheck. +package keepalive + +import ( + "time" +) + +// ClientParameters is used to set keepalive parameters on the client-side. +// These configure how the client will actively probe to notice when a +// connection is broken and send pings so intermediaries will be aware of the +// liveness of the connection. Make sure these parameters are set in +// coordination with the keepalive policy on the server, as incompatible +// settings can result in closing of connection. 
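+//
+// A minimal illustrative sketch of supplying these parameters when dialing;
+// grpc.Dial and grpc.WithKeepaliveParams are the public gRPC APIs, while the
+// concrete durations below are assumed example values, not recommendations:
+//
+//	conn, err := grpc.Dial(target, grpc.WithKeepaliveParams(keepalive.ClientParameters{
+//		Time:                30 * time.Second, // ping after 30s of inactivity
+//		Timeout:             20 * time.Second, // wait 20s for the ping ack
+//		PermitWithoutStream: true,             // ping even without active RPCs
+//	}))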
+type ClientParameters struct { + // After a duration of this time if the client doesn't see any activity it + // pings the server to see if the transport is still alive. + // If set below 10s, a minimum value of 10s will be used instead. + Time time.Duration // The current default value is infinity. + // After having pinged for keepalive check, the client waits for a duration + // of Timeout and if no activity is seen even after that the connection is + // closed. + Timeout time.Duration // The current default value is 20 seconds. + // If true, client sends keepalive pings even with no active RPCs. If false, + // when there are no active RPCs, Time and Timeout will be ignored and no + // keepalive pings will be sent. + PermitWithoutStream bool // false by default. +} + +// ServerParameters is used to set keepalive and max-age parameters on the +// server-side. +type ServerParameters struct { + // MaxConnectionIdle is a duration for the amount of time after which an + // idle connection would be closed by sending a GoAway. Idleness duration is + // defined since the most recent time the number of outstanding RPCs became + // zero or the connection establishment. + MaxConnectionIdle time.Duration // The current default value is infinity. + // MaxConnectionAge is a duration for the maximum amount of time a + // connection may exist before it will be closed by sending a GoAway. A + // random jitter of +/-10% will be added to MaxConnectionAge to spread out + // connection storms. + MaxConnectionAge time.Duration // The current default value is infinity. + // MaxConnectionAgeGrace is an additive period after MaxConnectionAge after + // which the connection will be forcibly closed. + MaxConnectionAgeGrace time.Duration // The current default value is infinity. + // After a duration of this time if the server doesn't see any activity it + // pings the client to see if the transport is still alive. + // If set below 1s, a minimum value of 1s will be used instead. + Time time.Duration // The current default value is 2 hours. + // After having pinged for keepalive check, the server waits for a duration + // of Timeout and if no activity is seen even after that the connection is + // closed. + Timeout time.Duration // The current default value is 20 seconds. +} + +// EnforcementPolicy is used to set keepalive enforcement policy on the +// server-side. Server will close connection with a client that violates this +// policy. +type EnforcementPolicy struct { + // MinTime is the minimum amount of time a client should wait before sending + // a keepalive ping. + MinTime time.Duration // The current default value is 5 minutes. + // If true, server allows keepalive pings even when there are no active + // streams(RPCs). If false, and client sends ping when there are no active + // streams, server will send GOAWAY and close the connection. + PermitWithoutStream bool // false by default. +} diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go new file mode 100644 index 00000000..1e9485fd --- /dev/null +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -0,0 +1,300 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package metadata define the structure of the metadata supported by gRPC library. +// Please refer to https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md +// for more information about custom-metadata. +package metadata // import "google.golang.org/grpc/metadata" + +import ( + "context" + "fmt" + "strings" + + "google.golang.org/grpc/internal" +) + +func init() { + internal.FromOutgoingContextRaw = fromOutgoingContextRaw +} + +// DecodeKeyValue returns k, v, nil. +// +// Deprecated: use k and v directly instead. +func DecodeKeyValue(k, v string) (string, string, error) { + return k, v, nil +} + +// MD is a mapping from metadata keys to values. Users should use the following +// two convenience functions New and Pairs to generate MD. +type MD map[string][]string + +// New creates an MD from a given key-value map. +// +// Only the following ASCII characters are allowed in keys: +// - digits: 0-9 +// - uppercase letters: A-Z (normalized to lower) +// - lowercase letters: a-z +// - special characters: -_. +// +// Uppercase letters are automatically converted to lowercase. +// +// Keys beginning with "grpc-" are reserved for grpc-internal use only and may +// result in errors if set in metadata. +func New(m map[string]string) MD { + md := make(MD, len(m)) + for k, val := range m { + key := strings.ToLower(k) + md[key] = append(md[key], val) + } + return md +} + +// Pairs returns an MD formed by the mapping of key, value ... +// Pairs panics if len(kv) is odd. +// +// Only the following ASCII characters are allowed in keys: +// - digits: 0-9 +// - uppercase letters: A-Z (normalized to lower) +// - lowercase letters: a-z +// - special characters: -_. +// +// Uppercase letters are automatically converted to lowercase. +// +// Keys beginning with "grpc-" are reserved for grpc-internal use only and may +// result in errors if set in metadata. +func Pairs(kv ...string) MD { + if len(kv)%2 == 1 { + panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv))) + } + md := make(MD, len(kv)/2) + for i := 0; i < len(kv); i += 2 { + key := strings.ToLower(kv[i]) + md[key] = append(md[key], kv[i+1]) + } + return md +} + +// Len returns the number of items in md. +func (md MD) Len() int { + return len(md) +} + +// Copy returns a copy of md. +func (md MD) Copy() MD { + out := make(MD, len(md)) + for k, v := range md { + out[k] = copyOf(v) + } + return out +} + +// Get obtains the values for a given key. +// +// k is converted to lowercase before searching in md. +func (md MD) Get(k string) []string { + k = strings.ToLower(k) + return md[k] +} + +// Set sets the value of a given key with a slice of values. +// +// k is converted to lowercase before storing in md. +func (md MD) Set(k string, vals ...string) { + if len(vals) == 0 { + return + } + k = strings.ToLower(k) + md[k] = vals +} + +// Append adds the values to key k, not overwriting what was already stored at +// that key. +// +// k is converted to lowercase before storing in md. 
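+//
+// For example (illustrative only):
+//
+//	md := Pairs("k1", "v1")
+//	md.Append("K1", "v2") // key is lowercased to "k1"
+//	// md.Get("k1") == []string{"v1", "v2"}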
+func (md MD) Append(k string, vals ...string) { + if len(vals) == 0 { + return + } + k = strings.ToLower(k) + md[k] = append(md[k], vals...) +} + +// Delete removes the values for a given key k which is converted to lowercase +// before removing it from md. +func (md MD) Delete(k string) { + k = strings.ToLower(k) + delete(md, k) +} + +// Join joins any number of mds into a single MD. +// +// The order of values for each key is determined by the order in which the mds +// containing those values are presented to Join. +func Join(mds ...MD) MD { + out := MD{} + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return out +} + +type mdIncomingKey struct{} +type mdOutgoingKey struct{} + +// NewIncomingContext creates a new context with incoming md attached. md must +// not be modified after calling this function. +func NewIncomingContext(ctx context.Context, md MD) context.Context { + return context.WithValue(ctx, mdIncomingKey{}, md) +} + +// NewOutgoingContext creates a new context with outgoing md attached. If used +// in conjunction with AppendToOutgoingContext, NewOutgoingContext will +// overwrite any previously-appended metadata. md must not be modified after +// calling this function. +func NewOutgoingContext(ctx context.Context, md MD) context.Context { + return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md}) +} + +// AppendToOutgoingContext returns a new context with the provided kv merged +// with any existing metadata in the context. Please refer to the documentation +// of Pairs for a description of kv. +func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context { + if len(kv)%2 == 1 { + panic(fmt.Sprintf("metadata: AppendToOutgoingContext got an odd number of input pairs for metadata: %d", len(kv))) + } + md, _ := ctx.Value(mdOutgoingKey{}).(rawMD) + added := make([][]string, len(md.added)+1) + copy(added, md.added) + kvCopy := make([]string, 0, len(kv)) + for i := 0; i < len(kv); i += 2 { + kvCopy = append(kvCopy, strings.ToLower(kv[i]), kv[i+1]) + } + added[len(added)-1] = kvCopy + return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md.md, added: added}) +} + +// FromIncomingContext returns the incoming metadata in ctx if it exists. +// +// All keys in the returned MD are lowercase. +func FromIncomingContext(ctx context.Context) (MD, bool) { + md, ok := ctx.Value(mdIncomingKey{}).(MD) + if !ok { + return nil, false + } + out := make(MD, len(md)) + for k, v := range md { + // We need to manually convert all keys to lower case, because MD is a + // map, and there's no guarantee that the MD attached to the context is + // created using our helper functions. + key := strings.ToLower(k) + out[key] = copyOf(v) + } + return out, true +} + +// ValueFromIncomingContext returns the metadata value corresponding to the metadata +// key from the incoming metadata if it exists. Keys are matched in a case insensitive +// manner. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ValueFromIncomingContext(ctx context.Context, key string) []string { + md, ok := ctx.Value(mdIncomingKey{}).(MD) + if !ok { + return nil + } + + if v, ok := md[key]; ok { + return copyOf(v) + } + for k, v := range md { + // Case insenitive comparison: MD is a map, and there's no guarantee + // that the MD attached to the context is created using our helper + // functions. 
+ if strings.EqualFold(k, key) { + return copyOf(v) + } + } + return nil +} + +func copyOf(v []string) []string { + vals := make([]string, len(v)) + copy(vals, v) + return vals +} + +// fromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD. +// +// Remember to perform strings.ToLower on the keys, for both the returned MD (MD +// is a map, there's no guarantee it's created using our helper functions) and +// the extra kv pairs (AppendToOutgoingContext doesn't turn them into +// lowercase). +func fromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) { + raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) + if !ok { + return nil, nil, false + } + + return raw.md, raw.added, true +} + +// FromOutgoingContext returns the outgoing metadata in ctx if it exists. +// +// All keys in the returned MD are lowercase. +func FromOutgoingContext(ctx context.Context) (MD, bool) { + raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) + if !ok { + return nil, false + } + + mdSize := len(raw.md) + for i := range raw.added { + mdSize += len(raw.added[i]) / 2 + } + + out := make(MD, mdSize) + for k, v := range raw.md { + // We need to manually convert all keys to lower case, because MD is a + // map, and there's no guarantee that the MD attached to the context is + // created using our helper functions. + key := strings.ToLower(k) + out[key] = copyOf(v) + } + for _, added := range raw.added { + if len(added)%2 == 1 { + panic(fmt.Sprintf("metadata: FromOutgoingContext got an odd number of input pairs for metadata: %d", len(added))) + } + + for i := 0; i < len(added); i += 2 { + key := strings.ToLower(added[i]) + out[key] = append(out[key], added[i+1]) + } + } + return out, ok +} + +type rawMD struct { + md MD + added [][]string +} diff --git a/vendor/google.golang.org/grpc/peer/peer.go b/vendor/google.golang.org/grpc/peer/peer.go new file mode 100644 index 00000000..a821ff9b --- /dev/null +++ b/vendor/google.golang.org/grpc/peer/peer.go @@ -0,0 +1,53 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package peer defines various peer information associated with RPCs and +// corresponding utils. +package peer + +import ( + "context" + "net" + + "google.golang.org/grpc/credentials" +) + +// Peer contains the information of the peer for an RPC, such as the address +// and authentication information. +type Peer struct { + // Addr is the peer address. + Addr net.Addr + // LocalAddr is the local address. + LocalAddr net.Addr + // AuthInfo is the authentication information of the transport. + // It is nil if there is no transport security being used. + AuthInfo credentials.AuthInfo +} + +type peerKey struct{} + +// NewContext creates a new context with peer information attached. +func NewContext(ctx context.Context, p *Peer) context.Context { + return context.WithValue(ctx, peerKey{}, p) +} + +// FromContext returns the peer information in ctx if it exists. 
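+//
+// A minimal illustrative sketch (the handler and its request/response types
+// are assumed, not part of this package) of reading the peer address inside
+// a server handler:
+//
+//	func (s *server) DoSomething(ctx context.Context, req *Request) (*Response, error) {
+//		if p, ok := peer.FromContext(ctx); ok {
+//			log.Printf("request from %v", p.Addr)
+//		}
+//		// ...
+//	}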
+func FromContext(ctx context.Context) (p *Peer, ok bool) { + p, ok = ctx.Value(peerKey{}).(*Peer) + return +} diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go new file mode 100644 index 00000000..bf56faa7 --- /dev/null +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -0,0 +1,223 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "io" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal/channelz" + istatus "google.golang.org/grpc/internal/status" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +// pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick +// actions and unblock when there's a picker update. +type pickerWrapper struct { + mu sync.Mutex + done bool + blockingCh chan struct{} + picker balancer.Picker + statsHandlers []stats.Handler // to record blocking picker calls +} + +func newPickerWrapper(statsHandlers []stats.Handler) *pickerWrapper { + return &pickerWrapper{ + blockingCh: make(chan struct{}), + statsHandlers: statsHandlers, + } +} + +// updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. +func (pw *pickerWrapper) updatePicker(p balancer.Picker) { + pw.mu.Lock() + if pw.done { + pw.mu.Unlock() + return + } + pw.picker = p + // pw.blockingCh should never be nil. + close(pw.blockingCh) + pw.blockingCh = make(chan struct{}) + pw.mu.Unlock() +} + +// doneChannelzWrapper performs the following: +// - increments the calls started channelz counter +// - wraps the done function in the passed in result to increment the calls +// failed or calls succeeded channelz counter before invoking the actual +// done function. +func doneChannelzWrapper(acbw *acBalancerWrapper, result *balancer.PickResult) { + ac := acbw.ac + ac.incrCallsStarted() + done := result.Done + result.Done = func(b balancer.DoneInfo) { + if b.Err != nil && b.Err != io.EOF { + ac.incrCallsFailed() + } else { + ac.incrCallsSucceeded() + } + if done != nil { + done(b) + } + } +} + +// pick returns the transport that will be used for the RPC. +// It may block in the following cases: +// - there's no picker +// - the current picker returns ErrNoSubConnAvailable +// - the current picker returns other errors and failfast is false. +// - the subConn returned by the current picker is not READY +// When one of these situations happens, pick blocks until the picker gets updated. 
+func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, balancer.PickResult, error) { + var ch chan struct{} + + var lastPickErr error + + for { + pw.mu.Lock() + if pw.done { + pw.mu.Unlock() + return nil, balancer.PickResult{}, ErrClientConnClosing + } + + if pw.picker == nil { + ch = pw.blockingCh + } + if ch == pw.blockingCh { + // This could happen when either: + // - pw.picker is nil (the previous if condition), or + // - has called pick on the current picker. + pw.mu.Unlock() + select { + case <-ctx.Done(): + var errStr string + if lastPickErr != nil { + errStr = "latest balancer error: " + lastPickErr.Error() + } else { + errStr = ctx.Err().Error() + } + switch ctx.Err() { + case context.DeadlineExceeded: + return nil, balancer.PickResult{}, status.Error(codes.DeadlineExceeded, errStr) + case context.Canceled: + return nil, balancer.PickResult{}, status.Error(codes.Canceled, errStr) + } + case <-ch: + } + continue + } + + // If the channel is set, it means that the pick call had to wait for a + // new picker at some point. Either it's the first iteration and this + // function received the first picker, or a picker errored with + // ErrNoSubConnAvailable or errored with failfast set to false, which + // will trigger a continue to the next iteration. In the first case this + // conditional will hit if this call had to block (the channel is set). + // In the second case, the only way it will get to this conditional is + // if there is a new picker. + if ch != nil { + for _, sh := range pw.statsHandlers { + sh.HandleRPC(ctx, &stats.PickerUpdated{}) + } + } + + ch = pw.blockingCh + p := pw.picker + pw.mu.Unlock() + + pickResult, err := p.Pick(info) + if err != nil { + if err == balancer.ErrNoSubConnAvailable { + continue + } + if st, ok := status.FromError(err); ok { + // Status error: end the RPC unconditionally with this status. + // First restrict the code to the list allowed by gRFC A54. + if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "received picker error with illegal status: %v", err) + } + return nil, balancer.PickResult{}, dropError{error: err} + } + // For all other errors, wait for ready RPCs should block and other + // RPCs should fail with unavailable. + if !failfast { + lastPickErr = err + continue + } + return nil, balancer.PickResult{}, status.Error(codes.Unavailable, err.Error()) + } + + acbw, ok := pickResult.SubConn.(*acBalancerWrapper) + if !ok { + logger.Errorf("subconn returned from pick is type %T, not *acBalancerWrapper", pickResult.SubConn) + continue + } + if t := acbw.ac.getReadyTransport(); t != nil { + if channelz.IsOn() { + doneChannelzWrapper(acbw, &pickResult) + return t, pickResult, nil + } + return t, pickResult, nil + } + if pickResult.Done != nil { + // Calling done with nil error, no bytes sent and no bytes received. + // DoneInfo with default value works. + pickResult.Done(balancer.DoneInfo{}) + } + logger.Infof("blockingPicker: the picked transport is not ready, loop back to repick") + // If ok == false, ac.state is not READY. + // A valid picker always returns READY subConn. This means the state of ac + // just changed, and picker will be updated shortly. + // continue back to the beginning of the for loop to repick. 
+ } +} + +func (pw *pickerWrapper) close() { + pw.mu.Lock() + defer pw.mu.Unlock() + if pw.done { + return + } + pw.done = true + close(pw.blockingCh) +} + +// reset clears the pickerWrapper and prepares it for being used again when idle +// mode is exited. +func (pw *pickerWrapper) reset() { + pw.mu.Lock() + defer pw.mu.Unlock() + if pw.done { + return + } + pw.blockingCh = make(chan struct{}) +} + +// dropError is a wrapper error that indicates the LB policy wishes to drop the +// RPC and not retry it. +type dropError struct { + error +} diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go new file mode 100644 index 00000000..5128f936 --- /dev/null +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -0,0 +1,249 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "encoding/json" + "errors" + "fmt" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + internalgrpclog "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +const ( + // PickFirstBalancerName is the name of the pick_first balancer. + PickFirstBalancerName = "pick_first" + logPrefix = "[pick-first-lb %p] " +) + +func newPickfirstBuilder() balancer.Builder { + return &pickfirstBuilder{} +} + +type pickfirstBuilder struct{} + +func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { + b := &pickfirstBalancer{cc: cc} + b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) + return b +} + +func (*pickfirstBuilder) Name() string { + return PickFirstBalancerName +} + +type pfConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + + // If set to true, instructs the LB policy to shuffle the order of the list + // of addresses received from the name resolver before attempting to + // connect to them. + ShuffleAddressList bool `json:"shuffleAddressList"` +} + +func (*pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + var cfg pfConfig + if err := json.Unmarshal(js, &cfg); err != nil { + return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) + } + return cfg, nil +} + +type pickfirstBalancer struct { + logger *internalgrpclog.PrefixLogger + state connectivity.State + cc balancer.ClientConn + subConn balancer.SubConn +} + +func (b *pickfirstBalancer) ResolverError(err error) { + if b.logger.V(2) { + b.logger.Infof("Received error from the name resolver: %v", err) + } + if b.subConn == nil { + b.state = connectivity.TransientFailure + } + + if b.state != connectivity.TransientFailure { + // The picker will not change since the balancer does not currently + // report an error. 
+ return + } + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, + }) +} + +func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { + addrs := state.ResolverState.Addresses + if len(addrs) == 0 { + // The resolver reported an empty address list. Treat it like an error by + // calling b.ResolverError. + if b.subConn != nil { + // Shut down the old subConn. All addresses were removed, so it is + // no longer valid. + b.subConn.Shutdown() + b.subConn = nil + } + b.ResolverError(errors.New("produced zero addresses")) + return balancer.ErrBadResolverState + } + + // We don't have to guard this block with the env var because ParseConfig + // already does so. + cfg, ok := state.BalancerConfig.(pfConfig) + if state.BalancerConfig != nil && !ok { + return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig) + } + if cfg.ShuffleAddressList { + addrs = append([]resolver.Address{}, addrs...) + grpcrand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] }) + } + + if b.logger.V(2) { + b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState)) + } + + if b.subConn != nil { + b.cc.UpdateAddresses(b.subConn, addrs) + return nil + } + + var subConn balancer.SubConn + subConn, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{ + StateListener: func(state balancer.SubConnState) { + b.updateSubConnState(subConn, state) + }, + }) + if err != nil { + if b.logger.V(2) { + b.logger.Infof("Failed to create new SubConn: %v", err) + } + b.state = connectivity.TransientFailure + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)}, + }) + return balancer.ErrBadResolverState + } + b.subConn = subConn + b.state = connectivity.Idle + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) + b.subConn.Connect() + return nil +} + +// UpdateSubConnState is unused as a StateListener is always registered when +// creating SubConns. +func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { + b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state) +} + +func (b *pickfirstBalancer) updateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { + if b.logger.V(2) { + b.logger.Infof("Received SubConn state update: %p, %+v", subConn, state) + } + if b.subConn != subConn { + if b.logger.V(2) { + b.logger.Infof("Ignored state change because subConn is not recognized") + } + return + } + if state.ConnectivityState == connectivity.Shutdown { + b.subConn = nil + return + } + + switch state.ConnectivityState { + case connectivity.Ready: + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &picker{result: balancer.PickResult{SubConn: subConn}}, + }) + case connectivity.Connecting: + if b.state == connectivity.TransientFailure { + // We stay in TransientFailure until we are Ready. See A62. 
+ return + } + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) + case connectivity.Idle: + if b.state == connectivity.TransientFailure { + // We stay in TransientFailure until we are Ready. Also kick the + // subConn out of Idle into Connecting. See A62. + b.subConn.Connect() + return + } + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &idlePicker{subConn: subConn}, + }) + case connectivity.TransientFailure: + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &picker{err: state.ConnectionError}, + }) + } + b.state = state.ConnectivityState +} + +func (b *pickfirstBalancer) Close() { +} + +func (b *pickfirstBalancer) ExitIdle() { + if b.subConn != nil && b.state == connectivity.Idle { + b.subConn.Connect() + } +} + +type picker struct { + result balancer.PickResult + err error +} + +func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + return p.result, p.err +} + +// idlePicker is used when the SubConn is IDLE and kicks the SubConn into +// CONNECTING when Pick is called. +type idlePicker struct { + subConn balancer.SubConn +} + +func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + i.subConn.Connect() + return balancer.PickResult{}, balancer.ErrNoSubConnAvailable +} + +func init() { + balancer.Register(newPickfirstBuilder()) +} diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go new file mode 100644 index 00000000..73bd6336 --- /dev/null +++ b/vendor/google.golang.org/grpc/preloader.go @@ -0,0 +1,67 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// PreparedMsg is responsible for creating a Marshalled and Compressed object. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type PreparedMsg struct { + // Struct for preparing msg before sending them + encodedData []byte + hdr []byte + payload []byte +} + +// Encode marshalls and compresses the message using the codec and compressor for the stream. 
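
Illustrative aside, not part of the vendored patch: a minimal sketch of selecting the pick_first policy and its shuffleAddressList knob (the pfConfig shown above) through a default service config at dial time. The target and insecure credentials are placeholders, and in this vendored gRPC version the option may additionally be gated behind an experimental environment variable.

package example

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func dialPickFirst(target string) (*grpc.ClientConn, error) {
	// This JSON is unmarshalled into pfConfig by pickfirstBuilder.ParseConfig.
	const sc = `{"loadBalancingConfig": [{"pick_first": {"shuffleAddressList": true}}]}`
	return grpc.Dial(target,
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(sc),
	)
}
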
+func (p *PreparedMsg) Encode(s Stream, msg any) error { + ctx := s.Context() + rpcInfo, ok := rpcInfoFromContext(ctx) + if !ok { + return status.Errorf(codes.Internal, "grpc: unable to get rpcInfo") + } + + // check if the context has the relevant information to prepareMsg + if rpcInfo.preloaderInfo == nil { + return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo is nil") + } + if rpcInfo.preloaderInfo.codec == nil { + return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo.codec is nil") + } + + // prepare the msg + data, err := encode(rpcInfo.preloaderInfo.codec, msg) + if err != nil { + return err + } + p.encodedData = data + compData, err := compress(data, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp) + if err != nil { + return err + } + p.hdr, p.payload = msgHeader(data, compData) + return nil +} diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh new file mode 100644 index 00000000..a6f26c8a --- /dev/null +++ b/vendor/google.golang.org/grpc/regenerate.sh @@ -0,0 +1,123 @@ +#!/bin/bash +# Copyright 2020 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eu -o pipefail + +WORKDIR=$(mktemp -d) + +function finish { + rm -rf "$WORKDIR" +} +trap finish EXIT + +export GOBIN=${WORKDIR}/bin +export PATH=${GOBIN}:${PATH} +mkdir -p ${GOBIN} + +echo "remove existing generated files" +# grpc_testing_not_regenerate/*.pb.go is not re-generated, +# see grpc_testing_not_regenerate/README.md for details. +rm -f $(find . -name '*.pb.go' | grep -v 'grpc_testing_not_regenerate') + +echo "go install google.golang.org/protobuf/cmd/protoc-gen-go" +(cd test/tools && go install google.golang.org/protobuf/cmd/protoc-gen-go) + +echo "go install cmd/protoc-gen-go-grpc" +(cd cmd/protoc-gen-go-grpc && go install .) 
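
Illustrative aside, not part of the vendored patch: a sketch of how the PreparedMsg defined above might be used on a server stream. The stream and message values are placeholders, and it assumes (as in this gRPC version) that server-side SendMsg recognizes a *PreparedMsg and reuses its pre-encoded header and payload instead of marshaling again.

package example

import "google.golang.org/grpc"

// sendPrepared marshals and compresses msg once via PreparedMsg.Encode and
// then hands the prepared message to SendMsg. grpc.ServerStream satisfies the
// grpc.Stream interface that Encode expects.
func sendPrepared(ss grpc.ServerStream, msg any) error {
	var prep grpc.PreparedMsg
	if err := prep.Encode(ss, msg); err != nil {
		return err
	}
	return ss.SendMsg(&prep) // assumed: SendMsg detects *PreparedMsg and skips re-encoding
}
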
+ +echo "git clone https://github.com/grpc/grpc-proto" +git clone --quiet https://github.com/grpc/grpc-proto ${WORKDIR}/grpc-proto + +echo "git clone https://github.com/protocolbuffers/protobuf" +git clone --quiet https://github.com/protocolbuffers/protobuf ${WORKDIR}/protobuf + +# Pull in code.proto as a proto dependency +mkdir -p ${WORKDIR}/googleapis/google/rpc +echo "curl https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto" +curl --silent https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto > ${WORKDIR}/googleapis/google/rpc/code.proto + +mkdir -p ${WORKDIR}/out + +# Generates sources without the embed requirement +LEGACY_SOURCES=( + ${WORKDIR}/grpc-proto/grpc/binlog/v1/binarylog.proto + ${WORKDIR}/grpc-proto/grpc/channelz/v1/channelz.proto + ${WORKDIR}/grpc-proto/grpc/health/v1/health.proto + ${WORKDIR}/grpc-proto/grpc/lb/v1/load_balancer.proto + profiling/proto/service.proto + ${WORKDIR}/grpc-proto/grpc/reflection/v1alpha/reflection.proto + ${WORKDIR}/grpc-proto/grpc/reflection/v1/reflection.proto +) + +# Generates only the new gRPC Service symbols +SOURCES=( + $(git ls-files --exclude-standard --cached --others "*.proto" | grep -v '^\(profiling/proto/service.proto\|reflection/grpc_reflection_v1alpha/reflection.proto\)$') + ${WORKDIR}/grpc-proto/grpc/gcp/altscontext.proto + ${WORKDIR}/grpc-proto/grpc/gcp/handshaker.proto + ${WORKDIR}/grpc-proto/grpc/gcp/transport_security_common.proto + ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls.proto + ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls_config.proto + ${WORKDIR}/grpc-proto/grpc/testing/*.proto + ${WORKDIR}/grpc-proto/grpc/core/*.proto +) + +# These options of the form 'Mfoo.proto=bar' instruct the codegen to use an +# import path of 'bar' in the generated code when 'foo.proto' is imported in +# one of the sources. +# +# Note that the protos listed here are all for testing purposes. All protos to +# be used externally should have a go_package option (and they don't need to be +# listed here). +OPTS=Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core,\ +Mgrpc/testing/benchmark_service.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/stats.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/report_qps_scenario_service.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/messages.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/worker_service.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/control.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/test.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/payloads.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/empty.proto=google.golang.org/grpc/interop/grpc_testing + +for src in ${SOURCES[@]}; do + echo "protoc ${src}" + protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS}:${WORKDIR}/out \ + -I"." \ + -I${WORKDIR}/grpc-proto \ + -I${WORKDIR}/googleapis \ + -I${WORKDIR}/protobuf/src \ + ${src} +done + +for src in ${LEGACY_SOURCES[@]}; do + echo "protoc ${src}" + protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS},require_unimplemented_servers=false:${WORKDIR}/out \ + -I"." \ + -I${WORKDIR}/grpc-proto \ + -I${WORKDIR}/googleapis \ + -I${WORKDIR}/protobuf/src \ + ${src} +done + +# The go_package option in grpc/lookup/v1/rls.proto doesn't match the +# current location. Move it into the right place. 
+mkdir -p ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 +mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 + +# grpc_testing_not_regenerate/*.pb.go are not re-generated, +# see grpc_testing_not_regenerate/README.md for details. +rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testing_not_regenerate/*.pb.go + +cp -R ${WORKDIR}/out/google.golang.org/grpc/* . diff --git a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go new file mode 100644 index 00000000..14aa6f20 --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go @@ -0,0 +1,36 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package dns implements a dns resolver to be installed as the default resolver +// in grpc. +// +// Deprecated: this package is imported by grpc and should not need to be +// imported directly by users. +package dns + +import ( + "google.golang.org/grpc/internal/resolver/dns" + "google.golang.org/grpc/resolver" +) + +// NewBuilder creates a dnsBuilder which is used to factory DNS resolvers. +// +// Deprecated: import grpc and use resolver.Get("dns") instead. +func NewBuilder() resolver.Builder { + return dns.NewBuilder() +} diff --git a/vendor/google.golang.org/grpc/resolver/map.go b/vendor/google.golang.org/grpc/resolver/map.go new file mode 100644 index 00000000..ada5b9bb --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/map.go @@ -0,0 +1,251 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package resolver + +type addressMapEntry struct { + addr Address + value any +} + +// AddressMap is a map of addresses to arbitrary values taking into account +// Attributes. BalancerAttributes are ignored, as are Metadata and Type. +// Multiple accesses may not be performed concurrently. Must be created via +// NewAddressMap; do not construct directly. +type AddressMap struct { + // The underlying map is keyed by an Address with fields that we don't care + // about being set to their zero values. The only fields that we care about + // are `Addr`, `ServerName` and `Attributes`. Since we need to be able to + // distinguish between addresses with same `Addr` and `ServerName`, but + // different `Attributes`, we cannot store the `Attributes` in the map key. 
+ // + // The comparison operation for structs work as follows: + // Struct values are comparable if all their fields are comparable. Two + // struct values are equal if their corresponding non-blank fields are equal. + // + // The value type of the map contains a slice of addresses which match the key + // in their `Addr` and `ServerName` fields and contain the corresponding value + // associated with them. + m map[Address]addressMapEntryList +} + +func toMapKey(addr *Address) Address { + return Address{Addr: addr.Addr, ServerName: addr.ServerName} +} + +type addressMapEntryList []*addressMapEntry + +// NewAddressMap creates a new AddressMap. +func NewAddressMap() *AddressMap { + return &AddressMap{m: make(map[Address]addressMapEntryList)} +} + +// find returns the index of addr in the addressMapEntry slice, or -1 if not +// present. +func (l addressMapEntryList) find(addr Address) int { + for i, entry := range l { + // Attributes are the only thing to match on here, since `Addr` and + // `ServerName` are already equal. + if entry.addr.Attributes.Equal(addr.Attributes) { + return i + } + } + return -1 +} + +// Get returns the value for the address in the map, if present. +func (a *AddressMap) Get(addr Address) (value any, ok bool) { + addrKey := toMapKey(&addr) + entryList := a.m[addrKey] + if entry := entryList.find(addr); entry != -1 { + return entryList[entry].value, true + } + return nil, false +} + +// Set updates or adds the value to the address in the map. +func (a *AddressMap) Set(addr Address, value any) { + addrKey := toMapKey(&addr) + entryList := a.m[addrKey] + if entry := entryList.find(addr); entry != -1 { + entryList[entry].value = value + return + } + a.m[addrKey] = append(entryList, &addressMapEntry{addr: addr, value: value}) +} + +// Delete removes addr from the map. +func (a *AddressMap) Delete(addr Address) { + addrKey := toMapKey(&addr) + entryList := a.m[addrKey] + entry := entryList.find(addr) + if entry == -1 { + return + } + if len(entryList) == 1 { + entryList = nil + } else { + copy(entryList[entry:], entryList[entry+1:]) + entryList = entryList[:len(entryList)-1] + } + a.m[addrKey] = entryList +} + +// Len returns the number of entries in the map. +func (a *AddressMap) Len() int { + ret := 0 + for _, entryList := range a.m { + ret += len(entryList) + } + return ret +} + +// Keys returns a slice of all current map keys. +func (a *AddressMap) Keys() []Address { + ret := make([]Address, 0, a.Len()) + for _, entryList := range a.m { + for _, entry := range entryList { + ret = append(ret, entry.addr) + } + } + return ret +} + +// Values returns a slice of all current map values. +func (a *AddressMap) Values() []any { + ret := make([]any, 0, a.Len()) + for _, entryList := range a.m { + for _, entry := range entryList { + ret = append(ret, entry.value) + } + } + return ret +} + +type endpointNode struct { + addrs map[string]struct{} +} + +// Equal returns whether the unordered set of addrs are the same between the +// endpoint nodes. 
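
Illustrative aside, not part of the vendored patch: how the AddressMap above keys its entries. Two addresses that share Addr and ServerName but carry different Attributes are stored as separate entries; the addresses and values below are made up for the sketch.

package example

import (
	"fmt"

	"google.golang.org/grpc/attributes"
	"google.golang.org/grpc/resolver"
)

func addressMapDemo() {
	m := resolver.NewAddressMap()
	a1 := resolver.Address{Addr: "10.0.0.1:443", Attributes: attributes.New("zone", "a")}
	a2 := resolver.Address{Addr: "10.0.0.1:443", Attributes: attributes.New("zone", "b")}
	m.Set(a1, "conn-a")
	m.Set(a2, "conn-b")
	fmt.Println(m.Len()) // 2: same Addr, different Attributes, so two entries
	if v, ok := m.Get(a2); ok {
		fmt.Println(v) // conn-b
	}
}
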
+func (en *endpointNode) Equal(en2 *endpointNode) bool { + if len(en.addrs) != len(en2.addrs) { + return false + } + for addr := range en.addrs { + if _, ok := en2.addrs[addr]; !ok { + return false + } + } + return true +} + +func toEndpointNode(endpoint Endpoint) endpointNode { + en := make(map[string]struct{}) + for _, addr := range endpoint.Addresses { + en[addr.Addr] = struct{}{} + } + return endpointNode{ + addrs: en, + } +} + +// EndpointMap is a map of endpoints to arbitrary values keyed on only the +// unordered set of address strings within an endpoint. This map is not thread +// safe, thus it is unsafe to access concurrently. Must be created via +// NewEndpointMap; do not construct directly. +type EndpointMap struct { + endpoints map[*endpointNode]any +} + +// NewEndpointMap creates a new EndpointMap. +func NewEndpointMap() *EndpointMap { + return &EndpointMap{ + endpoints: make(map[*endpointNode]any), + } +} + +// Get returns the value for the address in the map, if present. +func (em *EndpointMap) Get(e Endpoint) (value any, ok bool) { + en := toEndpointNode(e) + if endpoint := em.find(en); endpoint != nil { + return em.endpoints[endpoint], true + } + return nil, false +} + +// Set updates or adds the value to the address in the map. +func (em *EndpointMap) Set(e Endpoint, value any) { + en := toEndpointNode(e) + if endpoint := em.find(en); endpoint != nil { + em.endpoints[endpoint] = value + return + } + em.endpoints[&en] = value +} + +// Len returns the number of entries in the map. +func (em *EndpointMap) Len() int { + return len(em.endpoints) +} + +// Keys returns a slice of all current map keys, as endpoints specifying the +// addresses present in the endpoint keys, in which uniqueness is determined by +// the unordered set of addresses. Thus, endpoint information returned is not +// the full endpoint data (drops duplicated addresses and attributes) but can be +// used for EndpointMap accesses. +func (em *EndpointMap) Keys() []Endpoint { + ret := make([]Endpoint, 0, len(em.endpoints)) + for en := range em.endpoints { + var endpoint Endpoint + for addr := range en.addrs { + endpoint.Addresses = append(endpoint.Addresses, Address{Addr: addr}) + } + ret = append(ret, endpoint) + } + return ret +} + +// Values returns a slice of all current map values. +func (em *EndpointMap) Values() []any { + ret := make([]any, 0, len(em.endpoints)) + for _, val := range em.endpoints { + ret = append(ret, val) + } + return ret +} + +// find returns a pointer to the endpoint node in em if the endpoint node is +// already present. If not found, nil is returned. The comparisons are done on +// the unordered set of addresses within an endpoint. +func (em EndpointMap) find(e endpointNode) *endpointNode { + for endpoint := range em.endpoints { + if e.Equal(endpoint) { + return endpoint + } + } + return nil +} + +// Delete removes the specified endpoint from the map. +func (em *EndpointMap) Delete(e Endpoint) { + en := toEndpointNode(e) + if entry := em.find(en); entry != nil { + delete(em.endpoints, entry) + } +} diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go new file mode 100644 index 00000000..adf89dd9 --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -0,0 +1,326 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package resolver defines APIs for name resolution in gRPC. +// All APIs in this package are experimental. +package resolver + +import ( + "context" + "fmt" + "net" + "net/url" + "strings" + + "google.golang.org/grpc/attributes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/serviceconfig" +) + +var ( + // m is a map from scheme to resolver builder. + m = make(map[string]Builder) + // defaultScheme is the default scheme to use. + defaultScheme = "passthrough" +) + +// TODO(bar) install dns resolver in init(){}. + +// Register registers the resolver builder to the resolver map. b.Scheme will +// be used as the scheme registered with this builder. The registry is case +// sensitive, and schemes should not contain any uppercase characters. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple Resolvers are +// registered with the same name, the one registered last will take effect. +func Register(b Builder) { + m[b.Scheme()] = b +} + +// Get returns the resolver builder registered with the given scheme. +// +// If no builder is register with the scheme, nil will be returned. +func Get(scheme string) Builder { + if b, ok := m[scheme]; ok { + return b + } + return nil +} + +// SetDefaultScheme sets the default scheme that will be used. The default +// default scheme is "passthrough". +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. The scheme set last overrides +// previously set values. +func SetDefaultScheme(scheme string) { + defaultScheme = scheme +} + +// GetDefaultScheme gets the default scheme that will be used. +func GetDefaultScheme() string { + return defaultScheme +} + +// Address represents a server the client connects to. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type Address struct { + // Addr is the server address on which a connection will be established. + Addr string + + // ServerName is the name of this address. + // If non-empty, the ServerName is used as the transport certification authority for + // the address, instead of the hostname from the Dial target string. In most cases, + // this should not be set. + // + // WARNING: ServerName must only be populated with trusted values. It + // is insecure to populate it with data from untrusted inputs since untrusted + // values could be used to bypass the authority checks performed by TLS. + ServerName string + + // Attributes contains arbitrary data about this address intended for + // consumption by the SubConn. + Attributes *attributes.Attributes + + // BalancerAttributes contains arbitrary data about this address intended + // for consumption by the LB policy. These attributes do not affect SubConn + // creation, connection establishment, handshaking, etc. + // + // Deprecated: when an Address is inside an Endpoint, this field should not + // be used, and it will eventually be removed entirely. 
+ BalancerAttributes *attributes.Attributes + + // Metadata is the information associated with Addr, which may be used + // to make load balancing decision. + // + // Deprecated: use Attributes instead. + Metadata any +} + +// Equal returns whether a and o are identical. Metadata is compared directly, +// not with any recursive introspection. +// +// This method compares all fields of the address. When used to tell apart +// addresses during subchannel creation or connection establishment, it might be +// more appropriate for the caller to implement custom equality logic. +func (a Address) Equal(o Address) bool { + return a.Addr == o.Addr && a.ServerName == o.ServerName && + a.Attributes.Equal(o.Attributes) && + a.BalancerAttributes.Equal(o.BalancerAttributes) && + a.Metadata == o.Metadata +} + +// String returns JSON formatted string representation of the address. +func (a Address) String() string { + var sb strings.Builder + sb.WriteString(fmt.Sprintf("{Addr: %q, ", a.Addr)) + sb.WriteString(fmt.Sprintf("ServerName: %q, ", a.ServerName)) + if a.Attributes != nil { + sb.WriteString(fmt.Sprintf("Attributes: %v, ", a.Attributes.String())) + } + if a.BalancerAttributes != nil { + sb.WriteString(fmt.Sprintf("BalancerAttributes: %v", a.BalancerAttributes.String())) + } + sb.WriteString("}") + return sb.String() +} + +// BuildOptions includes additional information for the builder to create +// the resolver. +type BuildOptions struct { + // DisableServiceConfig indicates whether a resolver implementation should + // fetch service config data. + DisableServiceConfig bool + // DialCreds is the transport credentials used by the ClientConn for + // communicating with the target gRPC service (set via + // WithTransportCredentials). In cases where a name resolution service + // requires the same credentials, the resolver may use this field. In most + // cases though, it is not appropriate, and this field may be ignored. + DialCreds credentials.TransportCredentials + // CredsBundle is the credentials bundle used by the ClientConn for + // communicating with the target gRPC service (set via + // WithCredentialsBundle). In cases where a name resolution service + // requires the same credentials, the resolver may use this field. In most + // cases though, it is not appropriate, and this field may be ignored. + CredsBundle credentials.Bundle + // Dialer is the custom dialer used by the ClientConn for dialling the + // target gRPC service (set via WithDialer). In cases where a name + // resolution service requires the same dialer, the resolver may use this + // field. In most cases though, it is not appropriate, and this field may + // be ignored. + Dialer func(context.Context, string) (net.Conn, error) +} + +// An Endpoint is one network endpoint, or server, which may have multiple +// addresses with which it can be accessed. +type Endpoint struct { + // Addresses contains a list of addresses used to access this endpoint. + Addresses []Address + + // Attributes contains arbitrary data about this endpoint intended for + // consumption by the LB policy. + Attributes *attributes.Attributes +} + +// State contains the current Resolver state relevant to the ClientConn. +type State struct { + // Addresses is the latest set of resolved addresses for the target. + // + // If a resolver sets Addresses but does not set Endpoints, one Endpoint + // will be created for each Address before the State is passed to the LB + // policy. 
The BalancerAttributes of each entry in Addresses will be set + // in Endpoints.Attributes, and be cleared in the Endpoint's Address's + // BalancerAttributes. + // + // Soon, Addresses will be deprecated and replaced fully by Endpoints. + Addresses []Address + + // Endpoints is the latest set of resolved endpoints for the target. + // + // If a resolver produces a State containing Endpoints but not Addresses, + // it must take care to ensure the LB policies it selects will support + // Endpoints. + Endpoints []Endpoint + + // ServiceConfig contains the result from parsing the latest service + // config. If it is nil, it indicates no service config is present or the + // resolver does not provide service configs. + ServiceConfig *serviceconfig.ParseResult + + // Attributes contains arbitrary data about the resolver intended for + // consumption by the load balancing policy. + Attributes *attributes.Attributes +} + +// ClientConn contains the callbacks for resolver to notify any updates +// to the gRPC ClientConn. +// +// This interface is to be implemented by gRPC. Users should not need a +// brand new implementation of this interface. For the situations like +// testing, the new implementation should embed this interface. This allows +// gRPC to add new methods to this interface. +type ClientConn interface { + // UpdateState updates the state of the ClientConn appropriately. + // + // If an error is returned, the resolver should try to resolve the + // target again. The resolver should use a backoff timer to prevent + // overloading the server with requests. If a resolver is certain that + // reresolving will not change the result, e.g. because it is + // a watch-based resolver, returned errors can be ignored. + // + // If the resolved State is the same as the last reported one, calling + // UpdateState can be omitted. + UpdateState(State) error + // ReportError notifies the ClientConn that the Resolver encountered an + // error. The ClientConn will notify the load balancer and begin calling + // ResolveNow on the Resolver with exponential backoff. + ReportError(error) + // NewAddress is called by resolver to notify ClientConn a new list + // of resolved addresses. + // The address list should be the complete list of resolved addresses. + // + // Deprecated: Use UpdateState instead. + NewAddress(addresses []Address) + // ParseServiceConfig parses the provided service config and returns an + // object that provides the parsed config. + ParseServiceConfig(serviceConfigJSON string) *serviceconfig.ParseResult +} + +// Target represents a target for gRPC, as specified in: +// https://github.com/grpc/grpc/blob/master/doc/naming.md. +// It is parsed from the target string that gets passed into Dial or DialContext +// by the user. And gRPC passes it to the resolver and the balancer. +// +// If the target follows the naming spec, and the parsed scheme is registered +// with gRPC, we will parse the target string according to the spec. If the +// target does not contain a scheme or if the parsed scheme is not registered +// (i.e. no corresponding resolver available to resolve the endpoint), we will +// apply the default scheme, and will attempt to reparse it. +type Target struct { + // URL contains the parsed dial target with an optional default scheme added + // to it if the original dial target contained no scheme or contained an + // unregistered scheme. Any query params specified in the original dial + // target can be accessed from here. 
+ URL url.URL +} + +// Endpoint retrieves endpoint without leading "/" from either `URL.Path` +// or `URL.Opaque`. The latter is used when the former is empty. +func (t Target) Endpoint() string { + endpoint := t.URL.Path + if endpoint == "" { + endpoint = t.URL.Opaque + } + // For targets of the form "[scheme]://[authority]/endpoint, the endpoint + // value returned from url.Parse() contains a leading "/". Although this is + // in accordance with RFC 3986, we do not want to break existing resolver + // implementations which expect the endpoint without the leading "/". So, we + // end up stripping the leading "/" here. But this will result in an + // incorrect parsing for something like "unix:///path/to/socket". Since we + // own the "unix" resolver, we can workaround in the unix resolver by using + // the `URL` field. + return strings.TrimPrefix(endpoint, "/") +} + +// String returns a string representation of Target. +func (t Target) String() string { + return t.URL.String() +} + +// Builder creates a resolver that will be used to watch name resolution updates. +type Builder interface { + // Build creates a new resolver for the given target. + // + // gRPC dial calls Build synchronously, and fails if the returned error is + // not nil. + Build(target Target, cc ClientConn, opts BuildOptions) (Resolver, error) + // Scheme returns the scheme supported by this resolver. Scheme is defined + // at https://github.com/grpc/grpc/blob/master/doc/naming.md. The returned + // string should not contain uppercase characters, as they will not match + // the parsed target's scheme as defined in RFC 3986. + Scheme() string +} + +// ResolveNowOptions includes additional information for ResolveNow. +type ResolveNowOptions struct{} + +// Resolver watches for the updates on the specified target. +// Updates include address updates and service config updates. +type Resolver interface { + // ResolveNow will be called by gRPC to try to resolve the target name + // again. It's just a hint, resolver can ignore this if it's not necessary. + // + // It could be called multiple times concurrently. + ResolveNow(ResolveNowOptions) + // Close closes the resolver. + Close() +} + +// AuthorityOverrider is implemented by Builders that wish to override the +// default authority for the ClientConn. +// By default, the authority used is target.Endpoint(). +type AuthorityOverrider interface { + // OverrideAuthority returns the authority to use for a ClientConn with the + // given target. The implementation must generate it without blocking, + // typically in line, and must keep it unchanged. + OverrideAuthority(Target) string +} diff --git a/vendor/google.golang.org/grpc/resolver_wrapper.go b/vendor/google.golang.org/grpc/resolver_wrapper.go new file mode 100644 index 00000000..c79bab12 --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver_wrapper.go @@ -0,0 +1,197 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
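
Illustrative aside, not part of the vendored patch: a minimal static resolver wired up through the Builder and Resolver interfaces above. The "static" scheme and the hard-coded backend addresses are assumptions made for the sketch; a real resolver would watch its source and call UpdateState on every change.

package example

import "google.golang.org/grpc/resolver"

type staticBuilder struct{}

func (staticBuilder) Scheme() string { return "static" }

func (staticBuilder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) {
	// Report one fixed address list up front.
	err := cc.UpdateState(resolver.State{Addresses: []resolver.Address{
		{Addr: "10.0.0.1:50051"},
		{Addr: "10.0.0.2:50051"},
	}})
	return &staticResolver{}, err
}

type staticResolver struct{}

func (*staticResolver) ResolveNow(resolver.ResolveNowOptions) {} // nothing to refresh
func (*staticResolver) Close()                                {}

func init() { resolver.Register(staticBuilder{}) } // init-time registration, per the Register docs
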
+ * + */ + +package grpc + +import ( + "context" + "strings" + "sync" + + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +// ccResolverWrapper is a wrapper on top of cc for resolvers. +// It implements resolver.ClientConn interface. +type ccResolverWrapper struct { + // The following fields are initialized when the wrapper is created and are + // read-only afterwards, and therefore can be accessed without a mutex. + cc *ClientConn + ignoreServiceConfig bool + serializer *grpcsync.CallbackSerializer + serializerCancel context.CancelFunc + + resolver resolver.Resolver // only accessed within the serializer + + // The following fields are protected by mu. Caller must take cc.mu before + // taking mu. + mu sync.Mutex + curState resolver.State + closed bool +} + +// newCCResolverWrapper initializes the ccResolverWrapper. It can only be used +// after calling start, which builds the resolver. +func newCCResolverWrapper(cc *ClientConn) *ccResolverWrapper { + ctx, cancel := context.WithCancel(cc.ctx) + return &ccResolverWrapper{ + cc: cc, + ignoreServiceConfig: cc.dopts.disableServiceConfig, + serializer: grpcsync.NewCallbackSerializer(ctx), + serializerCancel: cancel, + } +} + +// start builds the name resolver using the resolver.Builder in cc and returns +// any error encountered. It must always be the first operation performed on +// any newly created ccResolverWrapper, except that close may be called instead. +func (ccr *ccResolverWrapper) start() error { + errCh := make(chan error) + ccr.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil { + return + } + opts := resolver.BuildOptions{ + DisableServiceConfig: ccr.cc.dopts.disableServiceConfig, + DialCreds: ccr.cc.dopts.copts.TransportCredentials, + CredsBundle: ccr.cc.dopts.copts.CredsBundle, + Dialer: ccr.cc.dopts.copts.Dialer, + } + var err error + ccr.resolver, err = ccr.cc.resolverBuilder.Build(ccr.cc.parsedTarget, ccr, opts) + errCh <- err + }) + return <-errCh +} + +func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { + ccr.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil || ccr.resolver == nil { + return + } + ccr.resolver.ResolveNow(o) + }) +} + +// close initiates async shutdown of the wrapper. To determine the wrapper has +// finished shutting down, the channel should block on ccr.serializer.Done() +// without cc.mu held. +func (ccr *ccResolverWrapper) close() { + channelz.Info(logger, ccr.cc.channelzID, "Closing the name resolver") + ccr.mu.Lock() + ccr.closed = true + ccr.mu.Unlock() + + ccr.serializer.Schedule(func(context.Context) { + if ccr.resolver == nil { + return + } + ccr.resolver.Close() + ccr.resolver = nil + }) + ccr.serializerCancel() +} + +// UpdateState is called by resolver implementations to report new state to gRPC +// which includes addresses and service config. 
+func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { + ccr.cc.mu.Lock() + ccr.mu.Lock() + if ccr.closed { + ccr.mu.Unlock() + ccr.cc.mu.Unlock() + return nil + } + if s.Endpoints == nil { + s.Endpoints = make([]resolver.Endpoint, 0, len(s.Addresses)) + for _, a := range s.Addresses { + ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes} + ep.Addresses[0].BalancerAttributes = nil + s.Endpoints = append(s.Endpoints, ep) + } + } + ccr.addChannelzTraceEvent(s) + ccr.curState = s + ccr.mu.Unlock() + return ccr.cc.updateResolverStateAndUnlock(s, nil) +} + +// ReportError is called by resolver implementations to report errors +// encountered during name resolution to gRPC. +func (ccr *ccResolverWrapper) ReportError(err error) { + ccr.cc.mu.Lock() + ccr.mu.Lock() + if ccr.closed { + ccr.mu.Unlock() + ccr.cc.mu.Unlock() + return + } + ccr.mu.Unlock() + channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) + ccr.cc.updateResolverStateAndUnlock(resolver.State{}, err) +} + +// NewAddress is called by the resolver implementation to send addresses to +// gRPC. +func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { + ccr.cc.mu.Lock() + ccr.mu.Lock() + if ccr.closed { + ccr.mu.Unlock() + ccr.cc.mu.Unlock() + return + } + s := resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig} + ccr.addChannelzTraceEvent(s) + ccr.curState = s + ccr.mu.Unlock() + ccr.cc.updateResolverStateAndUnlock(s, nil) +} + +// ParseServiceConfig is called by resolver implementations to parse a JSON +// representation of the service config. +func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { + return parseServiceConfig(scJSON) +} + +// addChannelzTraceEvent adds a channelz trace event containing the new +// state received from resolver implementations. +func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { + var updates []string + var oldSC, newSC *ServiceConfig + var oldOK, newOK bool + if ccr.curState.ServiceConfig != nil { + oldSC, oldOK = ccr.curState.ServiceConfig.Config.(*ServiceConfig) + } + if s.ServiceConfig != nil { + newSC, newOK = s.ServiceConfig.Config.(*ServiceConfig) + } + if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) { + updates = append(updates, "service config updated") + } + if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 { + updates = append(updates, "resolver returned an empty address list") + } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { + updates = append(updates, "resolver returned new addresses") + } + channelz.Infof(logger, ccr.cc.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) +} diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go new file mode 100644 index 00000000..a4b6bc68 --- /dev/null +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -0,0 +1,963 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "bytes" + "compress/gzip" + "context" + "encoding/binary" + "fmt" + "io" + "math" + "strings" + "sync" + "time" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/encoding" + "google.golang.org/grpc/encoding/proto" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +// Compressor defines the interface gRPC uses to compress a message. +// +// Deprecated: use package encoding. +type Compressor interface { + // Do compresses p into w. + Do(w io.Writer, p []byte) error + // Type returns the compression algorithm the Compressor uses. + Type() string +} + +type gzipCompressor struct { + pool sync.Pool +} + +// NewGZIPCompressor creates a Compressor based on GZIP. +// +// Deprecated: use package encoding/gzip. +func NewGZIPCompressor() Compressor { + c, _ := NewGZIPCompressorWithLevel(gzip.DefaultCompression) + return c +} + +// NewGZIPCompressorWithLevel is like NewGZIPCompressor but specifies the gzip compression level instead +// of assuming DefaultCompression. +// +// The error returned will be nil if the level is valid. +// +// Deprecated: use package encoding/gzip. +func NewGZIPCompressorWithLevel(level int) (Compressor, error) { + if level < gzip.DefaultCompression || level > gzip.BestCompression { + return nil, fmt.Errorf("grpc: invalid compression level: %d", level) + } + return &gzipCompressor{ + pool: sync.Pool{ + New: func() any { + w, err := gzip.NewWriterLevel(io.Discard, level) + if err != nil { + panic(err) + } + return w + }, + }, + }, nil +} + +func (c *gzipCompressor) Do(w io.Writer, p []byte) error { + z := c.pool.Get().(*gzip.Writer) + defer c.pool.Put(z) + z.Reset(w) + if _, err := z.Write(p); err != nil { + return err + } + return z.Close() +} + +func (c *gzipCompressor) Type() string { + return "gzip" +} + +// Decompressor defines the interface gRPC uses to decompress a message. +// +// Deprecated: use package encoding. +type Decompressor interface { + // Do reads the data from r and uncompress them. + Do(r io.Reader) ([]byte, error) + // Type returns the compression algorithm the Decompressor uses. + Type() string +} + +type gzipDecompressor struct { + pool sync.Pool +} + +// NewGZIPDecompressor creates a Decompressor based on GZIP. +// +// Deprecated: use package encoding/gzip. +func NewGZIPDecompressor() Decompressor { + return &gzipDecompressor{} +} + +func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) { + var z *gzip.Reader + switch maybeZ := d.pool.Get().(type) { + case nil: + newZ, err := gzip.NewReader(r) + if err != nil { + return nil, err + } + z = newZ + case *gzip.Reader: + z = maybeZ + if err := z.Reset(r); err != nil { + d.pool.Put(z) + return nil, err + } + } + + defer func() { + z.Close() + d.pool.Put(z) + }() + return io.ReadAll(z) +} + +func (d *gzipDecompressor) Type() string { + return "gzip" +} + +// callInfo contains all related configuration and information about an RPC. 
+type callInfo struct { + compressorType string + failFast bool + maxReceiveMessageSize *int + maxSendMessageSize *int + creds credentials.PerRPCCredentials + contentSubtype string + codec baseCodec + maxRetryRPCBufferSize int + onFinish []func(err error) +} + +func defaultCallInfo() *callInfo { + return &callInfo{ + failFast: true, + maxRetryRPCBufferSize: 256 * 1024, // 256KB + } +} + +// CallOption configures a Call before it starts or extracts information from +// a Call after it completes. +type CallOption interface { + // before is called before the call is sent to any server. If before + // returns a non-nil error, the RPC fails with that error. + before(*callInfo) error + + // after is called after the call has completed. after cannot return an + // error, so any failures should be reported via output parameters. + after(*callInfo, *csAttempt) +} + +// EmptyCallOption does not alter the Call configuration. +// It can be embedded in another structure to carry satellite data for use +// by interceptors. +type EmptyCallOption struct{} + +func (EmptyCallOption) before(*callInfo) error { return nil } +func (EmptyCallOption) after(*callInfo, *csAttempt) {} + +// Header returns a CallOptions that retrieves the header metadata +// for a unary RPC. +func Header(md *metadata.MD) CallOption { + return HeaderCallOption{HeaderAddr: md} +} + +// HeaderCallOption is a CallOption for collecting response header metadata. +// The metadata field will be populated *after* the RPC completes. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type HeaderCallOption struct { + HeaderAddr *metadata.MD +} + +func (o HeaderCallOption) before(c *callInfo) error { return nil } +func (o HeaderCallOption) after(c *callInfo, attempt *csAttempt) { + *o.HeaderAddr, _ = attempt.s.Header() +} + +// Trailer returns a CallOptions that retrieves the trailer metadata +// for a unary RPC. +func Trailer(md *metadata.MD) CallOption { + return TrailerCallOption{TrailerAddr: md} +} + +// TrailerCallOption is a CallOption for collecting response trailer metadata. +// The metadata field will be populated *after* the RPC completes. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type TrailerCallOption struct { + TrailerAddr *metadata.MD +} + +func (o TrailerCallOption) before(c *callInfo) error { return nil } +func (o TrailerCallOption) after(c *callInfo, attempt *csAttempt) { + *o.TrailerAddr = attempt.s.Trailer() +} + +// Peer returns a CallOption that retrieves peer information for a unary RPC. +// The peer field will be populated *after* the RPC completes. +func Peer(p *peer.Peer) CallOption { + return PeerCallOption{PeerAddr: p} +} + +// PeerCallOption is a CallOption for collecting the identity of the remote +// peer. The peer field will be populated *after* the RPC completes. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type PeerCallOption struct { + PeerAddr *peer.Peer +} + +func (o PeerCallOption) before(c *callInfo) error { return nil } +func (o PeerCallOption) after(c *callInfo, attempt *csAttempt) { + if x, ok := peer.FromContext(attempt.s.Context()); ok { + *o.PeerAddr = *x + } +} + +// WaitForReady configures the action to take when an RPC is attempted on broken +// connections or unreachable servers. 
If waitForReady is false and the +// connection is in the TRANSIENT_FAILURE state, the RPC will fail +// immediately. Otherwise, the RPC client will block the call until a +// connection is available (or the call is canceled or times out) and will +// retry the call if it fails due to a transient error. gRPC will not retry if +// data was written to the wire unless the server indicates it did not process +// the data. Please refer to +// https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md. +// +// By default, RPCs don't "wait for ready". +func WaitForReady(waitForReady bool) CallOption { + return FailFastCallOption{FailFast: !waitForReady} +} + +// FailFast is the opposite of WaitForReady. +// +// Deprecated: use WaitForReady. +func FailFast(failFast bool) CallOption { + return FailFastCallOption{FailFast: failFast} +} + +// FailFastCallOption is a CallOption for indicating whether an RPC should fail +// fast or not. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type FailFastCallOption struct { + FailFast bool +} + +func (o FailFastCallOption) before(c *callInfo) error { + c.failFast = o.FailFast + return nil +} +func (o FailFastCallOption) after(c *callInfo, attempt *csAttempt) {} + +// OnFinish returns a CallOption that configures a callback to be called when +// the call completes. The error passed to the callback is the status of the +// RPC, and may be nil. The onFinish callback provided will only be called once +// by gRPC. This is mainly used to be used by streaming interceptors, to be +// notified when the RPC completes along with information about the status of +// the RPC. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func OnFinish(onFinish func(err error)) CallOption { + return OnFinishCallOption{ + OnFinish: onFinish, + } +} + +// OnFinishCallOption is CallOption that indicates a callback to be called when +// the call completes. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type OnFinishCallOption struct { + OnFinish func(error) +} + +func (o OnFinishCallOption) before(c *callInfo) error { + c.onFinish = append(c.onFinish, o.OnFinish) + return nil +} + +func (o OnFinishCallOption) after(c *callInfo, attempt *csAttempt) {} + +// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size +// in bytes the client can receive. If this is not set, gRPC uses the default +// 4MB. +func MaxCallRecvMsgSize(bytes int) CallOption { + return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: bytes} +} + +// MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message +// size in bytes the client can receive. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type MaxRecvMsgSizeCallOption struct { + MaxRecvMsgSize int +} + +func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error { + c.maxReceiveMessageSize = &o.MaxRecvMsgSize + return nil +} +func (o MaxRecvMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {} + +// MaxCallSendMsgSize returns a CallOption which sets the maximum message size +// in bytes the client can send. If this is not set, gRPC uses the default +// `math.MaxInt32`. 
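
Illustrative aside, not part of the vendored patch: combining several of the CallOptions defined in this file on a single unary invocation. The connection and method name are placeholders, and the gzip option assumes the encoding/gzip package has been registered (here via a blank import).

package example

import (
	"context"

	"google.golang.org/grpc"
	_ "google.golang.org/grpc/encoding/gzip" // registers the "gzip" compressor
	"google.golang.org/grpc/metadata"
)

func callWithOptions(ctx context.Context, conn *grpc.ClientConn, req, resp any) (metadata.MD, error) {
	var header metadata.MD
	err := conn.Invoke(ctx, "/example.Service/Method", req, resp,
		grpc.WaitForReady(true),         // block on TRANSIENT_FAILURE instead of failing fast
		grpc.Header(&header),            // populated after the RPC completes
		grpc.MaxCallRecvMsgSize(16<<20), // raise the 4MB default receive limit
		grpc.UseCompressor("gzip"),      // per-call request compression
	)
	return header, err
}
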
+func MaxCallSendMsgSize(bytes int) CallOption { + return MaxSendMsgSizeCallOption{MaxSendMsgSize: bytes} +} + +// MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message +// size in bytes the client can send. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type MaxSendMsgSizeCallOption struct { + MaxSendMsgSize int +} + +func (o MaxSendMsgSizeCallOption) before(c *callInfo) error { + c.maxSendMessageSize = &o.MaxSendMsgSize + return nil +} +func (o MaxSendMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {} + +// PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials +// for a call. +func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption { + return PerRPCCredsCallOption{Creds: creds} +} + +// PerRPCCredsCallOption is a CallOption that indicates the per-RPC +// credentials to use for the call. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type PerRPCCredsCallOption struct { + Creds credentials.PerRPCCredentials +} + +func (o PerRPCCredsCallOption) before(c *callInfo) error { + c.creds = o.Creds + return nil +} +func (o PerRPCCredsCallOption) after(c *callInfo, attempt *csAttempt) {} + +// UseCompressor returns a CallOption which sets the compressor used when +// sending the request. If WithCompressor is also set, UseCompressor has +// higher priority. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func UseCompressor(name string) CallOption { + return CompressorCallOption{CompressorType: name} +} + +// CompressorCallOption is a CallOption that indicates the compressor to use. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type CompressorCallOption struct { + CompressorType string +} + +func (o CompressorCallOption) before(c *callInfo) error { + c.compressorType = o.CompressorType + return nil +} +func (o CompressorCallOption) after(c *callInfo, attempt *csAttempt) {} + +// CallContentSubtype returns a CallOption that will set the content-subtype +// for a call. For example, if content-subtype is "json", the Content-Type over +// the wire will be "application/grpc+json". The content-subtype is converted +// to lowercase before being included in Content-Type. See Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +// +// If ForceCodec is not also used, the content-subtype will be used to look up +// the Codec to use in the registry controlled by RegisterCodec. See the +// documentation on RegisterCodec for details on registration. The lookup of +// content-subtype is case-insensitive. If no such Codec is found, the call +// will result in an error with code codes.Internal. +// +// If ForceCodec is also used, that Codec will be used for all request and +// response messages, with the content-subtype set to the given contentSubtype +// here for requests. +func CallContentSubtype(contentSubtype string) CallOption { + return ContentSubtypeCallOption{ContentSubtype: strings.ToLower(contentSubtype)} +} + +// ContentSubtypeCallOption is a CallOption that indicates the content-subtype +// used for marshaling messages. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. 
+type ContentSubtypeCallOption struct { + ContentSubtype string +} + +func (o ContentSubtypeCallOption) before(c *callInfo) error { + c.contentSubtype = o.ContentSubtype + return nil +} +func (o ContentSubtypeCallOption) after(c *callInfo, attempt *csAttempt) {} + +// ForceCodec returns a CallOption that will set codec to be used for all +// request and response messages for a call. The result of calling Name() will +// be used as the content-subtype after converting to lowercase, unless +// CallContentSubtype is also used. +// +// See Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. Also see the documentation on RegisterCodec and +// CallContentSubtype for more details on the interaction between Codec and +// content-subtype. +// +// This function is provided for advanced users; prefer to use only +// CallContentSubtype to select a registered codec instead. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ForceCodec(codec encoding.Codec) CallOption { + return ForceCodecCallOption{Codec: codec} +} + +// ForceCodecCallOption is a CallOption that indicates the codec used for +// marshaling messages. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ForceCodecCallOption struct { + Codec encoding.Codec +} + +func (o ForceCodecCallOption) before(c *callInfo) error { + c.codec = o.Codec + return nil +} +func (o ForceCodecCallOption) after(c *callInfo, attempt *csAttempt) {} + +// CallCustomCodec behaves like ForceCodec, but accepts a grpc.Codec instead of +// an encoding.Codec. +// +// Deprecated: use ForceCodec instead. +func CallCustomCodec(codec Codec) CallOption { + return CustomCodecCallOption{Codec: codec} +} + +// CustomCodecCallOption is a CallOption that indicates the codec used for +// marshaling messages. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type CustomCodecCallOption struct { + Codec Codec +} + +func (o CustomCodecCallOption) before(c *callInfo) error { + c.codec = o.Codec + return nil +} +func (o CustomCodecCallOption) after(c *callInfo, attempt *csAttempt) {} + +// MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory +// used for buffering this RPC's requests for retry purposes. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func MaxRetryRPCBufferSize(bytes int) CallOption { + return MaxRetryRPCBufferSizeCallOption{bytes} +} + +// MaxRetryRPCBufferSizeCallOption is a CallOption indicating the amount of +// memory to be used for caching this RPC for retry purposes. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type MaxRetryRPCBufferSizeCallOption struct { + MaxRetryRPCBufferSize int +} + +func (o MaxRetryRPCBufferSizeCallOption) before(c *callInfo) error { + c.maxRetryRPCBufferSize = o.MaxRetryRPCBufferSize + return nil +} +func (o MaxRetryRPCBufferSizeCallOption) after(c *callInfo, attempt *csAttempt) {} + +// The format of the payload: compressed or not? +type payloadFormat uint8 + +const ( + compressionNone payloadFormat = 0 // no compression + compressionMade payloadFormat = 1 // compressed +) + +// parser reads complete gRPC messages from the underlying reader. +type parser struct { + // r is the underlying reader. 
+ // See the comment on recvMsg for the permissible + // error types. + r io.Reader + + // The header of a gRPC message. Find more detail at + // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md + header [5]byte + + // recvBufferPool is the pool of shared receive buffers. + recvBufferPool SharedBufferPool +} + +// recvMsg reads a complete gRPC message from the stream. +// +// It returns the message and its payload (compression/encoding) +// format. The caller owns the returned msg memory. +// +// If there is an error, possible values are: +// - io.EOF, when no messages remain +// - io.ErrUnexpectedEOF +// - of type transport.ConnectionError +// - an error from the status package +// +// No other error values or types must be returned, which also means +// that the underlying io.Reader must not return an incompatible +// error. +func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byte, err error) { + if _, err := p.r.Read(p.header[:]); err != nil { + return 0, nil, err + } + + pf = payloadFormat(p.header[0]) + length := binary.BigEndian.Uint32(p.header[1:]) + + if length == 0 { + return pf, nil, nil + } + if int64(length) > int64(maxInt) { + return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max length allowed on current machine (%d vs. %d)", length, maxInt) + } + if int(length) > maxReceiveMessageSize { + return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize) + } + msg = p.recvBufferPool.Get(int(length)) + if _, err := p.r.Read(msg); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return 0, nil, err + } + return pf, msg, nil +} + +// encode serializes msg and returns a buffer containing the message, or an +// error if it is too large to be transmitted by grpc. If msg is nil, it +// generates an empty message. +func encode(c baseCodec, msg any) ([]byte, error) { + if msg == nil { // NOTE: typed nils will not be caught by this check + return nil, nil + } + b, err := c.Marshal(msg) + if err != nil { + return nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error()) + } + if uint(len(b)) > math.MaxUint32 { + return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b)) + } + return b, nil +} + +// compress returns the input bytes compressed by compressor or cp. +// If both compressors are nil, or if the message has zero length, returns nil, +// indicating no compression was done. +// +// TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor. 
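// Illustrative sketch (not from the vendored gRPC source): the length-prefixed
// framing that recvMsg above reads and msgHeader just below writes. Every gRPC
// message on the wire is a 1-byte compressed flag, a 4-byte big-endian length,
// then the payload. The helper names here are made up for the example.
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// frame prepends the 5-byte gRPC message header to payload.
func frame(payload []byte, compressed bool) []byte {
	hdr := make([]byte, 5)
	if compressed {
		hdr[0] = 1 // compressionMade
	}
	binary.BigEndian.PutUint32(hdr[1:], uint32(len(payload)))
	return append(hdr, payload...)
}

// readFrame is a simplified counterpart of (*parser).recvMsg.
func readFrame(r io.Reader) (compressed bool, msg []byte, err error) {
	var hdr [5]byte
	if _, err := io.ReadFull(r, hdr[:]); err != nil {
		return false, nil, err
	}
	msg = make([]byte, binary.BigEndian.Uint32(hdr[1:]))
	if _, err := io.ReadFull(r, msg); err != nil {
		return false, nil, err
	}
	return hdr[0] == 1, msg, nil
}

func main() {
	buf := bytes.NewBuffer(frame([]byte("hello"), false))
	c, m, _ := readFrame(buf)
	fmt.Println(c, string(m)) // false hello
}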
+func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) { + if compressor == nil && cp == nil { + return nil, nil + } + if len(in) == 0 { + return nil, nil + } + wrapErr := func(err error) error { + return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error()) + } + cbuf := &bytes.Buffer{} + if compressor != nil { + z, err := compressor.Compress(cbuf) + if err != nil { + return nil, wrapErr(err) + } + if _, err := z.Write(in); err != nil { + return nil, wrapErr(err) + } + if err := z.Close(); err != nil { + return nil, wrapErr(err) + } + } else { + if err := cp.Do(cbuf, in); err != nil { + return nil, wrapErr(err) + } + } + return cbuf.Bytes(), nil +} + +const ( + payloadLen = 1 + sizeLen = 4 + headerLen = payloadLen + sizeLen +) + +// msgHeader returns a 5-byte header for the message being transmitted and the +// payload, which is compData if non-nil or data otherwise. +func msgHeader(data, compData []byte) (hdr []byte, payload []byte) { + hdr = make([]byte, headerLen) + if compData != nil { + hdr[0] = byte(compressionMade) + data = compData + } else { + hdr[0] = byte(compressionNone) + } + + // Write length of payload into buf + binary.BigEndian.PutUint32(hdr[payloadLen:], uint32(len(data))) + return hdr, data +} + +func outPayload(client bool, msg any, data, payload []byte, t time.Time) *stats.OutPayload { + return &stats.OutPayload{ + Client: client, + Payload: msg, + Data: data, + Length: len(data), + WireLength: len(payload) + headerLen, + CompressedLength: len(payload), + SentTime: t, + } +} + +func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool) *status.Status { + switch pf { + case compressionNone: + case compressionMade: + if recvCompress == "" || recvCompress == encoding.Identity { + return status.New(codes.Internal, "grpc: compressed flag set with identity or empty encoding") + } + if !haveCompressor { + return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) + } + default: + return status.Newf(codes.Internal, "grpc: received unexpected payload format %d", pf) + } + return nil +} + +type payloadInfo struct { + compressedLength int // The compressed length got from wire. + uncompressedBytes []byte +} + +func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) ([]byte, error) { + pf, buf, err := p.recvMsg(maxReceiveMessageSize) + if err != nil { + return nil, err + } + if payInfo != nil { + payInfo.compressedLength = len(buf) + } + + if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil { + return nil, st.Err() + } + + var size int + if pf == compressionMade { + // To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor, + // use this decompressor as the default. + if dc != nil { + buf, err = dc.Do(bytes.NewReader(buf)) + size = len(buf) + } else { + buf, size, err = decompress(compressor, buf, maxReceiveMessageSize) + } + if err != nil { + return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) + } + if size > maxReceiveMessageSize { + // TODO: Revisit the error code. Currently keep it consistent with java + // implementation. + return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. 
%d)", size, maxReceiveMessageSize) + } + } + return buf, nil +} + +// Using compressor, decompress d, returning data and size. +// Optionally, if data will be over maxReceiveMessageSize, just return the size. +func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize int) ([]byte, int, error) { + dcReader, err := compressor.Decompress(bytes.NewReader(d)) + if err != nil { + return nil, 0, err + } + if sizer, ok := compressor.(interface { + DecompressedSize(compressedBytes []byte) int + }); ok { + if size := sizer.DecompressedSize(d); size >= 0 { + if size > maxReceiveMessageSize { + return nil, size, nil + } + // size is used as an estimate to size the buffer, but we + // will read more data if available. + // +MinRead so ReadFrom will not reallocate if size is correct. + buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead)) + bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + return buf.Bytes(), int(bytesRead), err + } + } + // Read from LimitReader with limit max+1. So if the underlying + // reader is over limit, the result will be bigger than max. + d, err = io.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + return d, len(d), err +} + +// For the two compressor parameters, both should not be set, but if they are, +// dc takes precedence over compressor. +// TODO(dfawley): wrap the old compressor/decompressor using the new API? +func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error { + buf, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) + if err != nil { + return err + } + if err := c.Unmarshal(buf, m); err != nil { + return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err) + } + if payInfo != nil { + payInfo.uncompressedBytes = buf + } else { + p.recvBufferPool.Put(&buf) + } + return nil +} + +// Information about RPC +type rpcInfo struct { + failfast bool + preloaderInfo *compressorInfo +} + +// Information about Preloader +// Responsible for storing codec, and compressors +// If stream (s) has context s.Context which stores rpcInfo that has non nil +// pointers to codec, and compressors, then we can use preparedMsg for Async message prep +// and reuse marshalled bytes +type compressorInfo struct { + codec baseCodec + cp Compressor + comp encoding.Compressor +} + +type rpcInfoContextKey struct{} + +func newContextWithRPCInfo(ctx context.Context, failfast bool, codec baseCodec, cp Compressor, comp encoding.Compressor) context.Context { + return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{ + failfast: failfast, + preloaderInfo: &compressorInfo{ + codec: codec, + cp: cp, + comp: comp, + }, + }) +} + +func rpcInfoFromContext(ctx context.Context) (s *rpcInfo, ok bool) { + s, ok = ctx.Value(rpcInfoContextKey{}).(*rpcInfo) + return +} + +// Code returns the error code for err if it was produced by the rpc system. +// Otherwise, it returns codes.Unknown. +// +// Deprecated: use status.Code instead. +func Code(err error) codes.Code { + return status.Code(err) +} + +// ErrorDesc returns the error description of err if it was produced by the rpc system. +// Otherwise, it returns err.Error() or empty string when err is nil. +// +// Deprecated: use status.Convert and Message method instead. 
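// Illustrative sketch (not from the vendored gRPC source): inspecting RPC
// errors with the status package, which Code above wraps and which toRPCErr
// below normalizes transport and context errors into. The describe helper and
// its messages are invented for the example.
package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func describe(err error) string {
	if err == nil {
		return "OK"
	}
	st, ok := status.FromError(err)
	if !ok {
		return fmt.Sprintf("non-status error: %v", err)
	}
	switch st.Code() {
	case codes.DeadlineExceeded:
		return "deadline expired before the RPC completed"
	case codes.Unavailable:
		return "transport-level failure: " + st.Message()
	case codes.ResourceExhausted:
		return "message exceeded a configured size limit: " + st.Message()
	default:
		return st.Code().String() + ": " + st.Message()
	}
}

func main() {
	fmt.Println(describe(status.Error(codes.Unavailable, "connection reset")))
}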
+func ErrorDesc(err error) string { + return status.Convert(err).Message() +} + +// Errorf returns an error containing an error code and a description; +// Errorf returns nil if c is OK. +// +// Deprecated: use status.Errorf instead. +func Errorf(c codes.Code, format string, a ...any) error { + return status.Errorf(c, format, a...) +} + +var errContextCanceled = status.Error(codes.Canceled, context.Canceled.Error()) +var errContextDeadline = status.Error(codes.DeadlineExceeded, context.DeadlineExceeded.Error()) + +// toRPCErr converts an error into an error from the status package. +func toRPCErr(err error) error { + switch err { + case nil, io.EOF: + return err + case context.DeadlineExceeded: + return errContextDeadline + case context.Canceled: + return errContextCanceled + case io.ErrUnexpectedEOF: + return status.Error(codes.Internal, err.Error()) + } + + switch e := err.(type) { + case transport.ConnectionError: + return status.Error(codes.Unavailable, e.Desc) + case *transport.NewStreamError: + return toRPCErr(e.Err) + } + + if _, ok := status.FromError(err); ok { + return err + } + + return status.Error(codes.Unknown, err.Error()) +} + +// setCallInfoCodec should only be called after CallOptions have been applied. +func setCallInfoCodec(c *callInfo) error { + if c.codec != nil { + // codec was already set by a CallOption; use it, but set the content + // subtype if it is not set. + if c.contentSubtype == "" { + // c.codec is a baseCodec to hide the difference between grpc.Codec and + // encoding.Codec (Name vs. String method name). We only support + // setting content subtype from encoding.Codec to avoid a behavior + // change with the deprecated version. + if ec, ok := c.codec.(encoding.Codec); ok { + c.contentSubtype = strings.ToLower(ec.Name()) + } + } + return nil + } + + if c.contentSubtype == "" { + // No codec specified in CallOptions; use proto by default. + c.codec = encoding.GetCodec(proto.Name) + return nil + } + + // c.contentSubtype is already lowercased in CallContentSubtype + c.codec = encoding.GetCodec(c.contentSubtype) + if c.codec == nil { + return status.Errorf(codes.Internal, "no codec registered for content-subtype %s", c.contentSubtype) + } + return nil +} + +// channelzData is used to store channelz related data for ClientConn, addrConn and Server. +// These fields cannot be embedded in the original structs (e.g. ClientConn), since to do atomic +// operation on int64 variable on 32-bit machine, user is responsible to enforce memory alignment. +// Here, by grouping those int64 fields inside a struct, we are enforcing the alignment. +type channelzData struct { + callsStarted int64 + callsFailed int64 + callsSucceeded int64 + // lastCallStartedTime stores the timestamp that last call starts. It is of int64 type instead of + // time.Time since it's more costly to atomically update time.Time variable than int64 variable. + lastCallStartedTime int64 +} + +// The SupportPackageIsVersion variables are referenced from generated protocol +// buffer files to ensure compatibility with the gRPC version used. The latest +// support package version is 7. +// +// Older versions are kept for compatibility. +// +// These constants should not be referenced from any other code. 
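// Illustrative sketch (not from the vendored gRPC source): registering a codec
// that setCallInfoCodec above can then resolve from a content-subtype, so that
// grpc.CallContentSubtype("json") selects it on the client and the server
// picks it up from the request's Content-Type. This toy codec marshals with
// encoding/json purely for illustration; a real one would use protojson.
package main

import (
	"encoding/json"
	"fmt"

	"google.golang.org/grpc/encoding"
)

type jsonCodec struct{}

func (jsonCodec) Marshal(v any) ([]byte, error)      { return json.Marshal(v) }
func (jsonCodec) Unmarshal(data []byte, v any) error { return json.Unmarshal(data, v) }
func (jsonCodec) Name() string                       { return "json" } // yields Content-Type application/grpc+json

func init() { encoding.RegisterCodec(jsonCodec{}) }

func main() { fmt.Println(encoding.GetCodec("json").Name()) }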
+const ( + SupportPackageIsVersion3 = true + SupportPackageIsVersion4 = true + SupportPackageIsVersion5 = true + SupportPackageIsVersion6 = true + SupportPackageIsVersion7 = true +) + +const grpcUA = "grpc-go/" + Version diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go new file mode 100644 index 00000000..e89c5ac6 --- /dev/null +++ b/vendor/google.golang.org/grpc/server.go @@ -0,0 +1,2208 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "errors" + "fmt" + "io" + "math" + "net" + "net/http" + "reflect" + "runtime" + "strings" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/trace" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/encoding" + "google.golang.org/grpc/encoding/proto" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/binarylog" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + "google.golang.org/grpc/tap" +) + +const ( + defaultServerMaxReceiveMessageSize = 1024 * 1024 * 4 + defaultServerMaxSendMessageSize = math.MaxInt32 + + // Server transports are tracked in a map which is keyed on listener + // address. For regular gRPC traffic, connections are accepted in Serve() + // through a call to Accept(), and we use the actual listener address as key + // when we add it to the map. But for connections received through + // ServeHTTP(), we do not have a listener and hence use this dummy value. + listenerAddressForServeHTTP = "listenerAddressForServeHTTP" +) + +func init() { + internal.GetServerCredentials = func(srv *Server) credentials.TransportCredentials { + return srv.opts.creds + } + internal.IsRegisteredMethod = func(srv *Server, method string) bool { + return srv.isRegisteredMethod(method) + } + internal.ServerFromContext = serverFromContext + internal.AddGlobalServerOptions = func(opt ...ServerOption) { + globalServerOptions = append(globalServerOptions, opt...) + } + internal.ClearGlobalServerOptions = func() { + globalServerOptions = nil + } + internal.BinaryLogger = binaryLogger + internal.JoinServerOptions = newJoinServerOption + internal.RecvBufferPool = recvBufferPool +} + +var statusOK = status.New(codes.OK, "") +var logger = grpclog.Component("core") + +type methodHandler func(srv any, ctx context.Context, dec func(any) error, interceptor UnaryServerInterceptor) (any, error) + +// MethodDesc represents an RPC service's method specification. +type MethodDesc struct { + MethodName string + Handler methodHandler +} + +// ServiceDesc represents an RPC service's specification. 
+type ServiceDesc struct { + ServiceName string + // The pointer to the service interface. Used to check whether the user + // provided implementation satisfies the interface requirements. + HandlerType any + Methods []MethodDesc + Streams []StreamDesc + Metadata any +} + +// serviceInfo wraps information about a service. It is very similar to +// ServiceDesc and is constructed from it for internal purposes. +type serviceInfo struct { + // Contains the implementation for the methods in this service. + serviceImpl any + methods map[string]*MethodDesc + streams map[string]*StreamDesc + mdata any +} + +// Server is a gRPC server to serve RPC requests. +type Server struct { + opts serverOptions + + mu sync.Mutex // guards following + lis map[net.Listener]bool + // conns contains all active server transports. It is a map keyed on a + // listener address with the value being the set of active transports + // belonging to that listener. + conns map[string]map[transport.ServerTransport]bool + serve bool + drain bool + cv *sync.Cond // signaled when connections close for GracefulStop + services map[string]*serviceInfo // service name -> service info + events trace.EventLog + + quit *grpcsync.Event + done *grpcsync.Event + channelzRemoveOnce sync.Once + serveWG sync.WaitGroup // counts active Serve goroutines for Stop/GracefulStop + handlersWG sync.WaitGroup // counts active method handler goroutines + + channelzID *channelz.Identifier + czData *channelzData + + serverWorkerChannel chan func() + serverWorkerChannelClose func() +} + +type serverOptions struct { + creds credentials.TransportCredentials + codec baseCodec + cp Compressor + dc Decompressor + unaryInt UnaryServerInterceptor + streamInt StreamServerInterceptor + chainUnaryInts []UnaryServerInterceptor + chainStreamInts []StreamServerInterceptor + binaryLogger binarylog.Logger + inTapHandle tap.ServerInHandle + statsHandlers []stats.Handler + maxConcurrentStreams uint32 + maxReceiveMessageSize int + maxSendMessageSize int + unknownStreamDesc *StreamDesc + keepaliveParams keepalive.ServerParameters + keepalivePolicy keepalive.EnforcementPolicy + initialWindowSize int32 + initialConnWindowSize int32 + writeBufferSize int + readBufferSize int + sharedWriteBuffer bool + connectionTimeout time.Duration + maxHeaderListSize *uint32 + headerTableSize *uint32 + numServerWorkers uint32 + recvBufferPool SharedBufferPool + waitForHandlers bool +} + +var defaultServerOptions = serverOptions{ + maxConcurrentStreams: math.MaxUint32, + maxReceiveMessageSize: defaultServerMaxReceiveMessageSize, + maxSendMessageSize: defaultServerMaxSendMessageSize, + connectionTimeout: 120 * time.Second, + writeBufferSize: defaultWriteBufSize, + readBufferSize: defaultReadBufSize, + recvBufferPool: nopBufferPool{}, +} +var globalServerOptions []ServerOption + +// A ServerOption sets options such as credentials, codec and keepalive parameters, etc. +type ServerOption interface { + apply(*serverOptions) +} + +// EmptyServerOption does not alter the server configuration. It can be embedded +// in another structure to build custom server options. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type EmptyServerOption struct{} + +func (EmptyServerOption) apply(*serverOptions) {} + +// funcServerOption wraps a function that modifies serverOptions into an +// implementation of the ServerOption interface. 
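// Illustrative sketch (not from the vendored gRPC source): the shape of the
// ServiceDesc and MethodDesc values described above, as generated code builds
// them and hands them to (*Server).RegisterService later in this file. The
// "demo.Echo" service, the echoServer type and the handler name are hypothetical.
package sketch

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/protobuf/types/known/emptypb"
)

type echoServer struct{}

func (echoServer) Ping(ctx context.Context, _ *emptypb.Empty) (*emptypb.Empty, error) {
	return &emptypb.Empty{}, nil
}

// _EchoPingHandler mirrors what protoc-gen-go-grpc emits for a unary method:
// decode the request, then call the handler directly or hand it to the
// configured unary interceptor chain.
func _EchoPingHandler(srv any, ctx context.Context, dec func(any) error, interceptor grpc.UnaryServerInterceptor) (any, error) {
	in := new(emptypb.Empty)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(echoServer).Ping(ctx, in)
	}
	info := &grpc.UnaryServerInfo{Server: srv, FullMethod: "/demo.Echo/Ping"}
	handler := func(ctx context.Context, req any) (any, error) {
		return srv.(echoServer).Ping(ctx, req.(*emptypb.Empty))
	}
	return interceptor(ctx, in, info, handler)
}

var echoServiceDesc = grpc.ServiceDesc{
	ServiceName: "demo.Echo",
	HandlerType: (*interface {
		Ping(context.Context, *emptypb.Empty) (*emptypb.Empty, error)
	})(nil),
	Methods:  []grpc.MethodDesc{{MethodName: "Ping", Handler: _EchoPingHandler}},
	Streams:  []grpc.StreamDesc{},
	Metadata: "demo/echo.proto",
}

func register(s *grpc.Server) { s.RegisterService(&echoServiceDesc, echoServer{}) }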
+type funcServerOption struct { + f func(*serverOptions) +} + +func (fdo *funcServerOption) apply(do *serverOptions) { + fdo.f(do) +} + +func newFuncServerOption(f func(*serverOptions)) *funcServerOption { + return &funcServerOption{ + f: f, + } +} + +// joinServerOption provides a way to combine arbitrary number of server +// options into one. +type joinServerOption struct { + opts []ServerOption +} + +func (mdo *joinServerOption) apply(do *serverOptions) { + for _, opt := range mdo.opts { + opt.apply(do) + } +} + +func newJoinServerOption(opts ...ServerOption) ServerOption { + return &joinServerOption{opts: opts} +} + +// SharedWriteBuffer allows reusing per-connection transport write buffer. +// If this option is set to true every connection will release the buffer after +// flushing the data on the wire. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func SharedWriteBuffer(val bool) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.sharedWriteBuffer = val + }) +} + +// WriteBufferSize determines how much data can be batched before doing a write +// on the wire. The corresponding memory allocation for this buffer will be +// twice the size to keep syscalls low. The default value for this buffer is +// 32KB. Zero or negative values will disable the write buffer such that each +// write will be on underlying connection. +// Note: A Send call may not directly translate to a write. +func WriteBufferSize(s int) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.writeBufferSize = s + }) +} + +// ReadBufferSize lets you set the size of read buffer, this determines how much +// data can be read at most for one read syscall. The default value for this +// buffer is 32KB. Zero or negative values will disable read buffer for a +// connection so data framer can access the underlying conn directly. +func ReadBufferSize(s int) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.readBufferSize = s + }) +} + +// InitialWindowSize returns a ServerOption that sets window size for stream. +// The lower bound for window size is 64K and any value smaller than that will be ignored. +func InitialWindowSize(s int32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.initialWindowSize = s + }) +} + +// InitialConnWindowSize returns a ServerOption that sets window size for a connection. +// The lower bound for window size is 64K and any value smaller than that will be ignored. +func InitialConnWindowSize(s int32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.initialConnWindowSize = s + }) +} + +// KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server. +func KeepaliveParams(kp keepalive.ServerParameters) ServerOption { + if kp.Time > 0 && kp.Time < internal.KeepaliveMinServerPingTime { + logger.Warning("Adjusting keepalive ping interval to minimum period of 1s") + kp.Time = internal.KeepaliveMinServerPingTime + } + + return newFuncServerOption(func(o *serverOptions) { + o.keepaliveParams = kp + }) +} + +// KeepaliveEnforcementPolicy returns a ServerOption that sets keepalive enforcement policy for the server. +func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.keepalivePolicy = kep + }) +} + +// CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling. 
+// +// This will override any lookups by content-subtype for Codecs registered with RegisterCodec. +// +// Deprecated: register codecs using encoding.RegisterCodec. The server will +// automatically use registered codecs based on the incoming requests' headers. +// See also +// https://github.com/grpc/grpc-go/blob/master/Documentation/encoding.md#using-a-codec. +// Will be supported throughout 1.x. +func CustomCodec(codec Codec) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.codec = codec + }) +} + +// ForceServerCodec returns a ServerOption that sets a codec for message +// marshaling and unmarshaling. +// +// This will override any lookups by content-subtype for Codecs registered +// with RegisterCodec. +// +// See Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. Also see the documentation on RegisterCodec and +// CallContentSubtype for more details on the interaction between encoding.Codec +// and content-subtype. +// +// This function is provided for advanced users; prefer to register codecs +// using encoding.RegisterCodec. +// The server will automatically use registered codecs based on the incoming +// requests' headers. See also +// https://github.com/grpc/grpc-go/blob/master/Documentation/encoding.md#using-a-codec. +// Will be supported throughout 1.x. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ForceServerCodec(codec encoding.Codec) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.codec = codec + }) +} + +// RPCCompressor returns a ServerOption that sets a compressor for outbound +// messages. For backward compatibility, all outbound messages will be sent +// using this compressor, regardless of incoming message compression. By +// default, server messages will be sent using the same compressor with which +// request messages were sent. +// +// Deprecated: use encoding.RegisterCompressor instead. Will be supported +// throughout 1.x. +func RPCCompressor(cp Compressor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.cp = cp + }) +} + +// RPCDecompressor returns a ServerOption that sets a decompressor for inbound +// messages. It has higher priority than decompressors registered via +// encoding.RegisterCompressor. +// +// Deprecated: use encoding.RegisterCompressor instead. Will be supported +// throughout 1.x. +func RPCDecompressor(dc Decompressor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.dc = dc + }) +} + +// MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive. +// If this is not set, gRPC uses the default limit. +// +// Deprecated: use MaxRecvMsgSize instead. Will be supported throughout 1.x. +func MaxMsgSize(m int) ServerOption { + return MaxRecvMsgSize(m) +} + +// MaxRecvMsgSize returns a ServerOption to set the max message size in bytes the server can receive. +// If this is not set, gRPC uses the default 4MB. +func MaxRecvMsgSize(m int) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.maxReceiveMessageSize = m + }) +} + +// MaxSendMsgSize returns a ServerOption to set the max message size in bytes the server can send. +// If this is not set, gRPC uses the default `math.MaxInt32`. 
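// Illustrative sketch (not from the vendored gRPC source): combining several of
// the ServerOptions defined in this file when constructing a server. The
// certificate paths and size/keepalive values are placeholders.
package sketch

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/keepalive"
)

func newConfiguredServer() *grpc.Server {
	creds, err := credentials.NewServerTLSFromFile("server.crt", "server.key") // placeholder paths
	if err != nil {
		log.Fatalf("loading TLS credentials: %v", err)
	}
	return grpc.NewServer(
		grpc.Creds(creds),
		grpc.MaxRecvMsgSize(16*1024*1024), // raise the 4 MB receive default
		grpc.MaxSendMsgSize(16*1024*1024),
		grpc.ConnectionTimeout(30*time.Second),
		grpc.KeepaliveParams(keepalive.ServerParameters{
			MaxConnectionIdle: 5 * time.Minute,
			Time:              2 * time.Minute,
			Timeout:           20 * time.Second,
		}),
	)
}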
+func MaxSendMsgSize(m int) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.maxSendMessageSize = m + }) +} + +// MaxConcurrentStreams returns a ServerOption that will apply a limit on the number +// of concurrent streams to each ServerTransport. +func MaxConcurrentStreams(n uint32) ServerOption { + if n == 0 { + n = math.MaxUint32 + } + return newFuncServerOption(func(o *serverOptions) { + o.maxConcurrentStreams = n + }) +} + +// Creds returns a ServerOption that sets credentials for server connections. +func Creds(c credentials.TransportCredentials) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.creds = c + }) +} + +// UnaryInterceptor returns a ServerOption that sets the UnaryServerInterceptor for the +// server. Only one unary interceptor can be installed. The construction of multiple +// interceptors (e.g., chaining) can be implemented at the caller. +func UnaryInterceptor(i UnaryServerInterceptor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + if o.unaryInt != nil { + panic("The unary server interceptor was already set and may not be reset.") + } + o.unaryInt = i + }) +} + +// ChainUnaryInterceptor returns a ServerOption that specifies the chained interceptor +// for unary RPCs. The first interceptor will be the outer most, +// while the last interceptor will be the inner most wrapper around the real call. +// All unary interceptors added by this method will be chained. +func ChainUnaryInterceptor(interceptors ...UnaryServerInterceptor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.chainUnaryInts = append(o.chainUnaryInts, interceptors...) + }) +} + +// StreamInterceptor returns a ServerOption that sets the StreamServerInterceptor for the +// server. Only one stream interceptor can be installed. +func StreamInterceptor(i StreamServerInterceptor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + if o.streamInt != nil { + panic("The stream server interceptor was already set and may not be reset.") + } + o.streamInt = i + }) +} + +// ChainStreamInterceptor returns a ServerOption that specifies the chained interceptor +// for streaming RPCs. The first interceptor will be the outer most, +// while the last interceptor will be the inner most wrapper around the real call. +// All stream interceptors added by this method will be chained. +func ChainStreamInterceptor(interceptors ...StreamServerInterceptor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.chainStreamInts = append(o.chainStreamInts, interceptors...) + }) +} + +// InTapHandle returns a ServerOption that sets the tap handle for all the server +// transport to be created. Only one can be installed. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func InTapHandle(h tap.ServerInHandle) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + if o.inTapHandle != nil { + panic("The tap handle was already set and may not be reset.") + } + o.inTapHandle = h + }) +} + +// StatsHandler returns a ServerOption that sets the stats handler for the server. +func StatsHandler(h stats.Handler) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + if h == nil { + logger.Error("ignoring nil parameter in grpc.StatsHandler ServerOption") + // Do not allow a nil stats handler, which would otherwise cause + // panics. 
+ return + } + o.statsHandlers = append(o.statsHandlers, h) + }) +} + +// binaryLogger returns a ServerOption that can set the binary logger for the +// server. +func binaryLogger(bl binarylog.Logger) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.binaryLogger = bl + }) +} + +// UnknownServiceHandler returns a ServerOption that allows for adding a custom +// unknown service handler. The provided method is a bidi-streaming RPC service +// handler that will be invoked instead of returning the "unimplemented" gRPC +// error whenever a request is received for an unregistered service or method. +// The handling function and stream interceptor (if set) have full access to +// the ServerStream, including its Context. +func UnknownServiceHandler(streamHandler StreamHandler) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.unknownStreamDesc = &StreamDesc{ + StreamName: "unknown_service_handler", + Handler: streamHandler, + // We need to assume that the users of the streamHandler will want to use both. + ClientStreams: true, + ServerStreams: true, + } + }) +} + +// ConnectionTimeout returns a ServerOption that sets the timeout for +// connection establishment (up to and including HTTP/2 handshaking) for all +// new connections. If this is not set, the default is 120 seconds. A zero or +// negative value will result in an immediate timeout. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ConnectionTimeout(d time.Duration) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.connectionTimeout = d + }) +} + +// MaxHeaderListSize returns a ServerOption that sets the max (uncompressed) size +// of header list that the server is prepared to accept. +func MaxHeaderListSize(s uint32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.maxHeaderListSize = &s + }) +} + +// HeaderTableSize returns a ServerOption that sets the size of dynamic +// header table for stream. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func HeaderTableSize(s uint32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.headerTableSize = &s + }) +} + +// NumStreamWorkers returns a ServerOption that sets the number of worker +// goroutines that should be used to process incoming streams. Setting this to +// zero (default) will disable workers and spawn a new goroutine for each +// stream. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func NumStreamWorkers(numServerWorkers uint32) ServerOption { + // TODO: If/when this API gets stabilized (i.e. stream workers become the + // only way streams are processed), change the behavior of the zero value to + // a sane default. Preliminary experiments suggest that a value equal to the + // number of CPUs available is most performant; requires thorough testing. + return newFuncServerOption(func(o *serverOptions) { + o.numServerWorkers = numServerWorkers + }) +} + +// WaitForHandlers cause Stop to wait until all outstanding method handlers have +// exited before returning. If false, Stop will return as soon as all +// connections have closed, but method handlers may still be running. By +// default, Stop does not wait for method handlers to return. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. 
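// Illustrative sketch (not from the vendored gRPC source): a minimal unary
// interceptor and how it is installed with ChainUnaryInterceptor, defined
// earlier in this file. Interceptors run outermost-first, exactly as
// chainUnaryInterceptors wires them further below.
package sketch

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
)

// loggingUnaryInterceptor logs the method name and latency of every unary RPC.
func loggingUnaryInterceptor(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
	start := time.Now()
	resp, err := handler(ctx, req)
	log.Printf("rpc %s took %v err=%v", info.FullMethod, time.Since(start), err)
	return resp, err
}

func newServerWithInterceptors() *grpc.Server {
	return grpc.NewServer(
		// loggingUnaryInterceptor is outermost; any interceptor added after it
		// in this call wraps the handler more tightly.
		grpc.ChainUnaryInterceptor(loggingUnaryInterceptor),
	)
}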
+func WaitForHandlers(w bool) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.waitForHandlers = w + }) +} + +// RecvBufferPool returns a ServerOption that configures the server +// to use the provided shared buffer pool for parsing incoming messages. Depending +// on the application's workload, this could result in reduced memory allocation. +// +// If you are unsure about how to implement a memory pool but want to utilize one, +// begin with grpc.NewSharedBufferPool. +// +// Note: The shared buffer pool feature will not be active if any of the following +// options are used: StatsHandler, EnableTracing, or binary logging. In such +// cases, the shared buffer pool will be ignored. +// +// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in +// v1.60.0 or later. +func RecvBufferPool(bufferPool SharedBufferPool) ServerOption { + return recvBufferPool(bufferPool) +} + +func recvBufferPool(bufferPool SharedBufferPool) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.recvBufferPool = bufferPool + }) +} + +// serverWorkerResetThreshold defines how often the stack must be reset. Every +// N requests, by spawning a new goroutine in its place, a worker can reset its +// stack so that large stacks don't live in memory forever. 2^16 should allow +// each goroutine stack to live for at least a few seconds in a typical +// workload (assuming a QPS of a few thousand requests/sec). +const serverWorkerResetThreshold = 1 << 16 + +// serverWorkers blocks on a *transport.Stream channel forever and waits for +// data to be fed by serveStreams. This allows multiple requests to be +// processed by the same goroutine, removing the need for expensive stack +// re-allocations (see the runtime.morestack problem [1]). +// +// [1] https://github.com/golang/go/issues/18138 +func (s *Server) serverWorker() { + for completed := 0; completed < serverWorkerResetThreshold; completed++ { + f, ok := <-s.serverWorkerChannel + if !ok { + return + } + f() + } + go s.serverWorker() +} + +// initServerWorkers creates worker goroutines and a channel to process incoming +// connections to reduce the time spent overall on runtime.morestack. +func (s *Server) initServerWorkers() { + s.serverWorkerChannel = make(chan func()) + s.serverWorkerChannelClose = grpcsync.OnceFunc(func() { + close(s.serverWorkerChannel) + }) + for i := uint32(0); i < s.opts.numServerWorkers; i++ { + go s.serverWorker() + } +} + +// NewServer creates a gRPC server which has no service registered and has not +// started to accept requests yet. 
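// Illustrative sketch (not from the vendored gRPC source): the usual lifecycle
// around NewServer, Serve and GracefulStop. The listen address is a
// placeholder, and RegisterEchoServer stands in for whatever registration
// function the generated code provides in a real project.
package sketch

import (
	"log"
	"net"
	"os"
	"os/signal"
	"syscall"

	"google.golang.org/grpc"
)

func run(addr string) error {
	lis, err := net.Listen("tcp", addr)
	if err != nil {
		return err
	}
	s := grpc.NewServer()
	// Generated Register*Server calls must happen before Serve, e.g.:
	// pb.RegisterEchoServer(s, &echoServer{}) // hypothetical generated code

	go func() {
		sig := make(chan os.Signal, 1)
		signal.Notify(sig, syscall.SIGINT, syscall.SIGTERM)
		<-sig
		// GracefulStop drains in-flight RPCs; Stop would terminate them immediately.
		s.GracefulStop()
	}()

	log.Printf("serving gRPC on %s", addr)
	return s.Serve(lis) // returns nil after Stop/GracefulStop, per the Serve docs below
}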
+func NewServer(opt ...ServerOption) *Server { + opts := defaultServerOptions + for _, o := range globalServerOptions { + o.apply(&opts) + } + for _, o := range opt { + o.apply(&opts) + } + s := &Server{ + lis: make(map[net.Listener]bool), + opts: opts, + conns: make(map[string]map[transport.ServerTransport]bool), + services: make(map[string]*serviceInfo), + quit: grpcsync.NewEvent(), + done: grpcsync.NewEvent(), + czData: new(channelzData), + } + chainUnaryServerInterceptors(s) + chainStreamServerInterceptors(s) + s.cv = sync.NewCond(&s.mu) + if EnableTracing { + _, file, line, _ := runtime.Caller(1) + s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line)) + } + + if s.opts.numServerWorkers > 0 { + s.initServerWorkers() + } + + s.channelzID = channelz.RegisterServer(&channelzServer{s}, "") + channelz.Info(logger, s.channelzID, "Server created") + return s +} + +// printf records an event in s's event log, unless s has been stopped. +// REQUIRES s.mu is held. +func (s *Server) printf(format string, a ...any) { + if s.events != nil { + s.events.Printf(format, a...) + } +} + +// errorf records an error in s's event log, unless s has been stopped. +// REQUIRES s.mu is held. +func (s *Server) errorf(format string, a ...any) { + if s.events != nil { + s.events.Errorf(format, a...) + } +} + +// ServiceRegistrar wraps a single method that supports service registration. It +// enables users to pass concrete types other than grpc.Server to the service +// registration methods exported by the IDL generated code. +type ServiceRegistrar interface { + // RegisterService registers a service and its implementation to the + // concrete type implementing this interface. It may not be called + // once the server has started serving. + // desc describes the service and its methods and handlers. impl is the + // service implementation which is passed to the method handlers. + RegisterService(desc *ServiceDesc, impl any) +} + +// RegisterService registers a service and its implementation to the gRPC +// server. It is called from the IDL generated code. This must be called before +// invoking Serve. If ss is non-nil (for legacy code), its type is checked to +// ensure it implements sd.HandlerType. +func (s *Server) RegisterService(sd *ServiceDesc, ss any) { + if ss != nil { + ht := reflect.TypeOf(sd.HandlerType).Elem() + st := reflect.TypeOf(ss) + if !st.Implements(ht) { + logger.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht) + } + } + s.register(sd, ss) +} + +func (s *Server) register(sd *ServiceDesc, ss any) { + s.mu.Lock() + defer s.mu.Unlock() + s.printf("RegisterService(%q)", sd.ServiceName) + if s.serve { + logger.Fatalf("grpc: Server.RegisterService after Server.Serve for %q", sd.ServiceName) + } + if _, ok := s.services[sd.ServiceName]; ok { + logger.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName) + } + info := &serviceInfo{ + serviceImpl: ss, + methods: make(map[string]*MethodDesc), + streams: make(map[string]*StreamDesc), + mdata: sd.Metadata, + } + for i := range sd.Methods { + d := &sd.Methods[i] + info.methods[d.MethodName] = d + } + for i := range sd.Streams { + d := &sd.Streams[i] + info.streams[d.StreamName] = d + } + s.services[sd.ServiceName] = info +} + +// MethodInfo contains the information of an RPC including its method name and type. +type MethodInfo struct { + // Name is the method name only, without the service name or package name. 
+ Name string + // IsClientStream indicates whether the RPC is a client streaming RPC. + IsClientStream bool + // IsServerStream indicates whether the RPC is a server streaming RPC. + IsServerStream bool +} + +// ServiceInfo contains unary RPC method info, streaming RPC method info and metadata for a service. +type ServiceInfo struct { + Methods []MethodInfo + // Metadata is the metadata specified in ServiceDesc when registering service. + Metadata any +} + +// GetServiceInfo returns a map from service names to ServiceInfo. +// Service names include the package names, in the form of .. +func (s *Server) GetServiceInfo() map[string]ServiceInfo { + ret := make(map[string]ServiceInfo) + for n, srv := range s.services { + methods := make([]MethodInfo, 0, len(srv.methods)+len(srv.streams)) + for m := range srv.methods { + methods = append(methods, MethodInfo{ + Name: m, + IsClientStream: false, + IsServerStream: false, + }) + } + for m, d := range srv.streams { + methods = append(methods, MethodInfo{ + Name: m, + IsClientStream: d.ClientStreams, + IsServerStream: d.ServerStreams, + }) + } + + ret[n] = ServiceInfo{ + Methods: methods, + Metadata: srv.mdata, + } + } + return ret +} + +// ErrServerStopped indicates that the operation is now illegal because of +// the server being stopped. +var ErrServerStopped = errors.New("grpc: the server has been stopped") + +type listenSocket struct { + net.Listener + channelzID *channelz.Identifier +} + +func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric { + return &channelz.SocketInternalMetric{ + SocketOptions: channelz.GetSocketOption(l.Listener), + LocalAddr: l.Listener.Addr(), + } +} + +func (l *listenSocket) Close() error { + err := l.Listener.Close() + channelz.RemoveEntry(l.channelzID) + channelz.Info(logger, l.channelzID, "ListenSocket deleted") + return err +} + +// Serve accepts incoming connections on the listener lis, creating a new +// ServerTransport and service goroutine for each. The service goroutines +// read gRPC requests and then call the registered handlers to reply to them. +// Serve returns when lis.Accept fails with fatal errors. lis will be closed when +// this method returns. +// Serve will return a non-nil error unless Stop or GracefulStop is called. +// +// Note: All supported releases of Go (as of December 2023) override the OS +// defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive +// with OS defaults for keepalive time and interval, callers need to do the +// following two things: +// - pass a net.Listener created by calling the Listen method on a +// net.ListenConfig with the `KeepAlive` field set to a negative value. This +// will result in the Go standard library not overriding OS defaults for TCP +// keepalive interval and time. But this will also result in the Go standard +// library not enabling TCP keepalives by default. +// - override the Accept method on the passed in net.Listener and set the +// SO_KEEPALIVE socket option to enable TCP keepalives, with OS defaults. +func (s *Server) Serve(lis net.Listener) error { + s.mu.Lock() + s.printf("serving") + s.serve = true + if s.lis == nil { + // Serve called after Stop or GracefulStop. + s.mu.Unlock() + lis.Close() + return ErrServerStopped + } + + s.serveWG.Add(1) + defer func() { + s.serveWG.Done() + if s.quit.HasFired() { + // Stop or GracefulStop called; block until done and return nil. 
+ <-s.done.Done() + } + }() + + ls := &listenSocket{Listener: lis} + s.lis[ls] = true + + defer func() { + s.mu.Lock() + if s.lis != nil && s.lis[ls] { + ls.Close() + delete(s.lis, ls) + } + s.mu.Unlock() + }() + + var err error + ls.channelzID, err = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String()) + if err != nil { + s.mu.Unlock() + return err + } + s.mu.Unlock() + channelz.Info(logger, ls.channelzID, "ListenSocket created") + + var tempDelay time.Duration // how long to sleep on accept failure + for { + rawConn, err := lis.Accept() + if err != nil { + if ne, ok := err.(interface { + Temporary() bool + }); ok && ne.Temporary() { + if tempDelay == 0 { + tempDelay = 5 * time.Millisecond + } else { + tempDelay *= 2 + } + if max := 1 * time.Second; tempDelay > max { + tempDelay = max + } + s.mu.Lock() + s.printf("Accept error: %v; retrying in %v", err, tempDelay) + s.mu.Unlock() + timer := time.NewTimer(tempDelay) + select { + case <-timer.C: + case <-s.quit.Done(): + timer.Stop() + return nil + } + continue + } + s.mu.Lock() + s.printf("done serving; Accept = %v", err) + s.mu.Unlock() + + if s.quit.HasFired() { + return nil + } + return err + } + tempDelay = 0 + // Start a new goroutine to deal with rawConn so we don't stall this Accept + // loop goroutine. + // + // Make sure we account for the goroutine so GracefulStop doesn't nil out + // s.conns before this conn can be added. + s.serveWG.Add(1) + go func() { + s.handleRawConn(lis.Addr().String(), rawConn) + s.serveWG.Done() + }() + } +} + +// handleRawConn forks a goroutine to handle a just-accepted connection that +// has not had any I/O performed on it yet. +func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) { + if s.quit.HasFired() { + rawConn.Close() + return + } + rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout)) + + // Finish handshaking (HTTP2) + st := s.newHTTP2Transport(rawConn) + rawConn.SetDeadline(time.Time{}) + if st == nil { + return + } + + if cc, ok := rawConn.(interface { + PassServerTransport(transport.ServerTransport) + }); ok { + cc.PassServerTransport(st) + } + + if !s.addConn(lisAddr, st) { + return + } + go func() { + s.serveStreams(context.Background(), st, rawConn) + s.removeConn(lisAddr, st) + }() +} + +// newHTTP2Transport sets up a http/2 transport (using the +// gRPC http2 server transport in transport/http2_server.go). +func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { + config := &transport.ServerConfig{ + MaxStreams: s.opts.maxConcurrentStreams, + ConnectionTimeout: s.opts.connectionTimeout, + Credentials: s.opts.creds, + InTapHandle: s.opts.inTapHandle, + StatsHandlers: s.opts.statsHandlers, + KeepaliveParams: s.opts.keepaliveParams, + KeepalivePolicy: s.opts.keepalivePolicy, + InitialWindowSize: s.opts.initialWindowSize, + InitialConnWindowSize: s.opts.initialConnWindowSize, + WriteBufferSize: s.opts.writeBufferSize, + ReadBufferSize: s.opts.readBufferSize, + SharedWriteBuffer: s.opts.sharedWriteBuffer, + ChannelzParentID: s.channelzID, + MaxHeaderListSize: s.opts.maxHeaderListSize, + HeaderTableSize: s.opts.headerTableSize, + } + st, err := transport.NewServerTransport(c, config) + if err != nil { + s.mu.Lock() + s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err) + s.mu.Unlock() + // ErrConnDispatched means that the connection was dispatched away from + // gRPC; those connections should be left open. + if err != credentials.ErrConnDispatched { + // Don't log on ErrConnDispatched and io.EOF to prevent log spam. 
+ if err != io.EOF { + channelz.Info(logger, s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err) + } + c.Close() + } + return nil + } + + return st +} + +func (s *Server) serveStreams(ctx context.Context, st transport.ServerTransport, rawConn net.Conn) { + ctx = transport.SetConnection(ctx, rawConn) + ctx = peer.NewContext(ctx, st.Peer()) + for _, sh := range s.opts.statsHandlers { + ctx = sh.TagConn(ctx, &stats.ConnTagInfo{ + RemoteAddr: st.Peer().Addr, + LocalAddr: st.Peer().LocalAddr, + }) + sh.HandleConn(ctx, &stats.ConnBegin{}) + } + + defer func() { + st.Close(errors.New("finished serving streams for the server transport")) + for _, sh := range s.opts.statsHandlers { + sh.HandleConn(ctx, &stats.ConnEnd{}) + } + }() + + streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams) + st.HandleStreams(ctx, func(stream *transport.Stream) { + s.handlersWG.Add(1) + streamQuota.acquire() + f := func() { + defer streamQuota.release() + defer s.handlersWG.Done() + s.handleStream(st, stream) + } + + if s.opts.numServerWorkers > 0 { + select { + case s.serverWorkerChannel <- f: + return + default: + // If all stream workers are busy, fallback to the default code path. + } + } + go f() + }) +} + +var _ http.Handler = (*Server)(nil) + +// ServeHTTP implements the Go standard library's http.Handler +// interface by responding to the gRPC request r, by looking up +// the requested gRPC method in the gRPC server s. +// +// The provided HTTP request must have arrived on an HTTP/2 +// connection. When using the Go standard library's server, +// practically this means that the Request must also have arrived +// over TLS. +// +// To share one port (such as 443 for https) between gRPC and an +// existing http.Handler, use a root http.Handler such as: +// +// if r.ProtoMajor == 2 && strings.HasPrefix( +// r.Header.Get("Content-Type"), "application/grpc") { +// grpcServer.ServeHTTP(w, r) +// } else { +// yourMux.ServeHTTP(w, r) +// } +// +// Note that ServeHTTP uses Go's HTTP/2 server implementation which is totally +// separate from grpc-go's HTTP/2 server. Performance and features may vary +// between the two paths. ServeHTTP does not support some gRPC features +// available through grpc-go's HTTP/2 server. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { + st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers) + if err != nil { + // Errors returned from transport.NewServerHandlerTransport have + // already been written to w. + return + } + if !s.addConn(listenerAddressForServeHTTP, st) { + return + } + defer s.removeConn(listenerAddressForServeHTTP, st) + s.serveStreams(r.Context(), st, nil) +} + +func (s *Server) addConn(addr string, st transport.ServerTransport) bool { + s.mu.Lock() + defer s.mu.Unlock() + if s.conns == nil { + st.Close(errors.New("Server.addConn called when server has already been stopped")) + return false + } + if s.drain { + // Transport added after we drained our existing conns: drain it + // immediately. + st.Drain("") + } + + if s.conns[addr] == nil { + // Create a map entry if this is the first connection on this listener. 
+ s.conns[addr] = make(map[transport.ServerTransport]bool) + } + s.conns[addr][st] = true + return true +} + +func (s *Server) removeConn(addr string, st transport.ServerTransport) { + s.mu.Lock() + defer s.mu.Unlock() + + conns := s.conns[addr] + if conns != nil { + delete(conns, st) + if len(conns) == 0 { + // If the last connection for this address is being removed, also + // remove the map entry corresponding to the address. This is used + // in GracefulStop() when waiting for all connections to be closed. + delete(s.conns, addr) + } + s.cv.Broadcast() + } +} + +func (s *Server) channelzMetric() *channelz.ServerInternalMetric { + return &channelz.ServerInternalMetric{ + CallsStarted: atomic.LoadInt64(&s.czData.callsStarted), + CallsSucceeded: atomic.LoadInt64(&s.czData.callsSucceeded), + CallsFailed: atomic.LoadInt64(&s.czData.callsFailed), + LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&s.czData.lastCallStartedTime)), + } +} + +func (s *Server) incrCallsStarted() { + atomic.AddInt64(&s.czData.callsStarted, 1) + atomic.StoreInt64(&s.czData.lastCallStartedTime, time.Now().UnixNano()) +} + +func (s *Server) incrCallsSucceeded() { + atomic.AddInt64(&s.czData.callsSucceeded, 1) +} + +func (s *Server) incrCallsFailed() { + atomic.AddInt64(&s.czData.callsFailed, 1) +} + +func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { + data, err := encode(s.getCodec(stream.ContentSubtype()), msg) + if err != nil { + channelz.Error(logger, s.channelzID, "grpc: server failed to encode response: ", err) + return err + } + compData, err := compress(data, cp, comp) + if err != nil { + channelz.Error(logger, s.channelzID, "grpc: server failed to compress response: ", err) + return err + } + hdr, payload := msgHeader(data, compData) + // TODO(dfawley): should we be checking len(data) instead? + if len(payload) > s.opts.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(payload), s.opts.maxSendMessageSize) + } + err = t.Write(stream, hdr, payload, opts) + if err == nil { + for _, sh := range s.opts.statsHandlers { + sh.HandleRPC(ctx, outPayload(false, msg, data, payload, time.Now())) + } + } + return err +} + +// chainUnaryServerInterceptors chains all unary server interceptors into one. +func chainUnaryServerInterceptors(s *Server) { + // Prepend opts.unaryInt to the chaining interceptors if it exists, since unaryInt will + // be executed before any other chained interceptors. + interceptors := s.opts.chainUnaryInts + if s.opts.unaryInt != nil { + interceptors = append([]UnaryServerInterceptor{s.opts.unaryInt}, s.opts.chainUnaryInts...) 
+ } + + var chainedInt UnaryServerInterceptor + if len(interceptors) == 0 { + chainedInt = nil + } else if len(interceptors) == 1 { + chainedInt = interceptors[0] + } else { + chainedInt = chainUnaryInterceptors(interceptors) + } + + s.opts.unaryInt = chainedInt +} + +func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor { + return func(ctx context.Context, req any, info *UnaryServerInfo, handler UnaryHandler) (any, error) { + return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler)) + } +} + +func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info *UnaryServerInfo, finalHandler UnaryHandler) UnaryHandler { + if curr == len(interceptors)-1 { + return finalHandler + } + return func(ctx context.Context, req any) (any, error) { + return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler)) + } +} + +func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { + shs := s.opts.statsHandlers + if len(shs) != 0 || trInfo != nil || channelz.IsOn() { + if channelz.IsOn() { + s.incrCallsStarted() + } + var statsBegin *stats.Begin + for _, sh := range shs { + beginTime := time.Now() + statsBegin = &stats.Begin{ + BeginTime: beginTime, + IsClientStream: false, + IsServerStream: false, + } + sh.HandleRPC(ctx, statsBegin) + } + if trInfo != nil { + trInfo.tr.LazyLog(&trInfo.firstLine, false) + } + // The deferred error handling for tracing, stats handler and channelz are + // combined into one function to reduce stack usage -- a defer takes ~56-64 + // bytes on the stack, so overflowing the stack will require a stack + // re-allocation, which is expensive. + // + // To maintain behavior similar to separate deferred statements, statements + // should be executed in the reverse order. That is, tracing first, stats + // handler second, and channelz last. Note that panics *within* defers will + // lead to different behavior, but that's an acceptable compromise; that + // would be undefined behavior territory anyway. 
+ defer func() { + if trInfo != nil { + if err != nil && err != io.EOF { + trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + trInfo.tr.SetError() + } + trInfo.tr.Finish() + } + + for _, sh := range shs { + end := &stats.End{ + BeginTime: statsBegin.BeginTime, + EndTime: time.Now(), + } + if err != nil && err != io.EOF { + end.Error = toRPCErr(err) + } + sh.HandleRPC(ctx, end) + } + + if channelz.IsOn() { + if err != nil && err != io.EOF { + s.incrCallsFailed() + } else { + s.incrCallsSucceeded() + } + } + }() + } + var binlogs []binarylog.MethodLogger + if ml := binarylog.GetMethodLogger(stream.Method()); ml != nil { + binlogs = append(binlogs, ml) + } + if s.opts.binaryLogger != nil { + if ml := s.opts.binaryLogger.GetMethodLogger(stream.Method()); ml != nil { + binlogs = append(binlogs, ml) + } + } + if len(binlogs) != 0 { + md, _ := metadata.FromIncomingContext(ctx) + logEntry := &binarylog.ClientHeader{ + Header: md, + MethodName: stream.Method(), + PeerAddr: nil, + } + if deadline, ok := ctx.Deadline(); ok { + logEntry.Timeout = time.Until(deadline) + if logEntry.Timeout < 0 { + logEntry.Timeout = 0 + } + } + if a := md[":authority"]; len(a) > 0 { + logEntry.Authority = a[0] + } + if peer, ok := peer.FromContext(ctx); ok { + logEntry.PeerAddr = peer.Addr + } + for _, binlog := range binlogs { + binlog.Log(ctx, logEntry) + } + } + + // comp and cp are used for compression. decomp and dc are used for + // decompression. If comp and decomp are both set, they are the same; + // however they are kept separate to ensure that at most one of the + // compressor/decompressor variable pairs are set for use later. + var comp, decomp encoding.Compressor + var cp Compressor + var dc Decompressor + var sendCompressorName string + + // If dc is set and matches the stream's compression, use it. Otherwise, try + // to find a matching registered compressor for decomp. + if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc { + dc = s.opts.dc + } else if rc != "" && rc != encoding.Identity { + decomp = encoding.GetCompressor(rc) + if decomp == nil { + st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc) + t.WriteStatus(stream, st) + return st.Err() + } + } + + // If cp is set, use it. Otherwise, attempt to compress the response using + // the incoming message compression method. + // + // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. + if s.opts.cp != nil { + cp = s.opts.cp + sendCompressorName = cp.Type() + } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { + // Legacy compressor not specified; attempt to respond with same encoding. 
+ comp = encoding.GetCompressor(rc) + if comp != nil { + sendCompressorName = comp.Name() + } + } + + if sendCompressorName != "" { + if err := stream.SetSendCompress(sendCompressorName); err != nil { + return status.Errorf(codes.Internal, "grpc: failed to set send compressor: %v", err) + } + } + + var payInfo *payloadInfo + if len(shs) != 0 || len(binlogs) != 0 { + payInfo = &payloadInfo{} + } + d, err := recvAndDecompress(&parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) + if err != nil { + if e := t.WriteStatus(stream, status.Convert(err)); e != nil { + channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) + } + return err + } + if channelz.IsOn() { + t.IncrMsgRecv() + } + df := func(v any) error { + if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { + return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) + } + for _, sh := range shs { + sh.HandleRPC(ctx, &stats.InPayload{ + RecvTime: time.Now(), + Payload: v, + Length: len(d), + WireLength: payInfo.compressedLength + headerLen, + CompressedLength: payInfo.compressedLength, + Data: d, + }) + } + if len(binlogs) != 0 { + cm := &binarylog.ClientMessage{ + Message: d, + } + for _, binlog := range binlogs { + binlog.Log(ctx, cm) + } + } + if trInfo != nil { + trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true) + } + return nil + } + ctx = NewContextWithServerTransportStream(ctx, stream) + reply, appErr := md.Handler(info.serviceImpl, ctx, df, s.opts.unaryInt) + if appErr != nil { + appStatus, ok := status.FromError(appErr) + if !ok { + // Convert non-status application error to a status error with code + // Unknown, but handle context errors specifically. + appStatus = status.FromContextError(appErr) + appErr = appStatus.Err() + } + if trInfo != nil { + trInfo.tr.LazyLog(stringer(appStatus.Message()), true) + trInfo.tr.SetError() + } + if e := t.WriteStatus(stream, appStatus); e != nil { + channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) + } + if len(binlogs) != 0 { + if h, _ := stream.Header(); h.Len() > 0 { + // Only log serverHeader if there was header. Otherwise it can + // be trailer only. + sh := &binarylog.ServerHeader{ + Header: h, + } + for _, binlog := range binlogs { + binlog.Log(ctx, sh) + } + } + st := &binarylog.ServerTrailer{ + Trailer: stream.Trailer(), + Err: appErr, + } + for _, binlog := range binlogs { + binlog.Log(ctx, st) + } + } + return appErr + } + if trInfo != nil { + trInfo.tr.LazyLog(stringer("OK"), false) + } + opts := &transport.Options{Last: true} + + // Server handler could have set new compressor by calling SetSendCompressor. + // In case it is set, we need to use it for compressing outbound message. + if stream.SendCompress() != sendCompressorName { + comp = encoding.GetCompressor(stream.SendCompress()) + } + if err := s.sendResponse(ctx, t, stream, reply, cp, opts, comp); err != nil { + if err == io.EOF { + // The entire stream is done (for unary RPC only). + return err + } + if sts, ok := status.FromError(err); ok { + if e := t.WriteStatus(stream, sts); e != nil { + channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) + } + } else { + switch st := err.(type) { + case transport.ConnectionError: + // Nothing to do here. 
+ default: + panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st)) + } + } + if len(binlogs) != 0 { + h, _ := stream.Header() + sh := &binarylog.ServerHeader{ + Header: h, + } + st := &binarylog.ServerTrailer{ + Trailer: stream.Trailer(), + Err: appErr, + } + for _, binlog := range binlogs { + binlog.Log(ctx, sh) + binlog.Log(ctx, st) + } + } + return err + } + if len(binlogs) != 0 { + h, _ := stream.Header() + sh := &binarylog.ServerHeader{ + Header: h, + } + sm := &binarylog.ServerMessage{ + Message: reply, + } + for _, binlog := range binlogs { + binlog.Log(ctx, sh) + binlog.Log(ctx, sm) + } + } + if channelz.IsOn() { + t.IncrMsgSent() + } + if trInfo != nil { + trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true) + } + // TODO: Should we be logging if writing status failed here, like above? + // Should the logging be in WriteStatus? Should we ignore the WriteStatus + // error or allow the stats handler to see it? + if len(binlogs) != 0 { + st := &binarylog.ServerTrailer{ + Trailer: stream.Trailer(), + Err: appErr, + } + for _, binlog := range binlogs { + binlog.Log(ctx, st) + } + } + return t.WriteStatus(stream, statusOK) +} + +// chainStreamServerInterceptors chains all stream server interceptors into one. +func chainStreamServerInterceptors(s *Server) { + // Prepend opts.streamInt to the chaining interceptors if it exists, since streamInt will + // be executed before any other chained interceptors. + interceptors := s.opts.chainStreamInts + if s.opts.streamInt != nil { + interceptors = append([]StreamServerInterceptor{s.opts.streamInt}, s.opts.chainStreamInts...) + } + + var chainedInt StreamServerInterceptor + if len(interceptors) == 0 { + chainedInt = nil + } else if len(interceptors) == 1 { + chainedInt = interceptors[0] + } else { + chainedInt = chainStreamInterceptors(interceptors) + } + + s.opts.streamInt = chainedInt +} + +func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServerInterceptor { + return func(srv any, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { + return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler)) + } +} + +func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, info *StreamServerInfo, finalHandler StreamHandler) StreamHandler { + if curr == len(interceptors)-1 { + return finalHandler + } + return func(srv any, stream ServerStream) error { + return interceptors[curr+1](srv, stream, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler)) + } +} + +func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { + if channelz.IsOn() { + s.incrCallsStarted() + } + shs := s.opts.statsHandlers + var statsBegin *stats.Begin + if len(shs) != 0 { + beginTime := time.Now() + statsBegin = &stats.Begin{ + BeginTime: beginTime, + IsClientStream: sd.ClientStreams, + IsServerStream: sd.ServerStreams, + } + for _, sh := range shs { + sh.HandleRPC(ctx, statsBegin) + } + } + ctx = NewContextWithServerTransportStream(ctx, stream) + ss := &serverStream{ + ctx: ctx, + t: t, + s: stream, + p: &parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, + codec: s.getCodec(stream.ContentSubtype()), + maxReceiveMessageSize: s.opts.maxReceiveMessageSize, + maxSendMessageSize: s.opts.maxSendMessageSize, + trInfo: trInfo, + statsHandler: shs, + } + + if len(shs) != 0 || trInfo != nil || channelz.IsOn() { + // See 
comment in processUnaryRPC on defers. + defer func() { + if trInfo != nil { + ss.mu.Lock() + if err != nil && err != io.EOF { + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + ss.trInfo.tr.SetError() + } + ss.trInfo.tr.Finish() + ss.trInfo.tr = nil + ss.mu.Unlock() + } + + if len(shs) != 0 { + end := &stats.End{ + BeginTime: statsBegin.BeginTime, + EndTime: time.Now(), + } + if err != nil && err != io.EOF { + end.Error = toRPCErr(err) + } + for _, sh := range shs { + sh.HandleRPC(ctx, end) + } + } + + if channelz.IsOn() { + if err != nil && err != io.EOF { + s.incrCallsFailed() + } else { + s.incrCallsSucceeded() + } + } + }() + } + + if ml := binarylog.GetMethodLogger(stream.Method()); ml != nil { + ss.binlogs = append(ss.binlogs, ml) + } + if s.opts.binaryLogger != nil { + if ml := s.opts.binaryLogger.GetMethodLogger(stream.Method()); ml != nil { + ss.binlogs = append(ss.binlogs, ml) + } + } + if len(ss.binlogs) != 0 { + md, _ := metadata.FromIncomingContext(ctx) + logEntry := &binarylog.ClientHeader{ + Header: md, + MethodName: stream.Method(), + PeerAddr: nil, + } + if deadline, ok := ctx.Deadline(); ok { + logEntry.Timeout = time.Until(deadline) + if logEntry.Timeout < 0 { + logEntry.Timeout = 0 + } + } + if a := md[":authority"]; len(a) > 0 { + logEntry.Authority = a[0] + } + if peer, ok := peer.FromContext(ss.Context()); ok { + logEntry.PeerAddr = peer.Addr + } + for _, binlog := range ss.binlogs { + binlog.Log(ctx, logEntry) + } + } + + // If dc is set and matches the stream's compression, use it. Otherwise, try + // to find a matching registered compressor for decomp. + if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc { + ss.dc = s.opts.dc + } else if rc != "" && rc != encoding.Identity { + ss.decomp = encoding.GetCompressor(rc) + if ss.decomp == nil { + st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc) + t.WriteStatus(ss.s, st) + return st.Err() + } + } + + // If cp is set, use it. Otherwise, attempt to compress the response using + // the incoming message compression method. + // + // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. + if s.opts.cp != nil { + ss.cp = s.opts.cp + ss.sendCompressorName = s.opts.cp.Type() + } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { + // Legacy compressor not specified; attempt to respond with same encoding. + ss.comp = encoding.GetCompressor(rc) + if ss.comp != nil { + ss.sendCompressorName = rc + } + } + + if ss.sendCompressorName != "" { + if err := stream.SetSendCompress(ss.sendCompressorName); err != nil { + return status.Errorf(codes.Internal, "grpc: failed to set send compressor: %v", err) + } + } + + ss.ctx = newContextWithRPCInfo(ss.ctx, false, ss.codec, ss.cp, ss.comp) + + if trInfo != nil { + trInfo.tr.LazyLog(&trInfo.firstLine, false) + } + var appErr error + var server any + if info != nil { + server = info.serviceImpl + } + if s.opts.streamInt == nil { + appErr = sd.Handler(server, ss) + } else { + info := &StreamServerInfo{ + FullMethod: stream.Method(), + IsClientStream: sd.ClientStreams, + IsServerStream: sd.ServerStreams, + } + appErr = s.opts.streamInt(server, ss, info, sd.Handler) + } + if appErr != nil { + appStatus, ok := status.FromError(appErr) + if !ok { + // Convert non-status application error to a status error with code + // Unknown, but handle context errors specifically. 
+ appStatus = status.FromContextError(appErr) + appErr = appStatus.Err() + } + if trInfo != nil { + ss.mu.Lock() + ss.trInfo.tr.LazyLog(stringer(appStatus.Message()), true) + ss.trInfo.tr.SetError() + ss.mu.Unlock() + } + if len(ss.binlogs) != 0 { + st := &binarylog.ServerTrailer{ + Trailer: ss.s.Trailer(), + Err: appErr, + } + for _, binlog := range ss.binlogs { + binlog.Log(ctx, st) + } + } + t.WriteStatus(ss.s, appStatus) + // TODO: Should we log an error from WriteStatus here and below? + return appErr + } + if trInfo != nil { + ss.mu.Lock() + ss.trInfo.tr.LazyLog(stringer("OK"), false) + ss.mu.Unlock() + } + if len(ss.binlogs) != 0 { + st := &binarylog.ServerTrailer{ + Trailer: ss.s.Trailer(), + Err: appErr, + } + for _, binlog := range ss.binlogs { + binlog.Log(ctx, st) + } + } + return t.WriteStatus(ss.s, statusOK) +} + +func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) { + ctx := stream.Context() + ctx = contextWithServer(ctx, s) + var ti *traceInfo + if EnableTracing { + tr := trace.New("grpc.Recv."+methodFamily(stream.Method()), stream.Method()) + ctx = trace.NewContext(ctx, tr) + ti = &traceInfo{ + tr: tr, + firstLine: firstLine{ + client: false, + remoteAddr: t.Peer().Addr, + }, + } + if dl, ok := ctx.Deadline(); ok { + ti.firstLine.deadline = time.Until(dl) + } + } + + sm := stream.Method() + if sm != "" && sm[0] == '/' { + sm = sm[1:] + } + pos := strings.LastIndex(sm, "/") + if pos == -1 { + if ti != nil { + ti.tr.LazyLog(&fmtStringer{"Malformed method name %q", []any{sm}}, true) + ti.tr.SetError() + } + errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) + if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { + if ti != nil { + ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + ti.tr.SetError() + } + channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) + } + if ti != nil { + ti.tr.Finish() + } + return + } + service := sm[:pos] + method := sm[pos+1:] + + md, _ := metadata.FromIncomingContext(ctx) + for _, sh := range s.opts.statsHandlers { + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()}) + sh.HandleRPC(ctx, &stats.InHeader{ + FullMethod: stream.Method(), + RemoteAddr: t.Peer().Addr, + LocalAddr: t.Peer().LocalAddr, + Compression: stream.RecvCompress(), + WireLength: stream.HeaderWireLength(), + Header: md, + }) + } + // To have calls in stream callouts work. Will delete once all stats handler + // calls come from the gRPC layer. + stream.SetContext(ctx) + + srv, knownService := s.services[service] + if knownService { + if md, ok := srv.methods[method]; ok { + s.processUnaryRPC(ctx, t, stream, srv, md, ti) + return + } + if sd, ok := srv.streams[method]; ok { + s.processStreamingRPC(ctx, t, stream, srv, sd, ti) + return + } + } + // Unknown service, or known server unknown method. 
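The unknownStreamDesc consulted in the fallback path below is populated by the grpc.UnknownServiceHandler server option. A brief sketch (assumed, not part of the vendored file) of a catch-all handler that rejects unrecognized methods; a proxy could instead forward the raw stream here.

package example

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// newServerWithCatchAll installs a fallback StreamHandler that runs whenever
// no registered service/method matches the incoming stream.
func newServerWithCatchAll() *grpc.Server {
	return grpc.NewServer(grpc.UnknownServiceHandler(func(srv any, stream grpc.ServerStream) error {
		m, _ := grpc.MethodFromServerStream(stream)
		// This sketch simply rejects the call; a gateway could proxy it instead.
		return status.Errorf(codes.Unimplemented, "no handler for %q", m)
	}))
}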
+ if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { + s.processStreamingRPC(ctx, t, stream, nil, unknownDesc, ti) + return + } + var errDesc string + if !knownService { + errDesc = fmt.Sprintf("unknown service %v", service) + } else { + errDesc = fmt.Sprintf("unknown method %v for service %v", method, service) + } + if ti != nil { + ti.tr.LazyPrintf("%s", errDesc) + ti.tr.SetError() + } + if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { + if ti != nil { + ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + ti.tr.SetError() + } + channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) + } + if ti != nil { + ti.tr.Finish() + } +} + +// The key to save ServerTransportStream in the context. +type streamKey struct{} + +// NewContextWithServerTransportStream creates a new context from ctx and +// attaches stream to it. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func NewContextWithServerTransportStream(ctx context.Context, stream ServerTransportStream) context.Context { + return context.WithValue(ctx, streamKey{}, stream) +} + +// ServerTransportStream is a minimal interface that a transport stream must +// implement. This can be used to mock an actual transport stream for tests of +// handler code that use, for example, grpc.SetHeader (which requires some +// stream to be in context). +// +// See also NewContextWithServerTransportStream. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ServerTransportStream interface { + Method() string + SetHeader(md metadata.MD) error + SendHeader(md metadata.MD) error + SetTrailer(md metadata.MD) error +} + +// ServerTransportStreamFromContext returns the ServerTransportStream saved in +// ctx. Returns nil if the given context has no stream associated with it +// (which implies it is not an RPC invocation context). +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ServerTransportStreamFromContext(ctx context.Context) ServerTransportStream { + s, _ := ctx.Value(streamKey{}).(ServerTransportStream) + return s +} + +// Stop stops the gRPC server. It immediately closes all open +// connections and listeners. +// It cancels all active RPCs on the server side and the corresponding +// pending RPCs on the client side will get notified by connection +// errors. +func (s *Server) Stop() { + s.stop(false) +} + +// GracefulStop stops the gRPC server gracefully. It stops the server from +// accepting new connections and RPCs and blocks until all the pending RPCs are +// finished. +func (s *Server) GracefulStop() { + s.stop(true) +} + +func (s *Server) stop(graceful bool) { + s.quit.Fire() + defer s.done.Fire() + + s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) }) + + s.mu.Lock() + s.closeListenersLocked() + // Wait for serving threads to be ready to exit. Only then can we be sure no + // new conns will be created. 
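The ServerTransportStream doc comment above suggests mocking the stream to test handler code that calls grpc.SetHeader and friends. A hedged sketch of such a test double; the fake type and the metadata keys are invented for illustration.

package example

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

// fakeServerStream is a hypothetical test double satisfying ServerTransportStream.
type fakeServerStream struct {
	header, trailer metadata.MD
}

func (f *fakeServerStream) Method() string { return "/pkg.Svc/Method" }
func (f *fakeServerStream) SetHeader(md metadata.MD) error {
	f.header = metadata.Join(f.header, md)
	return nil
}
func (f *fakeServerStream) SendHeader(md metadata.MD) error { return f.SetHeader(md) }
func (f *fakeServerStream) SetTrailer(md metadata.MD) error {
	f.trailer = metadata.Join(f.trailer, md)
	return nil
}

// exerciseHandlerMetadata shows handler-style code using SetHeader/SetTrailer
// against the fake, without a real transport in the loop.
func exerciseHandlerMetadata(ctx context.Context) error {
	fake := &fakeServerStream{}
	ctx = grpc.NewContextWithServerTransportStream(ctx, fake)
	if err := grpc.SetHeader(ctx, metadata.Pairs("x-demo", "1")); err != nil {
		return err
	}
	return grpc.SetTrailer(ctx, metadata.Pairs("x-done", "1"))
}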
+ s.mu.Unlock() + s.serveWG.Wait() + + s.mu.Lock() + defer s.mu.Unlock() + + if graceful { + s.drainAllServerTransportsLocked() + } else { + s.closeServerTransportsLocked() + } + + for len(s.conns) != 0 { + s.cv.Wait() + } + s.conns = nil + + if s.opts.numServerWorkers > 0 { + // Closing the channel (only once, via grpcsync.OnceFunc) after all the + // connections have been closed above ensures that there are no + // goroutines executing the callback passed to st.HandleStreams (where + // the channel is written to). + s.serverWorkerChannelClose() + } + + if graceful || s.opts.waitForHandlers { + s.handlersWG.Wait() + } + + if s.events != nil { + s.events.Finish() + s.events = nil + } +} + +// s.mu must be held by the caller. +func (s *Server) closeServerTransportsLocked() { + for _, conns := range s.conns { + for st := range conns { + st.Close(errors.New("Server.Stop called")) + } + } +} + +// s.mu must be held by the caller. +func (s *Server) drainAllServerTransportsLocked() { + if !s.drain { + for _, conns := range s.conns { + for st := range conns { + st.Drain("graceful_stop") + } + } + s.drain = true + } +} + +// s.mu must be held by the caller. +func (s *Server) closeListenersLocked() { + for lis := range s.lis { + lis.Close() + } + s.lis = nil +} + +// contentSubtype must be lowercase +// cannot return nil +func (s *Server) getCodec(contentSubtype string) baseCodec { + if s.opts.codec != nil { + return s.opts.codec + } + if contentSubtype == "" { + return encoding.GetCodec(proto.Name) + } + codec := encoding.GetCodec(contentSubtype) + if codec == nil { + logger.Warningf("Unsupported codec %q. Defaulting to %q for now. This will start to fail in future releases.", contentSubtype, proto.Name) + return encoding.GetCodec(proto.Name) + } + return codec +} + +type serverKey struct{} + +// serverFromContext gets the Server from the context. +func serverFromContext(ctx context.Context) *Server { + s, _ := ctx.Value(serverKey{}).(*Server) + return s +} + +// contextWithServer sets the Server in the context. +func contextWithServer(ctx context.Context, server *Server) context.Context { + return context.WithValue(ctx, serverKey{}, server) +} + +// isRegisteredMethod returns whether the passed in method is registered as a +// method on the server. /service/method and service/method will match if the +// service and method are registered on the server. +func (s *Server) isRegisteredMethod(serviceMethod string) bool { + if serviceMethod != "" && serviceMethod[0] == '/' { + serviceMethod = serviceMethod[1:] + } + pos := strings.LastIndex(serviceMethod, "/") + if pos == -1 { // Invalid method name syntax. + return false + } + service := serviceMethod[:pos] + method := serviceMethod[pos+1:] + srv, knownService := s.services[service] + if knownService { + if _, ok := srv.methods[method]; ok { + return true + } + if _, ok := srv.streams[method]; ok { + return true + } + } + return false +} + +// SetHeader sets the header metadata to be sent from the server to the client. +// The context provided must be the context passed to the server's handler. +// +// Streaming RPCs should prefer the SetHeader method of the ServerStream. +// +// When called multiple times, all the provided metadata will be merged. All +// the metadata will be sent out when one of the following happens: +// +// - grpc.SendHeader is called, or for streaming handlers, stream.SendHeader. +// - The first response message is sent. 
For unary handlers, this occurs when +// the handler returns; for streaming handlers, this can happen when stream's +// SendMsg method is called. +// - An RPC status is sent out (error or success). This occurs when the handler +// returns. +// +// SetHeader will fail if called after any of the events above. +// +// The error returned is compatible with the status package. However, the +// status code will often not match the RPC status as seen by the client +// application, and therefore, should not be relied upon for this purpose. +func SetHeader(ctx context.Context, md metadata.MD) error { + if md.Len() == 0 { + return nil + } + stream := ServerTransportStreamFromContext(ctx) + if stream == nil { + return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) + } + return stream.SetHeader(md) +} + +// SendHeader sends header metadata. It may be called at most once, and may not +// be called after any event that causes headers to be sent (see SetHeader for +// a complete list). The provided md and headers set by SetHeader() will be +// sent. +// +// The error returned is compatible with the status package. However, the +// status code will often not match the RPC status as seen by the client +// application, and therefore, should not be relied upon for this purpose. +func SendHeader(ctx context.Context, md metadata.MD) error { + stream := ServerTransportStreamFromContext(ctx) + if stream == nil { + return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) + } + if err := stream.SendHeader(md); err != nil { + return toRPCErr(err) + } + return nil +} + +// SetSendCompressor sets a compressor for outbound messages from the server. +// It must not be called after any event that causes headers to be sent +// (see ServerStream.SetHeader for the complete list). Provided compressor is +// used when below conditions are met: +// +// - compressor is registered via encoding.RegisterCompressor +// - compressor name must exist in the client advertised compressor names +// sent in grpc-accept-encoding header. Use ClientSupportedCompressors to +// get client supported compressor names. +// +// The context provided must be the context passed to the server's handler. +// It must be noted that compressor name encoding.Identity disables the +// outbound compression. +// By default, server messages will be sent using the same compressor with +// which request messages were sent. +// +// It is not safe to call SetSendCompressor concurrently with SendHeader and +// SendMsg. +// +// # Experimental +// +// Notice: This function is EXPERIMENTAL and may be changed or removed in a +// later release. +func SetSendCompressor(ctx context.Context, name string) error { + stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream) + if !ok || stream == nil { + return fmt.Errorf("failed to fetch the stream from the given context") + } + + if err := validateSendCompressor(name, stream.ClientAdvertisedCompressors()); err != nil { + return fmt.Errorf("unable to set send compressor: %w", err) + } + + return stream.SetSendCompress(name) +} + +// ClientSupportedCompressors returns compressor names advertised by the client +// via grpc-accept-encoding header. +// +// The context provided must be the context passed to the server's handler. +// +// # Experimental +// +// Notice: This function is EXPERIMENTAL and may be changed or removed in a +// later release. 
+func ClientSupportedCompressors(ctx context.Context) ([]string, error) { + stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream) + if !ok || stream == nil { + return nil, fmt.Errorf("failed to fetch the stream from the given context %v", ctx) + } + + return strings.Split(stream.ClientAdvertisedCompressors(), ","), nil +} + +// SetTrailer sets the trailer metadata that will be sent when an RPC returns. +// When called more than once, all the provided metadata will be merged. +// +// The error returned is compatible with the status package. However, the +// status code will often not match the RPC status as seen by the client +// application, and therefore, should not be relied upon for this purpose. +func SetTrailer(ctx context.Context, md metadata.MD) error { + if md.Len() == 0 { + return nil + } + stream := ServerTransportStreamFromContext(ctx) + if stream == nil { + return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) + } + return stream.SetTrailer(md) +} + +// Method returns the method string for the server context. The returned +// string is in the format of "/service/method". +func Method(ctx context.Context) (string, bool) { + s := ServerTransportStreamFromContext(ctx) + if s == nil { + return "", false + } + return s.Method(), true +} + +type channelzServer struct { + s *Server +} + +func (c *channelzServer) ChannelzMetric() *channelz.ServerInternalMetric { + return c.s.channelzMetric() +} + +// validateSendCompressor returns an error when given compressor name cannot be +// handled by the server or the client based on the advertised compressors. +func validateSendCompressor(name, clientCompressors string) error { + if name == encoding.Identity { + return nil + } + + if !grpcutil.IsCompressorNameRegistered(name) { + return fmt.Errorf("compressor not registered %q", name) + } + + for _, c := range strings.Split(clientCompressors, ",") { + if c == name { + return nil // found match + } + } + return fmt.Errorf("client does not support compressor %q", name) +} + +// atomicSemaphore implements a blocking, counting semaphore. acquire should be +// called synchronously; release may be called asynchronously. +type atomicSemaphore struct { + n atomic.Int64 + wait chan struct{} +} + +func (q *atomicSemaphore) acquire() { + if q.n.Add(-1) < 0 { + // We ran out of quota. Block until a release happens. + <-q.wait + } +} + +func (q *atomicSemaphore) release() { + // N.B. the "<= 0" check below should allow for this to work with multiple + // concurrent calls to acquire, but also note that with synchronous calls to + // acquire, as our system does, n will never be less than -1. There are + // fairness issues (queuing) to consider if this was to be generalized. + if q.n.Add(1) <= 0 { + // An acquire was waiting on us. Unblock it. + q.wait <- struct{}{} + } +} + +func newHandlerQuota(n uint32) *atomicSemaphore { + a := &atomicSemaphore{wait: make(chan struct{}, 1)} + a.n.Store(int64(n)) + return a +} diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go new file mode 100644 index 00000000..0df11fc0 --- /dev/null +++ b/vendor/google.golang.org/grpc/service_config.go @@ -0,0 +1,347 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "encoding/json" + "errors" + "fmt" + "reflect" + "time" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal" + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/serviceconfig" +) + +const maxInt = int(^uint(0) >> 1) + +// MethodConfig defines the configuration recommended by the service providers for a +// particular method. +// +// Deprecated: Users should not use this struct. Service config should be received +// through name resolver, as specified here +// https://github.com/grpc/grpc/blob/master/doc/service_config.md +type MethodConfig = internalserviceconfig.MethodConfig + +type lbConfig struct { + name string + cfg serviceconfig.LoadBalancingConfig +} + +// ServiceConfig is provided by the service provider and contains parameters for how +// clients that connect to the service should behave. +// +// Deprecated: Users should not use this struct. Service config should be received +// through name resolver, as specified here +// https://github.com/grpc/grpc/blob/master/doc/service_config.md +type ServiceConfig struct { + serviceconfig.Config + + // LB is the load balancer the service providers recommends. This is + // deprecated; lbConfigs is preferred. If lbConfig and LB are both present, + // lbConfig will be used. + LB *string + + // lbConfig is the service config's load balancing configuration. If + // lbConfig and LB are both present, lbConfig will be used. + lbConfig *lbConfig + + // Methods contains a map for the methods in this service. If there is an + // exact match for a method (i.e. /service/method) in the map, use the + // corresponding MethodConfig. If there's no exact match, look for the + // default config for the service (/service/) and use the corresponding + // MethodConfig if it exists. Otherwise, the method has no MethodConfig to + // use. + Methods map[string]MethodConfig + + // If a retryThrottlingPolicy is provided, gRPC will automatically throttle + // retry attempts and hedged RPCs when the client’s ratio of failures to + // successes exceeds a threshold. + // + // For each server name, the gRPC client will maintain a token_count which is + // initially set to maxTokens, and can take values between 0 and maxTokens. + // + // Every outgoing RPC (regardless of service or method invoked) will change + // token_count as follows: + // + // - Every failed RPC will decrement the token_count by 1. + // - Every successful RPC will increment the token_count by tokenRatio. + // + // If token_count is less than or equal to maxTokens / 2, then RPCs will not + // be retried and hedged RPCs will not be sent. + retryThrottling *retryThrottlingPolicy + // healthCheckConfig must be set as one of the requirement to enable LB channel + // health check. + healthCheckConfig *healthCheckConfig + // rawJSONString stores service config json string that get parsed into + // this service config struct. + rawJSONString string +} + +// healthCheckConfig defines the go-native version of the LB channel health check config. 
+type healthCheckConfig struct { + // serviceName is the service name to use in the health-checking request. + ServiceName string +} + +type jsonRetryPolicy struct { + MaxAttempts int + InitialBackoff internalserviceconfig.Duration + MaxBackoff internalserviceconfig.Duration + BackoffMultiplier float64 + RetryableStatusCodes []codes.Code +} + +// retryThrottlingPolicy defines the go-native version of the retry throttling +// policy defined by the service config here: +// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config +type retryThrottlingPolicy struct { + // The number of tokens starts at maxTokens. The token_count will always be + // between 0 and maxTokens. + // + // This field is required and must be greater than zero. + MaxTokens float64 + // The amount of tokens to add on each successful RPC. Typically this will + // be some number between 0 and 1, e.g., 0.1. + // + // This field is required and must be greater than zero. Up to 3 decimal + // places are supported. + TokenRatio float64 +} + +type jsonName struct { + Service string + Method string +} + +var ( + errDuplicatedName = errors.New("duplicated name") + errEmptyServiceNonEmptyMethod = errors.New("cannot combine empty 'service' and non-empty 'method'") +) + +func (j jsonName) generatePath() (string, error) { + if j.Service == "" { + if j.Method != "" { + return "", errEmptyServiceNonEmptyMethod + } + return "", nil + } + res := "/" + j.Service + "/" + if j.Method != "" { + res += j.Method + } + return res, nil +} + +// TODO(lyuxuan): delete this struct after cleaning up old service config implementation. +type jsonMC struct { + Name *[]jsonName + WaitForReady *bool + Timeout *internalserviceconfig.Duration + MaxRequestMessageBytes *int64 + MaxResponseMessageBytes *int64 + RetryPolicy *jsonRetryPolicy +} + +// TODO(lyuxuan): delete this struct after cleaning up old service config implementation. 
+type jsonSC struct { + LoadBalancingPolicy *string + LoadBalancingConfig *internalserviceconfig.BalancerConfig + MethodConfig *[]jsonMC + RetryThrottling *retryThrottlingPolicy + HealthCheckConfig *healthCheckConfig +} + +func init() { + internal.ParseServiceConfig = parseServiceConfig +} +func parseServiceConfig(js string) *serviceconfig.ParseResult { + if len(js) == 0 { + return &serviceconfig.ParseResult{Err: fmt.Errorf("no JSON service config provided")} + } + var rsc jsonSC + err := json.Unmarshal([]byte(js), &rsc) + if err != nil { + logger.Warningf("grpc: unmarshaling service config %s: %v", js, err) + return &serviceconfig.ParseResult{Err: err} + } + sc := ServiceConfig{ + LB: rsc.LoadBalancingPolicy, + Methods: make(map[string]MethodConfig), + retryThrottling: rsc.RetryThrottling, + healthCheckConfig: rsc.HealthCheckConfig, + rawJSONString: js, + } + if c := rsc.LoadBalancingConfig; c != nil { + sc.lbConfig = &lbConfig{ + name: c.Name, + cfg: c.Config, + } + } + + if rsc.MethodConfig == nil { + return &serviceconfig.ParseResult{Config: &sc} + } + + paths := map[string]struct{}{} + for _, m := range *rsc.MethodConfig { + if m.Name == nil { + continue + } + + mc := MethodConfig{ + WaitForReady: m.WaitForReady, + Timeout: (*time.Duration)(m.Timeout), + } + if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil { + logger.Warningf("grpc: unmarshaling service config %s: %v", js, err) + return &serviceconfig.ParseResult{Err: err} + } + if m.MaxRequestMessageBytes != nil { + if *m.MaxRequestMessageBytes > int64(maxInt) { + mc.MaxReqSize = newInt(maxInt) + } else { + mc.MaxReqSize = newInt(int(*m.MaxRequestMessageBytes)) + } + } + if m.MaxResponseMessageBytes != nil { + if *m.MaxResponseMessageBytes > int64(maxInt) { + mc.MaxRespSize = newInt(maxInt) + } else { + mc.MaxRespSize = newInt(int(*m.MaxResponseMessageBytes)) + } + } + for i, n := range *m.Name { + path, err := n.generatePath() + if err != nil { + logger.Warningf("grpc: error unmarshaling service config %s due to methodConfig[%d]: %v", js, i, err) + return &serviceconfig.ParseResult{Err: err} + } + + if _, ok := paths[path]; ok { + err = errDuplicatedName + logger.Warningf("grpc: error unmarshaling service config %s due to methodConfig[%d]: %v", js, i, err) + return &serviceconfig.ParseResult{Err: err} + } + paths[path] = struct{}{} + sc.Methods[path] = mc + } + } + + if sc.retryThrottling != nil { + if mt := sc.retryThrottling.MaxTokens; mt <= 0 || mt > 1000 { + return &serviceconfig.ParseResult{Err: fmt.Errorf("invalid retry throttling config: maxTokens (%v) out of range (0, 1000]", mt)} + } + if tr := sc.retryThrottling.TokenRatio; tr <= 0 { + return &serviceconfig.ParseResult{Err: fmt.Errorf("invalid retry throttling config: tokenRatio (%v) may not be negative", tr)} + } + } + return &serviceconfig.ParseResult{Config: &sc} +} + +func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPolicy, err error) { + if jrp == nil { + return nil, nil + } + + if jrp.MaxAttempts <= 1 || + jrp.InitialBackoff <= 0 || + jrp.MaxBackoff <= 0 || + jrp.BackoffMultiplier <= 0 || + len(jrp.RetryableStatusCodes) == 0 { + logger.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp) + return nil, nil + } + + rp := &internalserviceconfig.RetryPolicy{ + MaxAttempts: jrp.MaxAttempts, + InitialBackoff: time.Duration(jrp.InitialBackoff), + MaxBackoff: time.Duration(jrp.MaxBackoff), + BackoffMultiplier: jrp.BackoffMultiplier, + RetryableStatusCodes: make(map[codes.Code]bool), + } + if 
rp.MaxAttempts > 5 { + // TODO(retry): Make the max maxAttempts configurable. + rp.MaxAttempts = 5 + } + for _, code := range jrp.RetryableStatusCodes { + rp.RetryableStatusCodes[code] = true + } + return rp, nil +} + +func min(a, b *int) *int { + if *a < *b { + return a + } + return b +} + +func getMaxSize(mcMax, doptMax *int, defaultVal int) *int { + if mcMax == nil && doptMax == nil { + return &defaultVal + } + if mcMax != nil && doptMax != nil { + return min(mcMax, doptMax) + } + if mcMax != nil { + return mcMax + } + return doptMax +} + +func newInt(b int) *int { + return &b +} + +func init() { + internal.EqualServiceConfigForTesting = equalServiceConfig +} + +// equalServiceConfig compares two configs. The rawJSONString field is ignored, +// because they may diff in white spaces. +// +// If any of them is NOT *ServiceConfig, return false. +func equalServiceConfig(a, b serviceconfig.Config) bool { + if a == nil && b == nil { + return true + } + aa, ok := a.(*ServiceConfig) + if !ok { + return false + } + bb, ok := b.(*ServiceConfig) + if !ok { + return false + } + aaRaw := aa.rawJSONString + aa.rawJSONString = "" + bbRaw := bb.rawJSONString + bb.rawJSONString = "" + defer func() { + aa.rawJSONString = aaRaw + bb.rawJSONString = bbRaw + }() + // Using reflect.DeepEqual instead of cmp.Equal because many balancer + // configs are unexported, and cmp.Equal cannot compare unexported fields + // from unexported structs. + return reflect.DeepEqual(aa, bb) +} diff --git a/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go new file mode 100644 index 00000000..35e7a20a --- /dev/null +++ b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go @@ -0,0 +1,44 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package serviceconfig defines types and methods for operating on gRPC +// service configs. +// +// # Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed in a +// later release. +package serviceconfig + +// Config represents an opaque data structure holding a service config. +type Config interface { + isServiceConfig() +} + +// LoadBalancingConfig represents an opaque data structure holding a load +// balancing config. +type LoadBalancingConfig interface { + isLoadBalancingConfig() +} + +// ParseResult contains a service config or an error. Exactly one must be +// non-nil. +type ParseResult struct { + Config Config + Err error +} diff --git a/vendor/google.golang.org/grpc/shared_buffer_pool.go b/vendor/google.golang.org/grpc/shared_buffer_pool.go new file mode 100644 index 00000000..48a64cfe --- /dev/null +++ b/vendor/google.golang.org/grpc/shared_buffer_pool.go @@ -0,0 +1,154 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
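For reference, a sketch of the JSON shape that parseServiceConfig above accepts; the field names mirror jsonSC, jsonMC and jsonRetryPolicy, while the service name, method and backoff values are placeholders. Per the deprecation notes above, service config is normally delivered by the name resolver; supplying it with grpc.WithDefaultServiceConfig is a fallback, mostly useful in tests.

package example

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

// demoServiceConfig is a hypothetical config: a 2s per-call timeout and a
// retry policy for one method of one service.
const demoServiceConfig = `{
  "methodConfig": [{
    "name": [{"service": "pkg.EchoService", "method": "Echo"}],
    "timeout": "2s",
    "retryPolicy": {
      "maxAttempts": 3,
      "initialBackoff": "0.1s",
      "maxBackoff": "1s",
      "backoffMultiplier": 2,
      "retryableStatusCodes": ["UNAVAILABLE"]
    }
  }]
}`

func dialWithConfig(target string) (*grpc.ClientConn, error) {
	return grpc.Dial(target,
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(demoServiceConfig),
	)
}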
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import "sync" + +// SharedBufferPool is a pool of buffers that can be shared, resulting in +// decreased memory allocation. Currently, in gRPC-go, it is only utilized +// for parsing incoming messages. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +type SharedBufferPool interface { + // Get returns a buffer with specified length from the pool. + // + // The returned byte slice may be not zero initialized. + Get(length int) []byte + + // Put returns a buffer to the pool. + Put(*[]byte) +} + +// NewSharedBufferPool creates a simple SharedBufferPool with buckets +// of different sizes to optimize memory usage. This prevents the pool from +// wasting large amounts of memory, even when handling messages of varying sizes. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func NewSharedBufferPool() SharedBufferPool { + return &simpleSharedBufferPool{ + pools: [poolArraySize]simpleSharedBufferChildPool{ + newBytesPool(level0PoolMaxSize), + newBytesPool(level1PoolMaxSize), + newBytesPool(level2PoolMaxSize), + newBytesPool(level3PoolMaxSize), + newBytesPool(level4PoolMaxSize), + newBytesPool(0), + }, + } +} + +// simpleSharedBufferPool is a simple implementation of SharedBufferPool. +type simpleSharedBufferPool struct { + pools [poolArraySize]simpleSharedBufferChildPool +} + +func (p *simpleSharedBufferPool) Get(size int) []byte { + return p.pools[p.poolIdx(size)].Get(size) +} + +func (p *simpleSharedBufferPool) Put(bs *[]byte) { + p.pools[p.poolIdx(cap(*bs))].Put(bs) +} + +func (p *simpleSharedBufferPool) poolIdx(size int) int { + switch { + case size <= level0PoolMaxSize: + return level0PoolIdx + case size <= level1PoolMaxSize: + return level1PoolIdx + case size <= level2PoolMaxSize: + return level2PoolIdx + case size <= level3PoolMaxSize: + return level3PoolIdx + case size <= level4PoolMaxSize: + return level4PoolIdx + default: + return levelMaxPoolIdx + } +} + +const ( + level0PoolMaxSize = 16 // 16 B + level1PoolMaxSize = level0PoolMaxSize * 16 // 256 B + level2PoolMaxSize = level1PoolMaxSize * 16 // 4 KB + level3PoolMaxSize = level2PoolMaxSize * 16 // 64 KB + level4PoolMaxSize = level3PoolMaxSize * 16 // 1 MB +) + +const ( + level0PoolIdx = iota + level1PoolIdx + level2PoolIdx + level3PoolIdx + level4PoolIdx + levelMaxPoolIdx + poolArraySize +) + +type simpleSharedBufferChildPool interface { + Get(size int) []byte + Put(any) +} + +type bufferPool struct { + sync.Pool + + defaultSize int +} + +func (p *bufferPool) Get(size int) []byte { + bs := p.Pool.Get().(*[]byte) + + if cap(*bs) < size { + p.Pool.Put(bs) + + return make([]byte, size) + } + + return (*bs)[:size] +} + +func newBytesPool(size int) simpleSharedBufferChildPool { + return &bufferPool{ + Pool: sync.Pool{ + New: func() any { + bs := make([]byte, size) + return &bs + }, + }, + defaultSize: size, + } +} + +// nopBufferPool is a buffer pool just makes new buffer without pooling. 
+type nopBufferPool struct { +} + +func (nopBufferPool) Get(length int) []byte { + return make([]byte, length) +} + +func (nopBufferPool) Put(*[]byte) { +} diff --git a/vendor/google.golang.org/grpc/stats/handlers.go b/vendor/google.golang.org/grpc/stats/handlers.go new file mode 100644 index 00000000..dc03731e --- /dev/null +++ b/vendor/google.golang.org/grpc/stats/handlers.go @@ -0,0 +1,63 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package stats + +import ( + "context" + "net" +) + +// ConnTagInfo defines the relevant information needed by connection context tagger. +type ConnTagInfo struct { + // RemoteAddr is the remote address of the corresponding connection. + RemoteAddr net.Addr + // LocalAddr is the local address of the corresponding connection. + LocalAddr net.Addr +} + +// RPCTagInfo defines the relevant information needed by RPC context tagger. +type RPCTagInfo struct { + // FullMethodName is the RPC method in the format of /package.service/method. + FullMethodName string + // FailFast indicates if this RPC is failfast. + // This field is only valid on client side, it's always false on server side. + FailFast bool +} + +// Handler defines the interface for the related stats handling (e.g., RPCs, connections). +type Handler interface { + // TagRPC can attach some information to the given context. + // The context used for the rest lifetime of the RPC will be derived from + // the returned context. + TagRPC(context.Context, *RPCTagInfo) context.Context + // HandleRPC processes the RPC stats. + HandleRPC(context.Context, RPCStats) + + // TagConn can attach some information to the given context. + // The returned context will be used for stats handling. + // For conn stats handling, the context used in HandleConn for this + // connection will be derived from the context returned. + // For RPC stats handling, + // - On server side, the context used in HandleRPC for all RPCs on this + // connection will be derived from the context returned. + // - On client side, the context is not derived from the context returned. + TagConn(context.Context, *ConnTagInfo) context.Context + // HandleConn processes the Conn stats. + HandleConn(context.Context, ConnStats) +} diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go new file mode 100644 index 00000000..4ab70e2d --- /dev/null +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -0,0 +1,343 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
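A minimal sketch of implementing the stats.Handler interface defined above and attaching it with the grpc.StatsHandler server option; the counter and logging are illustrative, not part of the vendored code.

package example

import (
	"context"
	"log"
	"sync/atomic"

	"google.golang.org/grpc"
	"google.golang.org/grpc/stats"
)

// rpcCounter is a minimal stats.Handler that counts finished RPCs.
type rpcCounter struct {
	finished atomic.Int64
}

func (h *rpcCounter) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context   { return ctx }
func (h *rpcCounter) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { return ctx }
func (h *rpcCounter) HandleConn(context.Context, stats.ConnStats)                       {}

func (h *rpcCounter) HandleRPC(_ context.Context, s stats.RPCStats) {
	// stats.End is delivered once per RPC, on both success and failure.
	if end, ok := s.(*stats.End); ok {
		h.finished.Add(1)
		if end.Error != nil {
			log.Printf("rpc failed after %v: %v", end.EndTime.Sub(end.BeginTime), end.Error)
		}
	}
}

func newServerWithStats() *grpc.Server {
	return grpc.NewServer(grpc.StatsHandler(&rpcCounter{}))
}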
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package stats is for collecting and reporting various network and RPC stats. +// This package is for monitoring purpose only. All fields are read-only. +// All APIs are experimental. +package stats // import "google.golang.org/grpc/stats" + +import ( + "context" + "net" + "time" + + "google.golang.org/grpc/metadata" +) + +// RPCStats contains stats information about RPCs. +type RPCStats interface { + isRPCStats() + // IsClient returns true if this RPCStats is from client side. + IsClient() bool +} + +// Begin contains stats when an RPC attempt begins. +// FailFast is only valid if this Begin is from client side. +type Begin struct { + // Client is true if this Begin is from client side. + Client bool + // BeginTime is the time when the RPC attempt begins. + BeginTime time.Time + // FailFast indicates if this RPC is failfast. + FailFast bool + // IsClientStream indicates whether the RPC is a client streaming RPC. + IsClientStream bool + // IsServerStream indicates whether the RPC is a server streaming RPC. + IsServerStream bool + // IsTransparentRetryAttempt indicates whether this attempt was initiated + // due to transparently retrying a previous attempt. + IsTransparentRetryAttempt bool +} + +// IsClient indicates if the stats information is from client side. +func (s *Begin) IsClient() bool { return s.Client } + +func (s *Begin) isRPCStats() {} + +// PickerUpdated indicates that the LB policy provided a new picker while the +// RPC was waiting for one. +type PickerUpdated struct{} + +// IsClient indicates if the stats information is from client side. Only Client +// Side interfaces with a Picker, thus always returns true. +func (*PickerUpdated) IsClient() bool { return true } + +func (*PickerUpdated) isRPCStats() {} + +// InPayload contains the information for an incoming payload. +type InPayload struct { + // Client is true if this InPayload is from client side. + Client bool + // Payload is the payload with original type. + Payload any + // Data is the serialized message payload. + Data []byte + + // Length is the size of the uncompressed payload data. Does not include any + // framing (gRPC or HTTP/2). + Length int + // CompressedLength is the size of the compressed payload data. Does not + // include any framing (gRPC or HTTP/2). Same as Length if compression not + // enabled. + CompressedLength int + // WireLength is the size of the compressed payload data plus gRPC framing. + // Does not include HTTP/2 framing. + WireLength int + + // RecvTime is the time when the payload is received. + RecvTime time.Time +} + +// IsClient indicates if the stats information is from client side. +func (s *InPayload) IsClient() bool { return s.Client } + +func (s *InPayload) isRPCStats() {} + +// InHeader contains stats when a header is received. +type InHeader struct { + // Client is true if this InHeader is from client side. + Client bool + // WireLength is the wire length of header. + WireLength int + // Compression is the compression algorithm used for the RPC. + Compression string + // Header contains the header metadata received. + Header metadata.MD + + // The following fields are valid only if Client is false. + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string + // RemoteAddr is the remote address of the corresponding connection. + RemoteAddr net.Addr + // LocalAddr is the local address of the corresponding connection. 
+ LocalAddr net.Addr +} + +// IsClient indicates if the stats information is from client side. +func (s *InHeader) IsClient() bool { return s.Client } + +func (s *InHeader) isRPCStats() {} + +// InTrailer contains stats when a trailer is received. +type InTrailer struct { + // Client is true if this InTrailer is from client side. + Client bool + // WireLength is the wire length of trailer. + WireLength int + // Trailer contains the trailer metadata received from the server. This + // field is only valid if this InTrailer is from the client side. + Trailer metadata.MD +} + +// IsClient indicates if the stats information is from client side. +func (s *InTrailer) IsClient() bool { return s.Client } + +func (s *InTrailer) isRPCStats() {} + +// OutPayload contains the information for an outgoing payload. +type OutPayload struct { + // Client is true if this OutPayload is from client side. + Client bool + // Payload is the payload with original type. + Payload any + // Data is the serialized message payload. + Data []byte + // Length is the size of the uncompressed payload data. Does not include any + // framing (gRPC or HTTP/2). + Length int + // CompressedLength is the size of the compressed payload data. Does not + // include any framing (gRPC or HTTP/2). Same as Length if compression not + // enabled. + CompressedLength int + // WireLength is the size of the compressed payload data plus gRPC framing. + // Does not include HTTP/2 framing. + WireLength int + // SentTime is the time when the payload is sent. + SentTime time.Time +} + +// IsClient indicates if this stats information is from client side. +func (s *OutPayload) IsClient() bool { return s.Client } + +func (s *OutPayload) isRPCStats() {} + +// OutHeader contains stats when a header is sent. +type OutHeader struct { + // Client is true if this OutHeader is from client side. + Client bool + // Compression is the compression algorithm used for the RPC. + Compression string + // Header contains the header metadata sent. + Header metadata.MD + + // The following fields are valid only if Client is true. + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string + // RemoteAddr is the remote address of the corresponding connection. + RemoteAddr net.Addr + // LocalAddr is the local address of the corresponding connection. + LocalAddr net.Addr +} + +// IsClient indicates if this stats information is from client side. +func (s *OutHeader) IsClient() bool { return s.Client } + +func (s *OutHeader) isRPCStats() {} + +// OutTrailer contains stats when a trailer is sent. +type OutTrailer struct { + // Client is true if this OutTrailer is from client side. + Client bool + // WireLength is the wire length of trailer. + // + // Deprecated: This field is never set. The length is not known when this message is + // emitted because the trailer fields are compressed with hpack after that. + WireLength int + // Trailer contains the trailer metadata sent to the client. This + // field is only valid if this OutTrailer is from the server side. + Trailer metadata.MD +} + +// IsClient indicates if this stats information is from client side. +func (s *OutTrailer) IsClient() bool { return s.Client } + +func (s *OutTrailer) isRPCStats() {} + +// End contains stats when an RPC ends. +type End struct { + // Client is true if this End is from client side. + Client bool + // BeginTime is the time when the RPC began. + BeginTime time.Time + // EndTime is the time when the RPC ends. 
+ EndTime time.Time + // Trailer contains the trailer metadata received from the server. This + // field is only valid if this End is from the client side. + // Deprecated: use Trailer in InTrailer instead. + Trailer metadata.MD + // Error is the error the RPC ended with. It is an error generated from + // status.Status and can be converted back to status.Status using + // status.FromError if non-nil. + Error error +} + +// IsClient indicates if this is from client side. +func (s *End) IsClient() bool { return s.Client } + +func (s *End) isRPCStats() {} + +// ConnStats contains stats information about connections. +type ConnStats interface { + isConnStats() + // IsClient returns true if this ConnStats is from client side. + IsClient() bool +} + +// ConnBegin contains the stats of a connection when it is established. +type ConnBegin struct { + // Client is true if this ConnBegin is from client side. + Client bool +} + +// IsClient indicates if this is from client side. +func (s *ConnBegin) IsClient() bool { return s.Client } + +func (s *ConnBegin) isConnStats() {} + +// ConnEnd contains the stats of a connection when it ends. +type ConnEnd struct { + // Client is true if this ConnEnd is from client side. + Client bool +} + +// IsClient indicates if this is from client side. +func (s *ConnEnd) IsClient() bool { return s.Client } + +func (s *ConnEnd) isConnStats() {} + +type incomingTagsKey struct{} +type outgoingTagsKey struct{} + +// SetTags attaches stats tagging data to the context, which will be sent in +// the outgoing RPC with the header grpc-tags-bin. Subsequent calls to +// SetTags will overwrite the values from earlier calls. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func SetTags(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, outgoingTagsKey{}, b) +} + +// Tags returns the tags from the context for the inbound RPC. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func Tags(ctx context.Context) []byte { + b, _ := ctx.Value(incomingTagsKey{}).([]byte) + return b +} + +// SetIncomingTags attaches stats tagging data to the context, to be read by +// the application (not sent in outgoing RPCs). +// +// This is intended for gRPC-internal use ONLY. +func SetIncomingTags(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, incomingTagsKey{}, b) +} + +// OutgoingTags returns the tags from the context for the outbound RPC. +// +// This is intended for gRPC-internal use ONLY. +func OutgoingTags(ctx context.Context) []byte { + b, _ := ctx.Value(outgoingTagsKey{}).([]byte) + return b +} + +type incomingTraceKey struct{} +type outgoingTraceKey struct{} + +// SetTrace attaches stats tagging data to the context, which will be sent in +// the outgoing RPC with the header grpc-trace-bin. Subsequent calls to +// SetTrace will overwrite the values from earlier calls. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. 
New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func SetTrace(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, outgoingTraceKey{}, b) +} + +// Trace returns the trace from the context for the inbound RPC. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func Trace(ctx context.Context) []byte { + b, _ := ctx.Value(incomingTraceKey{}).([]byte) + return b +} + +// SetIncomingTrace attaches stats tagging data to the context, to be read by +// the application (not sent in outgoing RPCs). It is intended for +// gRPC-internal use. +func SetIncomingTrace(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, incomingTraceKey{}, b) +} + +// OutgoingTrace returns the trace from the context for the outbound RPC. It is +// intended for gRPC-internal use. +func OutgoingTrace(ctx context.Context) []byte { + b, _ := ctx.Value(outgoingTraceKey{}).([]byte) + return b +} diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go new file mode 100644 index 00000000..a93360ef --- /dev/null +++ b/vendor/google.golang.org/grpc/status/status.go @@ -0,0 +1,162 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package status implements errors returned by gRPC. These errors are +// serialized and transmitted on the wire between server and client, and allow +// for additional data to be transmitted via the Details field in the status +// proto. gRPC service handlers should return an error created by this +// package, and gRPC clients should expect a corresponding error to be +// returned from the RPC call. +// +// This package upholds the invariants that a non-nil error may not +// contain an OK code, and an OK code must result in a nil error. +package status + +import ( + "context" + "errors" + "fmt" + + spb "google.golang.org/genproto/googleapis/rpc/status" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal/status" +) + +// Status references google.golang.org/grpc/internal/status. It represents an +// RPC status code, message, and details. It is immutable and should be +// created with New, Newf, or FromProto. +// https://godoc.org/google.golang.org/grpc/internal/status +type Status = status.Status + +// New returns a Status representing c and msg. +func New(c codes.Code, msg string) *Status { + return status.New(c, msg) +} + +// Newf returns New(c, fmt.Sprintf(format, a...)). +func Newf(c codes.Code, format string, a ...any) *Status { + return New(c, fmt.Sprintf(format, a...)) +} + +// Error returns an error representing c and msg. If c is OK, returns nil. 
+func Error(c codes.Code, msg string) error { + return New(c, msg).Err() +} + +// Errorf returns Error(c, fmt.Sprintf(format, a...)). +func Errorf(c codes.Code, format string, a ...any) error { + return Error(c, fmt.Sprintf(format, a...)) +} + +// ErrorProto returns an error representing s. If s.Code is OK, returns nil. +func ErrorProto(s *spb.Status) error { + return FromProto(s).Err() +} + +// FromProto returns a Status representing s. +func FromProto(s *spb.Status) *Status { + return status.FromProto(s) +} + +// FromError returns a Status representation of err. +// +// - If err was produced by this package or implements the method `GRPCStatus() +// *Status` and `GRPCStatus()` does not return nil, or if err wraps a type +// satisfying this, the Status from `GRPCStatus()` is returned. For wrapped +// errors, the message returned contains the entire err.Error() text and not +// just the wrapped status. In that case, ok is true. +// +// - If err is nil, a Status is returned with codes.OK and no message, and ok +// is true. +// +// - If err implements the method `GRPCStatus() *Status` and `GRPCStatus()` +// returns nil (which maps to Codes.OK), or if err wraps a type +// satisfying this, a Status is returned with codes.Unknown and err's +// Error() message, and ok is false. +// +// - Otherwise, err is an error not compatible with this package. In this +// case, a Status is returned with codes.Unknown and err's Error() message, +// and ok is false. +func FromError(err error) (s *Status, ok bool) { + if err == nil { + return nil, true + } + type grpcstatus interface{ GRPCStatus() *Status } + if gs, ok := err.(grpcstatus); ok { + grpcStatus := gs.GRPCStatus() + if grpcStatus == nil { + // Error has status nil, which maps to codes.OK. There + // is no sensible behavior for this, so we turn it into + // an error with codes.Unknown and discard the existing + // status. + return New(codes.Unknown, err.Error()), false + } + return grpcStatus, true + } + var gs grpcstatus + if errors.As(err, &gs) { + grpcStatus := gs.GRPCStatus() + if grpcStatus == nil { + // Error wraps an error that has status nil, which maps + // to codes.OK. There is no sensible behavior for this, + // so we turn it into an error with codes.Unknown and + // discard the existing status. + return New(codes.Unknown, err.Error()), false + } + p := grpcStatus.Proto() + p.Message = err.Error() + return status.FromProto(p), true + } + return New(codes.Unknown, err.Error()), false +} + +// Convert is a convenience function which removes the need to handle the +// boolean return value from FromError. +func Convert(err error) *Status { + s, _ := FromError(err) + return s +} + +// Code returns the Code of the error if it is a Status error or if it wraps a +// Status error. If that is not the case, it returns codes.OK if err is nil, or +// codes.Unknown otherwise. +func Code(err error) codes.Code { + // Don't use FromError to avoid allocation of OK status. + if err == nil { + return codes.OK + } + + return Convert(err).Code() +} + +// FromContextError converts a context error or wrapped context error into a +// Status. It returns a Status with codes.OK if err is nil, or a Status with +// codes.Unknown if err is non-nil and not a context error. 
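// Caller-side sketch using the constructors and inspectors above. Only the
// status/codes calls are real API; doSomeRPC stands in for any gRPC call.
//
//	if err := doSomeRPC(ctx); err != nil {
//		st := status.Convert(err) // never nil for a non-nil err
//		switch st.Code() {
//		case codes.NotFound:
//			// handle a missing resource
//		case codes.DeadlineExceeded, codes.Canceled:
//			// typically produced from ctx errors via FromContextError
//		default:
//			log.Printf("rpc failed: %s: %s", st.Code(), st.Message())
//		}
//	}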
+func FromContextError(err error) *Status { + if err == nil { + return nil + } + if errors.Is(err, context.DeadlineExceeded) { + return New(codes.DeadlineExceeded, err.Error()) + } + if errors.Is(err, context.Canceled) { + return New(codes.Canceled, err.Error()) + } + return New(codes.Unknown, err.Error()) +} diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go new file mode 100644 index 00000000..d621f52b --- /dev/null +++ b/vendor/google.golang.org/grpc/stream.go @@ -0,0 +1,1782 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "errors" + "io" + "math" + "strconv" + "sync" + "time" + + "golang.org/x/net/trace" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/encoding" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/balancerload" + "google.golang.org/grpc/internal/binarylog" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/grpcutil" + imetadata "google.golang.org/grpc/internal/metadata" + iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/serviceconfig" + istatus "google.golang.org/grpc/internal/status" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool)) + +// StreamHandler defines the handler called by gRPC server to complete the +// execution of a streaming RPC. +// +// If a StreamHandler returns an error, it should either be produced by the +// status package, or be one of the context errors. Otherwise, gRPC will use +// codes.Unknown as the status code and err.Error() as the status message of the +// RPC. +type StreamHandler func(srv any, stream ServerStream) error + +// StreamDesc represents a streaming RPC service's method specification. Used +// on the server when registering services and on the client when initiating +// new streams. +type StreamDesc struct { + // StreamName and Handler are only used when registering handlers on a + // server. + StreamName string // the name of the method excluding the service + Handler StreamHandler // the handler called for the method + + // ServerStreams and ClientStreams are used for registering handlers on a + // server as well as defining RPC behavior when passed to NewClientStream + // and ClientConn.NewStream. At least one must be true. + ServerStreams bool // indicates the server can perform streaming sends + ClientStreams bool // indicates the client can perform streaming sends +} + +// Stream defines the common interface a client or server stream has to satisfy. +// +// Deprecated: See ClientStream and ServerStream documentation instead. 
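// Sketch of how a StreamDesc is typically populated by generated code for a
// server-streaming method; the stream name and handler are hypothetical.
//
//	var watchDesc = grpc.StreamDesc{
//		StreamName:    "Watch",
//		Handler:       watchHandler, // func(srv any, stream grpc.ServerStream) error
//		ServerStreams: true,
//	}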
+type Stream interface { + // Deprecated: See ClientStream and ServerStream documentation instead. + Context() context.Context + // Deprecated: See ClientStream and ServerStream documentation instead. + SendMsg(m any) error + // Deprecated: See ClientStream and ServerStream documentation instead. + RecvMsg(m any) error +} + +// ClientStream defines the client-side behavior of a streaming RPC. +// +// All errors returned from ClientStream methods are compatible with the +// status package. +type ClientStream interface { + // Header returns the header metadata received from the server if there + // is any. It blocks if the metadata is not ready to read. If the metadata + // is nil and the error is also nil, then the stream was terminated without + // headers, and the status can be discovered by calling RecvMsg. + Header() (metadata.MD, error) + // Trailer returns the trailer metadata from the server, if there is any. + // It must only be called after stream.CloseAndRecv has returned, or + // stream.Recv has returned a non-nil error (including io.EOF). + Trailer() metadata.MD + // CloseSend closes the send direction of the stream. It closes the stream + // when non-nil error is met. It is also not safe to call CloseSend + // concurrently with SendMsg. + CloseSend() error + // Context returns the context for this stream. + // + // It should not be called until after Header or RecvMsg has returned. Once + // called, subsequent client-side retries are disabled. + Context() context.Context + // SendMsg is generally called by generated code. On error, SendMsg aborts + // the stream. If the error was generated by the client, the status is + // returned directly; otherwise, io.EOF is returned and the status of + // the stream may be discovered using RecvMsg. + // + // SendMsg blocks until: + // - There is sufficient flow control to schedule m with the transport, or + // - The stream is done, or + // - The stream breaks. + // + // SendMsg does not wait until the message is received by the server. An + // untimely stream closure may result in lost messages. To ensure delivery, + // users should ensure the RPC completed successfully using RecvMsg. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not safe + // to call SendMsg on the same stream in different goroutines. It is also + // not safe to call CloseSend concurrently with SendMsg. + // + // It is not safe to modify the message after calling SendMsg. Tracing + // libraries and stats handlers may use the message lazily. + SendMsg(m any) error + // RecvMsg blocks until it receives a message into m or the stream is + // done. It returns io.EOF when the stream completes successfully. On + // any other error, the stream is aborted and the error contains the RPC + // status. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not + // safe to call RecvMsg on the same stream in different goroutines. + RecvMsg(m any) error +} + +// NewStream creates a new Stream for the client side. This is typically +// called by generated code. ctx is used for the lifetime of the stream. +// +// To ensure resources are not leaked due to the stream returned, one of the following +// actions must be performed: +// +// 1. Call Close on the ClientConn. +// 2. Cancel the context provided. +// 3. Call RecvMsg until a non-nil error is returned. 
A protobuf-generated +// client-streaming RPC, for instance, might use the helper function +// CloseAndRecv (note that CloseSend does not Recv, therefore is not +// guaranteed to release all resources). +// 4. Receive a non-nil, non-io.EOF error from Header or SendMsg. +// +// If none of the above happen, a goroutine and a context will be leaked, and grpc +// will not call the optionally-configured stats handler with a stats.End message. +func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { + // allow interceptor to see all applicable call options, which means those + // configured as defaults from dial option as well as per-call options + opts = combine(cc.dopts.callOptions, opts) + + if cc.dopts.streamInt != nil { + return cc.dopts.streamInt(ctx, desc, cc, method, newClientStream, opts...) + } + return newClientStream(ctx, desc, cc, method, opts...) +} + +// NewClientStream is a wrapper for ClientConn.NewStream. +func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) { + return cc.NewStream(ctx, desc, method, opts...) +} + +func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { + // Start tracking the RPC for idleness purposes. This is where a stream is + // created for both streaming and unary RPCs, and hence is a good place to + // track active RPC count. + if err := cc.idlenessMgr.OnCallBegin(); err != nil { + return nil, err + } + // Add a calloption, to decrement the active call count, that gets executed + // when the RPC completes. + opts = append([]CallOption{OnFinish(func(error) { cc.idlenessMgr.OnCallEnd() })}, opts...) + + if md, added, ok := metadataFromOutgoingContextRaw(ctx); ok { + // validate md + if err := imetadata.Validate(md); err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + // validate added + for _, kvs := range added { + for i := 0; i < len(kvs); i += 2 { + if err := imetadata.ValidatePair(kvs[i], kvs[i+1]); err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + } + } + } + if channelz.IsOn() { + cc.incrCallsStarted() + defer func() { + if err != nil { + cc.incrCallsFailed() + } + }() + } + // Provide an opportunity for the first RPC to see the first service config + // provided by the resolver. + if err := cc.waitForResolvedAddrs(ctx); err != nil { + return nil, err + } + + var mc serviceconfig.MethodConfig + var onCommit func() + var newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) { + return newClientStreamWithParams(ctx, desc, cc, method, mc, onCommit, done, opts...) + } + + rpcInfo := iresolver.RPCInfo{Context: ctx, Method: method} + rpcConfig, err := cc.safeConfigSelector.SelectConfig(rpcInfo) + if err != nil { + if st, ok := status.FromError(err); ok { + // Restrict the code to the list allowed by gRFC A54. 
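// Usage sketch for NewStream and the cleanup rules listed above; the
// descriptor, method name, and message types are hypothetical, and generated
// stubs normally wrap this call.
//
//	stream, err := cc.NewStream(ctx, &grpc.StreamDesc{ServerStreams: true}, "/pkg.Service/Watch")
//	if err != nil {
//		return err
//	}
//	if err := stream.SendMsg(&WatchRequest{}); err != nil {
//		return err
//	}
//	_ = stream.CloseSend()
//	for {
//		var resp WatchResponse
//		if err := stream.RecvMsg(&resp); err != nil {
//			// io.EOF means the stream ended cleanly; any other error carries
//			// the RPC status. Either way, the stream's resources are released.
//			break
//		}
//	}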
+ if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "config selector returned illegal status: %v", err) + } + return nil, err + } + return nil, toRPCErr(err) + } + + if rpcConfig != nil { + if rpcConfig.Context != nil { + ctx = rpcConfig.Context + } + mc = rpcConfig.MethodConfig + onCommit = rpcConfig.OnCommitted + if rpcConfig.Interceptor != nil { + rpcInfo.Context = nil + ns := newStream + newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) { + cs, err := rpcConfig.Interceptor.NewStream(ctx, rpcInfo, done, ns) + if err != nil { + return nil, toRPCErr(err) + } + return cs, nil + } + } + } + + return newStream(ctx, func() {}) +} + +func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, mc serviceconfig.MethodConfig, onCommit, doneFunc func(), opts ...CallOption) (_ iresolver.ClientStream, err error) { + c := defaultCallInfo() + if mc.WaitForReady != nil { + c.failFast = !*mc.WaitForReady + } + + // Possible context leak: + // The cancel function for the child context we create will only be called + // when RecvMsg returns a non-nil error, if the ClientConn is closed, or if + // an error is generated by SendMsg. + // https://github.com/grpc/grpc-go/issues/1818. + var cancel context.CancelFunc + if mc.Timeout != nil && *mc.Timeout >= 0 { + ctx, cancel = context.WithTimeout(ctx, *mc.Timeout) + } else { + ctx, cancel = context.WithCancel(ctx) + } + defer func() { + if err != nil { + cancel() + } + }() + + for _, o := range opts { + if err := o.before(c); err != nil { + return nil, toRPCErr(err) + } + } + c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize) + c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize) + if err := setCallInfoCodec(c); err != nil { + return nil, err + } + + callHdr := &transport.CallHdr{ + Host: cc.authority, + Method: method, + ContentSubtype: c.contentSubtype, + DoneFunc: doneFunc, + } + + // Set our outgoing compression according to the UseCompressor CallOption, if + // set. In that case, also find the compressor from the encoding package. + // Otherwise, use the compressor configured by the WithCompressor DialOption, + // if set. + var cp Compressor + var comp encoding.Compressor + if ct := c.compressorType; ct != "" { + callHdr.SendCompress = ct + if ct != encoding.Identity { + comp = encoding.GetCompressor(ct) + if comp == nil { + return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct) + } + } + } else if cc.dopts.cp != nil { + callHdr.SendCompress = cc.dopts.cp.Type() + cp = cc.dopts.cp + } + if c.creds != nil { + callHdr.Creds = c.creds + } + + cs := &clientStream{ + callHdr: callHdr, + ctx: ctx, + methodConfig: &mc, + opts: opts, + callInfo: c, + cc: cc, + desc: desc, + codec: c.codec, + cp: cp, + comp: comp, + cancel: cancel, + firstAttempt: true, + onCommit: onCommit, + } + if !cc.dopts.disableRetry { + cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler) + } + if ml := binarylog.GetMethodLogger(method); ml != nil { + cs.binlogs = append(cs.binlogs, ml) + } + if cc.dopts.binaryLogger != nil { + if ml := cc.dopts.binaryLogger.GetMethodLogger(method); ml != nil { + cs.binlogs = append(cs.binlogs, ml) + } + } + + // Pick the transport to use and create a new stream on the transport. + // Assign cs.attempt upon success. 
+ op := func(a *csAttempt) error { + if err := a.getTransport(); err != nil { + return err + } + if err := a.newStream(); err != nil { + return err + } + // Because this operation is always called either here (while creating + // the clientStream) or by the retry code while locked when replaying + // the operation, it is safe to access cs.attempt directly. + cs.attempt = a + return nil + } + if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil { + return nil, err + } + + if len(cs.binlogs) != 0 { + md, _ := metadata.FromOutgoingContext(ctx) + logEntry := &binarylog.ClientHeader{ + OnClientSide: true, + Header: md, + MethodName: method, + Authority: cs.cc.authority, + } + if deadline, ok := ctx.Deadline(); ok { + logEntry.Timeout = time.Until(deadline) + if logEntry.Timeout < 0 { + logEntry.Timeout = 0 + } + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, logEntry) + } + } + + if desc != unaryStreamDesc { + // Listen on cc and stream contexts to cleanup when the user closes the + // ClientConn or cancels the stream context. In all other cases, an error + // should already be injected into the recv buffer by the transport, which + // the client will eventually receive, and then we will cancel the stream's + // context in clientStream.finish. + go func() { + select { + case <-cc.ctx.Done(): + cs.finish(ErrClientConnClosing) + case <-ctx.Done(): + cs.finish(toRPCErr(ctx.Err())) + } + }() + } + return cs, nil +} + +// newAttemptLocked creates a new csAttempt without a transport or stream. +func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) { + if err := cs.ctx.Err(); err != nil { + return nil, toRPCErr(err) + } + if err := cs.cc.ctx.Err(); err != nil { + return nil, ErrClientConnClosing + } + + ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.cp, cs.comp) + method := cs.callHdr.Method + var beginTime time.Time + shs := cs.cc.dopts.copts.StatsHandlers + for _, sh := range shs { + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: cs.callInfo.failFast}) + beginTime = time.Now() + begin := &stats.Begin{ + Client: true, + BeginTime: beginTime, + FailFast: cs.callInfo.failFast, + IsClientStream: cs.desc.ClientStreams, + IsServerStream: cs.desc.ServerStreams, + IsTransparentRetryAttempt: isTransparent, + } + sh.HandleRPC(ctx, begin) + } + + var trInfo *traceInfo + if EnableTracing { + trInfo = &traceInfo{ + tr: trace.New("grpc.Sent."+methodFamily(method), method), + firstLine: firstLine{ + client: true, + }, + } + if deadline, ok := ctx.Deadline(); ok { + trInfo.firstLine.deadline = time.Until(deadline) + } + trInfo.tr.LazyLog(&trInfo.firstLine, false) + ctx = trace.NewContext(ctx, trInfo.tr) + } + + if cs.cc.parsedTarget.URL.Scheme == internal.GRPCResolverSchemeExtraMetadata { + // Add extra metadata (metadata that will be added by transport) to context + // so the balancer can see them. 
+ ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs( + "content-type", grpcutil.ContentType(cs.callHdr.ContentSubtype), + )) + } + + return &csAttempt{ + ctx: ctx, + beginTime: beginTime, + cs: cs, + dc: cs.cc.dopts.dc, + statsHandlers: shs, + trInfo: trInfo, + }, nil +} + +func (a *csAttempt) getTransport() error { + cs := a.cs + + var err error + a.t, a.pickResult, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method) + if err != nil { + if de, ok := err.(dropError); ok { + err = de.error + a.drop = true + } + return err + } + if a.trInfo != nil { + a.trInfo.firstLine.SetRemoteAddr(a.t.RemoteAddr()) + } + return nil +} + +func (a *csAttempt) newStream() error { + cs := a.cs + cs.callHdr.PreviousAttempts = cs.numRetries + + // Merge metadata stored in PickResult, if any, with existing call metadata. + // It is safe to overwrite the csAttempt's context here, since all state + // maintained in it are local to the attempt. When the attempt has to be + // retried, a new instance of csAttempt will be created. + if a.pickResult.Metadata != nil { + // We currently do not have a function it the metadata package which + // merges given metadata with existing metadata in a context. Existing + // function `AppendToOutgoingContext()` takes a variadic argument of key + // value pairs. + // + // TODO: Make it possible to retrieve key value pairs from metadata.MD + // in a form passable to AppendToOutgoingContext(), or create a version + // of AppendToOutgoingContext() that accepts a metadata.MD. + md, _ := metadata.FromOutgoingContext(a.ctx) + md = metadata.Join(md, a.pickResult.Metadata) + a.ctx = metadata.NewOutgoingContext(a.ctx, md) + } + + s, err := a.t.NewStream(a.ctx, cs.callHdr) + if err != nil { + nse, ok := err.(*transport.NewStreamError) + if !ok { + // Unexpected. + return err + } + + if nse.AllowTransparentRetry { + a.allowTransparentRetry = true + } + + // Unwrap and convert error. + return toRPCErr(nse.Err) + } + a.s = s + a.p = &parser{r: s, recvBufferPool: a.cs.cc.dopts.recvBufferPool} + return nil +} + +// clientStream implements a client side Stream. +type clientStream struct { + callHdr *transport.CallHdr + opts []CallOption + callInfo *callInfo + cc *ClientConn + desc *StreamDesc + + codec baseCodec + cp Compressor + comp encoding.Compressor + + cancel context.CancelFunc // cancels all attempts + + sentLast bool // sent an end stream + + methodConfig *MethodConfig + + ctx context.Context // the application's context, wrapped by stats/tracing + + retryThrottler *retryThrottler // The throttler active when the RPC began. + + binlogs []binarylog.MethodLogger + // serverHeaderBinlogged is a boolean for whether server header has been + // logged. Server header will be logged when the first time one of those + // happens: stream.Header(), stream.Recv(). + // + // It's only read and used by Recv() and Header(), so it doesn't need to be + // synchronized. + serverHeaderBinlogged bool + + mu sync.Mutex + firstAttempt bool // if true, transparent retry is valid + numRetries int // exclusive of transparent retry attempt(s) + numRetriesSincePushback int // retries since pushback; to reset backoff + finished bool // TODO: replace with atomic cmpxchg or sync.Once? + // attempt is the active client stream attempt. + // The only place where it is written is the newAttemptLocked method and this method never writes nil. + // So, attempt can be nil only inside newClientStream function when clientStream is first created. 
+ // One of the first things done after clientStream's creation, is to call newAttemptLocked which either + // assigns a non nil value to the attempt or returns an error. If an error is returned from newAttemptLocked, + // then newClientStream calls finish on the clientStream and returns. So, finish method is the only + // place where we need to check if the attempt is nil. + attempt *csAttempt + // TODO(hedging): hedging will have multiple attempts simultaneously. + committed bool // active attempt committed for retry? + onCommit func() + buffer []func(a *csAttempt) error // operations to replay on retry + bufferSize int // current size of buffer +} + +// csAttempt implements a single transport stream attempt within a +// clientStream. +type csAttempt struct { + ctx context.Context + cs *clientStream + t transport.ClientTransport + s *transport.Stream + p *parser + pickResult balancer.PickResult + + finished bool + dc Decompressor + decomp encoding.Compressor + decompSet bool + + mu sync.Mutex // guards trInfo.tr + // trInfo may be nil (if EnableTracing is false). + // trInfo.tr is set when created (if EnableTracing is true), + // and cleared when the finish method is called. + trInfo *traceInfo + + statsHandlers []stats.Handler + beginTime time.Time + + // set for newStream errors that may be transparently retried + allowTransparentRetry bool + // set for pick errors that are returned as a status + drop bool +} + +func (cs *clientStream) commitAttemptLocked() { + if !cs.committed && cs.onCommit != nil { + cs.onCommit() + } + cs.committed = true + cs.buffer = nil +} + +func (cs *clientStream) commitAttempt() { + cs.mu.Lock() + cs.commitAttemptLocked() + cs.mu.Unlock() +} + +// shouldRetry returns nil if the RPC should be retried; otherwise it returns +// the error that should be returned by the operation. If the RPC should be +// retried, the bool indicates whether it is being retried transparently. +func (a *csAttempt) shouldRetry(err error) (bool, error) { + cs := a.cs + + if cs.finished || cs.committed || a.drop { + // RPC is finished or committed or was dropped by the picker; cannot retry. + return false, err + } + if a.s == nil && a.allowTransparentRetry { + return true, nil + } + // Wait for the trailers. + unprocessed := false + if a.s != nil { + <-a.s.Done() + unprocessed = a.s.Unprocessed() + } + if cs.firstAttempt && unprocessed { + // First attempt, stream unprocessed: transparently retry. + return true, nil + } + if cs.cc.dopts.disableRetry { + return false, err + } + + pushback := 0 + hasPushback := false + if a.s != nil { + if !a.s.TrailersOnly() { + return false, err + } + + // TODO(retry): Move down if the spec changes to not check server pushback + // before considering this a failure for throttling. + sps := a.s.Trailer()["grpc-retry-pushback-ms"] + if len(sps) == 1 { + var e error + if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 { + channelz.Infof(logger, cs.cc.channelzID, "Server retry pushback specified to abort (%q).", sps[0]) + cs.retryThrottler.throttle() // This counts as a failure for throttling. + return false, err + } + hasPushback = true + } else if len(sps) > 1 { + channelz.Warningf(logger, cs.cc.channelzID, "Server retry pushback specified multiple values (%q); not retrying.", sps) + cs.retryThrottler.throttle() // This counts as a failure for throttling. 
+ return false, err + } + } + + var code codes.Code + if a.s != nil { + code = a.s.Status().Code() + } else { + code = status.Code(err) + } + + rp := cs.methodConfig.RetryPolicy + if rp == nil || !rp.RetryableStatusCodes[code] { + return false, err + } + + // Note: the ordering here is important; we count this as a failure + // only if the code matched a retryable code. + if cs.retryThrottler.throttle() { + return false, err + } + if cs.numRetries+1 >= rp.MaxAttempts { + return false, err + } + + var dur time.Duration + if hasPushback { + dur = time.Millisecond * time.Duration(pushback) + cs.numRetriesSincePushback = 0 + } else { + fact := math.Pow(rp.BackoffMultiplier, float64(cs.numRetriesSincePushback)) + cur := float64(rp.InitialBackoff) * fact + if max := float64(rp.MaxBackoff); cur > max { + cur = max + } + dur = time.Duration(grpcrand.Int63n(int64(cur))) + cs.numRetriesSincePushback++ + } + + // TODO(dfawley): we could eagerly fail here if dur puts us past the + // deadline, but unsure if it is worth doing. + t := time.NewTimer(dur) + select { + case <-t.C: + cs.numRetries++ + return false, nil + case <-cs.ctx.Done(): + t.Stop() + return false, status.FromContextError(cs.ctx.Err()).Err() + } +} + +// Returns nil if a retry was performed and succeeded; error otherwise. +func (cs *clientStream) retryLocked(attempt *csAttempt, lastErr error) error { + for { + attempt.finish(toRPCErr(lastErr)) + isTransparent, err := attempt.shouldRetry(lastErr) + if err != nil { + cs.commitAttemptLocked() + return err + } + cs.firstAttempt = false + attempt, err = cs.newAttemptLocked(isTransparent) + if err != nil { + // Only returns error if the clientconn is closed or the context of + // the stream is canceled. + return err + } + // Note that the first op in the replay buffer always sets cs.attempt + // if it is able to pick a transport and create a stream. + if lastErr = cs.replayBufferLocked(attempt); lastErr == nil { + return nil + } + } +} + +func (cs *clientStream) Context() context.Context { + cs.commitAttempt() + // No need to lock before using attempt, since we know it is committed and + // cannot change. + if cs.attempt.s != nil { + return cs.attempt.s.Context() + } + return cs.ctx +} + +func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) error { + cs.mu.Lock() + for { + if cs.committed { + cs.mu.Unlock() + // toRPCErr is used in case the error from the attempt comes from + // NewClientStream, which intentionally doesn't return a status + // error to allow for further inspection; all other errors should + // already be status errors. + return toRPCErr(op(cs.attempt)) + } + if len(cs.buffer) == 0 { + // For the first op, which controls creation of the stream and + // assigns cs.attempt, we need to create a new attempt inline + // before executing the first op. On subsequent ops, the attempt + // is created immediately before replaying the ops. + var err error + if cs.attempt, err = cs.newAttemptLocked(false /* isTransparent */); err != nil { + cs.mu.Unlock() + cs.finish(err) + return err + } + } + a := cs.attempt + cs.mu.Unlock() + err := op(a) + cs.mu.Lock() + if a != cs.attempt { + // We started another attempt already. 
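// The retry policy consumed above (rp) normally comes from the service
// config. A sketch of a config that enables it, passed at dial time; the
// service name is hypothetical and the JSON shape follows the gRPC
// service-config schema.
//
//	conn, err := grpc.Dial(target, grpc.WithDefaultServiceConfig(`{
//	  "methodConfig": [{
//	    "name": [{"service": "pkg.Service"}],
//	    "retryPolicy": {
//	      "maxAttempts": 4,
//	      "initialBackoff": "0.1s",
//	      "maxBackoff": "1s",
//	      "backoffMultiplier": 2,
//	      "retryableStatusCodes": ["UNAVAILABLE"]
//	    }
//	  }]
//	}`))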
+ continue + } + if err == io.EOF { + <-a.s.Done() + } + if err == nil || (err == io.EOF && a.s.Status().Code() == codes.OK) { + onSuccess() + cs.mu.Unlock() + return err + } + if err := cs.retryLocked(a, err); err != nil { + cs.mu.Unlock() + return err + } + } +} + +func (cs *clientStream) Header() (metadata.MD, error) { + var m metadata.MD + err := cs.withRetry(func(a *csAttempt) error { + var err error + m, err = a.s.Header() + return toRPCErr(err) + }, cs.commitAttemptLocked) + + if m == nil && err == nil { + // The stream ended with success. Finish the clientStream. + err = io.EOF + } + + if err != nil { + cs.finish(err) + // Do not return the error. The user should get it by calling Recv(). + return nil, nil + } + + if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged && m != nil { + // Only log if binary log is on and header has not been logged, and + // there is actually headers to log. + logEntry := &binarylog.ServerHeader{ + OnClientSide: true, + Header: m, + PeerAddr: nil, + } + if peer, ok := peer.FromContext(cs.Context()); ok { + logEntry.PeerAddr = peer.Addr + } + cs.serverHeaderBinlogged = true + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, logEntry) + } + } + + return m, nil +} + +func (cs *clientStream) Trailer() metadata.MD { + // On RPC failure, we never need to retry, because usage requires that + // RecvMsg() returned a non-nil error before calling this function is valid. + // We would have retried earlier if necessary. + // + // Commit the attempt anyway, just in case users are not following those + // directions -- it will prevent races and should not meaningfully impact + // performance. + cs.commitAttempt() + if cs.attempt.s == nil { + return nil + } + return cs.attempt.s.Trailer() +} + +func (cs *clientStream) replayBufferLocked(attempt *csAttempt) error { + for _, f := range cs.buffer { + if err := f(attempt); err != nil { + return err + } + } + return nil +} + +func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error) { + // Note: we still will buffer if retry is disabled (for transparent retries). + if cs.committed { + return + } + cs.bufferSize += sz + if cs.bufferSize > cs.callInfo.maxRetryRPCBufferSize { + cs.commitAttemptLocked() + return + } + cs.buffer = append(cs.buffer, op) +} + +func (cs *clientStream) SendMsg(m any) (err error) { + defer func() { + if err != nil && err != io.EOF { + // Call finish on the client stream for errors generated by this SendMsg + // call, as these indicate problems created by this client. (Transport + // errors are converted to an io.EOF error in csAttempt.sendMsg; the real + // error will be returned from RecvMsg eventually in that case, or be + // retried.) + cs.finish(err) + } + }() + if cs.sentLast { + return status.Errorf(codes.Internal, "SendMsg called after CloseSend") + } + if !cs.desc.ClientStreams { + cs.sentLast = true + } + + // load hdr, payload, data + hdr, payload, data, err := prepareMsg(m, cs.codec, cs.cp, cs.comp) + if err != nil { + return err + } + + // TODO(dfawley): should we be checking len(data) instead? + if len(payload) > *cs.callInfo.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. 
%d)", len(payload), *cs.callInfo.maxSendMessageSize) + } + op := func(a *csAttempt) error { + return a.sendMsg(m, hdr, payload, data) + } + err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) }) + if len(cs.binlogs) != 0 && err == nil { + cm := &binarylog.ClientMessage{ + OnClientSide: true, + Message: data, + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, cm) + } + } + return err +} + +func (cs *clientStream) RecvMsg(m any) error { + if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged { + // Call Header() to binary log header if it's not already logged. + cs.Header() + } + var recvInfo *payloadInfo + if len(cs.binlogs) != 0 { + recvInfo = &payloadInfo{} + } + err := cs.withRetry(func(a *csAttempt) error { + return a.recvMsg(m, recvInfo) + }, cs.commitAttemptLocked) + if len(cs.binlogs) != 0 && err == nil { + sm := &binarylog.ServerMessage{ + OnClientSide: true, + Message: recvInfo.uncompressedBytes, + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, sm) + } + } + if err != nil || !cs.desc.ServerStreams { + // err != nil or non-server-streaming indicates end of stream. + cs.finish(err) + } + return err +} + +func (cs *clientStream) CloseSend() error { + if cs.sentLast { + // TODO: return an error and finish the stream instead, due to API misuse? + return nil + } + cs.sentLast = true + op := func(a *csAttempt) error { + a.t.Write(a.s, nil, nil, &transport.Options{Last: true}) + // Always return nil; io.EOF is the only error that might make sense + // instead, but there is no need to signal the client to call RecvMsg + // as the only use left for the stream after CloseSend is to call + // RecvMsg. This also matches historical behavior. + return nil + } + cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }) + if len(cs.binlogs) != 0 { + chc := &binarylog.ClientHalfClose{ + OnClientSide: true, + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, chc) + } + } + // We never returned an error here for reasons. + return nil +} + +func (cs *clientStream) finish(err error) { + if err == io.EOF { + // Ending a stream with EOF indicates a success. + err = nil + } + cs.mu.Lock() + if cs.finished { + cs.mu.Unlock() + return + } + cs.finished = true + for _, onFinish := range cs.callInfo.onFinish { + onFinish(err) + } + cs.commitAttemptLocked() + if cs.attempt != nil { + cs.attempt.finish(err) + // after functions all rely upon having a stream. + if cs.attempt.s != nil { + for _, o := range cs.opts { + o.after(cs.callInfo, cs.attempt) + } + } + } + + cs.mu.Unlock() + // Only one of cancel or trailer needs to be logged. 
+ if len(cs.binlogs) != 0 { + switch err { + case errContextCanceled, errContextDeadline, ErrClientConnClosing: + c := &binarylog.Cancel{ + OnClientSide: true, + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, c) + } + default: + logEntry := &binarylog.ServerTrailer{ + OnClientSide: true, + Trailer: cs.Trailer(), + Err: err, + } + if peer, ok := peer.FromContext(cs.Context()); ok { + logEntry.PeerAddr = peer.Addr + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, logEntry) + } + } + } + if err == nil { + cs.retryThrottler.successfulRPC() + } + if channelz.IsOn() { + if err != nil { + cs.cc.incrCallsFailed() + } else { + cs.cc.incrCallsSucceeded() + } + } + cs.cancel() +} + +func (a *csAttempt) sendMsg(m any, hdr, payld, data []byte) error { + cs := a.cs + if a.trInfo != nil { + a.mu.Lock() + if a.trInfo.tr != nil { + a.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) + } + a.mu.Unlock() + } + if err := a.t.Write(a.s, hdr, payld, &transport.Options{Last: !cs.desc.ClientStreams}); err != nil { + if !cs.desc.ClientStreams { + // For non-client-streaming RPCs, we return nil instead of EOF on error + // because the generated code requires it. finish is not called; RecvMsg() + // will call it with the stream's status independently. + return nil + } + return io.EOF + } + for _, sh := range a.statsHandlers { + sh.HandleRPC(a.ctx, outPayload(true, m, data, payld, time.Now())) + } + if channelz.IsOn() { + a.t.IncrMsgSent() + } + return nil +} + +func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { + cs := a.cs + if len(a.statsHandlers) != 0 && payInfo == nil { + payInfo = &payloadInfo{} + } + + if !a.decompSet { + // Block until we receive headers containing received message encoding. + if ct := a.s.RecvCompress(); ct != "" && ct != encoding.Identity { + if a.dc == nil || a.dc.Type() != ct { + // No configured decompressor, or it does not match the incoming + // message encoding; attempt to find a registered compressor that does. + a.dc = nil + a.decomp = encoding.GetCompressor(ct) + } + } else { + // No compression is used; disable our decompressor. + a.dc = nil + } + // Only initialize this state once per stream. + a.decompSet = true + } + err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp) + if err != nil { + if err == io.EOF { + if statusErr := a.s.Status().Err(); statusErr != nil { + return statusErr + } + return io.EOF // indicates successful end of stream. + } + + return toRPCErr(err) + } + if a.trInfo != nil { + a.mu.Lock() + if a.trInfo.tr != nil { + a.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) + } + a.mu.Unlock() + } + for _, sh := range a.statsHandlers { + sh.HandleRPC(a.ctx, &stats.InPayload{ + Client: true, + RecvTime: time.Now(), + Payload: m, + // TODO truncate large payload. + Data: payInfo.uncompressedBytes, + WireLength: payInfo.compressedLength + headerLen, + CompressedLength: payInfo.compressedLength, + Length: len(payInfo.uncompressedBytes), + }) + } + if channelz.IsOn() { + a.t.IncrMsgRecv() + } + if cs.desc.ServerStreams { + // Subsequent messages should be received by subsequent RecvMsg calls. + return nil + } + // Special handling for non-server-stream rpcs. + // This recv expects EOF or errors, so we don't collect inPayload. 
+ err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp)
+ if err == nil {
+ return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
+ }
+ if err == io.EOF {
+ return a.s.Status().Err() // non-server streaming Recv returns nil on success
+ }
+ return toRPCErr(err)
+}
+
+func (a *csAttempt) finish(err error) {
+ a.mu.Lock()
+ if a.finished {
+ a.mu.Unlock()
+ return
+ }
+ a.finished = true
+ if err == io.EOF {
+ // Ending a stream with EOF indicates a success.
+ err = nil
+ }
+ var tr metadata.MD
+ if a.s != nil {
+ a.t.CloseStream(a.s, err)
+ tr = a.s.Trailer()
+ }
+
+ if a.pickResult.Done != nil {
+ br := false
+ if a.s != nil {
+ br = a.s.BytesReceived()
+ }
+ a.pickResult.Done(balancer.DoneInfo{
+ Err: err,
+ Trailer: tr,
+ BytesSent: a.s != nil,
+ BytesReceived: br,
+ ServerLoad: balancerload.Parse(tr),
+ })
+ }
+ for _, sh := range a.statsHandlers {
+ end := &stats.End{
+ Client: true,
+ BeginTime: a.beginTime,
+ EndTime: time.Now(),
+ Trailer: tr,
+ Error: err,
+ }
+ sh.HandleRPC(a.ctx, end)
+ }
+ if a.trInfo != nil && a.trInfo.tr != nil {
+ if err == nil {
+ a.trInfo.tr.LazyPrintf("RPC: [OK]")
+ } else {
+ a.trInfo.tr.LazyPrintf("RPC: [%v]", err)
+ a.trInfo.tr.SetError()
+ }
+ a.trInfo.tr.Finish()
+ a.trInfo.tr = nil
+ }
+ a.mu.Unlock()
+}
+
+// newClientStream creates a ClientStream with the specified transport, on the
+// given addrConn.
+//
+// It's expected that the given transport is either the same one in addrConn, or
+// is already closed. To avoid race, transport is specified separately, instead
+// of using ac.transport.
+//
+// Main difference between this and ClientConn.NewStream:
+// - no retry
+// - no service config (or wait for service config)
+// - no tracing or stats
+func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method string, t transport.ClientTransport, ac *addrConn, opts ...CallOption) (_ ClientStream, err error) {
+ if t == nil {
+ // TODO: return RPC error here?
+ return nil, errors.New("transport provided is nil")
+ }
+ // defaultCallInfo contains unnecessary info(i.e. failfast, maxRetryRPCBufferSize), so we just initialize an empty struct.
+ c := &callInfo{}
+
+ // Possible context leak:
+ // The cancel function for the child context we create will only be called
+ // when RecvMsg returns a non-nil error, if the ClientConn is closed, or if
+ // an error is generated by SendMsg.
+ // https://github.com/grpc/grpc-go/issues/1818.
+ ctx, cancel := context.WithCancel(ctx)
+ defer func() {
+ if err != nil {
+ cancel()
+ }
+ }()
+
+ for _, o := range opts {
+ if err := o.before(c); err != nil {
+ return nil, toRPCErr(err)
+ }
+ }
+ c.maxReceiveMessageSize = getMaxSize(nil, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
+ c.maxSendMessageSize = getMaxSize(nil, c.maxSendMessageSize, defaultServerMaxSendMessageSize)
+ if err := setCallInfoCodec(c); err != nil {
+ return nil, err
+ }
+
+ callHdr := &transport.CallHdr{
+ Host: ac.cc.authority,
+ Method: method,
+ ContentSubtype: c.contentSubtype,
+ }
+
+ // Set our outgoing compression according to the UseCompressor CallOption, if
+ // set. In that case, also find the compressor from the encoding package.
+ // Otherwise, use the compressor configured by the WithCompressor DialOption,
+ // if set.
+ var cp Compressor + var comp encoding.Compressor + if ct := c.compressorType; ct != "" { + callHdr.SendCompress = ct + if ct != encoding.Identity { + comp = encoding.GetCompressor(ct) + if comp == nil { + return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct) + } + } + } else if ac.cc.dopts.cp != nil { + callHdr.SendCompress = ac.cc.dopts.cp.Type() + cp = ac.cc.dopts.cp + } + if c.creds != nil { + callHdr.Creds = c.creds + } + + // Use a special addrConnStream to avoid retry. + as := &addrConnStream{ + callHdr: callHdr, + ac: ac, + ctx: ctx, + cancel: cancel, + opts: opts, + callInfo: c, + desc: desc, + codec: c.codec, + cp: cp, + comp: comp, + t: t, + } + + s, err := as.t.NewStream(as.ctx, as.callHdr) + if err != nil { + err = toRPCErr(err) + return nil, err + } + as.s = s + as.p = &parser{r: s, recvBufferPool: ac.dopts.recvBufferPool} + ac.incrCallsStarted() + if desc != unaryStreamDesc { + // Listen on stream context to cleanup when the stream context is + // canceled. Also listen for the addrConn's context in case the + // addrConn is closed or reconnects to a different address. In all + // other cases, an error should already be injected into the recv + // buffer by the transport, which the client will eventually receive, + // and then we will cancel the stream's context in + // addrConnStream.finish. + go func() { + ac.mu.Lock() + acCtx := ac.ctx + ac.mu.Unlock() + select { + case <-acCtx.Done(): + as.finish(status.Error(codes.Canceled, "grpc: the SubConn is closing")) + case <-ctx.Done(): + as.finish(toRPCErr(ctx.Err())) + } + }() + } + return as, nil +} + +type addrConnStream struct { + s *transport.Stream + ac *addrConn + callHdr *transport.CallHdr + cancel context.CancelFunc + opts []CallOption + callInfo *callInfo + t transport.ClientTransport + ctx context.Context + sentLast bool + desc *StreamDesc + codec baseCodec + cp Compressor + comp encoding.Compressor + decompSet bool + dc Decompressor + decomp encoding.Compressor + p *parser + mu sync.Mutex + finished bool +} + +func (as *addrConnStream) Header() (metadata.MD, error) { + m, err := as.s.Header() + if err != nil { + as.finish(toRPCErr(err)) + } + return m, err +} + +func (as *addrConnStream) Trailer() metadata.MD { + return as.s.Trailer() +} + +func (as *addrConnStream) CloseSend() error { + if as.sentLast { + // TODO: return an error and finish the stream instead, due to API misuse? + return nil + } + as.sentLast = true + + as.t.Write(as.s, nil, nil, &transport.Options{Last: true}) + // Always return nil; io.EOF is the only error that might make sense + // instead, but there is no need to signal the client to call RecvMsg + // as the only use left for the stream after CloseSend is to call + // RecvMsg. This also matches historical behavior. + return nil +} + +func (as *addrConnStream) Context() context.Context { + return as.s.Context() +} + +func (as *addrConnStream) SendMsg(m any) (err error) { + defer func() { + if err != nil && err != io.EOF { + // Call finish on the client stream for errors generated by this SendMsg + // call, as these indicate problems created by this client. (Transport + // errors are converted to an io.EOF error in csAttempt.sendMsg; the real + // error will be returned from RecvMsg eventually in that case, or be + // retried.) 
+ as.finish(err)
+ }
+ }()
+ if as.sentLast {
+ return status.Errorf(codes.Internal, "SendMsg called after CloseSend")
+ }
+ if !as.desc.ClientStreams {
+ as.sentLast = true
+ }
+
+ // load hdr, payload, data
+ hdr, payld, _, err := prepareMsg(m, as.codec, as.cp, as.comp)
+ if err != nil {
+ return err
+ }
+
+ // TODO(dfawley): should we be checking len(data) instead?
+ if len(payld) > *as.callInfo.maxSendMessageSize {
+ return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payld), *as.callInfo.maxSendMessageSize)
+ }
+
+ if err := as.t.Write(as.s, hdr, payld, &transport.Options{Last: !as.desc.ClientStreams}); err != nil {
+ if !as.desc.ClientStreams {
+ // For non-client-streaming RPCs, we return nil instead of EOF on error
+ // because the generated code requires it. finish is not called; RecvMsg()
+ // will call it with the stream's status independently.
+ return nil
+ }
+ return io.EOF
+ }
+
+ if channelz.IsOn() {
+ as.t.IncrMsgSent()
+ }
+ return nil
+}
+
+func (as *addrConnStream) RecvMsg(m any) (err error) {
+ defer func() {
+ if err != nil || !as.desc.ServerStreams {
+ // err != nil or non-server-streaming indicates end of stream.
+ as.finish(err)
+ }
+ }()
+
+ if !as.decompSet {
+ // Block until we receive headers containing received message encoding.
+ if ct := as.s.RecvCompress(); ct != "" && ct != encoding.Identity {
+ if as.dc == nil || as.dc.Type() != ct {
+ // No configured decompressor, or it does not match the incoming
+ // message encoding; attempt to find a registered compressor that does.
+ as.dc = nil
+ as.decomp = encoding.GetCompressor(ct)
+ }
+ } else {
+ // No compression is used; disable our decompressor.
+ as.dc = nil
+ }
+ // Only initialize this state once per stream.
+ as.decompSet = true
+ }
+ err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp)
+ if err != nil {
+ if err == io.EOF {
+ if statusErr := as.s.Status().Err(); statusErr != nil {
+ return statusErr
+ }
+ return io.EOF // indicates successful end of stream.
+ }
+ return toRPCErr(err)
+ }
+
+ if channelz.IsOn() {
+ as.t.IncrMsgRecv()
+ }
+ if as.desc.ServerStreams {
+ // Subsequent messages should be received by subsequent RecvMsg calls.
+ return nil
+ }
+
+ // Special handling for non-server-stream rpcs.
+ // This recv expects EOF or errors, so we don't collect inPayload.
+ err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp)
+ if err == nil {
+ return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
+ }
+ if err == io.EOF {
+ return as.s.Status().Err() // non-server streaming Recv returns nil on success
+ }
+ return toRPCErr(err)
+}
+
+func (as *addrConnStream) finish(err error) {
+ as.mu.Lock()
+ if as.finished {
+ as.mu.Unlock()
+ return
+ }
+ as.finished = true
+ if err == io.EOF {
+ // Ending a stream with EOF indicates a success.
+ err = nil
+ }
+ if as.s != nil {
+ as.t.CloseStream(as.s, err)
+ }
+
+ if err != nil {
+ as.ac.incrCallsFailed()
+ } else {
+ as.ac.incrCallsSucceeded()
+ }
+ as.cancel()
+ as.mu.Unlock()
+}
+
+// ServerStream defines the server-side behavior of a streaming RPC.
+//
+// Errors returned from ServerStream methods are compatible with the status
+// package. However, the status code will often not match the RPC status as
+// seen by the client application, and therefore, should not be relied upon for
+// this purpose.
+type ServerStream interface {
+ // SetHeader sets the header metadata.
It may be called multiple times. + // When call multiple times, all the provided metadata will be merged. + // All the metadata will be sent out when one of the following happens: + // - ServerStream.SendHeader() is called; + // - The first response is sent out; + // - An RPC status is sent out (error or success). + SetHeader(metadata.MD) error + // SendHeader sends the header metadata. + // The provided md and headers set by SetHeader() will be sent. + // It fails if called multiple times. + SendHeader(metadata.MD) error + // SetTrailer sets the trailer metadata which will be sent with the RPC status. + // When called more than once, all the provided metadata will be merged. + SetTrailer(metadata.MD) + // Context returns the context for this stream. + Context() context.Context + // SendMsg sends a message. On error, SendMsg aborts the stream and the + // error is returned directly. + // + // SendMsg blocks until: + // - There is sufficient flow control to schedule m with the transport, or + // - The stream is done, or + // - The stream breaks. + // + // SendMsg does not wait until the message is received by the client. An + // untimely stream closure may result in lost messages. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not safe + // to call SendMsg on the same stream in different goroutines. + // + // It is not safe to modify the message after calling SendMsg. Tracing + // libraries and stats handlers may use the message lazily. + SendMsg(m any) error + // RecvMsg blocks until it receives a message into m or the stream is + // done. It returns io.EOF when the client has performed a CloseSend. On + // any non-EOF error, the stream is aborted and the error contains the + // RPC status. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not + // safe to call RecvMsg on the same stream in different goroutines. + RecvMsg(m any) error +} + +// serverStream implements a server side Stream. +type serverStream struct { + ctx context.Context + t transport.ServerTransport + s *transport.Stream + p *parser + codec baseCodec + + cp Compressor + dc Decompressor + comp encoding.Compressor + decomp encoding.Compressor + + sendCompressorName string + + maxReceiveMessageSize int + maxSendMessageSize int + trInfo *traceInfo + + statsHandler []stats.Handler + + binlogs []binarylog.MethodLogger + // serverHeaderBinlogged indicates whether server header has been logged. It + // will happen when one of the following two happens: stream.SendHeader(), + // stream.Send(). + // + // It's only checked in send and sendHeader, doesn't need to be + // synchronized. + serverHeaderBinlogged bool + + mu sync.Mutex // protects trInfo.tr after the service handler runs. 
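// Handler-side sketch for the ServerStream contract documented above; the
// generated stream type, request type, and event source are hypothetical.
//
//	func (s *server) Watch(req *WatchRequest, stream pb.Service_WatchServer) error {
//		_ = stream.SetHeader(metadata.Pairs("x-region", "eu-west"))
//		for _, ev := range pendingEvents(req) {
//			if err := stream.Send(ev); err != nil { // Send wraps SendMsg in generated code
//				return err
//			}
//		}
//		stream.SetTrailer(metadata.Pairs("x-events", "done"))
//		return nil // translates to an OK RPC status
//	}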
+} + +func (ss *serverStream) Context() context.Context { + return ss.ctx +} + +func (ss *serverStream) SetHeader(md metadata.MD) error { + if md.Len() == 0 { + return nil + } + err := imetadata.Validate(md) + if err != nil { + return status.Error(codes.Internal, err.Error()) + } + return ss.s.SetHeader(md) +} + +func (ss *serverStream) SendHeader(md metadata.MD) error { + err := imetadata.Validate(md) + if err != nil { + return status.Error(codes.Internal, err.Error()) + } + + err = ss.t.WriteHeader(ss.s, md) + if len(ss.binlogs) != 0 && !ss.serverHeaderBinlogged { + h, _ := ss.s.Header() + sh := &binarylog.ServerHeader{ + Header: h, + } + ss.serverHeaderBinlogged = true + for _, binlog := range ss.binlogs { + binlog.Log(ss.ctx, sh) + } + } + return err +} + +func (ss *serverStream) SetTrailer(md metadata.MD) { + if md.Len() == 0 { + return + } + if err := imetadata.Validate(md); err != nil { + logger.Errorf("stream: failed to validate md when setting trailer, err: %v", err) + } + ss.s.SetTrailer(md) +} + +func (ss *serverStream) SendMsg(m any) (err error) { + defer func() { + if ss.trInfo != nil { + ss.mu.Lock() + if ss.trInfo.tr != nil { + if err == nil { + ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) + } else { + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + ss.trInfo.tr.SetError() + } + } + ss.mu.Unlock() + } + if err != nil && err != io.EOF { + st, _ := status.FromError(toRPCErr(err)) + ss.t.WriteStatus(ss.s, st) + // Non-user specified status was sent out. This should be an error + // case (as a server side Cancel maybe). + // + // This is not handled specifically now. User will return a final + // status from the service handler, we will log that error instead. + // This behavior is similar to an interceptor. + } + if channelz.IsOn() && err == nil { + ss.t.IncrMsgSent() + } + }() + + // Server handler could have set new compressor by calling SetSendCompressor. + // In case it is set, we need to use it for compressing outbound message. + if sendCompressorsName := ss.s.SendCompress(); sendCompressorsName != ss.sendCompressorName { + ss.comp = encoding.GetCompressor(sendCompressorsName) + ss.sendCompressorName = sendCompressorsName + } + + // load hdr, payload, data + hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp) + if err != nil { + return err + } + + // TODO(dfawley): should we be checking len(data) instead? + if len(payload) > ss.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. 
%d)", len(payload), ss.maxSendMessageSize) + } + if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil { + return toRPCErr(err) + } + if len(ss.binlogs) != 0 { + if !ss.serverHeaderBinlogged { + h, _ := ss.s.Header() + sh := &binarylog.ServerHeader{ + Header: h, + } + ss.serverHeaderBinlogged = true + for _, binlog := range ss.binlogs { + binlog.Log(ss.ctx, sh) + } + } + sm := &binarylog.ServerMessage{ + Message: data, + } + for _, binlog := range ss.binlogs { + binlog.Log(ss.ctx, sm) + } + } + if len(ss.statsHandler) != 0 { + for _, sh := range ss.statsHandler { + sh.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now())) + } + } + return nil +} + +func (ss *serverStream) RecvMsg(m any) (err error) { + defer func() { + if ss.trInfo != nil { + ss.mu.Lock() + if ss.trInfo.tr != nil { + if err == nil { + ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) + } else if err != io.EOF { + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + ss.trInfo.tr.SetError() + } + } + ss.mu.Unlock() + } + if err != nil && err != io.EOF { + st, _ := status.FromError(toRPCErr(err)) + ss.t.WriteStatus(ss.s, st) + // Non-user specified status was sent out. This should be an error + // case (as a server side Cancel maybe). + // + // This is not handled specifically now. User will return a final + // status from the service handler, we will log that error instead. + // This behavior is similar to an interceptor. + } + if channelz.IsOn() && err == nil { + ss.t.IncrMsgRecv() + } + }() + var payInfo *payloadInfo + if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 { + payInfo = &payloadInfo{} + } + if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil { + if err == io.EOF { + if len(ss.binlogs) != 0 { + chc := &binarylog.ClientHalfClose{} + for _, binlog := range ss.binlogs { + binlog.Log(ss.ctx, chc) + } + } + return err + } + if err == io.ErrUnexpectedEOF { + err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error()) + } + return toRPCErr(err) + } + if len(ss.statsHandler) != 0 { + for _, sh := range ss.statsHandler { + sh.HandleRPC(ss.s.Context(), &stats.InPayload{ + RecvTime: time.Now(), + Payload: m, + // TODO truncate large payload. + Data: payInfo.uncompressedBytes, + Length: len(payInfo.uncompressedBytes), + WireLength: payInfo.compressedLength + headerLen, + CompressedLength: payInfo.compressedLength, + }) + } + } + if len(ss.binlogs) != 0 { + cm := &binarylog.ClientMessage{ + Message: payInfo.uncompressedBytes, + } + for _, binlog := range ss.binlogs { + binlog.Log(ss.ctx, cm) + } + } + return nil +} + +// MethodFromServerStream returns the method string for the input stream. +// The returned string is in the format of "/service/method". +func MethodFromServerStream(stream ServerStream) (string, bool) { + return Method(stream.Context()) +} + +// prepareMsg returns the hdr, payload and data +// using the compressors passed or using the +// passed preparedmsg +func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) { + if preparedMsg, ok := m.(*PreparedMsg); ok { + return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil + } + // The input interface is not a prepared msg. 
+ // Marshal and Compress the data at this point + data, err = encode(codec, m) + if err != nil { + return nil, nil, nil, err + } + compData, err := compress(data, cp, comp) + if err != nil { + return nil, nil, nil, err + } + hdr, payload = msgHeader(data, compData) + return hdr, payload, data, nil +} diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go new file mode 100644 index 00000000..07f01257 --- /dev/null +++ b/vendor/google.golang.org/grpc/tap/tap.go @@ -0,0 +1,62 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package tap defines the function handles which are executed on the transport +// layer of gRPC-Go and related information. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +package tap + +import ( + "context" + + "google.golang.org/grpc/metadata" +) + +// Info defines the relevant information needed by the handles. +type Info struct { + // FullMethodName is the string of grpc method (in the format of + // /package.service/method). + FullMethodName string + + // Header contains the header metadata received. + Header metadata.MD + + // TODO: More to be added. +} + +// ServerInHandle defines the function which runs before a new stream is +// created on the server side. If it returns a non-nil error, the stream will +// not be created and an error will be returned to the client. If the error +// returned is a status error, that status code and message will be used, +// otherwise PermissionDenied will be the code and err.Error() will be the +// message. +// +// It's intended to be used in situations where you don't want to waste the +// resources to accept the new stream (e.g. rate-limiting). For other general +// usages, please use interceptors. +// +// Note that it is executed in the per-connection I/O goroutine(s) instead of +// per-RPC goroutine. Therefore, users should NOT have any +// blocking/time-consuming work in this handle. Otherwise all the RPCs would +// slow down. Also, for the same reason, this handle won't be called +// concurrently by gRPC. +type ServerInHandle func(ctx context.Context, info *Info) (context.Context, error) diff --git a/vendor/google.golang.org/grpc/trace.go b/vendor/google.golang.org/grpc/trace.go new file mode 100644 index 00000000..9ded7932 --- /dev/null +++ b/vendor/google.golang.org/grpc/trace.go @@ -0,0 +1,123 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "bytes" + "fmt" + "io" + "net" + "strings" + "sync" + "time" + + "golang.org/x/net/trace" +) + +// EnableTracing controls whether to trace RPCs using the golang.org/x/net/trace package. +// This should only be set before any RPCs are sent or received by this program. +var EnableTracing bool + +// methodFamily returns the trace family for the given method. +// It turns "/pkg.Service/GetFoo" into "pkg.Service". +func methodFamily(m string) string { + m = strings.TrimPrefix(m, "/") // remove leading slash + if i := strings.Index(m, "/"); i >= 0 { + m = m[:i] // remove everything from second slash + } + return m +} + +// traceInfo contains tracing information for an RPC. +type traceInfo struct { + tr trace.Trace + firstLine firstLine +} + +// firstLine is the first line of an RPC trace. +// It may be mutated after construction; remoteAddr specifically may change +// during client-side use. +type firstLine struct { + mu sync.Mutex + client bool // whether this is a client (outgoing) RPC + remoteAddr net.Addr + deadline time.Duration // may be zero +} + +func (f *firstLine) SetRemoteAddr(addr net.Addr) { + f.mu.Lock() + f.remoteAddr = addr + f.mu.Unlock() +} + +func (f *firstLine) String() string { + f.mu.Lock() + defer f.mu.Unlock() + + var line bytes.Buffer + io.WriteString(&line, "RPC: ") + if f.client { + io.WriteString(&line, "to") + } else { + io.WriteString(&line, "from") + } + fmt.Fprintf(&line, " %v deadline:", f.remoteAddr) + if f.deadline != 0 { + fmt.Fprint(&line, f.deadline) + } else { + io.WriteString(&line, "none") + } + return line.String() +} + +const truncateSize = 100 + +func truncate(x string, l int) string { + if l > len(x) { + return x + } + return x[:l] +} + +// payload represents an RPC request or response payload. +type payload struct { + sent bool // whether this is an outgoing payload + msg any // e.g. a proto.Message + // TODO(dsymonds): add stringifying info to codec, and limit how much we hold here? +} + +func (p payload) String() string { + if p.sent { + return truncate(fmt.Sprintf("sent: %v", p.msg), truncateSize) + } + return truncate(fmt.Sprintf("recv: %v", p.msg), truncateSize) +} + +type fmtStringer struct { + format string + a []any +} + +func (f *fmtStringer) String() string { + return fmt.Sprintf(f.format, f.a...) +} + +type stringer string + +func (s stringer) String() string { return string(s) } diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go new file mode 100644 index 00000000..1ad1ba2a --- /dev/null +++ b/vendor/google.golang.org/grpc/version.go @@ -0,0 +1,22 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +// Version is the current grpc version. 
+const Version = "1.61.0" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh new file mode 100644 index 00000000..5da38a40 --- /dev/null +++ b/vendor/google.golang.org/grpc/vet.sh @@ -0,0 +1,190 @@ +#!/bin/bash + +set -ex # Exit on error; debugging enabled. +set -o pipefail # Fail a pipe if any sub-command fails. + +# not makes sure the command passed to it does not exit with a return code of 0. +not() { + # This is required instead of the earlier (! $COMMAND) because subshells and + # pipefail don't work the same on Darwin as in Linux. + ! "$@" +} + +die() { + echo "$@" >&2 + exit 1 +} + +fail_on_output() { + tee /dev/stderr | not read +} + +# Check to make sure it's safe to modify the user's git repo. +git status --porcelain | fail_on_output + +# Undo any edits made by this script. +cleanup() { + git reset --hard HEAD +} +trap cleanup EXIT + +PATH="${HOME}/go/bin:${GOROOT}/bin:${PATH}" +go version + +if [[ "$1" = "-install" ]]; then + # Install the pinned versions as defined in module tools. + pushd ./test/tools + go install \ + golang.org/x/tools/cmd/goimports \ + honnef.co/go/tools/cmd/staticcheck \ + github.com/client9/misspell/cmd/misspell + popd + if [[ -z "${VET_SKIP_PROTO}" ]]; then + if [[ "${GITHUB_ACTIONS}" = "true" ]]; then + PROTOBUF_VERSION=22.0 # a.k.a v4.22.0 in pb.go files. + PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip + pushd /home/runner/go + wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME} + unzip ${PROTOC_FILENAME} + bin/protoc --version + popd + elif not which protoc > /dev/null; then + die "Please install protoc into your path" + fi + fi + exit 0 +elif [[ "$#" -ne 0 ]]; then + die "Unknown argument(s): $*" +fi + +# - Check that generated proto files are up to date. +if [[ -z "${VET_SKIP_PROTO}" ]]; then + make proto && git status --porcelain 2>&1 | fail_on_output || \ + (git status; git --no-pager diff; exit 1) +fi + +if [[ -n "${VET_ONLY_PROTO}" ]]; then + exit 0 +fi + +# - Ensure all source files contain a copyright message. +# (Done in two parts because Darwin "git grep" has broken support for compound +# exclusion matches.) +(grep -L "DO NOT EDIT" $(git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)" -- '*.go') || true) | fail_on_output + +# - Make sure all tests in grpc and grpc/test use leakcheck via Teardown. +not grep 'func Test[^(]' *_test.go +not grep 'func Test[^(]' test/*.go + +# - Check for typos in test function names +git grep 'func (s) ' -- "*_test.go" | not grep -v 'func (s) Test' +git grep 'func [A-Z]' -- "*_test.go" | not grep -v 'func Test\|Benchmark\|Example' + +# - Do not import x/net/context. +not git grep -l 'x/net/context' -- "*.go" + +# - Do not import math/rand for real library code. Use internal/grpcrand for +# thread safety. +git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^interop/stress\|grpcrand\|^benchmark\|wrr_test' + +# - Do not use "interface{}"; use "any" instead. +git grep -l 'interface{}' -- "*.go" 2>&1 | not grep -v '\.pb\.go\|protoc-gen-go-grpc\|grpc_testing_not_regenerate' + +# - Do not call grpclog directly. Use grpclog.Component instead. +git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpclog.F' --or -e 'grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go' + +# - Ensure all ptypes proto packages are renamed when importing. 
+not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go" + +# - Ensure all usages of grpc_testing package are renamed when importing. +not git grep "\(import \|^\s*\)\"google.golang.org/grpc/interop/grpc_testing" -- "*.go" + +# - Ensure all xds proto imports are renamed to *pb or *grpc. +git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*.pb.go' | not grep -v 'pb "\|grpc "' + +misspell -error . + +# - gofmt, goimports, go vet, go mod tidy. +# Perform these checks on each module inside gRPC. +for MOD_FILE in $(find . -name 'go.mod'); do + MOD_DIR=$(dirname ${MOD_FILE}) + pushd ${MOD_DIR} + go vet -all ./... | fail_on_output + gofmt -s -d -l . 2>&1 | fail_on_output + goimports -l . 2>&1 | not grep -vE "\.pb\.go" + + go mod tidy -compat=1.19 + git status --porcelain 2>&1 | fail_on_output || \ + (git status; git --no-pager diff; exit 1) + popd +done + +# - Collection of static analysis checks +SC_OUT="$(mktemp)" +staticcheck -go 1.19 -checks 'all' ./... > "${SC_OUT}" || true + +# Error for anything other than checks that need exclusions. +grep -v "(ST1000)" "${SC_OUT}" | grep -v "(SA1019)" | grep -v "(ST1003)" | not grep -v "(ST1019)\|\(other import of\)" + +# Exclude underscore checks for generated code. +grep "(ST1003)" "${SC_OUT}" | not grep -v '\(.pb.go:\)\|\(code_string_test.go:\)\|\(grpc_testing_not_regenerate\)' + +# Error for duplicate imports not including grpc protos. +grep "(ST1019)\|\(other import of\)" "${SC_OUT}" | not grep -Fv 'XXXXX PleaseIgnoreUnused +channelz/grpc_channelz_v1" +go-control-plane/envoy +grpclb/grpc_lb_v1" +health/grpc_health_v1" +interop/grpc_testing" +orca/v3" +proto/grpc_gcp" +proto/grpc_lookup_v1" +reflection/grpc_reflection_v1" +reflection/grpc_reflection_v1alpha" +XXXXX PleaseIgnoreUnused' + +# Error for any package comments not in generated code. +grep "(ST1000)" "${SC_OUT}" | not grep -v "\.pb\.go:" + +# Only ignore the following deprecated types/fields/functions and exclude +# generated code. +grep "(SA1019)" "${SC_OUT}" | not grep -Fv 'XXXXX PleaseIgnoreUnused +XXXXX Protobuf related deprecation errors: +"github.com/golang/protobuf +.pb.go: +grpc_testing_not_regenerate +: ptypes. +proto.RegisterType +XXXXX gRPC internal usage deprecation errors: +"google.golang.org/grpc +: grpc. +: v1alpha. +: v1alphareflectionpb. +BalancerAttributes is deprecated: +CredsBundle is deprecated: +Metadata is deprecated: use Attributes instead. +NewSubConn is deprecated: +OverrideServerName is deprecated: +RemoveSubConn is deprecated: +SecurityVersion is deprecated: +Target is deprecated: Use the Target field in the BuildOptions instead. +UpdateAddresses is deprecated: +UpdateSubConnState is deprecated: +balancer.ErrTransientFailure is deprecated: +grpc/reflection/v1alpha/reflection.proto +XXXXX xDS deprecated fields we support +.ExactMatch +.PrefixMatch +.SafeRegexMatch +.SuffixMatch +GetContainsMatch +GetExactMatch +GetMatchSubjectAltNames +GetPrefixMatch +GetSafeRegexMatch +GetSuffixMatch +GetTlsCertificateCertificateProviderInstance +GetValidationContextCertificateProviderInstance +XXXXX PleaseIgnoreUnused' + +echo SUCCESS diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go new file mode 100644 index 00000000..bb2966e3 --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go @@ -0,0 +1,685 @@ +// Copyright 2019 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protojson + +import ( + "encoding/base64" + "fmt" + "math" + "strconv" + "strings" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/encoding/json" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/internal/set" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +// Unmarshal reads the given []byte into the given [proto.Message]. +// The provided message must be mutable (e.g., a non-nil pointer to a message). +func Unmarshal(b []byte, m proto.Message) error { + return UnmarshalOptions{}.Unmarshal(b, m) +} + +// UnmarshalOptions is a configurable JSON format parser. +type UnmarshalOptions struct { + pragma.NoUnkeyedLiterals + + // If AllowPartial is set, input for messages that will result in missing + // required fields will not return an error. + AllowPartial bool + + // If DiscardUnknown is set, unknown fields and enum name values are ignored. + DiscardUnknown bool + + // Resolver is used for looking up types when unmarshaling + // google.protobuf.Any messages or extension fields. + // If nil, this defaults to using protoregistry.GlobalTypes. + Resolver interface { + protoregistry.MessageTypeResolver + protoregistry.ExtensionTypeResolver + } + + // RecursionLimit limits how deeply messages may be nested. + // If zero, a default limit is applied. + RecursionLimit int +} + +// Unmarshal reads the given []byte and populates the given [proto.Message] +// using options in the UnmarshalOptions object. +// It will clear the message first before setting the fields. +// If it returns an error, the given message may be partially set. +// The provided message must be mutable (e.g., a non-nil pointer to a message). +func (o UnmarshalOptions) Unmarshal(b []byte, m proto.Message) error { + return o.unmarshal(b, m) +} + +// unmarshal is a centralized function that all unmarshal operations go through. +// For profiling purposes, avoid changing the name of this function or +// introducing other code paths for unmarshal that do not go through this. +func (o UnmarshalOptions) unmarshal(b []byte, m proto.Message) error { + proto.Reset(m) + + if o.Resolver == nil { + o.Resolver = protoregistry.GlobalTypes + } + if o.RecursionLimit == 0 { + o.RecursionLimit = protowire.DefaultRecursionLimit + } + + dec := decoder{json.NewDecoder(b), o} + if err := dec.unmarshalMessage(m.ProtoReflect(), false); err != nil { + return err + } + + // Check for EOF. + tok, err := dec.Read() + if err != nil { + return err + } + if tok.Kind() != json.EOF { + return dec.unexpectedTokenError(tok) + } + + if o.AllowPartial { + return nil + } + return proto.CheckInitialized(m) +} + +type decoder struct { + *json.Decoder + opts UnmarshalOptions +} + +// newError returns an error object with position info. +func (d decoder) newError(pos int, f string, x ...any) error { + line, column := d.Position(pos) + head := fmt.Sprintf("(line %d:%d): ", line, column) + return errors.New(head+f, x...) +} + +// unexpectedTokenError returns a syntax error for the given unexpected token. 
+func (d decoder) unexpectedTokenError(tok json.Token) error { + return d.syntaxError(tok.Pos(), "unexpected token %s", tok.RawString()) +} + +// syntaxError returns a syntax error for given position. +func (d decoder) syntaxError(pos int, f string, x ...any) error { + line, column := d.Position(pos) + head := fmt.Sprintf("syntax error (line %d:%d): ", line, column) + return errors.New(head+f, x...) +} + +// unmarshalMessage unmarshals a message into the given protoreflect.Message. +func (d decoder) unmarshalMessage(m protoreflect.Message, skipTypeURL bool) error { + d.opts.RecursionLimit-- + if d.opts.RecursionLimit < 0 { + return errors.New("exceeded max recursion depth") + } + if unmarshal := wellKnownTypeUnmarshaler(m.Descriptor().FullName()); unmarshal != nil { + return unmarshal(d, m) + } + + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.ObjectOpen { + return d.unexpectedTokenError(tok) + } + + messageDesc := m.Descriptor() + if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) { + return errors.New("no support for proto1 MessageSets") + } + + var seenNums set.Ints + var seenOneofs set.Ints + fieldDescs := messageDesc.Fields() + for { + // Read field name. + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + default: + return d.unexpectedTokenError(tok) + case json.ObjectClose: + return nil + case json.Name: + // Continue below. + } + + name := tok.Name() + // Unmarshaling a non-custom embedded message in Any will contain the + // JSON field "@type" which should be skipped because it is not a field + // of the embedded message, but simply an artifact of the Any format. + if skipTypeURL && name == "@type" { + d.Read() + continue + } + + // Get the FieldDescriptor. + var fd protoreflect.FieldDescriptor + if strings.HasPrefix(name, "[") && strings.HasSuffix(name, "]") { + // Only extension names are in [name] format. + extName := protoreflect.FullName(name[1 : len(name)-1]) + extType, err := d.opts.Resolver.FindExtensionByName(extName) + if err != nil && err != protoregistry.NotFound { + return d.newError(tok.Pos(), "unable to resolve %s: %v", tok.RawString(), err) + } + if extType != nil { + fd = extType.TypeDescriptor() + if !messageDesc.ExtensionRanges().Has(fd.Number()) || fd.ContainingMessage().FullName() != messageDesc.FullName() { + return d.newError(tok.Pos(), "message %v cannot be extended by %v", messageDesc.FullName(), fd.FullName()) + } + } + } else { + // The name can either be the JSON name or the proto field name. + fd = fieldDescs.ByJSONName(name) + if fd == nil { + fd = fieldDescs.ByTextName(name) + } + } + if flags.ProtoLegacy { + if fd != nil && fd.IsWeak() && fd.Message().IsPlaceholder() { + fd = nil // reset since the weak reference is not linked in + } + } + + if fd == nil { + // Field is unknown. + if d.opts.DiscardUnknown { + if err := d.skipJSONValue(); err != nil { + return err + } + continue + } + return d.newError(tok.Pos(), "unknown field %v", tok.RawString()) + } + + // Do not allow duplicate fields. + num := uint64(fd.Number()) + if seenNums.Has(num) { + return d.newError(tok.Pos(), "duplicate field %v", tok.RawString()) + } + seenNums.Set(num) + + // No need to set values for JSON null unless the field type is + // google.protobuf.Value or google.protobuf.NullValue. 
+ if tok, _ := d.Peek(); tok.Kind() == json.Null && !isKnownValue(fd) && !isNullValue(fd) { + d.Read() + continue + } + + switch { + case fd.IsList(): + list := m.Mutable(fd).List() + if err := d.unmarshalList(list, fd); err != nil { + return err + } + case fd.IsMap(): + mmap := m.Mutable(fd).Map() + if err := d.unmarshalMap(mmap, fd); err != nil { + return err + } + default: + // If field is a oneof, check if it has already been set. + if od := fd.ContainingOneof(); od != nil { + idx := uint64(od.Index()) + if seenOneofs.Has(idx) { + return d.newError(tok.Pos(), "error parsing %s, oneof %v is already set", tok.RawString(), od.FullName()) + } + seenOneofs.Set(idx) + } + + // Required or optional fields. + if err := d.unmarshalSingular(m, fd); err != nil { + return err + } + } + } +} + +func isKnownValue(fd protoreflect.FieldDescriptor) bool { + md := fd.Message() + return md != nil && md.FullName() == genid.Value_message_fullname +} + +func isNullValue(fd protoreflect.FieldDescriptor) bool { + ed := fd.Enum() + return ed != nil && ed.FullName() == genid.NullValue_enum_fullname +} + +// unmarshalSingular unmarshals to the non-repeated field specified +// by the given FieldDescriptor. +func (d decoder) unmarshalSingular(m protoreflect.Message, fd protoreflect.FieldDescriptor) error { + var val protoreflect.Value + var err error + switch fd.Kind() { + case protoreflect.MessageKind, protoreflect.GroupKind: + val = m.NewField(fd) + err = d.unmarshalMessage(val.Message(), false) + default: + val, err = d.unmarshalScalar(fd) + } + + if err != nil { + return err + } + if val.IsValid() { + m.Set(fd, val) + } + return nil +} + +// unmarshalScalar unmarshals to a scalar/enum protoreflect.Value specified by +// the given FieldDescriptor. +func (d decoder) unmarshalScalar(fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { + const b32 int = 32 + const b64 int = 64 + + tok, err := d.Read() + if err != nil { + return protoreflect.Value{}, err + } + + kind := fd.Kind() + switch kind { + case protoreflect.BoolKind: + if tok.Kind() == json.Bool { + return protoreflect.ValueOfBool(tok.Bool()), nil + } + + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + if v, ok := unmarshalInt(tok, b32); ok { + return v, nil + } + + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + if v, ok := unmarshalInt(tok, b64); ok { + return v, nil + } + + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + if v, ok := unmarshalUint(tok, b32); ok { + return v, nil + } + + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + if v, ok := unmarshalUint(tok, b64); ok { + return v, nil + } + + case protoreflect.FloatKind: + if v, ok := unmarshalFloat(tok, b32); ok { + return v, nil + } + + case protoreflect.DoubleKind: + if v, ok := unmarshalFloat(tok, b64); ok { + return v, nil + } + + case protoreflect.StringKind: + if tok.Kind() == json.String { + return protoreflect.ValueOfString(tok.ParsedString()), nil + } + + case protoreflect.BytesKind: + if v, ok := unmarshalBytes(tok); ok { + return v, nil + } + + case protoreflect.EnumKind: + if v, ok := unmarshalEnum(tok, fd, d.opts.DiscardUnknown); ok { + return v, nil + } + + default: + panic(fmt.Sprintf("unmarshalScalar: invalid scalar kind %v", kind)) + } + + return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString()) +} + +func unmarshalInt(tok json.Token, bitSize int) (protoreflect.Value, bool) { + switch tok.Kind() { + case json.Number: + return 
getInt(tok, bitSize) + + case json.String: + // Decode number from string. + s := strings.TrimSpace(tok.ParsedString()) + if len(s) != len(tok.ParsedString()) { + return protoreflect.Value{}, false + } + dec := json.NewDecoder([]byte(s)) + tok, err := dec.Read() + if err != nil { + return protoreflect.Value{}, false + } + return getInt(tok, bitSize) + } + return protoreflect.Value{}, false +} + +func getInt(tok json.Token, bitSize int) (protoreflect.Value, bool) { + n, ok := tok.Int(bitSize) + if !ok { + return protoreflect.Value{}, false + } + if bitSize == 32 { + return protoreflect.ValueOfInt32(int32(n)), true + } + return protoreflect.ValueOfInt64(n), true +} + +func unmarshalUint(tok json.Token, bitSize int) (protoreflect.Value, bool) { + switch tok.Kind() { + case json.Number: + return getUint(tok, bitSize) + + case json.String: + // Decode number from string. + s := strings.TrimSpace(tok.ParsedString()) + if len(s) != len(tok.ParsedString()) { + return protoreflect.Value{}, false + } + dec := json.NewDecoder([]byte(s)) + tok, err := dec.Read() + if err != nil { + return protoreflect.Value{}, false + } + return getUint(tok, bitSize) + } + return protoreflect.Value{}, false +} + +func getUint(tok json.Token, bitSize int) (protoreflect.Value, bool) { + n, ok := tok.Uint(bitSize) + if !ok { + return protoreflect.Value{}, false + } + if bitSize == 32 { + return protoreflect.ValueOfUint32(uint32(n)), true + } + return protoreflect.ValueOfUint64(n), true +} + +func unmarshalFloat(tok json.Token, bitSize int) (protoreflect.Value, bool) { + switch tok.Kind() { + case json.Number: + return getFloat(tok, bitSize) + + case json.String: + s := tok.ParsedString() + switch s { + case "NaN": + if bitSize == 32 { + return protoreflect.ValueOfFloat32(float32(math.NaN())), true + } + return protoreflect.ValueOfFloat64(math.NaN()), true + case "Infinity": + if bitSize == 32 { + return protoreflect.ValueOfFloat32(float32(math.Inf(+1))), true + } + return protoreflect.ValueOfFloat64(math.Inf(+1)), true + case "-Infinity": + if bitSize == 32 { + return protoreflect.ValueOfFloat32(float32(math.Inf(-1))), true + } + return protoreflect.ValueOfFloat64(math.Inf(-1)), true + } + + // Decode number from string. + if len(s) != len(strings.TrimSpace(s)) { + return protoreflect.Value{}, false + } + dec := json.NewDecoder([]byte(s)) + tok, err := dec.Read() + if err != nil { + return protoreflect.Value{}, false + } + return getFloat(tok, bitSize) + } + return protoreflect.Value{}, false +} + +func getFloat(tok json.Token, bitSize int) (protoreflect.Value, bool) { + n, ok := tok.Float(bitSize) + if !ok { + return protoreflect.Value{}, false + } + if bitSize == 32 { + return protoreflect.ValueOfFloat32(float32(n)), true + } + return protoreflect.ValueOfFloat64(n), true +} + +func unmarshalBytes(tok json.Token) (protoreflect.Value, bool) { + if tok.Kind() != json.String { + return protoreflect.Value{}, false + } + + s := tok.ParsedString() + enc := base64.StdEncoding + if strings.ContainsAny(s, "-_") { + enc = base64.URLEncoding + } + if len(s)%4 != 0 { + enc = enc.WithPadding(base64.NoPadding) + } + b, err := enc.DecodeString(s) + if err != nil { + return protoreflect.Value{}, false + } + return protoreflect.ValueOfBytes(b), true +} + +func unmarshalEnum(tok json.Token, fd protoreflect.FieldDescriptor, discardUnknown bool) (protoreflect.Value, bool) { + switch tok.Kind() { + case json.String: + // Lookup EnumNumber based on name. 
+ s := tok.ParsedString() + if enumVal := fd.Enum().Values().ByName(protoreflect.Name(s)); enumVal != nil { + return protoreflect.ValueOfEnum(enumVal.Number()), true + } + if discardUnknown { + return protoreflect.Value{}, true + } + + case json.Number: + if n, ok := tok.Int(32); ok { + return protoreflect.ValueOfEnum(protoreflect.EnumNumber(n)), true + } + + case json.Null: + // This is only valid for google.protobuf.NullValue. + if isNullValue(fd) { + return protoreflect.ValueOfEnum(0), true + } + } + + return protoreflect.Value{}, false +} + +func (d decoder) unmarshalList(list protoreflect.List, fd protoreflect.FieldDescriptor) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.ArrayOpen { + return d.unexpectedTokenError(tok) + } + + switch fd.Kind() { + case protoreflect.MessageKind, protoreflect.GroupKind: + for { + tok, err := d.Peek() + if err != nil { + return err + } + + if tok.Kind() == json.ArrayClose { + d.Read() + return nil + } + + val := list.NewElement() + if err := d.unmarshalMessage(val.Message(), false); err != nil { + return err + } + list.Append(val) + } + default: + for { + tok, err := d.Peek() + if err != nil { + return err + } + + if tok.Kind() == json.ArrayClose { + d.Read() + return nil + } + + val, err := d.unmarshalScalar(fd) + if err != nil { + return err + } + if val.IsValid() { + list.Append(val) + } + } + } + + return nil +} + +func (d decoder) unmarshalMap(mmap protoreflect.Map, fd protoreflect.FieldDescriptor) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.ObjectOpen { + return d.unexpectedTokenError(tok) + } + + // Determine ahead whether map entry is a scalar type or a message type in + // order to call the appropriate unmarshalMapValue func inside the for loop + // below. + var unmarshalMapValue func() (protoreflect.Value, error) + switch fd.MapValue().Kind() { + case protoreflect.MessageKind, protoreflect.GroupKind: + unmarshalMapValue = func() (protoreflect.Value, error) { + val := mmap.NewValue() + if err := d.unmarshalMessage(val.Message(), false); err != nil { + return protoreflect.Value{}, err + } + return val, nil + } + default: + unmarshalMapValue = func() (protoreflect.Value, error) { + return d.unmarshalScalar(fd.MapValue()) + } + } + +Loop: + for { + // Read field name. + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + default: + return d.unexpectedTokenError(tok) + case json.ObjectClose: + break Loop + case json.Name: + // Continue. + } + + // Unmarshal field name. + pkey, err := d.unmarshalMapKey(tok, fd.MapKey()) + if err != nil { + return err + } + + // Check for duplicate field name. + if mmap.Has(pkey) { + return d.newError(tok.Pos(), "duplicate map key %v", tok.RawString()) + } + + // Read and unmarshal field value. + pval, err := unmarshalMapValue() + if err != nil { + return err + } + if pval.IsValid() { + mmap.Set(pkey, pval) + } + } + + return nil +} + +// unmarshalMapKey converts given token of Name kind into a protoreflect.MapKey. +// A map key type is any integral or string type. 
+func (d decoder) unmarshalMapKey(tok json.Token, fd protoreflect.FieldDescriptor) (protoreflect.MapKey, error) { + const b32 = 32 + const b64 = 64 + const base10 = 10 + + name := tok.Name() + kind := fd.Kind() + switch kind { + case protoreflect.StringKind: + return protoreflect.ValueOfString(name).MapKey(), nil + + case protoreflect.BoolKind: + switch name { + case "true": + return protoreflect.ValueOfBool(true).MapKey(), nil + case "false": + return protoreflect.ValueOfBool(false).MapKey(), nil + } + + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + if n, err := strconv.ParseInt(name, base10, b32); err == nil { + return protoreflect.ValueOfInt32(int32(n)).MapKey(), nil + } + + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + if n, err := strconv.ParseInt(name, base10, b64); err == nil { + return protoreflect.ValueOfInt64(int64(n)).MapKey(), nil + } + + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + if n, err := strconv.ParseUint(name, base10, b32); err == nil { + return protoreflect.ValueOfUint32(uint32(n)).MapKey(), nil + } + + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + if n, err := strconv.ParseUint(name, base10, b64); err == nil { + return protoreflect.ValueOfUint64(uint64(n)).MapKey(), nil + } + + default: + panic(fmt.Sprintf("invalid kind for map key: %v", kind)) + } + + return protoreflect.MapKey{}, d.newError(tok.Pos(), "invalid value for %v key: %s", kind, tok.RawString()) +} diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/doc.go b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go new file mode 100644 index 00000000..ae71007c --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go @@ -0,0 +1,11 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protojson marshals and unmarshals protocol buffer messages as JSON +// format. It follows the guide at +// https://protobuf.dev/programming-guides/proto3#json. +// +// This package produces a different output than the standard [encoding/json] +// package, which does not operate correctly on protocol buffer messages. +package protojson diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go new file mode 100644 index 00000000..29846df2 --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go @@ -0,0 +1,382 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protojson + +import ( + "encoding/base64" + "fmt" + + "google.golang.org/protobuf/internal/encoding/json" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/order" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const defaultIndent = " " + +// Format formats the message as a multiline string. +// This function is only intended for human consumption and ignores errors. 
+// Do not depend on the output being stable. Its output will change across +// different builds of your program, even when using the same version of the +// protobuf module. +func Format(m proto.Message) string { + return MarshalOptions{Multiline: true}.Format(m) +} + +// Marshal writes the given [proto.Message] in JSON format using default options. +// Do not depend on the output being stable. Its output will change across +// different builds of your program, even when using the same version of the +// protobuf module. +func Marshal(m proto.Message) ([]byte, error) { + return MarshalOptions{}.Marshal(m) +} + +// MarshalOptions is a configurable JSON format marshaler. +type MarshalOptions struct { + pragma.NoUnkeyedLiterals + + // Multiline specifies whether the marshaler should format the output in + // indented-form with every textual element on a new line. + // If Indent is an empty string, then an arbitrary indent is chosen. + Multiline bool + + // Indent specifies the set of indentation characters to use in a multiline + // formatted output such that every entry is preceded by Indent and + // terminated by a newline. If non-empty, then Multiline is treated as true. + // Indent can only be composed of space or tab characters. + Indent string + + // AllowPartial allows messages that have missing required fields to marshal + // without returning an error. If AllowPartial is false (the default), + // Marshal will return error if there are any missing required fields. + AllowPartial bool + + // UseProtoNames uses proto field name instead of lowerCamelCase name in JSON + // field names. + UseProtoNames bool + + // UseEnumNumbers emits enum values as numbers. + UseEnumNumbers bool + + // EmitUnpopulated specifies whether to emit unpopulated fields. It does not + // emit unpopulated oneof fields or unpopulated extension fields. + // The JSON value emitted for unpopulated fields are as follows: + // ╔═══════╤════════════════════════════╗ + // ║ JSON │ Protobuf field ║ + // ╠═══════╪════════════════════════════╣ + // ║ false │ proto3 boolean fields ║ + // ║ 0 │ proto3 numeric fields ║ + // ║ "" │ proto3 string/bytes fields ║ + // ║ null │ proto2 scalar fields ║ + // ║ null │ message fields ║ + // ║ [] │ list fields ║ + // ║ {} │ map fields ║ + // ╚═══════╧════════════════════════════╝ + EmitUnpopulated bool + + // EmitDefaultValues specifies whether to emit default-valued primitive fields, + // empty lists, and empty maps. The fields affected are as follows: + // ╔═══════╤════════════════════════════════════════╗ + // ║ JSON │ Protobuf field ║ + // ╠═══════╪════════════════════════════════════════╣ + // ║ false │ non-optional scalar boolean fields ║ + // ║ 0 │ non-optional scalar numeric fields ║ + // ║ "" │ non-optional scalar string/byte fields ║ + // ║ [] │ empty repeated fields ║ + // ║ {} │ empty map fields ║ + // ╚═══════╧════════════════════════════════════════╝ + // + // Behaves similarly to EmitUnpopulated, but does not emit "null"-value fields, + // i.e. presence-sensing fields that are omitted will remain omitted to preserve + // presence-sensing. + // EmitUnpopulated takes precedence over EmitDefaultValues since the former generates + // a strict superset of the latter. + EmitDefaultValues bool + + // Resolver is used for looking up types when expanding google.protobuf.Any + // messages. If nil, this defaults to using protoregistry.GlobalTypes. 
+ Resolver interface { + protoregistry.ExtensionTypeResolver + protoregistry.MessageTypeResolver + } +} + +// Format formats the message as a string. +// This method is only intended for human consumption and ignores errors. +// Do not depend on the output being stable. Its output will change across +// different builds of your program, even when using the same version of the +// protobuf module. +func (o MarshalOptions) Format(m proto.Message) string { + if m == nil || !m.ProtoReflect().IsValid() { + return "" // invalid syntax, but okay since this is for debugging + } + o.AllowPartial = true + b, _ := o.Marshal(m) + return string(b) +} + +// Marshal marshals the given [proto.Message] in the JSON format using options in +// Do not depend on the output being stable. Its output will change across +// different builds of your program, even when using the same version of the +// protobuf module. +func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) { + return o.marshal(nil, m) +} + +// MarshalAppend appends the JSON format encoding of m to b, +// returning the result. +func (o MarshalOptions) MarshalAppend(b []byte, m proto.Message) ([]byte, error) { + return o.marshal(b, m) +} + +// marshal is a centralized function that all marshal operations go through. +// For profiling purposes, avoid changing the name of this function or +// introducing other code paths for marshal that do not go through this. +func (o MarshalOptions) marshal(b []byte, m proto.Message) ([]byte, error) { + if o.Multiline && o.Indent == "" { + o.Indent = defaultIndent + } + if o.Resolver == nil { + o.Resolver = protoregistry.GlobalTypes + } + + internalEnc, err := json.NewEncoder(b, o.Indent) + if err != nil { + return nil, err + } + + // Treat nil message interface as an empty message, + // in which case the output in an empty JSON object. + if m == nil { + return append(b, '{', '}'), nil + } + + enc := encoder{internalEnc, o} + if err := enc.marshalMessage(m.ProtoReflect(), ""); err != nil { + return nil, err + } + if o.AllowPartial { + return enc.Bytes(), nil + } + return enc.Bytes(), proto.CheckInitialized(m) +} + +type encoder struct { + *json.Encoder + opts MarshalOptions +} + +// typeFieldDesc is a synthetic field descriptor used for the "@type" field. +var typeFieldDesc = func() protoreflect.FieldDescriptor { + var fd filedesc.Field + fd.L0.FullName = "@type" + fd.L0.Index = -1 + fd.L1.Cardinality = protoreflect.Optional + fd.L1.Kind = protoreflect.StringKind + return &fd +}() + +// typeURLFieldRanger wraps a protoreflect.Message and modifies its Range method +// to additionally iterate over a synthetic field for the type URL. +type typeURLFieldRanger struct { + order.FieldRanger + typeURL string +} + +func (m typeURLFieldRanger) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { + if !f(typeFieldDesc, protoreflect.ValueOfString(m.typeURL)) { + return + } + m.FieldRanger.Range(f) +} + +// unpopulatedFieldRanger wraps a protoreflect.Message and modifies its Range +// method to additionally iterate over unpopulated fields. 
+type unpopulatedFieldRanger struct { + protoreflect.Message + + skipNull bool +} + +func (m unpopulatedFieldRanger) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { + fds := m.Descriptor().Fields() + for i := 0; i < fds.Len(); i++ { + fd := fds.Get(i) + if m.Has(fd) || fd.ContainingOneof() != nil { + continue // ignore populated fields and fields within a oneofs + } + + v := m.Get(fd) + isProto2Scalar := fd.Syntax() == protoreflect.Proto2 && fd.Default().IsValid() + isSingularMessage := fd.Cardinality() != protoreflect.Repeated && fd.Message() != nil + if isProto2Scalar || isSingularMessage { + if m.skipNull { + continue + } + v = protoreflect.Value{} // use invalid value to emit null + } + if !f(fd, v) { + return + } + } + m.Message.Range(f) +} + +// marshalMessage marshals the fields in the given protoreflect.Message. +// If the typeURL is non-empty, then a synthetic "@type" field is injected +// containing the URL as the value. +func (e encoder) marshalMessage(m protoreflect.Message, typeURL string) error { + if !flags.ProtoLegacy && messageset.IsMessageSet(m.Descriptor()) { + return errors.New("no support for proto1 MessageSets") + } + + if marshal := wellKnownTypeMarshaler(m.Descriptor().FullName()); marshal != nil { + return marshal(e, m) + } + + e.StartObject() + defer e.EndObject() + + var fields order.FieldRanger = m + switch { + case e.opts.EmitUnpopulated: + fields = unpopulatedFieldRanger{Message: m, skipNull: false} + case e.opts.EmitDefaultValues: + fields = unpopulatedFieldRanger{Message: m, skipNull: true} + } + if typeURL != "" { + fields = typeURLFieldRanger{fields, typeURL} + } + + var err error + order.RangeFields(fields, order.IndexNameFieldOrder, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + name := fd.JSONName() + if e.opts.UseProtoNames { + name = fd.TextName() + } + + if err = e.WriteName(name); err != nil { + return false + } + if err = e.marshalValue(v, fd); err != nil { + return false + } + return true + }) + return err +} + +// marshalValue marshals the given protoreflect.Value. +func (e encoder) marshalValue(val protoreflect.Value, fd protoreflect.FieldDescriptor) error { + switch { + case fd.IsList(): + return e.marshalList(val.List(), fd) + case fd.IsMap(): + return e.marshalMap(val.Map(), fd) + default: + return e.marshalSingular(val, fd) + } +} + +// marshalSingular marshals the given non-repeated field value. This includes +// all scalar types, enums, messages, and groups. +func (e encoder) marshalSingular(val protoreflect.Value, fd protoreflect.FieldDescriptor) error { + if !val.IsValid() { + e.WriteNull() + return nil + } + + switch kind := fd.Kind(); kind { + case protoreflect.BoolKind: + e.WriteBool(val.Bool()) + + case protoreflect.StringKind: + if e.WriteString(val.String()) != nil { + return errors.InvalidUTF8(string(fd.FullName())) + } + + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + e.WriteInt(val.Int()) + + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + e.WriteUint(val.Uint()) + + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Uint64Kind, + protoreflect.Sfixed64Kind, protoreflect.Fixed64Kind: + // 64-bit integers are written out as JSON string. + e.WriteString(val.String()) + + case protoreflect.FloatKind: + // Encoder.WriteFloat handles the special numbers NaN and infinites. + e.WriteFloat(val.Float(), 32) + + case protoreflect.DoubleKind: + // Encoder.WriteFloat handles the special numbers NaN and infinites. 
+ e.WriteFloat(val.Float(), 64) + + case protoreflect.BytesKind: + e.WriteString(base64.StdEncoding.EncodeToString(val.Bytes())) + + case protoreflect.EnumKind: + if fd.Enum().FullName() == genid.NullValue_enum_fullname { + e.WriteNull() + } else { + desc := fd.Enum().Values().ByNumber(val.Enum()) + if e.opts.UseEnumNumbers || desc == nil { + e.WriteInt(int64(val.Enum())) + } else { + e.WriteString(string(desc.Name())) + } + } + + case protoreflect.MessageKind, protoreflect.GroupKind: + if err := e.marshalMessage(val.Message(), ""); err != nil { + return err + } + + default: + panic(fmt.Sprintf("%v has unknown kind: %v", fd.FullName(), kind)) + } + return nil +} + +// marshalList marshals the given protoreflect.List. +func (e encoder) marshalList(list protoreflect.List, fd protoreflect.FieldDescriptor) error { + e.StartArray() + defer e.EndArray() + + for i := 0; i < list.Len(); i++ { + item := list.Get(i) + if err := e.marshalSingular(item, fd); err != nil { + return err + } + } + return nil +} + +// marshalMap marshals given protoreflect.Map. +func (e encoder) marshalMap(mmap protoreflect.Map, fd protoreflect.FieldDescriptor) error { + e.StartObject() + defer e.EndObject() + + var err error + order.RangeEntries(mmap, order.GenericKeyOrder, func(k protoreflect.MapKey, v protoreflect.Value) bool { + if err = e.WriteName(k.String()); err != nil { + return false + } + if err = e.marshalSingular(v, fd.MapValue()); err != nil { + return false + } + return true + }) + return err +} diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go new file mode 100644 index 00000000..4b177c82 --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go @@ -0,0 +1,876 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protojson + +import ( + "bytes" + "fmt" + "math" + "strconv" + "strings" + "time" + + "google.golang.org/protobuf/internal/encoding/json" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" +) + +type marshalFunc func(encoder, protoreflect.Message) error + +// wellKnownTypeMarshaler returns a marshal function if the message type +// has specialized serialization behavior. It returns nil otherwise. 
+func wellKnownTypeMarshaler(name protoreflect.FullName) marshalFunc { + if name.Parent() == genid.GoogleProtobuf_package { + switch name.Name() { + case genid.Any_message_name: + return encoder.marshalAny + case genid.Timestamp_message_name: + return encoder.marshalTimestamp + case genid.Duration_message_name: + return encoder.marshalDuration + case genid.BoolValue_message_name, + genid.Int32Value_message_name, + genid.Int64Value_message_name, + genid.UInt32Value_message_name, + genid.UInt64Value_message_name, + genid.FloatValue_message_name, + genid.DoubleValue_message_name, + genid.StringValue_message_name, + genid.BytesValue_message_name: + return encoder.marshalWrapperType + case genid.Struct_message_name: + return encoder.marshalStruct + case genid.ListValue_message_name: + return encoder.marshalListValue + case genid.Value_message_name: + return encoder.marshalKnownValue + case genid.FieldMask_message_name: + return encoder.marshalFieldMask + case genid.Empty_message_name: + return encoder.marshalEmpty + } + } + return nil +} + +type unmarshalFunc func(decoder, protoreflect.Message) error + +// wellKnownTypeUnmarshaler returns a unmarshal function if the message type +// has specialized serialization behavior. It returns nil otherwise. +func wellKnownTypeUnmarshaler(name protoreflect.FullName) unmarshalFunc { + if name.Parent() == genid.GoogleProtobuf_package { + switch name.Name() { + case genid.Any_message_name: + return decoder.unmarshalAny + case genid.Timestamp_message_name: + return decoder.unmarshalTimestamp + case genid.Duration_message_name: + return decoder.unmarshalDuration + case genid.BoolValue_message_name, + genid.Int32Value_message_name, + genid.Int64Value_message_name, + genid.UInt32Value_message_name, + genid.UInt64Value_message_name, + genid.FloatValue_message_name, + genid.DoubleValue_message_name, + genid.StringValue_message_name, + genid.BytesValue_message_name: + return decoder.unmarshalWrapperType + case genid.Struct_message_name: + return decoder.unmarshalStruct + case genid.ListValue_message_name: + return decoder.unmarshalListValue + case genid.Value_message_name: + return decoder.unmarshalKnownValue + case genid.FieldMask_message_name: + return decoder.unmarshalFieldMask + case genid.Empty_message_name: + return decoder.unmarshalEmpty + } + } + return nil +} + +// The JSON representation of an Any message uses the regular representation of +// the deserialized, embedded message, with an additional field `@type` which +// contains the type URL. If the embedded message type is well-known and has a +// custom JSON representation, that representation will be embedded adding a +// field `value` which holds the custom JSON in addition to the `@type` field. + +func (e encoder) marshalAny(m protoreflect.Message) error { + fds := m.Descriptor().Fields() + fdType := fds.ByNumber(genid.Any_TypeUrl_field_number) + fdValue := fds.ByNumber(genid.Any_Value_field_number) + + if !m.Has(fdType) { + if !m.Has(fdValue) { + // If message is empty, marshal out empty JSON object. + e.StartObject() + e.EndObject() + return nil + } else { + // Return error if type_url field is not set, but value is set. + return errors.New("%s: %v is not set", genid.Any_message_fullname, genid.Any_TypeUrl_field_name) + } + } + + typeVal := m.Get(fdType) + valueVal := m.Get(fdValue) + + // Resolve the type in order to unmarshal value field. 
+ typeURL := typeVal.String() + emt, err := e.opts.Resolver.FindMessageByURL(typeURL) + if err != nil { + return errors.New("%s: unable to resolve %q: %v", genid.Any_message_fullname, typeURL, err) + } + + em := emt.New() + err = proto.UnmarshalOptions{ + AllowPartial: true, // never check required fields inside an Any + Resolver: e.opts.Resolver, + }.Unmarshal(valueVal.Bytes(), em.Interface()) + if err != nil { + return errors.New("%s: unable to unmarshal %q: %v", genid.Any_message_fullname, typeURL, err) + } + + // If type of value has custom JSON encoding, marshal out a field "value" + // with corresponding custom JSON encoding of the embedded message as a + // field. + if marshal := wellKnownTypeMarshaler(emt.Descriptor().FullName()); marshal != nil { + e.StartObject() + defer e.EndObject() + + // Marshal out @type field. + e.WriteName("@type") + if err := e.WriteString(typeURL); err != nil { + return err + } + + e.WriteName("value") + return marshal(e, em) + } + + // Else, marshal out the embedded message's fields in this Any object. + if err := e.marshalMessage(em, typeURL); err != nil { + return err + } + + return nil +} + +func (d decoder) unmarshalAny(m protoreflect.Message) error { + // Peek to check for json.ObjectOpen to avoid advancing a read. + start, err := d.Peek() + if err != nil { + return err + } + if start.Kind() != json.ObjectOpen { + return d.unexpectedTokenError(start) + } + + // Use another decoder to parse the unread bytes for @type field. This + // avoids advancing a read from current decoder because the current JSON + // object may contain the fields of the embedded type. + dec := decoder{d.Clone(), UnmarshalOptions{RecursionLimit: d.opts.RecursionLimit}} + tok, err := findTypeURL(dec) + switch err { + case errEmptyObject: + // An empty JSON object translates to an empty Any message. + d.Read() // Read json.ObjectOpen. + d.Read() // Read json.ObjectClose. + return nil + + case errMissingType: + if d.opts.DiscardUnknown { + // Treat all fields as unknowns, similar to an empty object. + return d.skipJSONValue() + } + // Use start.Pos() for line position. + return d.newError(start.Pos(), err.Error()) + + default: + if err != nil { + return err + } + } + + typeURL := tok.ParsedString() + emt, err := d.opts.Resolver.FindMessageByURL(typeURL) + if err != nil { + return d.newError(tok.Pos(), "unable to resolve %v: %q", tok.RawString(), err) + } + + // Create new message for the embedded message type and unmarshal into it. + em := emt.New() + if unmarshal := wellKnownTypeUnmarshaler(emt.Descriptor().FullName()); unmarshal != nil { + // If embedded message is a custom type, + // unmarshal the JSON "value" field into it. + if err := d.unmarshalAnyValue(unmarshal, em); err != nil { + return err + } + } else { + // Else unmarshal the current JSON object into it. + if err := d.unmarshalMessage(em, true); err != nil { + return err + } + } + // Serialize the embedded message and assign the resulting bytes to the + // proto value field. + b, err := proto.MarshalOptions{ + AllowPartial: true, // No need to check required fields inside an Any. 
+ Deterministic: true, + }.Marshal(em.Interface()) + if err != nil { + return d.newError(start.Pos(), "error in marshaling Any.value field: %v", err) + } + + fds := m.Descriptor().Fields() + fdType := fds.ByNumber(genid.Any_TypeUrl_field_number) + fdValue := fds.ByNumber(genid.Any_Value_field_number) + + m.Set(fdType, protoreflect.ValueOfString(typeURL)) + m.Set(fdValue, protoreflect.ValueOfBytes(b)) + return nil +} + +var errEmptyObject = fmt.Errorf(`empty object`) +var errMissingType = fmt.Errorf(`missing "@type" field`) + +// findTypeURL returns the token for the "@type" field value from the given +// JSON bytes. It is expected that the given bytes start with json.ObjectOpen. +// It returns errEmptyObject if the JSON object is empty or errMissingType if +// @type field does not exist. It returns other error if the @type field is not +// valid or other decoding issues. +func findTypeURL(d decoder) (json.Token, error) { + var typeURL string + var typeTok json.Token + numFields := 0 + // Skip start object. + d.Read() + +Loop: + for { + tok, err := d.Read() + if err != nil { + return json.Token{}, err + } + + switch tok.Kind() { + case json.ObjectClose: + if typeURL == "" { + // Did not find @type field. + if numFields > 0 { + return json.Token{}, errMissingType + } + return json.Token{}, errEmptyObject + } + break Loop + + case json.Name: + numFields++ + if tok.Name() != "@type" { + // Skip value. + if err := d.skipJSONValue(); err != nil { + return json.Token{}, err + } + continue + } + + // Return error if this was previously set already. + if typeURL != "" { + return json.Token{}, d.newError(tok.Pos(), `duplicate "@type" field`) + } + // Read field value. + tok, err := d.Read() + if err != nil { + return json.Token{}, err + } + if tok.Kind() != json.String { + return json.Token{}, d.newError(tok.Pos(), `@type field value is not a string: %v`, tok.RawString()) + } + typeURL = tok.ParsedString() + if typeURL == "" { + return json.Token{}, d.newError(tok.Pos(), `@type field contains empty value`) + } + typeTok = tok + } + } + + return typeTok, nil +} + +// skipJSONValue parses a JSON value (null, boolean, string, number, object and +// array) in order to advance the read to the next JSON value. It relies on +// the decoder returning an error if the types are not in valid sequence. +func (d decoder) skipJSONValue() error { + var open int + for { + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case json.ObjectClose, json.ArrayClose: + open-- + case json.ObjectOpen, json.ArrayOpen: + open++ + if open > d.opts.RecursionLimit { + return errors.New("exceeded max recursion depth") + } + case json.EOF: + // This can only happen if there's a bug in Decoder.Read. + // Avoid an infinite loop if this does happen. + return errors.New("unexpected EOF") + } + if open == 0 { + return nil + } + } +} + +// unmarshalAnyValue unmarshals the given custom-type message from the JSON +// object's "value" field. +func (d decoder) unmarshalAnyValue(unmarshal unmarshalFunc, m protoreflect.Message) error { + // Skip ObjectOpen, and start reading the fields. + d.Read() + + var found bool // Used for detecting duplicate "value". + for { + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case json.ObjectClose: + if !found { + return d.newError(tok.Pos(), `missing "value" field`) + } + return nil + + case json.Name: + switch tok.Name() { + case "@type": + // Skip the value as this was previously parsed already. 
+ d.Read() + + case "value": + if found { + return d.newError(tok.Pos(), `duplicate "value" field`) + } + // Unmarshal the field value into the given message. + if err := unmarshal(d, m); err != nil { + return err + } + found = true + + default: + if d.opts.DiscardUnknown { + if err := d.skipJSONValue(); err != nil { + return err + } + continue + } + return d.newError(tok.Pos(), "unknown field %v", tok.RawString()) + } + } + } +} + +// Wrapper types are encoded as JSON primitives like string, number or boolean. + +func (e encoder) marshalWrapperType(m protoreflect.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.WrapperValue_Value_field_number) + val := m.Get(fd) + return e.marshalSingular(val, fd) +} + +func (d decoder) unmarshalWrapperType(m protoreflect.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.WrapperValue_Value_field_number) + val, err := d.unmarshalScalar(fd) + if err != nil { + return err + } + m.Set(fd, val) + return nil +} + +// The JSON representation for Empty is an empty JSON object. + +func (e encoder) marshalEmpty(protoreflect.Message) error { + e.StartObject() + e.EndObject() + return nil +} + +func (d decoder) unmarshalEmpty(protoreflect.Message) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.ObjectOpen { + return d.unexpectedTokenError(tok) + } + + for { + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case json.ObjectClose: + return nil + + case json.Name: + if d.opts.DiscardUnknown { + if err := d.skipJSONValue(); err != nil { + return err + } + continue + } + return d.newError(tok.Pos(), "unknown field %v", tok.RawString()) + + default: + return d.unexpectedTokenError(tok) + } + } +} + +// The JSON representation for Struct is a JSON object that contains the encoded +// Struct.fields map and follows the serialization rules for a map. + +func (e encoder) marshalStruct(m protoreflect.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.Struct_Fields_field_number) + return e.marshalMap(m.Get(fd).Map(), fd) +} + +func (d decoder) unmarshalStruct(m protoreflect.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.Struct_Fields_field_number) + return d.unmarshalMap(m.Mutable(fd).Map(), fd) +} + +// The JSON representation for ListValue is JSON array that contains the encoded +// ListValue.values repeated field and follows the serialization rules for a +// repeated field. + +func (e encoder) marshalListValue(m protoreflect.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.ListValue_Values_field_number) + return e.marshalList(m.Get(fd).List(), fd) +} + +func (d decoder) unmarshalListValue(m protoreflect.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.ListValue_Values_field_number) + return d.unmarshalList(m.Mutable(fd).List(), fd) +} + +// The JSON representation for a Value is dependent on the oneof field that is +// set. Each of the field in the oneof has its own custom serialization rule. A +// Value message needs to be a oneof field set, else it is an error. 
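Editor's note, for context on the oneof-driven mapping described in the comment above: the same Value semantics are exposed through the public structpb and protojson packages of this module. The following is a minimal, hedged sketch with illustrative values only; it is not part of the vendored file and assumes those well-known-type packages are present in the build.

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	// Each Go value selects a different member of the Value "kind" oneof:
	// nil -> null_value, bool -> bool_value, numbers -> number_value,
	// string -> string_value, map -> struct_value, slice -> list_value.
	v, err := structpb.NewValue(map[string]interface{}{
		"enabled": true,
		"retries": 3,
		"tags":    []interface{}{"a", "b"},
	})
	if err != nil {
		panic(err) // unsupported Go types are reported here
	}
	out, err := protojson.Marshal(v)
	if err != nil {
		panic(err)
	}
	// Output is approximate; ordering and whitespace are not guaranteed, e.g.
	// {"enabled":true,"retries":3,"tags":["a","b"]}
	fmt.Println(string(out))
}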
+ +func (e encoder) marshalKnownValue(m protoreflect.Message) error { + od := m.Descriptor().Oneofs().ByName(genid.Value_Kind_oneof_name) + fd := m.WhichOneof(od) + if fd == nil { + return errors.New("%s: none of the oneof fields is set", genid.Value_message_fullname) + } + if fd.Number() == genid.Value_NumberValue_field_number { + if v := m.Get(fd).Float(); math.IsNaN(v) || math.IsInf(v, 0) { + return errors.New("%s: invalid %v value", genid.Value_NumberValue_field_fullname, v) + } + } + return e.marshalSingular(m.Get(fd), fd) +} + +func (d decoder) unmarshalKnownValue(m protoreflect.Message) error { + tok, err := d.Peek() + if err != nil { + return err + } + + var fd protoreflect.FieldDescriptor + var val protoreflect.Value + switch tok.Kind() { + case json.Null: + d.Read() + fd = m.Descriptor().Fields().ByNumber(genid.Value_NullValue_field_number) + val = protoreflect.ValueOfEnum(0) + + case json.Bool: + tok, err := d.Read() + if err != nil { + return err + } + fd = m.Descriptor().Fields().ByNumber(genid.Value_BoolValue_field_number) + val = protoreflect.ValueOfBool(tok.Bool()) + + case json.Number: + tok, err := d.Read() + if err != nil { + return err + } + fd = m.Descriptor().Fields().ByNumber(genid.Value_NumberValue_field_number) + var ok bool + val, ok = unmarshalFloat(tok, 64) + if !ok { + return d.newError(tok.Pos(), "invalid %v: %v", genid.Value_message_fullname, tok.RawString()) + } + + case json.String: + // A JSON string may have been encoded from the number_value field, + // e.g. "NaN", "Infinity", etc. Parsing a proto double type also allows + // for it to be in JSON string form. Given this custom encoding spec, + // however, there is no way to identify that and hence a JSON string is + // always assigned to the string_value field, which means that certain + // encoding cannot be parsed back to the same field. + tok, err := d.Read() + if err != nil { + return err + } + fd = m.Descriptor().Fields().ByNumber(genid.Value_StringValue_field_number) + val = protoreflect.ValueOfString(tok.ParsedString()) + + case json.ObjectOpen: + fd = m.Descriptor().Fields().ByNumber(genid.Value_StructValue_field_number) + val = m.NewField(fd) + if err := d.unmarshalStruct(val.Message()); err != nil { + return err + } + + case json.ArrayOpen: + fd = m.Descriptor().Fields().ByNumber(genid.Value_ListValue_field_number) + val = m.NewField(fd) + if err := d.unmarshalListValue(val.Message()); err != nil { + return err + } + + default: + return d.newError(tok.Pos(), "invalid %v: %v", genid.Value_message_fullname, tok.RawString()) + } + + m.Set(fd, val) + return nil +} + +// The JSON representation for a Duration is a JSON string that ends in the +// suffix "s" (indicating seconds) and is preceded by the number of seconds, +// with nanoseconds expressed as fractional seconds. +// +// Durations less than one second are represented with a 0 seconds field and a +// positive or negative nanos field. For durations of one second or more, a +// non-zero value for the nanos field must be of the same sign as the seconds +// field. +// +// Duration.seconds must be from -315,576,000,000 to +315,576,000,000 inclusive. +// Duration.nanos must be from -999,999,999 to +999,999,999 inclusive. 
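Editor's note: to make the seconds/nanos split described above concrete, here is a small sketch using the public durationpb and protojson packages of the same module (illustrative only, not part of the vendored code). It shows the 0/3/6/9 fractional-digit output and the matching signs on negative values.

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// 3s + 1ns -> seconds=3, nanos=1, rendered with 9 fractional digits.
	d := durationpb.New(3*time.Second + 1*time.Nanosecond)
	b, err := protojson.Marshal(d)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // "3.000000001s"

	// Negative durations carry the sign on both fields and in the string.
	neg := durationpb.New(-1500 * time.Millisecond)
	fmt.Println(neg.GetSeconds(), neg.GetNanos()) // -1 -500000000
	b, err = protojson.Marshal(neg)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // "-1.500s"
}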
+ +const ( + secondsInNanos = 999999999 + maxSecondsInDuration = 315576000000 +) + +func (e encoder) marshalDuration(m protoreflect.Message) error { + fds := m.Descriptor().Fields() + fdSeconds := fds.ByNumber(genid.Duration_Seconds_field_number) + fdNanos := fds.ByNumber(genid.Duration_Nanos_field_number) + + secsVal := m.Get(fdSeconds) + nanosVal := m.Get(fdNanos) + secs := secsVal.Int() + nanos := nanosVal.Int() + if secs < -maxSecondsInDuration || secs > maxSecondsInDuration { + return errors.New("%s: seconds out of range %v", genid.Duration_message_fullname, secs) + } + if nanos < -secondsInNanos || nanos > secondsInNanos { + return errors.New("%s: nanos out of range %v", genid.Duration_message_fullname, nanos) + } + if (secs > 0 && nanos < 0) || (secs < 0 && nanos > 0) { + return errors.New("%s: signs of seconds and nanos do not match", genid.Duration_message_fullname) + } + // Generated output always contains 0, 3, 6, or 9 fractional digits, + // depending on required precision, followed by the suffix "s". + var sign string + if secs < 0 || nanos < 0 { + sign, secs, nanos = "-", -1*secs, -1*nanos + } + x := fmt.Sprintf("%s%d.%09d", sign, secs, nanos) + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + e.WriteString(x + "s") + return nil +} + +func (d decoder) unmarshalDuration(m protoreflect.Message) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.String { + return d.unexpectedTokenError(tok) + } + + secs, nanos, ok := parseDuration(tok.ParsedString()) + if !ok { + return d.newError(tok.Pos(), "invalid %v value %v", genid.Duration_message_fullname, tok.RawString()) + } + // Validate seconds. No need to validate nanos because parseDuration would + // have covered that already. + if secs < -maxSecondsInDuration || secs > maxSecondsInDuration { + return d.newError(tok.Pos(), "%v value out of range: %v", genid.Duration_message_fullname, tok.RawString()) + } + + fds := m.Descriptor().Fields() + fdSeconds := fds.ByNumber(genid.Duration_Seconds_field_number) + fdNanos := fds.ByNumber(genid.Duration_Nanos_field_number) + + m.Set(fdSeconds, protoreflect.ValueOfInt64(secs)) + m.Set(fdNanos, protoreflect.ValueOfInt32(nanos)) + return nil +} + +// parseDuration parses the given input string for seconds and nanoseconds value +// for the Duration JSON format. The format is a decimal number with a suffix +// 's'. It can have optional plus/minus sign. There needs to be at least an +// integer or fractional part. Fractional part is limited to 9 digits only for +// nanoseconds precision, regardless of whether there are trailing zero digits. +// Example values are 1s, 0.1s, 1.s, .1s, +1s, -1s, -.1s. +func parseDuration(input string) (int64, int32, bool) { + b := []byte(input) + size := len(b) + if size < 2 { + return 0, 0, false + } + if b[size-1] != 's' { + return 0, 0, false + } + b = b[:size-1] + + // Read optional plus/minus symbol. + var neg bool + switch b[0] { + case '-': + neg = true + b = b[1:] + case '+': + b = b[1:] + } + if len(b) == 0 { + return 0, 0, false + } + + // Read the integer part. + var intp []byte + switch { + case b[0] == '0': + b = b[1:] + + case '1' <= b[0] && b[0] <= '9': + intp = b[0:] + b = b[1:] + n := 1 + for len(b) > 0 && '0' <= b[0] && b[0] <= '9' { + n++ + b = b[1:] + } + intp = intp[:n] + + case b[0] == '.': + // Continue below. + + default: + return 0, 0, false + } + + hasFrac := false + var frac [9]byte + if len(b) > 0 { + if b[0] != '.' 
{ + return 0, 0, false + } + // Read the fractional part. + b = b[1:] + n := 0 + for len(b) > 0 && n < 9 && '0' <= b[0] && b[0] <= '9' { + frac[n] = b[0] + n++ + b = b[1:] + } + // It is not valid if there are more bytes left. + if len(b) > 0 { + return 0, 0, false + } + // Pad fractional part with 0s. + for i := n; i < 9; i++ { + frac[i] = '0' + } + hasFrac = true + } + + var secs int64 + if len(intp) > 0 { + var err error + secs, err = strconv.ParseInt(string(intp), 10, 64) + if err != nil { + return 0, 0, false + } + } + + var nanos int64 + if hasFrac { + nanob := bytes.TrimLeft(frac[:], "0") + if len(nanob) > 0 { + var err error + nanos, err = strconv.ParseInt(string(nanob), 10, 32) + if err != nil { + return 0, 0, false + } + } + } + + if neg { + if secs > 0 { + secs = -secs + } + if nanos > 0 { + nanos = -nanos + } + } + return secs, int32(nanos), true +} + +// The JSON representation for a Timestamp is a JSON string in the RFC 3339 +// format, i.e. "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" where +// {year} is always expressed using four digits while {month}, {day}, {hour}, +// {min}, and {sec} are zero-padded to two digits each. The fractional seconds, +// which can go up to 9 digits, up to 1 nanosecond resolution, is optional. The +// "Z" suffix indicates the timezone ("UTC"); the timezone is required. Encoding +// should always use UTC (as indicated by "Z") and a decoder should be able to +// accept both UTC and other timezones (as indicated by an offset). +// +// Timestamp.seconds must be from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z +// inclusive. +// Timestamp.nanos must be from 0 to 999,999,999 inclusive. + +const ( + maxTimestampSeconds = 253402300799 + minTimestampSeconds = -62135596800 +) + +func (e encoder) marshalTimestamp(m protoreflect.Message) error { + fds := m.Descriptor().Fields() + fdSeconds := fds.ByNumber(genid.Timestamp_Seconds_field_number) + fdNanos := fds.ByNumber(genid.Timestamp_Nanos_field_number) + + secsVal := m.Get(fdSeconds) + nanosVal := m.Get(fdNanos) + secs := secsVal.Int() + nanos := nanosVal.Int() + if secs < minTimestampSeconds || secs > maxTimestampSeconds { + return errors.New("%s: seconds out of range %v", genid.Timestamp_message_fullname, secs) + } + if nanos < 0 || nanos > secondsInNanos { + return errors.New("%s: nanos out of range %v", genid.Timestamp_message_fullname, nanos) + } + // Uses RFC 3339, where generated output will be Z-normalized and uses 0, 3, + // 6 or 9 fractional digits. + t := time.Unix(secs, nanos).UTC() + x := t.Format("2006-01-02T15:04:05.000000000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + e.WriteString(x + "Z") + return nil +} + +func (d decoder) unmarshalTimestamp(m protoreflect.Message) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.String { + return d.unexpectedTokenError(tok) + } + + s := tok.ParsedString() + t, err := time.Parse(time.RFC3339Nano, s) + if err != nil { + return d.newError(tok.Pos(), "invalid %v value %v", genid.Timestamp_message_fullname, tok.RawString()) + } + // Validate seconds. + secs := t.Unix() + if secs < minTimestampSeconds || secs > maxTimestampSeconds { + return d.newError(tok.Pos(), "%v value out of range: %v", genid.Timestamp_message_fullname, tok.RawString()) + } + // Validate subseconds. 
+ i := strings.LastIndexByte(s, '.') // start of subsecond field + j := strings.LastIndexAny(s, "Z-+") // start of timezone field + if i >= 0 && j >= i && j-i > len(".999999999") { + return d.newError(tok.Pos(), "invalid %v value %v", genid.Timestamp_message_fullname, tok.RawString()) + } + + fds := m.Descriptor().Fields() + fdSeconds := fds.ByNumber(genid.Timestamp_Seconds_field_number) + fdNanos := fds.ByNumber(genid.Timestamp_Nanos_field_number) + + m.Set(fdSeconds, protoreflect.ValueOfInt64(secs)) + m.Set(fdNanos, protoreflect.ValueOfInt32(int32(t.Nanosecond()))) + return nil +} + +// The JSON representation for a FieldMask is a JSON string where paths are +// separated by a comma. Fields name in each path are converted to/from +// lower-camel naming conventions. Encoding should fail if the path name would +// end up differently after a round-trip. + +func (e encoder) marshalFieldMask(m protoreflect.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.FieldMask_Paths_field_number) + list := m.Get(fd).List() + paths := make([]string, 0, list.Len()) + + for i := 0; i < list.Len(); i++ { + s := list.Get(i).String() + if !protoreflect.FullName(s).IsValid() { + return errors.New("%s contains invalid path: %q", genid.FieldMask_Paths_field_fullname, s) + } + // Return error if conversion to camelCase is not reversible. + cc := strs.JSONCamelCase(s) + if s != strs.JSONSnakeCase(cc) { + return errors.New("%s contains irreversible value %q", genid.FieldMask_Paths_field_fullname, s) + } + paths = append(paths, cc) + } + + e.WriteString(strings.Join(paths, ",")) + return nil +} + +func (d decoder) unmarshalFieldMask(m protoreflect.Message) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.String { + return d.unexpectedTokenError(tok) + } + str := strings.TrimSpace(tok.ParsedString()) + if str == "" { + return nil + } + paths := strings.Split(str, ",") + + fd := m.Descriptor().Fields().ByNumber(genid.FieldMask_Paths_field_number) + list := m.Mutable(fd).List() + + for _, s0 := range paths { + s := strs.JSONSnakeCase(s0) + if strings.Contains(s0, "_") || !protoreflect.FullName(s).IsValid() { + return d.newError(tok.Pos(), "%v contains invalid path: %q", genid.FieldMask_Paths_field_fullname, s0) + } + list.Append(protoreflect.ValueOfString(s)) + } + return nil +} diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go index 4921b2d4..24bc98ac 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go @@ -21,7 +21,7 @@ import ( "google.golang.org/protobuf/reflect/protoregistry" ) -// Unmarshal reads the given []byte into the given proto.Message. +// Unmarshal reads the given []byte into the given [proto.Message]. // The provided message must be mutable (e.g., a non-nil pointer to a message). func Unmarshal(b []byte, m proto.Message) error { return UnmarshalOptions{}.Unmarshal(b, m) @@ -51,7 +51,7 @@ type UnmarshalOptions struct { } } -// Unmarshal reads the given []byte and populates the given proto.Message +// Unmarshal reads the given []byte and populates the given [proto.Message] // using options in the UnmarshalOptions object. // The provided message must be mutable (e.g., a non-nil pointer to a message). func (o UnmarshalOptions) Unmarshal(b []byte, m proto.Message) error { @@ -84,7 +84,7 @@ type decoder struct { } // newError returns an error object with position info. 
-func (d decoder) newError(pos int, f string, x ...interface{}) error { +func (d decoder) newError(pos int, f string, x ...any) error { line, column := d.Position(pos) head := fmt.Sprintf("(line %d:%d): ", line, column) return errors.New(head+f, x...) @@ -96,7 +96,7 @@ func (d decoder) unexpectedTokenError(tok text.Token) error { } // syntaxError returns a syntax error for given position. -func (d decoder) syntaxError(pos int, f string, x ...interface{}) error { +func (d decoder) syntaxError(pos int, f string, x ...any) error { line, column := d.Position(pos) head := fmt.Sprintf("syntax error (line %d:%d): ", line, column) return errors.New(head+f, x...) @@ -739,7 +739,9 @@ func (d decoder) skipValue() error { case text.ListClose: return nil case text.MessageOpen: - return d.skipMessageValue() + if err := d.skipMessageValue(); err != nil { + return err + } default: // Skip items. This will not validate whether skipped values are // of the same type or not, same behavior as C++ diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go index 722a7b41..1f57e661 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go @@ -27,15 +27,17 @@ const defaultIndent = " " // Format formats the message as a multiline string. // This function is only intended for human consumption and ignores errors. -// Do not depend on the output being stable. It may change over time across -// different versions of the program. +// Do not depend on the output being stable. Its output will change across +// different builds of your program, even when using the same version of the +// protobuf module. func Format(m proto.Message) string { return MarshalOptions{Multiline: true}.Format(m) } -// Marshal writes the given proto.Message in textproto format using default -// options. Do not depend on the output being stable. It may change over time -// across different versions of the program. +// Marshal writes the given [proto.Message] in textproto format using default +// options. Do not depend on the output being stable. Its output will change +// across different builds of your program, even when using the same version of +// the protobuf module. func Marshal(m proto.Message) ([]byte, error) { return MarshalOptions{}.Marshal(m) } @@ -84,8 +86,9 @@ type MarshalOptions struct { // Format formats the message as a string. // This method is only intended for human consumption and ignores errors. -// Do not depend on the output being stable. It may change over time across -// different versions of the program. +// Do not depend on the output being stable. Its output will change across +// different builds of your program, even when using the same version of the +// protobuf module. func (o MarshalOptions) Format(m proto.Message) string { if m == nil || !m.ProtoReflect().IsValid() { return "" // invalid syntax, but okay since this is for debugging @@ -97,9 +100,10 @@ func (o MarshalOptions) Format(m proto.Message) string { return string(b) } -// Marshal writes the given proto.Message in textproto format using options in -// MarshalOptions object. Do not depend on the output being stable. It may -// change over time across different versions of the program. +// Marshal writes the given [proto.Message] in textproto format using options in +// MarshalOptions object. Do not depend on the output being stable. 
Its output +// will change across different builds of your program, even when using the +// same version of the protobuf module. func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) { return o.marshal(nil, m) } diff --git a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go index f4b4686c..e942bc98 100644 --- a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go +++ b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go @@ -6,7 +6,7 @@ // See https://protobuf.dev/programming-guides/encoding. // // For marshaling and unmarshaling entire protobuf messages, -// use the "google.golang.org/protobuf/proto" package instead. +// use the [google.golang.org/protobuf/proto] package instead. package protowire import ( @@ -87,7 +87,7 @@ func ParseError(n int) error { // ConsumeField parses an entire field record (both tag and value) and returns // the field number, the wire type, and the total length. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). // // The total length includes the tag header and the end group marker (if the // field is a group). @@ -104,8 +104,8 @@ func ConsumeField(b []byte) (Number, Type, int) { } // ConsumeFieldValue parses a field value and returns its length. -// This assumes that the field Number and wire Type have already been parsed. -// This returns a negative length upon an error (see ParseError). +// This assumes that the field [Number] and wire [Type] have already been parsed. +// This returns a negative length upon an error (see [ParseError]). // // When parsing a group, the length includes the end group marker and // the end group is verified to match the starting field number. @@ -164,7 +164,7 @@ func AppendTag(b []byte, num Number, typ Type) []byte { } // ConsumeTag parses b as a varint-encoded tag, reporting its length. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). func ConsumeTag(b []byte) (Number, Type, int) { v, n := ConsumeVarint(b) if n < 0 { @@ -263,7 +263,7 @@ func AppendVarint(b []byte, v uint64) []byte { } // ConsumeVarint parses b as a varint-encoded uint64, reporting its length. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). func ConsumeVarint(b []byte) (v uint64, n int) { var y uint64 if len(b) <= 0 { @@ -384,7 +384,7 @@ func AppendFixed32(b []byte, v uint32) []byte { } // ConsumeFixed32 parses b as a little-endian uint32, reporting its length. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). func ConsumeFixed32(b []byte) (v uint32, n int) { if len(b) < 4 { return 0, errCodeTruncated @@ -412,7 +412,7 @@ func AppendFixed64(b []byte, v uint64) []byte { } // ConsumeFixed64 parses b as a little-endian uint64, reporting its length. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). func ConsumeFixed64(b []byte) (v uint64, n int) { if len(b) < 8 { return 0, errCodeTruncated @@ -432,7 +432,7 @@ func AppendBytes(b []byte, v []byte) []byte { } // ConsumeBytes parses b as a length-prefixed bytes value, reporting its length. -// This returns a negative length upon an error (see ParseError). 
+// This returns a negative length upon an error (see [ParseError]). func ConsumeBytes(b []byte) (v []byte, n int) { m, n := ConsumeVarint(b) if n < 0 { @@ -456,7 +456,7 @@ func AppendString(b []byte, v string) []byte { } // ConsumeString parses b as a length-prefixed bytes value, reporting its length. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). func ConsumeString(b []byte) (v string, n int) { bb, n := ConsumeBytes(b) return string(bb), n @@ -471,7 +471,7 @@ func AppendGroup(b []byte, num Number, v []byte) []byte { // ConsumeGroup parses b as a group value until the trailing end group marker, // and verifies that the end marker matches the provided num. The value v // does not contain the end marker, while the length does contain the end marker. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). func ConsumeGroup(num Number, b []byte) (v []byte, n int) { n = ConsumeFieldValue(num, StartGroupType, b) if n < 0 { @@ -495,8 +495,8 @@ func SizeGroup(num Number, n int) int { return n + SizeTag(num) } -// DecodeTag decodes the field Number and wire Type from its unified form. -// The Number is -1 if the decoded field number overflows int32. +// DecodeTag decodes the field [Number] and wire [Type] from its unified form. +// The [Number] is -1 if the decoded field number overflows int32. // Other than overflow, this does not check for field number validity. func DecodeTag(x uint64) (Number, Type) { // NOTE: MessageSet allows for larger field numbers than normal. @@ -506,7 +506,7 @@ func DecodeTag(x uint64) (Number, Type) { return Number(x >> 3), Type(x & 7) } -// EncodeTag encodes the field Number and wire Type into its unified form. +// EncodeTag encodes the field [Number] and wire [Type] into its unified form. func EncodeTag(num Number, typ Type) uint64 { return uint64(num)<<3 | uint64(typ&7) } diff --git a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go index db5248e1..87e46bd4 100644 --- a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go +++ b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go @@ -83,7 +83,13 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string { case protoreflect.FileImports: for i := 0; i < vs.Len(); i++ { var rs records - rs.Append(reflect.ValueOf(vs.Get(i)), "Path", "Package", "IsPublic", "IsWeak") + rv := reflect.ValueOf(vs.Get(i)) + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Path"), "Path"}, + {rv.MethodByName("Package"), "Package"}, + {rv.MethodByName("IsPublic"), "IsPublic"}, + {rv.MethodByName("IsWeak"), "IsWeak"}, + }...) ss = append(ss, "{"+rs.Join()+"}") } return start + joinStrings(ss, allowMulti) + end @@ -92,34 +98,26 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string { for i := 0; i < vs.Len(); i++ { m := reflect.ValueOf(vs).MethodByName("Get") v := m.Call([]reflect.Value{reflect.ValueOf(i)})[0].Interface() - ss = append(ss, formatDescOpt(v.(protoreflect.Descriptor), false, allowMulti && !isEnumValue)) + ss = append(ss, formatDescOpt(v.(protoreflect.Descriptor), false, allowMulti && !isEnumValue, nil)) } return start + joinStrings(ss, allowMulti && isEnumValue) + end } } -// descriptorAccessors is a list of accessors to print for each descriptor. 
-// -// Do not print all accessors since some contain redundant information, -// while others are pointers that we do not want to follow since the descriptor -// is actually a cyclic graph. -// -// Using a list allows us to print the accessors in a sensible order. -var descriptorAccessors = map[reflect.Type][]string{ - reflect.TypeOf((*protoreflect.FileDescriptor)(nil)).Elem(): {"Path", "Package", "Imports", "Messages", "Enums", "Extensions", "Services"}, - reflect.TypeOf((*protoreflect.MessageDescriptor)(nil)).Elem(): {"IsMapEntry", "Fields", "Oneofs", "ReservedNames", "ReservedRanges", "RequiredNumbers", "ExtensionRanges", "Messages", "Enums", "Extensions"}, - reflect.TypeOf((*protoreflect.FieldDescriptor)(nil)).Elem(): {"Number", "Cardinality", "Kind", "HasJSONName", "JSONName", "HasPresence", "IsExtension", "IsPacked", "IsWeak", "IsList", "IsMap", "MapKey", "MapValue", "HasDefault", "Default", "ContainingOneof", "ContainingMessage", "Message", "Enum"}, - reflect.TypeOf((*protoreflect.OneofDescriptor)(nil)).Elem(): {"Fields"}, // not directly used; must keep in sync with formatDescOpt - reflect.TypeOf((*protoreflect.EnumDescriptor)(nil)).Elem(): {"Values", "ReservedNames", "ReservedRanges"}, - reflect.TypeOf((*protoreflect.EnumValueDescriptor)(nil)).Elem(): {"Number"}, - reflect.TypeOf((*protoreflect.ServiceDescriptor)(nil)).Elem(): {"Methods"}, - reflect.TypeOf((*protoreflect.MethodDescriptor)(nil)).Elem(): {"Input", "Output", "IsStreamingClient", "IsStreamingServer"}, +type methodAndName struct { + method reflect.Value + name string } func FormatDesc(s fmt.State, r rune, t protoreflect.Descriptor) { - io.WriteString(s, formatDescOpt(t, true, r == 'v' && (s.Flag('+') || s.Flag('#')))) + io.WriteString(s, formatDescOpt(t, true, r == 'v' && (s.Flag('+') || s.Flag('#')), nil)) } -func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string { + +func InternalFormatDescOptForTesting(t protoreflect.Descriptor, isRoot, allowMulti bool, record func(string)) string { + return formatDescOpt(t, isRoot, allowMulti, record) +} + +func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool, record func(string)) string { rv := reflect.ValueOf(t) rt := rv.MethodByName("ProtoType").Type().In(0) @@ -129,26 +127,60 @@ func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string { } _, isFile := t.(protoreflect.FileDescriptor) - rs := records{allowMulti: allowMulti} + rs := records{ + allowMulti: allowMulti, + record: record, + } if t.IsPlaceholder() { if isFile { - rs.Append(rv, "Path", "Package", "IsPlaceholder") + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Path"), "Path"}, + {rv.MethodByName("Package"), "Package"}, + {rv.MethodByName("IsPlaceholder"), "IsPlaceholder"}, + }...) } else { - rs.Append(rv, "FullName", "IsPlaceholder") + rs.Append(rv, []methodAndName{ + {rv.MethodByName("FullName"), "FullName"}, + {rv.MethodByName("IsPlaceholder"), "IsPlaceholder"}, + }...) } } else { switch { case isFile: - rs.Append(rv, "Syntax") + rs.Append(rv, methodAndName{rv.MethodByName("Syntax"), "Syntax"}) case isRoot: - rs.Append(rv, "Syntax", "FullName") + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Syntax"), "Syntax"}, + {rv.MethodByName("FullName"), "FullName"}, + }...) 
default: - rs.Append(rv, "Name") + rs.Append(rv, methodAndName{rv.MethodByName("Name"), "Name"}) } switch t := t.(type) { case protoreflect.FieldDescriptor: - for _, s := range descriptorAccessors[rt] { - switch s { + accessors := []methodAndName{ + {rv.MethodByName("Number"), "Number"}, + {rv.MethodByName("Cardinality"), "Cardinality"}, + {rv.MethodByName("Kind"), "Kind"}, + {rv.MethodByName("HasJSONName"), "HasJSONName"}, + {rv.MethodByName("JSONName"), "JSONName"}, + {rv.MethodByName("HasPresence"), "HasPresence"}, + {rv.MethodByName("IsExtension"), "IsExtension"}, + {rv.MethodByName("IsPacked"), "IsPacked"}, + {rv.MethodByName("IsWeak"), "IsWeak"}, + {rv.MethodByName("IsList"), "IsList"}, + {rv.MethodByName("IsMap"), "IsMap"}, + {rv.MethodByName("MapKey"), "MapKey"}, + {rv.MethodByName("MapValue"), "MapValue"}, + {rv.MethodByName("HasDefault"), "HasDefault"}, + {rv.MethodByName("Default"), "Default"}, + {rv.MethodByName("ContainingOneof"), "ContainingOneof"}, + {rv.MethodByName("ContainingMessage"), "ContainingMessage"}, + {rv.MethodByName("Message"), "Message"}, + {rv.MethodByName("Enum"), "Enum"}, + } + for _, s := range accessors { + switch s.name { case "MapKey": if k := t.MapKey(); k != nil { rs.recs = append(rs.recs, [2]string{"MapKey", k.Kind().String()}) @@ -157,20 +189,20 @@ func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string { if v := t.MapValue(); v != nil { switch v.Kind() { case protoreflect.EnumKind: - rs.recs = append(rs.recs, [2]string{"MapValue", string(v.Enum().FullName())}) + rs.AppendRecs("MapValue", [2]string{"MapValue", string(v.Enum().FullName())}) case protoreflect.MessageKind, protoreflect.GroupKind: - rs.recs = append(rs.recs, [2]string{"MapValue", string(v.Message().FullName())}) + rs.AppendRecs("MapValue", [2]string{"MapValue", string(v.Message().FullName())}) default: - rs.recs = append(rs.recs, [2]string{"MapValue", v.Kind().String()}) + rs.AppendRecs("MapValue", [2]string{"MapValue", v.Kind().String()}) } } case "ContainingOneof": if od := t.ContainingOneof(); od != nil { - rs.recs = append(rs.recs, [2]string{"Oneof", string(od.Name())}) + rs.AppendRecs("ContainingOneof", [2]string{"Oneof", string(od.Name())}) } case "ContainingMessage": if t.IsExtension() { - rs.recs = append(rs.recs, [2]string{"Extendee", string(t.ContainingMessage().FullName())}) + rs.AppendRecs("ContainingMessage", [2]string{"Extendee", string(t.ContainingMessage().FullName())}) } case "Message": if !t.IsMap() { @@ -187,13 +219,62 @@ func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string { ss = append(ss, string(fs.Get(i).Name())) } if len(ss) > 0 { - rs.recs = append(rs.recs, [2]string{"Fields", "[" + joinStrings(ss, false) + "]"}) + rs.AppendRecs("Fields", [2]string{"Fields", "[" + joinStrings(ss, false) + "]"}) } - default: - rs.Append(rv, descriptorAccessors[rt]...) + + case protoreflect.FileDescriptor: + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Path"), "Path"}, + {rv.MethodByName("Package"), "Package"}, + {rv.MethodByName("Imports"), "Imports"}, + {rv.MethodByName("Messages"), "Messages"}, + {rv.MethodByName("Enums"), "Enums"}, + {rv.MethodByName("Extensions"), "Extensions"}, + {rv.MethodByName("Services"), "Services"}, + }...) 
+ + case protoreflect.MessageDescriptor: + rs.Append(rv, []methodAndName{ + {rv.MethodByName("IsMapEntry"), "IsMapEntry"}, + {rv.MethodByName("Fields"), "Fields"}, + {rv.MethodByName("Oneofs"), "Oneofs"}, + {rv.MethodByName("ReservedNames"), "ReservedNames"}, + {rv.MethodByName("ReservedRanges"), "ReservedRanges"}, + {rv.MethodByName("RequiredNumbers"), "RequiredNumbers"}, + {rv.MethodByName("ExtensionRanges"), "ExtensionRanges"}, + {rv.MethodByName("Messages"), "Messages"}, + {rv.MethodByName("Enums"), "Enums"}, + {rv.MethodByName("Extensions"), "Extensions"}, + }...) + + case protoreflect.EnumDescriptor: + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Values"), "Values"}, + {rv.MethodByName("ReservedNames"), "ReservedNames"}, + {rv.MethodByName("ReservedRanges"), "ReservedRanges"}, + {rv.MethodByName("IsClosed"), "IsClosed"}, + }...) + + case protoreflect.EnumValueDescriptor: + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Number"), "Number"}, + }...) + + case protoreflect.ServiceDescriptor: + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Methods"), "Methods"}, + }...) + + case protoreflect.MethodDescriptor: + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Input"), "Input"}, + {rv.MethodByName("Output"), "Output"}, + {rv.MethodByName("IsStreamingClient"), "IsStreamingClient"}, + {rv.MethodByName("IsStreamingServer"), "IsStreamingServer"}, + }...) } - if rv.MethodByName("GoType").IsValid() { - rs.Append(rv, "GoType") + if m := rv.MethodByName("GoType"); m.IsValid() { + rs.Append(rv, methodAndName{m, "GoType"}) } } return start + rs.Join() + end @@ -202,19 +283,34 @@ func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string { type records struct { recs [][2]string allowMulti bool + + // record is a function that will be called for every Append() or + // AppendRecs() call, to be used for testing with the + // InternalFormatDescOptForTesting function. + record func(string) } -func (rs *records) Append(v reflect.Value, accessors ...string) { +func (rs *records) AppendRecs(fieldName string, newRecs [2]string) { + if rs.record != nil { + rs.record(fieldName) + } + rs.recs = append(rs.recs, newRecs) +} + +func (rs *records) Append(v reflect.Value, accessors ...methodAndName) { for _, a := range accessors { + if rs.record != nil { + rs.record(a.name) + } var rv reflect.Value - if m := v.MethodByName(a); m.IsValid() { - rv = m.Call(nil)[0] + if a.method.IsValid() { + rv = a.method.Call(nil)[0] } if v.Kind() == reflect.Struct && !rv.IsValid() { - rv = v.FieldByName(a) + rv = v.FieldByName(a.name) } if !rv.IsValid() { - panic(fmt.Sprintf("unknown accessor: %v.%s", v.Type(), a)) + panic(fmt.Sprintf("unknown accessor: %v.%s", v.Type(), a.name)) } if _, ok := rv.Interface().(protoreflect.Value); ok { rv = rv.MethodByName("Interface").Call(nil)[0] @@ -261,7 +357,7 @@ func (rs *records) Append(v reflect.Value, accessors ...string) { default: s = fmt.Sprint(v) } - rs.recs = append(rs.recs, [2]string{a, s}) + rs.recs = append(rs.recs, [2]string{a.name, s}) } } diff --git a/vendor/google.golang.org/protobuf/internal/editiondefaults/defaults.go b/vendor/google.golang.org/protobuf/internal/editiondefaults/defaults.go new file mode 100644 index 00000000..14656b65 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/editiondefaults/defaults.go @@ -0,0 +1,12 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package editiondefaults contains the binary representation of the editions +// defaults. +package editiondefaults + +import _ "embed" + +//go:embed editions_defaults.binpb +var Defaults []byte diff --git a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb new file mode 100644 index 0000000000000000000000000000000000000000..ff6a38360add36f53d48bb0863b701696e0d7b2d GIT binary patch literal 93 zcmd;*mUzal#C*w)K}(Q>QGiK;Nr72|(SYfa9TNv5m$bxlxFnMRqXeS@6Ht;7B*_4j Ve8H{+(u69m1u{(G8N0>{b^xZ!4_5#H literal 0 HcmV?d00001 diff --git a/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go new file mode 100644 index 00000000..029a6a12 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go @@ -0,0 +1,13 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package editionssupport defines constants for editions that are supported. +package editionssupport + +import descriptorpb "google.golang.org/protobuf/types/descriptorpb" + +const ( + Minimum = descriptorpb.Edition_EDITION_PROTO2 + Maximum = descriptorpb.Edition_EDITION_2023 +) diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go new file mode 100644 index 00000000..ea1d3e65 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go @@ -0,0 +1,340 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "bytes" + "fmt" + "io" + "regexp" + "unicode/utf8" + + "google.golang.org/protobuf/internal/errors" +) + +// call specifies which Decoder method was invoked. +type call uint8 + +const ( + readCall call = iota + peekCall +) + +const unexpectedFmt = "unexpected token %s" + +// ErrUnexpectedEOF means that EOF was encountered in the middle of the input. +var ErrUnexpectedEOF = errors.New("%v", io.ErrUnexpectedEOF) + +// Decoder is a token-based JSON decoder. +type Decoder struct { + // lastCall is last method called, either readCall or peekCall. + // Initial value is readCall. + lastCall call + + // lastToken contains the last read token. + lastToken Token + + // lastErr contains the last read error. + lastErr error + + // openStack is a stack containing ObjectOpen and ArrayOpen values. The + // top of stack represents the object or the array the current value is + // directly located in. + openStack []Kind + + // orig is used in reporting line and column. + orig []byte + // in contains the unconsumed input. + in []byte +} + +// NewDecoder returns a Decoder to read the given []byte. +func NewDecoder(b []byte) *Decoder { + return &Decoder{orig: b, in: b} +} + +// Peek looks ahead and returns the next token kind without advancing a read. +func (d *Decoder) Peek() (Token, error) { + defer func() { d.lastCall = peekCall }() + if d.lastCall == readCall { + d.lastToken, d.lastErr = d.Read() + } + return d.lastToken, d.lastErr +} + +// Read returns the next JSON token. +// It will return an error if there is no valid token. 
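Editor's note: the Peek/Read interplay above works by caching the last token and remembering whether the previous call was a Peek or a Read. Since this is an internal package and cannot be imported directly, the following is a stripped-down, self-contained sketch of that caching pattern over a hypothetical pre-tokenized input; it is an illustration of the idea, not the vendored implementation.

package main

import "fmt"

// tokenStream mirrors the lastCall/lastToken caching used above:
// Peek performs a real read only when the previous call was a Read,
// and Read returns the cached token when the previous call was a Peek.
type tokenStream struct {
	toks     []string // pre-tokenized input, for illustration only
	lastPeek bool     // was the previous call a Peek?
	lastTok  string
	lastErr  error
}

func (s *tokenStream) Read() (string, error) {
	if s.lastPeek {
		// Hand back the token that Peek already consumed.
		s.lastPeek = false
		return s.lastTok, s.lastErr
	}
	if len(s.toks) == 0 {
		return "", fmt.Errorf("unexpected EOF")
	}
	s.lastTok, s.toks = s.toks[0], s.toks[1:]
	return s.lastTok, nil
}

func (s *tokenStream) Peek() (string, error) {
	if !s.lastPeek {
		s.lastTok, s.lastErr = s.Read()
		s.lastPeek = true
	}
	return s.lastTok, s.lastErr
}

func main() {
	s := &tokenStream{toks: []string{"{", `"a"`, "1", "}"}}
	t, _ := s.Peek()
	fmt.Println("peek:", t) // peek: {
	t, _ = s.Read()
	fmt.Println("read:", t) // read: { (same token, nothing extra consumed)
}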
+func (d *Decoder) Read() (Token, error) { + const scalar = Null | Bool | Number | String + + defer func() { d.lastCall = readCall }() + if d.lastCall == peekCall { + return d.lastToken, d.lastErr + } + + tok, err := d.parseNext() + if err != nil { + return Token{}, err + } + + switch tok.kind { + case EOF: + if len(d.openStack) != 0 || + d.lastToken.kind&scalar|ObjectClose|ArrayClose == 0 { + return Token{}, ErrUnexpectedEOF + } + + case Null: + if !d.isValueNext() { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + + case Bool, Number: + if !d.isValueNext() { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + + case String: + if d.isValueNext() { + break + } + // This string token should only be for a field name. + if d.lastToken.kind&(ObjectOpen|comma) == 0 { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + if len(d.in) == 0 { + return Token{}, ErrUnexpectedEOF + } + if c := d.in[0]; c != ':' { + return Token{}, d.newSyntaxError(d.currPos(), `unexpected character %s, missing ":" after field name`, string(c)) + } + tok.kind = Name + d.consume(1) + + case ObjectOpen, ArrayOpen: + if !d.isValueNext() { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + d.openStack = append(d.openStack, tok.kind) + + case ObjectClose: + if len(d.openStack) == 0 || + d.lastToken.kind&(Name|comma) != 0 || + d.openStack[len(d.openStack)-1] != ObjectOpen { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + d.openStack = d.openStack[:len(d.openStack)-1] + + case ArrayClose: + if len(d.openStack) == 0 || + d.lastToken.kind == comma || + d.openStack[len(d.openStack)-1] != ArrayOpen { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + d.openStack = d.openStack[:len(d.openStack)-1] + + case comma: + if len(d.openStack) == 0 || + d.lastToken.kind&(scalar|ObjectClose|ArrayClose) == 0 { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + } + + // Update d.lastToken only after validating token to be in the right sequence. + d.lastToken = tok + + if d.lastToken.kind == comma { + return d.Read() + } + return tok, nil +} + +// Any sequence that looks like a non-delimiter (for error reporting). +var errRegexp = regexp.MustCompile(`^([-+._a-zA-Z0-9]{1,32}|.)`) + +// parseNext parses for the next JSON token. It returns a Token object for +// different types, except for Name. It does not handle whether the next token +// is in a valid sequence or not. +func (d *Decoder) parseNext() (Token, error) { + // Trim leading spaces. 
+ d.consume(0) + + in := d.in + if len(in) == 0 { + return d.consumeToken(EOF, 0), nil + } + + switch in[0] { + case 'n': + if n := matchWithDelim("null", in); n != 0 { + return d.consumeToken(Null, n), nil + } + + case 't': + if n := matchWithDelim("true", in); n != 0 { + return d.consumeBoolToken(true, n), nil + } + + case 'f': + if n := matchWithDelim("false", in); n != 0 { + return d.consumeBoolToken(false, n), nil + } + + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + if n, ok := parseNumber(in); ok { + return d.consumeToken(Number, n), nil + } + + case '"': + s, n, err := d.parseString(in) + if err != nil { + return Token{}, err + } + return d.consumeStringToken(s, n), nil + + case '{': + return d.consumeToken(ObjectOpen, 1), nil + + case '}': + return d.consumeToken(ObjectClose, 1), nil + + case '[': + return d.consumeToken(ArrayOpen, 1), nil + + case ']': + return d.consumeToken(ArrayClose, 1), nil + + case ',': + return d.consumeToken(comma, 1), nil + } + return Token{}, d.newSyntaxError(d.currPos(), "invalid value %s", errRegexp.Find(in)) +} + +// newSyntaxError returns an error with line and column information useful for +// syntax errors. +func (d *Decoder) newSyntaxError(pos int, f string, x ...any) error { + e := errors.New(f, x...) + line, column := d.Position(pos) + return errors.New("syntax error (line %d:%d): %v", line, column, e) +} + +// Position returns line and column number of given index of the original input. +// It will panic if index is out of range. +func (d *Decoder) Position(idx int) (line int, column int) { + b := d.orig[:idx] + line = bytes.Count(b, []byte("\n")) + 1 + if i := bytes.LastIndexByte(b, '\n'); i >= 0 { + b = b[i+1:] + } + column = utf8.RuneCount(b) + 1 // ignore multi-rune characters + return line, column +} + +// currPos returns the current index position of d.in from d.orig. +func (d *Decoder) currPos() int { + return len(d.orig) - len(d.in) +} + +// matchWithDelim matches s with the input b and verifies that the match +// terminates with a delimiter of some form (e.g., r"[^-+_.a-zA-Z0-9]"). +// As a special case, EOF is considered a delimiter. It returns the length of s +// if there is a match, else 0. +func matchWithDelim(s string, b []byte) int { + if !bytes.HasPrefix(b, []byte(s)) { + return 0 + } + + n := len(s) + if n < len(b) && isNotDelim(b[n]) { + return 0 + } + return n +} + +// isNotDelim returns true if given byte is a not delimiter character. +func isNotDelim(c byte) bool { + return (c == '-' || c == '+' || c == '.' || c == '_' || + ('a' <= c && c <= 'z') || + ('A' <= c && c <= 'Z') || + ('0' <= c && c <= '9')) +} + +// consume consumes n bytes of input and any subsequent whitespace. +func (d *Decoder) consume(n int) { + d.in = d.in[n:] + for len(d.in) > 0 { + switch d.in[0] { + case ' ', '\n', '\r', '\t': + d.in = d.in[1:] + default: + return + } + } +} + +// isValueNext returns true if next type should be a JSON value: Null, +// Number, String or Bool. 
+func (d *Decoder) isValueNext() bool { + if len(d.openStack) == 0 { + return d.lastToken.kind == 0 + } + + start := d.openStack[len(d.openStack)-1] + switch start { + case ObjectOpen: + return d.lastToken.kind&Name != 0 + case ArrayOpen: + return d.lastToken.kind&(ArrayOpen|comma) != 0 + } + panic(fmt.Sprintf( + "unreachable logic in Decoder.isValueNext, lastToken.kind: %v, openStack: %v", + d.lastToken.kind, start)) +} + +// consumeToken constructs a Token for given Kind with raw value derived from +// current d.in and given size, and consumes the given size-length of it. +func (d *Decoder) consumeToken(kind Kind, size int) Token { + tok := Token{ + kind: kind, + raw: d.in[:size], + pos: len(d.orig) - len(d.in), + } + d.consume(size) + return tok +} + +// consumeBoolToken constructs a Token for a Bool kind with raw value derived from +// current d.in and given size. +func (d *Decoder) consumeBoolToken(b bool, size int) Token { + tok := Token{ + kind: Bool, + raw: d.in[:size], + pos: len(d.orig) - len(d.in), + boo: b, + } + d.consume(size) + return tok +} + +// consumeStringToken constructs a Token for a String kind with raw value derived +// from current d.in and given size. +func (d *Decoder) consumeStringToken(s string, size int) Token { + tok := Token{ + kind: String, + raw: d.in[:size], + pos: len(d.orig) - len(d.in), + str: s, + } + d.consume(size) + return tok +} + +// Clone returns a copy of the Decoder for use in reading ahead the next JSON +// object, array or other values without affecting current Decoder. +func (d *Decoder) Clone() *Decoder { + ret := *d + ret.openStack = append([]Kind(nil), ret.openStack...) + return &ret +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go new file mode 100644 index 00000000..2999d713 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go @@ -0,0 +1,254 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "bytes" + "strconv" +) + +// parseNumber reads the given []byte for a valid JSON number. If it is valid, +// it returns the number of bytes. Parsing logic follows the definition in +// https://tools.ietf.org/html/rfc7159#section-6, and is based off +// encoding/json.isValidNumber function. +func parseNumber(input []byte) (int, bool) { + var n int + + s := input + if len(s) == 0 { + return 0, false + } + + // Optional - + if s[0] == '-' { + s = s[1:] + n++ + if len(s) == 0 { + return 0, false + } + } + + // Digits + switch { + case s[0] == '0': + s = s[1:] + n++ + + case '1' <= s[0] && s[0] <= '9': + s = s[1:] + n++ + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + + default: + return 0, false + } + + // . followed by 1 or more digits. + if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' { + s = s[2:] + n += 2 + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + } + + // e or E followed by an optional - or + and + // 1 or more digits. + if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { + s = s[1:] + n++ + if s[0] == '+' || s[0] == '-' { + s = s[1:] + n++ + if len(s) == 0 { + return 0, false + } + } + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + } + + // Check that next byte is a delimiter or it is at the end. 
+ if n < len(input) && isNotDelim(input[n]) { + return 0, false + } + + return n, true +} + +// numberParts is the result of parsing out a valid JSON number. It contains +// the parts of a number. The parts are used for integer conversion. +type numberParts struct { + neg bool + intp []byte + frac []byte + exp []byte +} + +// parseNumber constructs numberParts from given []byte. The logic here is +// similar to consumeNumber above with the difference of having to construct +// numberParts. The slice fields in numberParts are subslices of the input. +func parseNumberParts(input []byte) (numberParts, bool) { + var neg bool + var intp []byte + var frac []byte + var exp []byte + + s := input + if len(s) == 0 { + return numberParts{}, false + } + + // Optional - + if s[0] == '-' { + neg = true + s = s[1:] + if len(s) == 0 { + return numberParts{}, false + } + } + + // Digits + switch { + case s[0] == '0': + // Skip first 0 and no need to store. + s = s[1:] + + case '1' <= s[0] && s[0] <= '9': + intp = s + n := 1 + s = s[1:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + intp = intp[:n] + + default: + return numberParts{}, false + } + + // . followed by 1 or more digits. + if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' { + frac = s[1:] + n := 1 + s = s[2:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + frac = frac[:n] + } + + // e or E followed by an optional - or + and + // 1 or more digits. + if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { + s = s[1:] + exp = s + n := 0 + if s[0] == '+' || s[0] == '-' { + s = s[1:] + n++ + if len(s) == 0 { + return numberParts{}, false + } + } + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + exp = exp[:n] + } + + return numberParts{ + neg: neg, + intp: intp, + frac: bytes.TrimRight(frac, "0"), // Remove unnecessary 0s to the right. + exp: exp, + }, true +} + +// normalizeToIntString returns an integer string in normal form without the +// E-notation for given numberParts. It will return false if it is not an +// integer or if the exponent exceeds than max/min int value. +func normalizeToIntString(n numberParts) (string, bool) { + intpSize := len(n.intp) + fracSize := len(n.frac) + + if intpSize == 0 && fracSize == 0 { + return "0", true + } + + var exp int + if len(n.exp) > 0 { + i, err := strconv.ParseInt(string(n.exp), 10, 32) + if err != nil { + return "", false + } + exp = int(i) + } + + var num []byte + if exp >= 0 { + // For positive E, shift fraction digits into integer part and also pad + // with zeroes as needed. + + // If there are more digits in fraction than the E value, then the + // number is not an integer. + if fracSize > exp { + return "", false + } + + // Make sure resulting digits are within max value limit to avoid + // unnecessarily constructing a large byte slice that may simply fail + // later on. + const maxDigits = 20 // Max uint64 value has 20 decimal digits. + if intpSize+exp > maxDigits { + return "", false + } + + // Set cap to make a copy of integer part when appended. + num = n.intp[:len(n.intp):len(n.intp)] + num = append(num, n.frac...) + for i := 0; i < exp-fracSize; i++ { + num = append(num, '0') + } + } else { + // For negative E, shift digits in integer part out. + + // If there are fractions, then the number is not an integer. + if fracSize > 0 { + return "", false + } + + // index is where the decimal point will be after adjusting for negative + // exponent. 
+ index := intpSize + exp + if index < 0 { + return "", false + } + + num = n.intp + // If any of the digits being shifted to the right of the decimal point + // is non-zero, then the number is not an integer. + for i := index; i < intpSize; i++ { + if num[i] != '0' { + return "", false + } + } + num = num[:index] + } + + if n.neg { + return "-" + string(num), true + } + return string(num), true +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go new file mode 100644 index 00000000..f7fea7d8 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go @@ -0,0 +1,91 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "strconv" + "unicode" + "unicode/utf16" + "unicode/utf8" + + "google.golang.org/protobuf/internal/strs" +) + +func (d *Decoder) parseString(in []byte) (string, int, error) { + in0 := in + if len(in) == 0 { + return "", 0, ErrUnexpectedEOF + } + if in[0] != '"' { + return "", 0, d.newSyntaxError(d.currPos(), "invalid character %q at start of string", in[0]) + } + in = in[1:] + i := indexNeedEscapeInBytes(in) + in, out := in[i:], in[:i:i] // set cap to prevent mutations + for len(in) > 0 { + switch r, n := utf8.DecodeRune(in); { + case r == utf8.RuneError && n == 1: + return "", 0, d.newSyntaxError(d.currPos(), "invalid UTF-8 in string") + case r < ' ': + return "", 0, d.newSyntaxError(d.currPos(), "invalid character %q in string", r) + case r == '"': + in = in[1:] + n := len(in0) - len(in) + return string(out), n, nil + case r == '\\': + if len(in) < 2 { + return "", 0, ErrUnexpectedEOF + } + switch r := in[1]; r { + case '"', '\\', '/': + in, out = in[2:], append(out, r) + case 'b': + in, out = in[2:], append(out, '\b') + case 'f': + in, out = in[2:], append(out, '\f') + case 'n': + in, out = in[2:], append(out, '\n') + case 'r': + in, out = in[2:], append(out, '\r') + case 't': + in, out = in[2:], append(out, '\t') + case 'u': + if len(in) < 6 { + return "", 0, ErrUnexpectedEOF + } + v, err := strconv.ParseUint(string(in[2:6]), 16, 16) + if err != nil { + return "", 0, d.newSyntaxError(d.currPos(), "invalid escape code %q in string", in[:6]) + } + in = in[6:] + + r := rune(v) + if utf16.IsSurrogate(r) { + if len(in) < 6 { + return "", 0, ErrUnexpectedEOF + } + v, err := strconv.ParseUint(string(in[2:6]), 16, 16) + r = utf16.DecodeRune(r, rune(v)) + if in[0] != '\\' || in[1] != 'u' || + r == unicode.ReplacementChar || err != nil { + return "", 0, d.newSyntaxError(d.currPos(), "invalid escape code %q in string", in[:6]) + } + in = in[6:] + } + out = append(out, string(r)...) + default: + return "", 0, d.newSyntaxError(d.currPos(), "invalid escape code %q in string", in[:2]) + } + default: + i := indexNeedEscapeInBytes(in[n:]) + in, out = in[n+i:], append(out, in[:n+i]...) + } + } + return "", 0, ErrUnexpectedEOF +} + +// indexNeedEscapeInBytes returns the index of the character that needs +// escaping. If no characters need escaping, this returns the input length. 
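Editor's note on the number handling earlier in this file (parseNumberParts and normalizeToIntString): the effect being described is that an E-notation JSON number is accepted for an integer field only when it still denotes a whole number within range. The sketch below demonstrates that acceptance rule using math/big as a stand-in; it is a conceptual analogue, not the vendored algorithm, and asIntString is a hypothetical helper name.

package main

import (
	"fmt"
	"math/big"
)

// asIntString reports the decimal form of s if s denotes an integer,
// e.g. "1.5e3" -> "1500", while "1.5" is rejected.
func asIntString(s string) (string, bool) {
	f, _, err := big.ParseFloat(s, 10, 256, big.ToNearestEven)
	if err != nil || !f.IsInt() {
		return "", false
	}
	i, _ := f.Int(nil)
	return i.String(), true
}

func main() {
	fmt.Println(asIntString("1.5e3")) // 1500 true
	fmt.Println(asIntString("-2E+2")) // -200 true
	fmt.Println(asIntString("1.5"))   //  false
}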
+func indexNeedEscapeInBytes(b []byte) int { return indexNeedEscapeInString(strs.UnsafeString(b)) } diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go new file mode 100644 index 00000000..50578d65 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go @@ -0,0 +1,192 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "bytes" + "fmt" + "strconv" +) + +// Kind represents a token kind expressible in the JSON format. +type Kind uint16 + +const ( + Invalid Kind = (1 << iota) / 2 + EOF + Null + Bool + Number + String + Name + ObjectOpen + ObjectClose + ArrayOpen + ArrayClose + + // comma is only for parsing in between tokens and + // does not need to be exported. + comma +) + +func (k Kind) String() string { + switch k { + case EOF: + return "eof" + case Null: + return "null" + case Bool: + return "bool" + case Number: + return "number" + case String: + return "string" + case ObjectOpen: + return "{" + case ObjectClose: + return "}" + case Name: + return "name" + case ArrayOpen: + return "[" + case ArrayClose: + return "]" + case comma: + return "," + } + return "" +} + +// Token provides a parsed token kind and value. +// +// Values are provided by the difference accessor methods. The accessor methods +// Name, Bool, and ParsedString will panic if called on the wrong kind. There +// are different accessor methods for the Number kind for converting to the +// appropriate Go numeric type and those methods have the ok return value. +type Token struct { + // Token kind. + kind Kind + // pos provides the position of the token in the original input. + pos int + // raw bytes of the serialized token. + // This is a subslice into the original input. + raw []byte + // boo is parsed boolean value. + boo bool + // str is parsed string value. + str string +} + +// Kind returns the token kind. +func (t Token) Kind() Kind { + return t.kind +} + +// RawString returns the read value in string. +func (t Token) RawString() string { + return string(t.raw) +} + +// Pos returns the token position from the input. +func (t Token) Pos() int { + return t.pos +} + +// Name returns the object name if token is Name, else it panics. +func (t Token) Name() string { + if t.kind == Name { + return t.str + } + panic(fmt.Sprintf("Token is not a Name: %v", t.RawString())) +} + +// Bool returns the bool value if token kind is Bool, else it panics. +func (t Token) Bool() bool { + if t.kind == Bool { + return t.boo + } + panic(fmt.Sprintf("Token is not a Bool: %v", t.RawString())) +} + +// ParsedString returns the string value for a JSON string token or the read +// value in string if token is not a string. +func (t Token) ParsedString() string { + if t.kind == String { + return t.str + } + panic(fmt.Sprintf("Token is not a String: %v", t.RawString())) +} + +// Float returns the floating-point number if token kind is Number. +// +// The floating-point precision is specified by the bitSize parameter: 32 for +// float32 or 64 for float64. If bitSize=32, the result still has type float64, +// but it will be convertible to float32 without changing its value. It will +// return false if the number exceeds the floating point limits for given +// bitSize. 
+func (t Token) Float(bitSize int) (float64, bool) { + if t.kind != Number { + return 0, false + } + f, err := strconv.ParseFloat(t.RawString(), bitSize) + if err != nil { + return 0, false + } + return f, true +} + +// Int returns the signed integer number if token is Number. +// +// The given bitSize specifies the integer type that the result must fit into. +// It returns false if the number is not an integer value or if the result +// exceeds the limits for given bitSize. +func (t Token) Int(bitSize int) (int64, bool) { + s, ok := t.getIntStr() + if !ok { + return 0, false + } + n, err := strconv.ParseInt(s, 10, bitSize) + if err != nil { + return 0, false + } + return n, true +} + +// Uint returns the signed integer number if token is Number. +// +// The given bitSize specifies the unsigned integer type that the result must +// fit into. It returns false if the number is not an unsigned integer value +// or if the result exceeds the limits for given bitSize. +func (t Token) Uint(bitSize int) (uint64, bool) { + s, ok := t.getIntStr() + if !ok { + return 0, false + } + n, err := strconv.ParseUint(s, 10, bitSize) + if err != nil { + return 0, false + } + return n, true +} + +func (t Token) getIntStr() (string, bool) { + if t.kind != Number { + return "", false + } + parts, ok := parseNumberParts(t.raw) + if !ok { + return "", false + } + return normalizeToIntString(parts) +} + +// TokenEquals returns true if given Tokens are equal, else false. +func TokenEquals(x, y Token) bool { + return x.kind == y.kind && + x.pos == y.pos && + bytes.Equal(x.raw, y.raw) && + x.boo == y.boo && + x.str == y.str +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go b/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go new file mode 100644 index 00000000..934f2dcb --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go @@ -0,0 +1,278 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "math" + "math/bits" + "strconv" + "strings" + "unicode/utf8" + + "google.golang.org/protobuf/internal/detrand" + "google.golang.org/protobuf/internal/errors" +) + +// kind represents an encoding type. +type kind uint8 + +const ( + _ kind = (1 << iota) / 2 + name + scalar + objectOpen + objectClose + arrayOpen + arrayClose +) + +// Encoder provides methods to write out JSON constructs and values. The user is +// responsible for producing valid sequences of JSON constructs and values. +type Encoder struct { + indent string + lastKind kind + indents []byte + out []byte +} + +// NewEncoder returns an Encoder. +// +// If indent is a non-empty string, it causes every entry for an Array or Object +// to be preceded by the indent and trailed by a newline. +func NewEncoder(buf []byte, indent string) (*Encoder, error) { + e := &Encoder{ + out: buf, + } + if len(indent) > 0 { + if strings.Trim(indent, " \t") != "" { + return nil, errors.New("indent may only be composed of space or tab characters") + } + e.indent = indent + } + return e, nil +} + +// Bytes returns the content of the written bytes. +func (e *Encoder) Bytes() []byte { + return e.out +} + +// WriteNull writes out the null value. +func (e *Encoder) WriteNull() { + e.prepareNext(scalar) + e.out = append(e.out, "null"...) +} + +// WriteBool writes out the given boolean value. 
+func (e *Encoder) WriteBool(b bool) { + e.prepareNext(scalar) + if b { + e.out = append(e.out, "true"...) + } else { + e.out = append(e.out, "false"...) + } +} + +// WriteString writes out the given string in JSON string value. Returns error +// if input string contains invalid UTF-8. +func (e *Encoder) WriteString(s string) error { + e.prepareNext(scalar) + var err error + if e.out, err = appendString(e.out, s); err != nil { + return err + } + return nil +} + +// Sentinel error used for indicating invalid UTF-8. +var errInvalidUTF8 = errors.New("invalid UTF-8") + +func appendString(out []byte, in string) ([]byte, error) { + out = append(out, '"') + i := indexNeedEscapeInString(in) + in, out = in[i:], append(out, in[:i]...) + for len(in) > 0 { + switch r, n := utf8.DecodeRuneInString(in); { + case r == utf8.RuneError && n == 1: + return out, errInvalidUTF8 + case r < ' ' || r == '"' || r == '\\': + out = append(out, '\\') + switch r { + case '"', '\\': + out = append(out, byte(r)) + case '\b': + out = append(out, 'b') + case '\f': + out = append(out, 'f') + case '\n': + out = append(out, 'n') + case '\r': + out = append(out, 'r') + case '\t': + out = append(out, 't') + default: + out = append(out, 'u') + out = append(out, "0000"[1+(bits.Len32(uint32(r))-1)/4:]...) + out = strconv.AppendUint(out, uint64(r), 16) + } + in = in[n:] + default: + i := indexNeedEscapeInString(in[n:]) + in, out = in[n+i:], append(out, in[:n+i]...) + } + } + out = append(out, '"') + return out, nil +} + +// indexNeedEscapeInString returns the index of the character that needs +// escaping. If no characters need escaping, this returns the input length. +func indexNeedEscapeInString(s string) int { + for i, r := range s { + if r < ' ' || r == '\\' || r == '"' || r == utf8.RuneError { + return i + } + } + return len(s) +} + +// WriteFloat writes out the given float and bitSize in JSON number value. +func (e *Encoder) WriteFloat(n float64, bitSize int) { + e.prepareNext(scalar) + e.out = appendFloat(e.out, n, bitSize) +} + +// appendFloat formats given float in bitSize, and appends to the given []byte. +func appendFloat(out []byte, n float64, bitSize int) []byte { + switch { + case math.IsNaN(n): + return append(out, `"NaN"`...) + case math.IsInf(n, +1): + return append(out, `"Infinity"`...) + case math.IsInf(n, -1): + return append(out, `"-Infinity"`...) + } + + // JSON number formatting logic based on encoding/json. + // See floatEncoder.encode for reference. + fmt := byte('f') + if abs := math.Abs(n); abs != 0 { + if bitSize == 64 && (abs < 1e-6 || abs >= 1e21) || + bitSize == 32 && (float32(abs) < 1e-6 || float32(abs) >= 1e21) { + fmt = 'e' + } + } + out = strconv.AppendFloat(out, n, fmt, -1, bitSize) + if fmt == 'e' { + n := len(out) + if n >= 4 && out[n-4] == 'e' && out[n-3] == '-' && out[n-2] == '0' { + out[n-2] = out[n-1] + out = out[:n-1] + } + } + return out +} + +// WriteInt writes out the given signed integer in JSON number value. +func (e *Encoder) WriteInt(n int64) { + e.prepareNext(scalar) + e.out = strconv.AppendInt(e.out, n, 10) +} + +// WriteUint writes out the given unsigned integer in JSON number value. +func (e *Encoder) WriteUint(n uint64) { + e.prepareNext(scalar) + e.out = strconv.AppendUint(e.out, n, 10) +} + +// StartObject writes out the '{' symbol. +func (e *Encoder) StartObject() { + e.prepareNext(objectOpen) + e.out = append(e.out, '{') +} + +// EndObject writes out the '}' symbol. 
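appendFloat above follows the protobuf JSON mapping: NaN and the infinities are emitted as quoted strings, and magnitudes outside roughly [1e-6, 1e21) switch to exponent notation. The sketch below reproduces that core decision for float64 using only the standard library (it omits the trailing exponent-zero trimming done above); formatJSONFloat is an illustrative name.

package main

import (
	"fmt"
	"math"
	"strconv"
)

// formatJSONFloat mirrors the appendFloat logic for float64 values:
// non-finite numbers become quoted strings, everything else uses the
// shortest decimal form, switching to exponent notation outside [1e-6, 1e21).
func formatJSONFloat(n float64) string {
	switch {
	case math.IsNaN(n):
		return `"NaN"`
	case math.IsInf(n, +1):
		return `"Infinity"`
	case math.IsInf(n, -1):
		return `"-Infinity"`
	}
	format := byte('f')
	if abs := math.Abs(n); abs != 0 && (abs < 1e-6 || abs >= 1e21) {
		format = 'e'
	}
	return strconv.FormatFloat(n, format, -1, 64)
}

func main() {
	fmt.Println(formatJSONFloat(math.NaN())) // "NaN"
	fmt.Println(formatJSONFloat(3.14159))    // 3.14159
	fmt.Println(formatJSONFloat(1e22))       // 1e+22
	fmt.Println(formatJSONFloat(0.0000001))  // 1e-07
}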
+func (e *Encoder) EndObject() { + e.prepareNext(objectClose) + e.out = append(e.out, '}') +} + +// WriteName writes out the given string in JSON string value and the name +// separator ':'. Returns error if input string contains invalid UTF-8, which +// should not be likely as protobuf field names should be valid. +func (e *Encoder) WriteName(s string) error { + e.prepareNext(name) + var err error + // Append to output regardless of error. + e.out, err = appendString(e.out, s) + e.out = append(e.out, ':') + return err +} + +// StartArray writes out the '[' symbol. +func (e *Encoder) StartArray() { + e.prepareNext(arrayOpen) + e.out = append(e.out, '[') +} + +// EndArray writes out the ']' symbol. +func (e *Encoder) EndArray() { + e.prepareNext(arrayClose) + e.out = append(e.out, ']') +} + +// prepareNext adds possible comma and indentation for the next value based +// on last type and indent option. It also updates lastKind to next. +func (e *Encoder) prepareNext(next kind) { + defer func() { + // Set lastKind to next. + e.lastKind = next + }() + + if len(e.indent) == 0 { + // Need to add comma on the following condition. + if e.lastKind&(scalar|objectClose|arrayClose) != 0 && + next&(name|scalar|objectOpen|arrayOpen) != 0 { + e.out = append(e.out, ',') + // For single-line output, add a random extra space after each + // comma to make output unstable. + if detrand.Bool() { + e.out = append(e.out, ' ') + } + } + return + } + + switch { + case e.lastKind&(objectOpen|arrayOpen) != 0: + // If next type is NOT closing, add indent and newline. + if next&(objectClose|arrayClose) == 0 { + e.indents = append(e.indents, e.indent...) + e.out = append(e.out, '\n') + e.out = append(e.out, e.indents...) + } + + case e.lastKind&(scalar|objectClose|arrayClose) != 0: + switch { + // If next type is either a value or name, add comma and newline. + case next&(name|scalar|objectOpen|arrayOpen) != 0: + e.out = append(e.out, ',', '\n') + + // If next type is a closing object or array, adjust indentation. + case next&(objectClose|arrayClose) != 0: + e.indents = e.indents[:len(e.indents)-len(e.indent)] + e.out = append(e.out, '\n') + } + e.out = append(e.out, e.indents...) + + case e.lastKind&name != 0: + e.out = append(e.out, ' ') + // For multi-line output, add a random extra space after key: to make + // output unstable. 
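prepareNext above is what lets callers emit tokens in order without managing separators themselves: it compares the last written kind with the next one and inserts the comma (plus indentation in multi-line mode). A compressed standalone sketch of just the single-line comma rule, with illustrative names and no string escaping:

package main

import "fmt"

// miniEncoder sketches the separator logic of prepareNext, single-line mode
// only (no indentation, no detrand spacing): a comma is emitted whenever the
// previous token ended a value and the next token starts one.
type miniEncoder struct {
	out      []byte
	lastKind uint8
}

const (
	kName uint8 = 1 << iota
	kScalar
	kOpen
	kClose
)

func (e *miniEncoder) prepare(next uint8) {
	if e.lastKind&(kScalar|kClose) != 0 && next&(kName|kScalar|kOpen) != 0 {
		e.out = append(e.out, ',')
	}
	e.lastKind = next
}

// writeName and writeString skip the escaping that appendString does; this
// sketch only demonstrates separator placement.
func (e *miniEncoder) writeName(s string) {
	e.prepare(kName)
	e.out = append(append(append(e.out, '"'), s...), '"', ':')
}

func (e *miniEncoder) writeString(s string) {
	e.prepare(kScalar)
	e.out = append(append(append(e.out, '"'), s...), '"')
}

func (e *miniEncoder) startObject() { e.prepare(kOpen); e.out = append(e.out, '{') }
func (e *miniEncoder) endObject()   { e.prepare(kClose); e.out = append(e.out, '}') }

func main() {
	var e miniEncoder
	e.startObject()
	e.writeName("a")
	e.writeString("1")
	e.writeName("b") // the comma before "b" is added by prepare
	e.writeString("2")
	e.endObject()
	fmt.Println(string(e.out)) // {"a":"1","b":"2"}
}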
+ if detrand.Bool() { + e.out = append(e.out, ' ') + } + } +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go index 373d2083..7e87c760 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go @@ -32,6 +32,7 @@ var byteType = reflect.TypeOf(byte(0)) func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescriptors) protoreflect.FieldDescriptor { f := new(filedesc.Field) f.L0.ParentFile = filedesc.SurrogateProto2 + f.L1.EditionFeatures = f.L0.ParentFile.L1.EditionFeatures for len(tag) > 0 { i := strings.IndexByte(tag, ',') if i < 0 { @@ -107,8 +108,7 @@ func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescri f.L1.StringName.InitJSON(jsonName) } case s == "packed": - f.L1.HasPacked = true - f.L1.IsPacked = true + f.L1.EditionFeatures.IsPacked = true case strings.HasPrefix(s, "weak="): f.L1.IsWeak = true f.L1.Message = filedesc.PlaceholderMessage(protoreflect.FullName(s[len("weak="):])) diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go index 87853e78..099b2bf4 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go @@ -601,7 +601,7 @@ func (d *Decoder) consumeToken(kind Kind, size int, attrs uint8) Token { // newSyntaxError returns a syntax error with line and column information for // current position. -func (d *Decoder) newSyntaxError(f string, x ...interface{}) error { +func (d *Decoder) newSyntaxError(f string, x ...any) error { e := errors.New(f, x...) line, column := d.Position(len(d.orig) - len(d.in)) return errors.New("syntax error (line %d:%d): %v", line, column, e) diff --git a/vendor/google.golang.org/protobuf/internal/errors/errors.go b/vendor/google.golang.org/protobuf/internal/errors/errors.go index 20c17b35..c2d6bd52 100644 --- a/vendor/google.golang.org/protobuf/internal/errors/errors.go +++ b/vendor/google.golang.org/protobuf/internal/errors/errors.go @@ -17,7 +17,7 @@ var Error = errors.New("protobuf error") // New formats a string according to the format specifier and arguments and // returns an error that has a "proto" prefix. -func New(f string, x ...interface{}) error { +func New(f string, x ...any) error { return &prefixError{s: format(f, x...)} } @@ -43,7 +43,7 @@ func (e *prefixError) Unwrap() error { // Wrap returns an error that has a "proto" prefix, the formatted string described // by the format specifier and arguments, and a suffix of err. The error wraps err. 
-func Wrap(err error, f string, x ...interface{}) error { +func Wrap(err error, f string, x ...any) error { return &wrapError{ s: format(f, x...), err: err, @@ -67,7 +67,7 @@ func (e *wrapError) Is(target error) bool { return target == Error } -func format(f string, x ...interface{}) string { +func format(f string, x ...any) string { // avoid "proto: " prefix when chaining for i := 0; i < len(x); i++ { switch e := x[i].(type) { @@ -87,3 +87,18 @@ func InvalidUTF8(name string) error { func RequiredNotSet(name string) error { return New("required field %v not set", name) } + +type SizeMismatchError struct { + Calculated, Measured int +} + +func (e *SizeMismatchError) Error() string { + return fmt.Sprintf("size mismatch (see https://github.com/golang/protobuf/issues/1609): calculated=%d, measured=%d", e.Calculated, e.Measured) +} + +func MismatchedSizeCalculation(calculated, measured int) error { + return &SizeMismatchError{ + Calculated: calculated, + Measured: measured, + } +} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index 7c3689ba..df53ff40 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -7,6 +7,7 @@ package filedesc import ( "bytes" "fmt" + "strings" "sync" "sync/atomic" @@ -21,11 +22,26 @@ import ( "google.golang.org/protobuf/reflect/protoregistry" ) +// Edition is an Enum for proto2.Edition +type Edition int32 + +// These values align with the value of Enum in descriptor.proto which allows +// direct conversion between the proto enum and this enum. +const ( + EditionUnknown Edition = 0 + EditionProto2 Edition = 998 + EditionProto3 Edition = 999 + Edition2023 Edition = 1000 + EditionUnsupported Edition = 100000 +) + // The types in this file may have a suffix: // • L0: Contains fields common to all descriptors (except File) and // must be initialized up front. // • L1: Contains fields specific to a descriptor and -// must be initialized up front. +// must be initialized up front. If the associated proto uses Editions, the +// Editions features must always be resolved. If not explicitly set, the +// appropriate default must be resolved and set. // • L2: Contains fields that are lazily initialized when constructing // from the raw file descriptor. When constructing as a literal, the L2 // fields must be initialized up front. 
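The Edition values above (998, 999, 1000) are deliberately identical to the google.protobuf.Edition enum numbers, so the internal type and the generated descriptorpb enum convert with a plain integer cast. A small illustration, assuming the vendored descriptorpb in this patch exposes the Edition enum (its values are mirrored into genid later in this diff); the editionProto3 constant below restates the internal value only for the comparison.

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/descriptorpb"
)

// filedesc is internal and cannot be imported here, so its EditionProto3
// value (999) is restated as a local constant for illustration.
const editionProto3 = 999

func main() {
	e := descriptorpb.Edition_EDITION_PROTO3
	fmt.Println(e)                         // EDITION_PROTO3
	fmt.Println(int32(e) == editionProto3) // true: the numeric values align
}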
@@ -44,6 +60,7 @@ type ( } FileL1 struct { Syntax protoreflect.Syntax + Edition Edition // Only used if Syntax == Editions Path string Package protoreflect.FullName @@ -51,21 +68,53 @@ type ( Messages Messages Extensions Extensions Services Services + + EditionFeatures EditionFeatures } FileL2 struct { Options func() protoreflect.ProtoMessage Imports FileImports Locations SourceLocations } + + EditionFeatures struct { + // IsFieldPresence is true if field_presence is EXPLICIT + // https://protobuf.dev/editions/features/#field_presence + IsFieldPresence bool + // IsFieldPresence is true if field_presence is LEGACY_REQUIRED + // https://protobuf.dev/editions/features/#field_presence + IsLegacyRequired bool + // IsOpenEnum is true if enum_type is OPEN + // https://protobuf.dev/editions/features/#enum_type + IsOpenEnum bool + // IsPacked is true if repeated_field_encoding is PACKED + // https://protobuf.dev/editions/features/#repeated_field_encoding + IsPacked bool + // IsUTF8Validated is true if utf_validation is VERIFY + // https://protobuf.dev/editions/features/#utf8_validation + IsUTF8Validated bool + // IsDelimitedEncoded is true if message_encoding is DELIMITED + // https://protobuf.dev/editions/features/#message_encoding + IsDelimitedEncoded bool + // IsJSONCompliant is true if json_format is ALLOW + // https://protobuf.dev/editions/features/#json_format + IsJSONCompliant bool + // GenerateLegacyUnmarshalJSON determines if the plugin generates the + // UnmarshalJSON([]byte) error method for enums. + GenerateLegacyUnmarshalJSON bool + } ) func (fd *File) ParentFile() protoreflect.FileDescriptor { return fd } func (fd *File) Parent() protoreflect.Descriptor { return nil } func (fd *File) Index() int { return 0 } func (fd *File) Syntax() protoreflect.Syntax { return fd.L1.Syntax } -func (fd *File) Name() protoreflect.Name { return fd.L1.Package.Name() } -func (fd *File) FullName() protoreflect.FullName { return fd.L1.Package } -func (fd *File) IsPlaceholder() bool { return false } + +// Not exported and just used to reconstruct the original FileDescriptor proto +func (fd *File) Edition() int32 { return int32(fd.L1.Edition) } +func (fd *File) Name() protoreflect.Name { return fd.L1.Package.Name() } +func (fd *File) FullName() protoreflect.FullName { return fd.L1.Package } +func (fd *File) IsPlaceholder() bool { return false } func (fd *File) Options() protoreflect.ProtoMessage { if f := fd.lazyInit().Options; f != nil { return f() @@ -117,6 +166,8 @@ type ( } EnumL1 struct { eagerValues bool // controls whether EnumL2.Values is already populated + + EditionFeatures EditionFeatures } EnumL2 struct { Options func() protoreflect.ProtoMessage @@ -155,6 +206,9 @@ func (ed *Enum) lazyInit() *EnumL2 { ed.L0.ParentFile.lazyInit() // implicitly initializes L2 return ed.L2 } +func (ed *Enum) IsClosed() bool { + return !ed.L1.EditionFeatures.IsOpenEnum +} func (ed *EnumValue) Options() protoreflect.ProtoMessage { if f := ed.L1.Options; f != nil { @@ -178,6 +232,8 @@ type ( Extensions Extensions IsMapEntry bool // promoted from google.protobuf.MessageOptions IsMessageSet bool // promoted from google.protobuf.MessageOptions + + EditionFeatures EditionFeatures } MessageL2 struct { Options func() protoreflect.ProtoMessage @@ -202,14 +258,12 @@ type ( StringName stringName IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto IsWeak bool // promoted from google.protobuf.FieldOptions - HasPacked bool // promoted from google.protobuf.FieldOptions - IsPacked bool // promoted from 
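Enum.IsClosed above simply negates the resolved enum_type feature: proto2 enums resolve to closed, proto3 enums to open. A short usage sketch, assuming the protoreflect.EnumDescriptor interface in this vendored version declares IsClosed (this patch adds the corresponding implementations); descriptor.proto is a proto2 file, so its enums report closed.

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	// Any generated enum value exposes its descriptor via Descriptor().
	ed := descriptorpb.FieldDescriptorProto_TYPE_DOUBLE.Descriptor()
	fmt.Println(ed.FullName()) // google.protobuf.FieldDescriptorProto.Type
	fmt.Println(ed.IsClosed()) // true: defined in a proto2 file
}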
google.protobuf.FieldOptions - HasEnforceUTF8 bool // promoted from google.protobuf.FieldOptions - EnforceUTF8 bool // promoted from google.protobuf.FieldOptions Default defaultValue ContainingOneof protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields Enum protoreflect.EnumDescriptor Message protoreflect.MessageDescriptor + + EditionFeatures EditionFeatures } Oneof struct { @@ -219,6 +273,8 @@ type ( OneofL1 struct { Options func() protoreflect.ProtoMessage Fields OneofFields // must be consistent with Message.Fields.ContainingOneof + + EditionFeatures EditionFeatures } ) @@ -268,25 +324,30 @@ func (fd *Field) Options() protoreflect.ProtoMessage { } func (fd *Field) Number() protoreflect.FieldNumber { return fd.L1.Number } func (fd *Field) Cardinality() protoreflect.Cardinality { return fd.L1.Cardinality } -func (fd *Field) Kind() protoreflect.Kind { return fd.L1.Kind } -func (fd *Field) HasJSONName() bool { return fd.L1.StringName.hasJSON } -func (fd *Field) JSONName() string { return fd.L1.StringName.getJSON(fd) } -func (fd *Field) TextName() string { return fd.L1.StringName.getText(fd) } +func (fd *Field) Kind() protoreflect.Kind { + return fd.L1.Kind +} +func (fd *Field) HasJSONName() bool { return fd.L1.StringName.hasJSON } +func (fd *Field) JSONName() string { return fd.L1.StringName.getJSON(fd) } +func (fd *Field) TextName() string { return fd.L1.StringName.getText(fd) } func (fd *Field) HasPresence() bool { - return fd.L1.Cardinality != protoreflect.Repeated && (fd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 || fd.L1.Message != nil || fd.L1.ContainingOneof != nil) + if fd.L1.Cardinality == protoreflect.Repeated { + return false + } + return fd.IsExtension() || fd.L1.EditionFeatures.IsFieldPresence || fd.L1.Message != nil || fd.L1.ContainingOneof != nil } func (fd *Field) HasOptionalKeyword() bool { return (fd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 && fd.L1.Cardinality == protoreflect.Optional && fd.L1.ContainingOneof == nil) || fd.L1.IsProto3Optional } func (fd *Field) IsPacked() bool { - if !fd.L1.HasPacked && fd.L0.ParentFile.L1.Syntax != protoreflect.Proto2 && fd.L1.Cardinality == protoreflect.Repeated { - switch fd.L1.Kind { - case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind: - default: - return true - } + if fd.L1.Cardinality != protoreflect.Repeated { + return false } - return fd.L1.IsPacked + switch fd.L1.Kind { + case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind: + return false + } + return fd.L1.EditionFeatures.IsPacked } func (fd *Field) IsExtension() bool { return false } func (fd *Field) IsWeak() bool { return fd.L1.IsWeak } @@ -322,6 +383,10 @@ func (fd *Field) Message() protoreflect.MessageDescriptor { } return fd.L1.Message } +func (fd *Field) IsMapEntry() bool { + parent, ok := fd.L0.Parent.(protoreflect.MessageDescriptor) + return ok && parent.IsMapEntry() +} func (fd *Field) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) } func (fd *Field) ProtoType(protoreflect.FieldDescriptor) {} @@ -333,10 +398,7 @@ func (fd *Field) ProtoType(protoreflect.FieldDescriptor) {} // WARNING: This method is exempt from the compatibility promise and may be // removed in the future without warning. 
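The rewritten Field.IsPacked above drops the old HasPacked/IsPacked pair: packedness is impossible for non-repeated fields and for length-delimited kinds, and otherwise follows the resolved repeated_field_encoding feature. A standalone restatement of that rule (isPacked is an illustrative name):

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
)

// isPacked mirrors the decision in Field.IsPacked: only repeated fields of
// non-length-delimited kinds can be packed, and the final answer comes from
// the resolved repeated_field_encoding feature.
func isPacked(card protoreflect.Cardinality, kind protoreflect.Kind, featurePacked bool) bool {
	if card != protoreflect.Repeated {
		return false
	}
	switch kind {
	case protoreflect.StringKind, protoreflect.BytesKind,
		protoreflect.MessageKind, protoreflect.GroupKind:
		return false
	}
	return featurePacked
}

func main() {
	fmt.Println(isPacked(protoreflect.Repeated, protoreflect.Int32Kind, true))  // true
	fmt.Println(isPacked(protoreflect.Repeated, protoreflect.StringKind, true)) // false: length-delimited
	fmt.Println(isPacked(protoreflect.Optional, protoreflect.Int32Kind, true))  // false: not repeated
}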
func (fd *Field) EnforceUTF8() bool { - if fd.L1.HasEnforceUTF8 { - return fd.L1.EnforceUTF8 - } - return fd.L0.ParentFile.L1.Syntax == protoreflect.Proto3 + return fd.L1.EditionFeatures.IsUTF8Validated } func (od *Oneof) IsSynthetic() bool { @@ -359,16 +421,16 @@ type ( L2 *ExtensionL2 // protected by fileDesc.once } ExtensionL1 struct { - Number protoreflect.FieldNumber - Extendee protoreflect.MessageDescriptor - Cardinality protoreflect.Cardinality - Kind protoreflect.Kind + Number protoreflect.FieldNumber + Extendee protoreflect.MessageDescriptor + Cardinality protoreflect.Cardinality + Kind protoreflect.Kind + EditionFeatures EditionFeatures } ExtensionL2 struct { Options func() protoreflect.ProtoMessage StringName stringName IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto - IsPacked bool // promoted from google.protobuf.FieldOptions Default defaultValue Enum protoreflect.EnumDescriptor Message protoreflect.MessageDescriptor @@ -391,7 +453,16 @@ func (xd *Extension) HasPresence() bool { return xd.L1.Cardi func (xd *Extension) HasOptionalKeyword() bool { return (xd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 && xd.L1.Cardinality == protoreflect.Optional) || xd.lazyInit().IsProto3Optional } -func (xd *Extension) IsPacked() bool { return xd.lazyInit().IsPacked } +func (xd *Extension) IsPacked() bool { + if xd.L1.Cardinality != protoreflect.Repeated { + return false + } + switch xd.L1.Kind { + case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind: + return false + } + return xd.L1.EditionFeatures.IsPacked +} func (xd *Extension) IsExtension() bool { return true } func (xd *Extension) IsWeak() bool { return false } func (xd *Extension) IsList() bool { return xd.Cardinality() == protoreflect.Repeated } @@ -472,8 +543,9 @@ func (md *Method) ProtoInternal(pragma.DoNotImplement) {} // Surrogate files are can be used to create standalone descriptors // where the syntax is only information derived from the parent file. var ( - SurrogateProto2 = &File{L1: FileL1{Syntax: protoreflect.Proto2}, L2: &FileL2{}} - SurrogateProto3 = &File{L1: FileL1{Syntax: protoreflect.Proto3}, L2: &FileL2{}} + SurrogateProto2 = &File{L1: FileL1{Syntax: protoreflect.Proto2}, L2: &FileL2{}} + SurrogateProto3 = &File{L1: FileL1{Syntax: protoreflect.Proto3}, L2: &FileL2{}} + SurrogateEdition2023 = &File{L1: FileL1{Syntax: protoreflect.Editions, Edition: Edition2023}, L2: &FileL2{}} ) type ( @@ -515,6 +587,34 @@ func (s *stringName) InitJSON(name string) { s.nameJSON = name } +// Returns true if this field is structured like the synthetic field of a proto2 +// group. This allows us to expand our treatment of delimited fields without +// breaking proto2 files that have been upgraded to editions. +func isGroupLike(fd protoreflect.FieldDescriptor) bool { + // Groups are always group types. + if fd.Kind() != protoreflect.GroupKind { + return false + } + + // Group fields are always the lowercase type name. + if strings.ToLower(string(fd.Message().Name())) != string(fd.Name()) { + return false + } + + // Groups could only be defined in the same file they're used. + if fd.Message().ParentFile() != fd.ParentFile() { + return false + } + + // Group messages are always defined in the same scope as the field. File + // level extensions will compare NULL == NULL here, which is why the file + // comparison above is necessary to ensure both come from the same file. 
+ if fd.IsExtension() { + return fd.Parent() == fd.Message().Parent() + } + return fd.ContainingMessage() == fd.Message().Parent() +} + func (s *stringName) lazyInit(fd protoreflect.FieldDescriptor) *stringName { s.once.Do(func() { if fd.IsExtension() { @@ -535,7 +635,7 @@ func (s *stringName) lazyInit(fd protoreflect.FieldDescriptor) *stringName { // Format the text name. s.nameText = string(fd.Name()) - if fd.Kind() == protoreflect.GroupKind { + if isGroupLike(fd) { s.nameText = string(fd.Message().Name()) } } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go index 4a1584c9..8a57d60b 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go @@ -5,6 +5,7 @@ package filedesc import ( + "fmt" "sync" "google.golang.org/protobuf/encoding/protowire" @@ -98,6 +99,7 @@ func (fd *File) unmarshalSeed(b []byte) { var prevField protoreflect.FieldNumber var numEnums, numMessages, numExtensions, numServices int var posEnums, posMessages, posExtensions, posServices int + var options []byte b0 := b for len(b) > 0 { num, typ, n := protowire.ConsumeTag(b) @@ -111,8 +113,12 @@ func (fd *File) unmarshalSeed(b []byte) { switch string(v) { case "proto2": fd.L1.Syntax = protoreflect.Proto2 + fd.L1.Edition = EditionProto2 case "proto3": fd.L1.Syntax = protoreflect.Proto3 + fd.L1.Edition = EditionProto3 + case "editions": + fd.L1.Syntax = protoreflect.Editions default: panic("invalid syntax") } @@ -120,6 +126,8 @@ func (fd *File) unmarshalSeed(b []byte) { fd.L1.Path = sb.MakeString(v) case genid.FileDescriptorProto_Package_field_number: fd.L1.Package = protoreflect.FullName(sb.MakeString(v)) + case genid.FileDescriptorProto_Options_field_number: + options = v case genid.FileDescriptorProto_EnumType_field_number: if prevField != genid.FileDescriptorProto_EnumType_field_number { if numEnums > 0 { @@ -154,6 +162,13 @@ func (fd *File) unmarshalSeed(b []byte) { numServices++ } prevField = num + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FileDescriptorProto_Edition_field_number: + fd.L1.Edition = Edition(v) + } default: m := protowire.ConsumeFieldValue(num, typ, b) b = b[m:] @@ -164,6 +179,14 @@ func (fd *File) unmarshalSeed(b []byte) { // If syntax is missing, it is assumed to be proto2. 
if fd.L1.Syntax == 0 { fd.L1.Syntax = protoreflect.Proto2 + fd.L1.Edition = EditionProto2 + } + + fd.L1.EditionFeatures = getFeaturesFor(fd.L1.Edition) + + // Parse editions features from options if any + if options != nil { + fd.unmarshalSeedOptions(options) } // Must allocate all declarations before parsing each descriptor type @@ -219,10 +242,33 @@ func (fd *File) unmarshalSeed(b []byte) { } } +func (fd *File) unmarshalSeedOptions(b []byte) { + for b := b; len(b) > 0; { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FileOptions_Features_field_number: + if fd.Syntax() != protoreflect.Editions { + panic(fmt.Sprintf("invalid descriptor: using edition features in a proto with syntax %s", fd.Syntax())) + } + fd.L1.EditionFeatures = unmarshalFeatureSet(v, fd.L1.EditionFeatures) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } +} + func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { ed.L0.ParentFile = pf ed.L0.Parent = pd ed.L0.Index = i + ed.L1.EditionFeatures = featuresFromParentDesc(ed.Parent()) var numValues int for b := b; len(b) > 0; { @@ -275,6 +321,7 @@ func (md *Message) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protor md.L0.ParentFile = pf md.L0.Parent = pd md.L0.Index = i + md.L1.EditionFeatures = featuresFromParentDesc(md.Parent()) var prevField protoreflect.FieldNumber var numEnums, numMessages, numExtensions int @@ -380,6 +427,13 @@ func (md *Message) unmarshalSeedOptions(b []byte) { case genid.MessageOptions_MessageSetWireFormat_field_number: md.L1.IsMessageSet = protowire.DecodeBool(v) } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.MessageOptions_Features_field_number: + md.L1.EditionFeatures = unmarshalFeatureSet(v, md.L1.EditionFeatures) + } default: m := protowire.ConsumeFieldValue(num, typ, b) b = b[m:] @@ -391,6 +445,7 @@ func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd prot xd.L0.ParentFile = pf xd.L0.Parent = pd xd.L0.Index = i + xd.L1.EditionFeatures = featuresFromParentDesc(pd) for len(b) > 0 { num, typ, n := protowire.ConsumeTag(b) @@ -415,6 +470,38 @@ func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd prot xd.L0.FullName = appendFullName(sb, pd.FullName(), v) case genid.FieldDescriptorProto_Extendee_field_number: xd.L1.Extendee = PlaceholderMessage(makeFullName(sb, v)) + case genid.FieldDescriptorProto_Options_field_number: + xd.unmarshalOptions(v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + + if xd.L1.Kind == protoreflect.MessageKind && xd.L1.EditionFeatures.IsDelimitedEncoded { + xd.L1.Kind = protoreflect.GroupKind + } +} + +func (xd *Extension) unmarshalOptions(b []byte) { + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FieldOptions_Packed_field_number: + xd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FieldOptions_Features_field_number: + xd.L1.EditionFeatures = unmarshalFeatureSet(v, xd.L1.EditionFeatures) } default: m := protowire.ConsumeFieldValue(num, typ, b) @@ -447,7 +534,7 @@ func (sd *Service) unmarshalSeed(b []byte, sb 
*strs.Builder, pf *File, pd protor } var nameBuilderPool = sync.Pool{ - New: func() interface{} { return new(strs.Builder) }, + New: func() any { return new(strs.Builder) }, } func getBuilder() *strs.Builder { diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go index 736a19a7..e56c91a8 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go @@ -45,6 +45,11 @@ func (file *File) resolveMessages() { case protoreflect.MessageKind, protoreflect.GroupKind: fd.L1.Message = file.resolveMessageDependency(fd.L1.Message, listFieldDeps, depIdx) depIdx++ + if fd.L1.Kind == protoreflect.GroupKind && (fd.IsMap() || fd.IsMapEntry()) { + // A map field might inherit delimited encoding from a file-wide default feature. + // But maps never actually use delimited encoding. (At least for now...) + fd.L1.Kind = protoreflect.MessageKind + } } // Default is resolved here since it depends on Enum being resolved. @@ -414,6 +419,7 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoref fd.L0.ParentFile = pf fd.L0.Parent = pd fd.L0.Index = i + fd.L1.EditionFeatures = featuresFromParentDesc(fd.Parent()) var rawTypeName []byte var rawOptions []byte @@ -465,6 +471,12 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoref b = b[m:] } } + if fd.L1.Kind == protoreflect.MessageKind && fd.L1.EditionFeatures.IsDelimitedEncoded { + fd.L1.Kind = protoreflect.GroupKind + } + if fd.L1.EditionFeatures.IsLegacyRequired { + fd.L1.Cardinality = protoreflect.Required + } if rawTypeName != nil { name := makeFullName(sb, rawTypeName) switch fd.L1.Kind { @@ -489,13 +501,18 @@ func (fd *Field) unmarshalOptions(b []byte) { b = b[m:] switch num { case genid.FieldOptions_Packed_field_number: - fd.L1.HasPacked = true - fd.L1.IsPacked = protowire.DecodeBool(v) + fd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v) case genid.FieldOptions_Weak_field_number: fd.L1.IsWeak = protowire.DecodeBool(v) case FieldOptions_EnforceUTF8: - fd.L1.HasEnforceUTF8 = true - fd.L1.EnforceUTF8 = protowire.DecodeBool(v) + fd.L1.EditionFeatures.IsUTF8Validated = protowire.DecodeBool(v) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FieldOptions_Features_field_number: + fd.L1.EditionFeatures = unmarshalFeatureSet(v, fd.L1.EditionFeatures) } default: m := protowire.ConsumeFieldValue(num, typ, b) @@ -557,7 +574,6 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { case genid.FieldDescriptorProto_TypeName_field_number: rawTypeName = v case genid.FieldDescriptorProto_Options_field_number: - xd.unmarshalOptions(v) rawOptions = appendOptions(rawOptions, v) } default: @@ -577,25 +593,6 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { xd.L2.Options = xd.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Field, rawOptions) } -func (xd *Extension) unmarshalOptions(b []byte) { - for len(b) > 0 { - num, typ, n := protowire.ConsumeTag(b) - b = b[n:] - switch typ { - case protowire.VarintType: - v, m := protowire.ConsumeVarint(b) - b = b[m:] - switch num { - case genid.FieldOptions_Packed_field_number: - xd.L2.IsPacked = protowire.DecodeBool(v) - } - default: - m := protowire.ConsumeFieldValue(num, typ, b) - b = b[m:] - } - } -} - func (sd *Service) unmarshalFull(b []byte, sb *strs.Builder) { var rawMethods [][]byte var rawOptions []byte diff 
--git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go index 30db19fd..f4107c05 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go @@ -8,6 +8,7 @@ package filedesc import ( "fmt" + "strings" "sync" "google.golang.org/protobuf/internal/descfmt" @@ -198,6 +199,16 @@ func (p *Fields) lazyInit() *Fields { if _, ok := p.byText[d.TextName()]; !ok { p.byText[d.TextName()] = d } + if isGroupLike(d) { + lowerJSONName := strings.ToLower(d.JSONName()) + if _, ok := p.byJSON[lowerJSONName]; !ok { + p.byJSON[lowerJSONName] = d + } + lowerTextName := strings.ToLower(d.TextName()) + if _, ok := p.byText[lowerTextName]; !ok { + p.byText[lowerTextName] = d + } + } if _, ok := p.byNum[d.Number()]; !ok { p.byNum[d.Number()] = d } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go new file mode 100644 index 00000000..11f5f356 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go @@ -0,0 +1,156 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package filedesc + +import ( + "fmt" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/editiondefaults" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/reflect/protoreflect" +) + +var defaultsCache = make(map[Edition]EditionFeatures) +var defaultsKeys = []Edition{} + +func init() { + unmarshalEditionDefaults(editiondefaults.Defaults) + SurrogateProto2.L1.EditionFeatures = getFeaturesFor(EditionProto2) + SurrogateProto3.L1.EditionFeatures = getFeaturesFor(EditionProto3) + SurrogateEdition2023.L1.EditionFeatures = getFeaturesFor(Edition2023) +} + +func unmarshalGoFeature(b []byte, parent EditionFeatures) EditionFeatures { + for len(b) > 0 { + num, _, n := protowire.ConsumeTag(b) + b = b[n:] + switch num { + case genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + parent.GenerateLegacyUnmarshalJSON = protowire.DecodeBool(v) + default: + panic(fmt.Sprintf("unkown field number %d while unmarshalling GoFeatures", num)) + } + } + return parent +} + +func unmarshalFeatureSet(b []byte, parent EditionFeatures) EditionFeatures { + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FeatureSet_FieldPresence_field_number: + parent.IsFieldPresence = v == genid.FeatureSet_EXPLICIT_enum_value || v == genid.FeatureSet_LEGACY_REQUIRED_enum_value + parent.IsLegacyRequired = v == genid.FeatureSet_LEGACY_REQUIRED_enum_value + case genid.FeatureSet_EnumType_field_number: + parent.IsOpenEnum = v == genid.FeatureSet_OPEN_enum_value + case genid.FeatureSet_RepeatedFieldEncoding_field_number: + parent.IsPacked = v == genid.FeatureSet_PACKED_enum_value + case genid.FeatureSet_Utf8Validation_field_number: + parent.IsUTF8Validated = v == genid.FeatureSet_VERIFY_enum_value + case genid.FeatureSet_MessageEncoding_field_number: + parent.IsDelimitedEncoded = v == genid.FeatureSet_DELIMITED_enum_value + case genid.FeatureSet_JsonFormat_field_number: + parent.IsJSONCompliant = v == genid.FeatureSet_ALLOW_enum_value + 
default: + panic(fmt.Sprintf("unkown field number %d while unmarshalling FeatureSet", num)) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number: + parent = unmarshalGoFeature(v, parent) + } + } + } + + return parent +} + +func featuresFromParentDesc(parentDesc protoreflect.Descriptor) EditionFeatures { + var parentFS EditionFeatures + switch p := parentDesc.(type) { + case *File: + parentFS = p.L1.EditionFeatures + case *Message: + parentFS = p.L1.EditionFeatures + default: + panic(fmt.Sprintf("unknown parent type %T", parentDesc)) + } + return parentFS +} + +func unmarshalEditionDefault(b []byte) { + var ed Edition + var fs EditionFeatures + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_number: + ed = Edition(v) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FeatureSetDefaults_FeatureSetEditionDefault_FixedFeatures_field_number: + fs = unmarshalFeatureSet(v, fs) + case genid.FeatureSetDefaults_FeatureSetEditionDefault_OverridableFeatures_field_number: + fs = unmarshalFeatureSet(v, fs) + } + } + } + defaultsCache[ed] = fs + defaultsKeys = append(defaultsKeys, ed) +} + +func unmarshalEditionDefaults(b []byte) { + for len(b) > 0 { + num, _, n := protowire.ConsumeTag(b) + b = b[n:] + switch num { + case genid.FeatureSetDefaults_Defaults_field_number: + def, m := protowire.ConsumeBytes(b) + b = b[m:] + unmarshalEditionDefault(def) + case genid.FeatureSetDefaults_MinimumEdition_field_number, + genid.FeatureSetDefaults_MaximumEdition_field_number: + // We don't care about the minimum and maximum editions. If the + // edition we are looking for later on is not in the cache we know + // it is outside of the range between minimum and maximum edition. 
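unmarshalFeatureSet and unmarshalEditionDefault above both walk the raw bytes with protowire rather than generated message types, reacting only to the field numbers they care about. The self-contained sketch below builds a one-field FeatureSet-shaped buffer by hand and walks it the same way; field 1 with value 1 corresponds to field_presence = EXPLICIT in descriptor.proto.

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	// Encode {field_presence: EXPLICIT} by hand: tag for field 1 (varint),
	// followed by the enum value 1.
	b := protowire.AppendTag(nil, 1, protowire.VarintType)
	b = protowire.AppendVarint(b, 1)

	// The same tag/value loop used by the unmarshal helpers above.
	for len(b) > 0 {
		num, typ, n := protowire.ConsumeTag(b)
		b = b[n:]
		switch typ {
		case protowire.VarintType:
			v, m := protowire.ConsumeVarint(b)
			b = b[m:]
			if num == 1 {
				fmt.Println("field_presence is EXPLICIT:", v == 1) // true
			}
		default:
			// Skip anything we do not recognize (unreachable for this buffer).
			b = b[protowire.ConsumeFieldValue(num, typ, b):]
		}
	}
}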
+ _, m := protowire.ConsumeVarint(b) + b = b[m:] + default: + panic(fmt.Sprintf("unkown field number %d while unmarshalling EditionDefault", num)) + } + } +} + +func getFeaturesFor(ed Edition) EditionFeatures { + match := EditionUnknown + for _, key := range defaultsKeys { + if key > ed { + break + } + match = key + } + if match == EditionUnknown { + panic(fmt.Sprintf("unsupported edition: %v", ed)) + } + return defaultsCache[match] +} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go b/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go index 28240ebc..bfb3b841 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go @@ -63,6 +63,7 @@ func (e PlaceholderEnum) Options() protoreflect.ProtoMessage { return des func (e PlaceholderEnum) Values() protoreflect.EnumValueDescriptors { return emptyEnumValues } func (e PlaceholderEnum) ReservedNames() protoreflect.Names { return emptyNames } func (e PlaceholderEnum) ReservedRanges() protoreflect.EnumRanges { return emptyEnumRanges } +func (e PlaceholderEnum) IsClosed() bool { return false } func (e PlaceholderEnum) ProtoType(protoreflect.EnumDescriptor) { return } func (e PlaceholderEnum) ProtoInternal(pragma.DoNotImplement) { return } diff --git a/vendor/google.golang.org/protobuf/internal/filetype/build.go b/vendor/google.golang.org/protobuf/internal/filetype/build.go index f0e38c4e..ba83fea4 100644 --- a/vendor/google.golang.org/protobuf/internal/filetype/build.go +++ b/vendor/google.golang.org/protobuf/internal/filetype/build.go @@ -68,7 +68,7 @@ type Builder struct { // and for input and output messages referenced by service methods. // Dependencies must come after declarations, but the ordering of // dependencies themselves is unspecified. - GoTypes []interface{} + GoTypes []any // DependencyIndexes is an ordered list of indexes into GoTypes for the // dependencies of messages, extensions, or services. @@ -268,7 +268,7 @@ func (x depIdxs) Get(i, j int32) int32 { type ( resolverByIndex struct { - goTypes []interface{} + goTypes []any depIdxs depIdxs fileRegistry } diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go index 136f1b21..f30ab6b5 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go @@ -12,6 +12,28 @@ import ( const File_google_protobuf_descriptor_proto = "google/protobuf/descriptor.proto" +// Full and short names for google.protobuf.Edition. +const ( + Edition_enum_fullname = "google.protobuf.Edition" + Edition_enum_name = "Edition" +) + +// Enum values for google.protobuf.Edition. +const ( + Edition_EDITION_UNKNOWN_enum_value = 0 + Edition_EDITION_LEGACY_enum_value = 900 + Edition_EDITION_PROTO2_enum_value = 998 + Edition_EDITION_PROTO3_enum_value = 999 + Edition_EDITION_2023_enum_value = 1000 + Edition_EDITION_2024_enum_value = 1001 + Edition_EDITION_1_TEST_ONLY_enum_value = 1 + Edition_EDITION_2_TEST_ONLY_enum_value = 2 + Edition_EDITION_99997_TEST_ONLY_enum_value = 99997 + Edition_EDITION_99998_TEST_ONLY_enum_value = 99998 + Edition_EDITION_99999_TEST_ONLY_enum_value = 99999 + Edition_EDITION_MAX_enum_value = 2147483647 +) + // Names for google.protobuf.FileDescriptorSet. 
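getFeaturesFor above depends on defaultsKeys being in ascending edition order and returns the defaults of the newest edition that does not exceed the requested one, panicking only when the requested edition predates everything known. A standalone sketch of that floor lookup with placeholder data (floorLookup and the string values are illustrative):

package main

import "fmt"

// floorLookup mirrors the getFeaturesFor selection: keys must be ascending,
// and the result is the entry for the last key <= want. The edition numbers
// below (998, 999, 1000) match proto2, proto3 and edition 2023.
func floorLookup(keys []int, defaults map[int]string, want int) (string, bool) {
	match := -1
	for _, k := range keys {
		if k > want {
			break
		}
		match = k
	}
	if match < 0 {
		return "", false // want is older than anything we know about
	}
	return defaults[match], true
}

func main() {
	keys := []int{998, 999, 1000}
	defaults := map[int]string{
		998:  "proto2 defaults",
		999:  "proto3 defaults",
		1000: "2023 defaults",
	}

	fmt.Println(floorLookup(keys, defaults, 999))  // proto3 defaults true
	fmt.Println(floorLookup(keys, defaults, 1001)) // 2023 defaults true (newest known <= 1001)
	fmt.Println(floorLookup(keys, defaults, 1))    // "" false (unsupported)
}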
const ( FileDescriptorSet_message_name protoreflect.Name = "FileDescriptorSet" @@ -81,7 +103,7 @@ const ( FileDescriptorProto_Options_field_number protoreflect.FieldNumber = 8 FileDescriptorProto_SourceCodeInfo_field_number protoreflect.FieldNumber = 9 FileDescriptorProto_Syntax_field_number protoreflect.FieldNumber = 12 - FileDescriptorProto_Edition_field_number protoreflect.FieldNumber = 13 + FileDescriptorProto_Edition_field_number protoreflect.FieldNumber = 14 ) // Names for google.protobuf.DescriptorProto. @@ -184,10 +206,12 @@ const ( const ( ExtensionRangeOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" ExtensionRangeOptions_Declaration_field_name protoreflect.Name = "declaration" + ExtensionRangeOptions_Features_field_name protoreflect.Name = "features" ExtensionRangeOptions_Verification_field_name protoreflect.Name = "verification" ExtensionRangeOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.uninterpreted_option" ExtensionRangeOptions_Declaration_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.declaration" + ExtensionRangeOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.features" ExtensionRangeOptions_Verification_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.verification" ) @@ -195,6 +219,7 @@ const ( const ( ExtensionRangeOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ExtensionRangeOptions_Declaration_field_number protoreflect.FieldNumber = 2 + ExtensionRangeOptions_Features_field_number protoreflect.FieldNumber = 50 ExtensionRangeOptions_Verification_field_number protoreflect.FieldNumber = 3 ) @@ -204,6 +229,12 @@ const ( ExtensionRangeOptions_VerificationState_enum_name = "VerificationState" ) +// Enum values for google.protobuf.ExtensionRangeOptions.VerificationState. +const ( + ExtensionRangeOptions_DECLARATION_enum_value = 0 + ExtensionRangeOptions_UNVERIFIED_enum_value = 1 +) + // Names for google.protobuf.ExtensionRangeOptions.Declaration. const ( ExtensionRangeOptions_Declaration_message_name protoreflect.Name = "Declaration" @@ -212,29 +243,26 @@ const ( // Field names for google.protobuf.ExtensionRangeOptions.Declaration. 
const ( - ExtensionRangeOptions_Declaration_Number_field_name protoreflect.Name = "number" - ExtensionRangeOptions_Declaration_FullName_field_name protoreflect.Name = "full_name" - ExtensionRangeOptions_Declaration_Type_field_name protoreflect.Name = "type" - ExtensionRangeOptions_Declaration_IsRepeated_field_name protoreflect.Name = "is_repeated" - ExtensionRangeOptions_Declaration_Reserved_field_name protoreflect.Name = "reserved" - ExtensionRangeOptions_Declaration_Repeated_field_name protoreflect.Name = "repeated" + ExtensionRangeOptions_Declaration_Number_field_name protoreflect.Name = "number" + ExtensionRangeOptions_Declaration_FullName_field_name protoreflect.Name = "full_name" + ExtensionRangeOptions_Declaration_Type_field_name protoreflect.Name = "type" + ExtensionRangeOptions_Declaration_Reserved_field_name protoreflect.Name = "reserved" + ExtensionRangeOptions_Declaration_Repeated_field_name protoreflect.Name = "repeated" - ExtensionRangeOptions_Declaration_Number_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.number" - ExtensionRangeOptions_Declaration_FullName_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.full_name" - ExtensionRangeOptions_Declaration_Type_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.type" - ExtensionRangeOptions_Declaration_IsRepeated_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.is_repeated" - ExtensionRangeOptions_Declaration_Reserved_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.reserved" - ExtensionRangeOptions_Declaration_Repeated_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.repeated" + ExtensionRangeOptions_Declaration_Number_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.number" + ExtensionRangeOptions_Declaration_FullName_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.full_name" + ExtensionRangeOptions_Declaration_Type_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.type" + ExtensionRangeOptions_Declaration_Reserved_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.reserved" + ExtensionRangeOptions_Declaration_Repeated_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.repeated" ) // Field numbers for google.protobuf.ExtensionRangeOptions.Declaration. 
const ( - ExtensionRangeOptions_Declaration_Number_field_number protoreflect.FieldNumber = 1 - ExtensionRangeOptions_Declaration_FullName_field_number protoreflect.FieldNumber = 2 - ExtensionRangeOptions_Declaration_Type_field_number protoreflect.FieldNumber = 3 - ExtensionRangeOptions_Declaration_IsRepeated_field_number protoreflect.FieldNumber = 4 - ExtensionRangeOptions_Declaration_Reserved_field_number protoreflect.FieldNumber = 5 - ExtensionRangeOptions_Declaration_Repeated_field_number protoreflect.FieldNumber = 6 + ExtensionRangeOptions_Declaration_Number_field_number protoreflect.FieldNumber = 1 + ExtensionRangeOptions_Declaration_FullName_field_number protoreflect.FieldNumber = 2 + ExtensionRangeOptions_Declaration_Type_field_number protoreflect.FieldNumber = 3 + ExtensionRangeOptions_Declaration_Reserved_field_number protoreflect.FieldNumber = 5 + ExtensionRangeOptions_Declaration_Repeated_field_number protoreflect.FieldNumber = 6 ) // Names for google.protobuf.FieldDescriptorProto. @@ -291,12 +319,41 @@ const ( FieldDescriptorProto_Type_enum_name = "Type" ) +// Enum values for google.protobuf.FieldDescriptorProto.Type. +const ( + FieldDescriptorProto_TYPE_DOUBLE_enum_value = 1 + FieldDescriptorProto_TYPE_FLOAT_enum_value = 2 + FieldDescriptorProto_TYPE_INT64_enum_value = 3 + FieldDescriptorProto_TYPE_UINT64_enum_value = 4 + FieldDescriptorProto_TYPE_INT32_enum_value = 5 + FieldDescriptorProto_TYPE_FIXED64_enum_value = 6 + FieldDescriptorProto_TYPE_FIXED32_enum_value = 7 + FieldDescriptorProto_TYPE_BOOL_enum_value = 8 + FieldDescriptorProto_TYPE_STRING_enum_value = 9 + FieldDescriptorProto_TYPE_GROUP_enum_value = 10 + FieldDescriptorProto_TYPE_MESSAGE_enum_value = 11 + FieldDescriptorProto_TYPE_BYTES_enum_value = 12 + FieldDescriptorProto_TYPE_UINT32_enum_value = 13 + FieldDescriptorProto_TYPE_ENUM_enum_value = 14 + FieldDescriptorProto_TYPE_SFIXED32_enum_value = 15 + FieldDescriptorProto_TYPE_SFIXED64_enum_value = 16 + FieldDescriptorProto_TYPE_SINT32_enum_value = 17 + FieldDescriptorProto_TYPE_SINT64_enum_value = 18 +) + // Full and short names for google.protobuf.FieldDescriptorProto.Label. const ( FieldDescriptorProto_Label_enum_fullname = "google.protobuf.FieldDescriptorProto.Label" FieldDescriptorProto_Label_enum_name = "Label" ) +// Enum values for google.protobuf.FieldDescriptorProto.Label. +const ( + FieldDescriptorProto_LABEL_OPTIONAL_enum_value = 1 + FieldDescriptorProto_LABEL_REPEATED_enum_value = 3 + FieldDescriptorProto_LABEL_REQUIRED_enum_value = 2 +) + // Names for google.protobuf.OneofDescriptorProto. 
const ( OneofDescriptorProto_message_name protoreflect.Name = "OneofDescriptorProto" @@ -468,7 +525,6 @@ const ( FileOptions_CcGenericServices_field_name protoreflect.Name = "cc_generic_services" FileOptions_JavaGenericServices_field_name protoreflect.Name = "java_generic_services" FileOptions_PyGenericServices_field_name protoreflect.Name = "py_generic_services" - FileOptions_PhpGenericServices_field_name protoreflect.Name = "php_generic_services" FileOptions_Deprecated_field_name protoreflect.Name = "deprecated" FileOptions_CcEnableArenas_field_name protoreflect.Name = "cc_enable_arenas" FileOptions_ObjcClassPrefix_field_name protoreflect.Name = "objc_class_prefix" @@ -478,6 +534,7 @@ const ( FileOptions_PhpNamespace_field_name protoreflect.Name = "php_namespace" FileOptions_PhpMetadataNamespace_field_name protoreflect.Name = "php_metadata_namespace" FileOptions_RubyPackage_field_name protoreflect.Name = "ruby_package" + FileOptions_Features_field_name protoreflect.Name = "features" FileOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" FileOptions_JavaPackage_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_package" @@ -490,7 +547,6 @@ const ( FileOptions_CcGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.cc_generic_services" FileOptions_JavaGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_generic_services" FileOptions_PyGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.py_generic_services" - FileOptions_PhpGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_generic_services" FileOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.deprecated" FileOptions_CcEnableArenas_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.cc_enable_arenas" FileOptions_ObjcClassPrefix_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.objc_class_prefix" @@ -500,6 +556,7 @@ const ( FileOptions_PhpNamespace_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_namespace" FileOptions_PhpMetadataNamespace_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_metadata_namespace" FileOptions_RubyPackage_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.ruby_package" + FileOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.features" FileOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.uninterpreted_option" ) @@ -515,7 +572,6 @@ const ( FileOptions_CcGenericServices_field_number protoreflect.FieldNumber = 16 FileOptions_JavaGenericServices_field_number protoreflect.FieldNumber = 17 FileOptions_PyGenericServices_field_number protoreflect.FieldNumber = 18 - FileOptions_PhpGenericServices_field_number protoreflect.FieldNumber = 42 FileOptions_Deprecated_field_number protoreflect.FieldNumber = 23 FileOptions_CcEnableArenas_field_number protoreflect.FieldNumber = 31 FileOptions_ObjcClassPrefix_field_number protoreflect.FieldNumber = 36 @@ -525,6 +581,7 @@ const ( FileOptions_PhpNamespace_field_number protoreflect.FieldNumber = 41 FileOptions_PhpMetadataNamespace_field_number protoreflect.FieldNumber = 44 FileOptions_RubyPackage_field_number protoreflect.FieldNumber = 45 + FileOptions_Features_field_number protoreflect.FieldNumber = 50 FileOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -534,6 
+591,13 @@ const ( FileOptions_OptimizeMode_enum_name = "OptimizeMode" ) +// Enum values for google.protobuf.FileOptions.OptimizeMode. +const ( + FileOptions_SPEED_enum_value = 1 + FileOptions_CODE_SIZE_enum_value = 2 + FileOptions_LITE_RUNTIME_enum_value = 3 +) + // Names for google.protobuf.MessageOptions. const ( MessageOptions_message_name protoreflect.Name = "MessageOptions" @@ -547,6 +611,7 @@ const ( MessageOptions_Deprecated_field_name protoreflect.Name = "deprecated" MessageOptions_MapEntry_field_name protoreflect.Name = "map_entry" MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_name protoreflect.Name = "deprecated_legacy_json_field_conflicts" + MessageOptions_Features_field_name protoreflect.Name = "features" MessageOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" MessageOptions_MessageSetWireFormat_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.message_set_wire_format" @@ -554,6 +619,7 @@ const ( MessageOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated" MessageOptions_MapEntry_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.map_entry" MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated_legacy_json_field_conflicts" + MessageOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.features" MessageOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.uninterpreted_option" ) @@ -564,6 +630,7 @@ const ( MessageOptions_Deprecated_field_number protoreflect.FieldNumber = 3 MessageOptions_MapEntry_field_number protoreflect.FieldNumber = 7 MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_number protoreflect.FieldNumber = 11 + MessageOptions_Features_field_number protoreflect.FieldNumber = 12 MessageOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -584,8 +651,10 @@ const ( FieldOptions_Weak_field_name protoreflect.Name = "weak" FieldOptions_DebugRedact_field_name protoreflect.Name = "debug_redact" FieldOptions_Retention_field_name protoreflect.Name = "retention" - FieldOptions_Target_field_name protoreflect.Name = "target" FieldOptions_Targets_field_name protoreflect.Name = "targets" + FieldOptions_EditionDefaults_field_name protoreflect.Name = "edition_defaults" + FieldOptions_Features_field_name protoreflect.Name = "features" + FieldOptions_FeatureSupport_field_name protoreflect.Name = "feature_support" FieldOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" FieldOptions_Ctype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.ctype" @@ -597,8 +666,10 @@ const ( FieldOptions_Weak_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.weak" FieldOptions_DebugRedact_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.debug_redact" FieldOptions_Retention_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.retention" - FieldOptions_Target_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.target" FieldOptions_Targets_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.targets" + FieldOptions_EditionDefaults_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.edition_defaults" + FieldOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.features" + 
FieldOptions_FeatureSupport_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.feature_support" FieldOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.uninterpreted_option" ) @@ -613,8 +684,10 @@ const ( FieldOptions_Weak_field_number protoreflect.FieldNumber = 10 FieldOptions_DebugRedact_field_number protoreflect.FieldNumber = 16 FieldOptions_Retention_field_number protoreflect.FieldNumber = 17 - FieldOptions_Target_field_number protoreflect.FieldNumber = 18 FieldOptions_Targets_field_number protoreflect.FieldNumber = 19 + FieldOptions_EditionDefaults_field_number protoreflect.FieldNumber = 20 + FieldOptions_Features_field_number protoreflect.FieldNumber = 21 + FieldOptions_FeatureSupport_field_number protoreflect.FieldNumber = 22 FieldOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -624,24 +697,107 @@ const ( FieldOptions_CType_enum_name = "CType" ) +// Enum values for google.protobuf.FieldOptions.CType. +const ( + FieldOptions_STRING_enum_value = 0 + FieldOptions_CORD_enum_value = 1 + FieldOptions_STRING_PIECE_enum_value = 2 +) + // Full and short names for google.protobuf.FieldOptions.JSType. const ( FieldOptions_JSType_enum_fullname = "google.protobuf.FieldOptions.JSType" FieldOptions_JSType_enum_name = "JSType" ) +// Enum values for google.protobuf.FieldOptions.JSType. +const ( + FieldOptions_JS_NORMAL_enum_value = 0 + FieldOptions_JS_STRING_enum_value = 1 + FieldOptions_JS_NUMBER_enum_value = 2 +) + // Full and short names for google.protobuf.FieldOptions.OptionRetention. const ( FieldOptions_OptionRetention_enum_fullname = "google.protobuf.FieldOptions.OptionRetention" FieldOptions_OptionRetention_enum_name = "OptionRetention" ) +// Enum values for google.protobuf.FieldOptions.OptionRetention. +const ( + FieldOptions_RETENTION_UNKNOWN_enum_value = 0 + FieldOptions_RETENTION_RUNTIME_enum_value = 1 + FieldOptions_RETENTION_SOURCE_enum_value = 2 +) + // Full and short names for google.protobuf.FieldOptions.OptionTargetType. const ( FieldOptions_OptionTargetType_enum_fullname = "google.protobuf.FieldOptions.OptionTargetType" FieldOptions_OptionTargetType_enum_name = "OptionTargetType" ) +// Enum values for google.protobuf.FieldOptions.OptionTargetType. +const ( + FieldOptions_TARGET_TYPE_UNKNOWN_enum_value = 0 + FieldOptions_TARGET_TYPE_FILE_enum_value = 1 + FieldOptions_TARGET_TYPE_EXTENSION_RANGE_enum_value = 2 + FieldOptions_TARGET_TYPE_MESSAGE_enum_value = 3 + FieldOptions_TARGET_TYPE_FIELD_enum_value = 4 + FieldOptions_TARGET_TYPE_ONEOF_enum_value = 5 + FieldOptions_TARGET_TYPE_ENUM_enum_value = 6 + FieldOptions_TARGET_TYPE_ENUM_ENTRY_enum_value = 7 + FieldOptions_TARGET_TYPE_SERVICE_enum_value = 8 + FieldOptions_TARGET_TYPE_METHOD_enum_value = 9 +) + +// Names for google.protobuf.FieldOptions.EditionDefault. +const ( + FieldOptions_EditionDefault_message_name protoreflect.Name = "EditionDefault" + FieldOptions_EditionDefault_message_fullname protoreflect.FullName = "google.protobuf.FieldOptions.EditionDefault" +) + +// Field names for google.protobuf.FieldOptions.EditionDefault. 
+const ( + FieldOptions_EditionDefault_Edition_field_name protoreflect.Name = "edition" + FieldOptions_EditionDefault_Value_field_name protoreflect.Name = "value" + + FieldOptions_EditionDefault_Edition_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.EditionDefault.edition" + FieldOptions_EditionDefault_Value_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.EditionDefault.value" +) + +// Field numbers for google.protobuf.FieldOptions.EditionDefault. +const ( + FieldOptions_EditionDefault_Edition_field_number protoreflect.FieldNumber = 3 + FieldOptions_EditionDefault_Value_field_number protoreflect.FieldNumber = 2 +) + +// Names for google.protobuf.FieldOptions.FeatureSupport. +const ( + FieldOptions_FeatureSupport_message_name protoreflect.Name = "FeatureSupport" + FieldOptions_FeatureSupport_message_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport" +) + +// Field names for google.protobuf.FieldOptions.FeatureSupport. +const ( + FieldOptions_FeatureSupport_EditionIntroduced_field_name protoreflect.Name = "edition_introduced" + FieldOptions_FeatureSupport_EditionDeprecated_field_name protoreflect.Name = "edition_deprecated" + FieldOptions_FeatureSupport_DeprecationWarning_field_name protoreflect.Name = "deprecation_warning" + FieldOptions_FeatureSupport_EditionRemoved_field_name protoreflect.Name = "edition_removed" + + FieldOptions_FeatureSupport_EditionIntroduced_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport.edition_introduced" + FieldOptions_FeatureSupport_EditionDeprecated_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport.edition_deprecated" + FieldOptions_FeatureSupport_DeprecationWarning_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport.deprecation_warning" + FieldOptions_FeatureSupport_EditionRemoved_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport.edition_removed" +) + +// Field numbers for google.protobuf.FieldOptions.FeatureSupport. +const ( + FieldOptions_FeatureSupport_EditionIntroduced_field_number protoreflect.FieldNumber = 1 + FieldOptions_FeatureSupport_EditionDeprecated_field_number protoreflect.FieldNumber = 2 + FieldOptions_FeatureSupport_DeprecationWarning_field_number protoreflect.FieldNumber = 3 + FieldOptions_FeatureSupport_EditionRemoved_field_number protoreflect.FieldNumber = 4 +) + // Names for google.protobuf.OneofOptions. const ( OneofOptions_message_name protoreflect.Name = "OneofOptions" @@ -650,13 +806,16 @@ const ( // Field names for google.protobuf.OneofOptions. const ( + OneofOptions_Features_field_name protoreflect.Name = "features" OneofOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + OneofOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.OneofOptions.features" OneofOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.OneofOptions.uninterpreted_option" ) // Field numbers for google.protobuf.OneofOptions. 
const ( + OneofOptions_Features_field_number protoreflect.FieldNumber = 1 OneofOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -671,11 +830,13 @@ const ( EnumOptions_AllowAlias_field_name protoreflect.Name = "allow_alias" EnumOptions_Deprecated_field_name protoreflect.Name = "deprecated" EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_name protoreflect.Name = "deprecated_legacy_json_field_conflicts" + EnumOptions_Features_field_name protoreflect.Name = "features" EnumOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" EnumOptions_AllowAlias_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.allow_alias" EnumOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated" EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated_legacy_json_field_conflicts" + EnumOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.features" EnumOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.uninterpreted_option" ) @@ -684,6 +845,7 @@ const ( EnumOptions_AllowAlias_field_number protoreflect.FieldNumber = 2 EnumOptions_Deprecated_field_number protoreflect.FieldNumber = 3 EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_number protoreflect.FieldNumber = 6 + EnumOptions_Features_field_number protoreflect.FieldNumber = 7 EnumOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -696,15 +858,24 @@ const ( // Field names for google.protobuf.EnumValueOptions. const ( EnumValueOptions_Deprecated_field_name protoreflect.Name = "deprecated" + EnumValueOptions_Features_field_name protoreflect.Name = "features" + EnumValueOptions_DebugRedact_field_name protoreflect.Name = "debug_redact" + EnumValueOptions_FeatureSupport_field_name protoreflect.Name = "feature_support" EnumValueOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" EnumValueOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.deprecated" + EnumValueOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.features" + EnumValueOptions_DebugRedact_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.debug_redact" + EnumValueOptions_FeatureSupport_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.feature_support" EnumValueOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.uninterpreted_option" ) // Field numbers for google.protobuf.EnumValueOptions. const ( EnumValueOptions_Deprecated_field_number protoreflect.FieldNumber = 1 + EnumValueOptions_Features_field_number protoreflect.FieldNumber = 2 + EnumValueOptions_DebugRedact_field_number protoreflect.FieldNumber = 3 + EnumValueOptions_FeatureSupport_field_number protoreflect.FieldNumber = 4 EnumValueOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -716,15 +887,18 @@ const ( // Field names for google.protobuf.ServiceOptions. 
const ( + ServiceOptions_Features_field_name protoreflect.Name = "features" ServiceOptions_Deprecated_field_name protoreflect.Name = "deprecated" ServiceOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + ServiceOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.features" ServiceOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.deprecated" ServiceOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.uninterpreted_option" ) // Field numbers for google.protobuf.ServiceOptions. const ( + ServiceOptions_Features_field_number protoreflect.FieldNumber = 34 ServiceOptions_Deprecated_field_number protoreflect.FieldNumber = 33 ServiceOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -739,10 +913,12 @@ const ( const ( MethodOptions_Deprecated_field_name protoreflect.Name = "deprecated" MethodOptions_IdempotencyLevel_field_name protoreflect.Name = "idempotency_level" + MethodOptions_Features_field_name protoreflect.Name = "features" MethodOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" MethodOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.deprecated" MethodOptions_IdempotencyLevel_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.idempotency_level" + MethodOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.features" MethodOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.uninterpreted_option" ) @@ -750,6 +926,7 @@ const ( const ( MethodOptions_Deprecated_field_number protoreflect.FieldNumber = 33 MethodOptions_IdempotencyLevel_field_number protoreflect.FieldNumber = 34 + MethodOptions_Features_field_number protoreflect.FieldNumber = 35 MethodOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -759,6 +936,13 @@ const ( MethodOptions_IdempotencyLevel_enum_name = "IdempotencyLevel" ) +// Enum values for google.protobuf.MethodOptions.IdempotencyLevel. +const ( + MethodOptions_IDEMPOTENCY_UNKNOWN_enum_value = 0 + MethodOptions_NO_SIDE_EFFECTS_enum_value = 1 + MethodOptions_IDEMPOTENT_enum_value = 2 +) + // Names for google.protobuf.UninterpretedOption. const ( UninterpretedOption_message_name protoreflect.Name = "UninterpretedOption" @@ -816,6 +1000,166 @@ const ( UninterpretedOption_NamePart_IsExtension_field_number protoreflect.FieldNumber = 2 ) +// Names for google.protobuf.FeatureSet. +const ( + FeatureSet_message_name protoreflect.Name = "FeatureSet" + FeatureSet_message_fullname protoreflect.FullName = "google.protobuf.FeatureSet" +) + +// Field names for google.protobuf.FeatureSet. 
+const ( + FeatureSet_FieldPresence_field_name protoreflect.Name = "field_presence" + FeatureSet_EnumType_field_name protoreflect.Name = "enum_type" + FeatureSet_RepeatedFieldEncoding_field_name protoreflect.Name = "repeated_field_encoding" + FeatureSet_Utf8Validation_field_name protoreflect.Name = "utf8_validation" + FeatureSet_MessageEncoding_field_name protoreflect.Name = "message_encoding" + FeatureSet_JsonFormat_field_name protoreflect.Name = "json_format" + + FeatureSet_FieldPresence_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.field_presence" + FeatureSet_EnumType_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.enum_type" + FeatureSet_RepeatedFieldEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.repeated_field_encoding" + FeatureSet_Utf8Validation_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.utf8_validation" + FeatureSet_MessageEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.message_encoding" + FeatureSet_JsonFormat_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.json_format" +) + +// Field numbers for google.protobuf.FeatureSet. +const ( + FeatureSet_FieldPresence_field_number protoreflect.FieldNumber = 1 + FeatureSet_EnumType_field_number protoreflect.FieldNumber = 2 + FeatureSet_RepeatedFieldEncoding_field_number protoreflect.FieldNumber = 3 + FeatureSet_Utf8Validation_field_number protoreflect.FieldNumber = 4 + FeatureSet_MessageEncoding_field_number protoreflect.FieldNumber = 5 + FeatureSet_JsonFormat_field_number protoreflect.FieldNumber = 6 +) + +// Full and short names for google.protobuf.FeatureSet.FieldPresence. +const ( + FeatureSet_FieldPresence_enum_fullname = "google.protobuf.FeatureSet.FieldPresence" + FeatureSet_FieldPresence_enum_name = "FieldPresence" +) + +// Enum values for google.protobuf.FeatureSet.FieldPresence. +const ( + FeatureSet_FIELD_PRESENCE_UNKNOWN_enum_value = 0 + FeatureSet_EXPLICIT_enum_value = 1 + FeatureSet_IMPLICIT_enum_value = 2 + FeatureSet_LEGACY_REQUIRED_enum_value = 3 +) + +// Full and short names for google.protobuf.FeatureSet.EnumType. +const ( + FeatureSet_EnumType_enum_fullname = "google.protobuf.FeatureSet.EnumType" + FeatureSet_EnumType_enum_name = "EnumType" +) + +// Enum values for google.protobuf.FeatureSet.EnumType. +const ( + FeatureSet_ENUM_TYPE_UNKNOWN_enum_value = 0 + FeatureSet_OPEN_enum_value = 1 + FeatureSet_CLOSED_enum_value = 2 +) + +// Full and short names for google.protobuf.FeatureSet.RepeatedFieldEncoding. +const ( + FeatureSet_RepeatedFieldEncoding_enum_fullname = "google.protobuf.FeatureSet.RepeatedFieldEncoding" + FeatureSet_RepeatedFieldEncoding_enum_name = "RepeatedFieldEncoding" +) + +// Enum values for google.protobuf.FeatureSet.RepeatedFieldEncoding. +const ( + FeatureSet_REPEATED_FIELD_ENCODING_UNKNOWN_enum_value = 0 + FeatureSet_PACKED_enum_value = 1 + FeatureSet_EXPANDED_enum_value = 2 +) + +// Full and short names for google.protobuf.FeatureSet.Utf8Validation. +const ( + FeatureSet_Utf8Validation_enum_fullname = "google.protobuf.FeatureSet.Utf8Validation" + FeatureSet_Utf8Validation_enum_name = "Utf8Validation" +) + +// Enum values for google.protobuf.FeatureSet.Utf8Validation. +const ( + FeatureSet_UTF8_VALIDATION_UNKNOWN_enum_value = 0 + FeatureSet_VERIFY_enum_value = 2 + FeatureSet_NONE_enum_value = 3 +) + +// Full and short names for google.protobuf.FeatureSet.MessageEncoding. 
+const ( + FeatureSet_MessageEncoding_enum_fullname = "google.protobuf.FeatureSet.MessageEncoding" + FeatureSet_MessageEncoding_enum_name = "MessageEncoding" +) + +// Enum values for google.protobuf.FeatureSet.MessageEncoding. +const ( + FeatureSet_MESSAGE_ENCODING_UNKNOWN_enum_value = 0 + FeatureSet_LENGTH_PREFIXED_enum_value = 1 + FeatureSet_DELIMITED_enum_value = 2 +) + +// Full and short names for google.protobuf.FeatureSet.JsonFormat. +const ( + FeatureSet_JsonFormat_enum_fullname = "google.protobuf.FeatureSet.JsonFormat" + FeatureSet_JsonFormat_enum_name = "JsonFormat" +) + +// Enum values for google.protobuf.FeatureSet.JsonFormat. +const ( + FeatureSet_JSON_FORMAT_UNKNOWN_enum_value = 0 + FeatureSet_ALLOW_enum_value = 1 + FeatureSet_LEGACY_BEST_EFFORT_enum_value = 2 +) + +// Names for google.protobuf.FeatureSetDefaults. +const ( + FeatureSetDefaults_message_name protoreflect.Name = "FeatureSetDefaults" + FeatureSetDefaults_message_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults" +) + +// Field names for google.protobuf.FeatureSetDefaults. +const ( + FeatureSetDefaults_Defaults_field_name protoreflect.Name = "defaults" + FeatureSetDefaults_MinimumEdition_field_name protoreflect.Name = "minimum_edition" + FeatureSetDefaults_MaximumEdition_field_name protoreflect.Name = "maximum_edition" + + FeatureSetDefaults_Defaults_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.defaults" + FeatureSetDefaults_MinimumEdition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.minimum_edition" + FeatureSetDefaults_MaximumEdition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.maximum_edition" +) + +// Field numbers for google.protobuf.FeatureSetDefaults. +const ( + FeatureSetDefaults_Defaults_field_number protoreflect.FieldNumber = 1 + FeatureSetDefaults_MinimumEdition_field_number protoreflect.FieldNumber = 4 + FeatureSetDefaults_MaximumEdition_field_number protoreflect.FieldNumber = 5 +) + +// Names for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault. +const ( + FeatureSetDefaults_FeatureSetEditionDefault_message_name protoreflect.Name = "FeatureSetEditionDefault" + FeatureSetDefaults_FeatureSetEditionDefault_message_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault" +) + +// Field names for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault. +const ( + FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_name protoreflect.Name = "edition" + FeatureSetDefaults_FeatureSetEditionDefault_OverridableFeatures_field_name protoreflect.Name = "overridable_features" + FeatureSetDefaults_FeatureSetEditionDefault_FixedFeatures_field_name protoreflect.Name = "fixed_features" + + FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition" + FeatureSetDefaults_FeatureSetEditionDefault_OverridableFeatures_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features" + FeatureSetDefaults_FeatureSetEditionDefault_FixedFeatures_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features" +) + +// Field numbers for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault. 
+const ( + FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_number protoreflect.FieldNumber = 3 + FeatureSetDefaults_FeatureSetEditionDefault_OverridableFeatures_field_number protoreflect.FieldNumber = 4 + FeatureSetDefaults_FeatureSetEditionDefault_FixedFeatures_field_number protoreflect.FieldNumber = 5 +) + // Names for google.protobuf.SourceCodeInfo. const ( SourceCodeInfo_message_name protoreflect.Name = "SourceCodeInfo" @@ -917,3 +1261,10 @@ const ( GeneratedCodeInfo_Annotation_Semantic_enum_fullname = "google.protobuf.GeneratedCodeInfo.Annotation.Semantic" GeneratedCodeInfo_Annotation_Semantic_enum_name = "Semantic" ) + +// Enum values for google.protobuf.GeneratedCodeInfo.Annotation.Semantic. +const ( + GeneratedCodeInfo_Annotation_NONE_enum_value = 0 + GeneratedCodeInfo_Annotation_SET_enum_value = 1 + GeneratedCodeInfo_Annotation_ALIAS_enum_value = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go new file mode 100644 index 00000000..9a652a2b --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go @@ -0,0 +1,31 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_go_features_proto = "google/protobuf/go_features.proto" + +// Names for google.protobuf.GoFeatures. +const ( + GoFeatures_message_name protoreflect.Name = "GoFeatures" + GoFeatures_message_fullname protoreflect.FullName = "google.protobuf.GoFeatures" +) + +// Field names for google.protobuf.GoFeatures. +const ( + GoFeatures_LegacyUnmarshalJsonEnum_field_name protoreflect.Name = "legacy_unmarshal_json_enum" + + GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "google.protobuf.GoFeatures.legacy_unmarshal_json_enum" +) + +// Field numbers for google.protobuf.GoFeatures. +const ( + GoFeatures_LegacyUnmarshalJsonEnum_field_number protoreflect.FieldNumber = 1 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go b/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go index 1a38944b..ad6f80c4 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go @@ -18,6 +18,11 @@ const ( NullValue_enum_name = "NullValue" ) +// Enum values for google.protobuf.NullValue. +const ( + NullValue_NULL_VALUE_enum_value = 0 +) + // Names for google.protobuf.Struct. const ( Struct_message_name protoreflect.Name = "Struct" diff --git a/vendor/google.golang.org/protobuf/internal/genid/type_gen.go b/vendor/google.golang.org/protobuf/internal/genid/type_gen.go index e0f75fea..49bc73e2 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/type_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/type_gen.go @@ -18,6 +18,13 @@ const ( Syntax_enum_name = "Syntax" ) +// Enum values for google.protobuf.Syntax. +const ( + Syntax_SYNTAX_PROTO2_enum_value = 0 + Syntax_SYNTAX_PROTO3_enum_value = 1 + Syntax_SYNTAX_EDITIONS_enum_value = 2 +) + // Names for google.protobuf.Type. const ( Type_message_name protoreflect.Name = "Type" @@ -105,12 +112,43 @@ const ( Field_Kind_enum_name = "Kind" ) +// Enum values for google.protobuf.Field.Kind. 
+const ( + Field_TYPE_UNKNOWN_enum_value = 0 + Field_TYPE_DOUBLE_enum_value = 1 + Field_TYPE_FLOAT_enum_value = 2 + Field_TYPE_INT64_enum_value = 3 + Field_TYPE_UINT64_enum_value = 4 + Field_TYPE_INT32_enum_value = 5 + Field_TYPE_FIXED64_enum_value = 6 + Field_TYPE_FIXED32_enum_value = 7 + Field_TYPE_BOOL_enum_value = 8 + Field_TYPE_STRING_enum_value = 9 + Field_TYPE_GROUP_enum_value = 10 + Field_TYPE_MESSAGE_enum_value = 11 + Field_TYPE_BYTES_enum_value = 12 + Field_TYPE_UINT32_enum_value = 13 + Field_TYPE_ENUM_enum_value = 14 + Field_TYPE_SFIXED32_enum_value = 15 + Field_TYPE_SFIXED64_enum_value = 16 + Field_TYPE_SINT32_enum_value = 17 + Field_TYPE_SINT64_enum_value = 18 +) + // Full and short names for google.protobuf.Field.Cardinality. const ( Field_Cardinality_enum_fullname = "google.protobuf.Field.Cardinality" Field_Cardinality_enum_name = "Cardinality" ) +// Enum values for google.protobuf.Field.Cardinality. +const ( + Field_CARDINALITY_UNKNOWN_enum_value = 0 + Field_CARDINALITY_OPTIONAL_enum_value = 1 + Field_CARDINALITY_REQUIRED_enum_value = 2 + Field_CARDINALITY_REPEATED_enum_value = 3 +) + // Names for google.protobuf.Enum. const ( Enum_message_name protoreflect.Name = "Enum" diff --git a/vendor/google.golang.org/protobuf/internal/impl/api_export.go b/vendor/google.golang.org/protobuf/internal/impl/api_export.go index a371f98d..5d5771c2 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/api_export.go +++ b/vendor/google.golang.org/protobuf/internal/impl/api_export.go @@ -22,13 +22,13 @@ type Export struct{} // NewError formats a string according to the format specifier and arguments and // returns an error that has a "proto" prefix. -func (Export) NewError(f string, x ...interface{}) error { +func (Export) NewError(f string, x ...any) error { return errors.New(f, x...) } // enum is any enum type generated by protoc-gen-go // and must be a named int32 type. -type enum = interface{} +type enum = any // EnumOf returns the protoreflect.Enum interface over e. // It returns nil if e is nil. @@ -81,7 +81,7 @@ func (Export) EnumStringOf(ed protoreflect.EnumDescriptor, n protoreflect.EnumNu // message is any message type generated by protoc-gen-go // and must be a pointer to a named struct type. -type message = interface{} +type message = any // legacyMessageWrapper wraps a v2 message as a v1 message. 
type legacyMessageWrapper struct{ m protoreflect.ProtoMessage } diff --git a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go index bff041ed..f29e6a8f 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go +++ b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go @@ -68,7 +68,7 @@ func (mi *MessageInfo) isInitExtensions(ext *map[int32]ExtensionField) error { } for _, x := range *ext { ei := getExtensionFieldInfo(x.Type()) - if ei.funcs.isInit == nil { + if ei.funcs.isInit == nil || x.isUnexpandedLazy() { continue } v := x.Value() diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go index e74cefdc..4bb0a7a2 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go @@ -21,26 +21,18 @@ type extensionFieldInfo struct { validation validationInfo } -var legacyExtensionFieldInfoCache sync.Map // map[protoreflect.ExtensionType]*extensionFieldInfo - func getExtensionFieldInfo(xt protoreflect.ExtensionType) *extensionFieldInfo { if xi, ok := xt.(*ExtensionInfo); ok { xi.lazyInit() return xi.info } - return legacyLoadExtensionFieldInfo(xt) -} - -// legacyLoadExtensionFieldInfo dynamically loads a *ExtensionInfo for xt. -func legacyLoadExtensionFieldInfo(xt protoreflect.ExtensionType) *extensionFieldInfo { - if xi, ok := legacyExtensionFieldInfoCache.Load(xt); ok { - return xi.(*extensionFieldInfo) - } - e := makeExtensionFieldInfo(xt.TypeDescriptor()) - if e, ok := legacyMessageTypeCache.LoadOrStore(xt, e); ok { - return e.(*extensionFieldInfo) - } - return e + // Ideally we'd cache the resulting *extensionFieldInfo so we don't have to + // recompute this metadata repeatedly. But without support for something like + // weak references, such a cache would pin temporary values (like dynamic + // extension types, constructed for the duration of a user request) to the + // heap forever, causing memory usage of the cache to grow unbounded. + // See discussion in https://github.com/golang/protobuf/issues/1521. + return makeExtensionFieldInfo(xt.TypeDescriptor()) } func makeExtensionFieldInfo(xd protoreflect.ExtensionDescriptor) *extensionFieldInfo { @@ -107,6 +99,28 @@ func (f *ExtensionField) canLazy(xt protoreflect.ExtensionType) bool { return false } +// isUnexpandedLazy returns true if the ExensionField is lazy and not +// yet expanded, which means it's present and already checked for +// initialized required fields. +func (f *ExtensionField) isUnexpandedLazy() bool { + return f.lazy != nil && atomic.LoadUint32(&f.lazy.atomicOnce) == 0 +} + +// lazyBuffer retrieves the buffer for a lazy extension if it's not yet expanded. +// +// The returned buffer has to be kept over whatever operation we're planning, +// as re-retrieving it will fail after the message is lazily decoded. +func (f *ExtensionField) lazyBuffer() []byte { + // This function might be in the critical path, so check the atomic without + // taking a look first, then only take the lock if needed. 
+ if !f.isUnexpandedLazy() { + return nil + } + f.lazy.mu.Lock() + defer f.lazy.mu.Unlock() + return f.lazy.b +} + func (f *ExtensionField) lazyInit() { f.lazy.mu.Lock() defer f.lazy.mu.Unlock() diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go index 3fadd241..78ee47e4 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go @@ -233,9 +233,15 @@ func sizeMessageInfo(p pointer, f *coderFieldInfo, opts marshalOptions) int { } func appendMessageInfo(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + calculatedSize := f.mi.sizePointer(p.Elem(), opts) b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, uint64(f.mi.sizePointer(p.Elem(), opts))) - return f.mi.marshalAppendPointer(b, p.Elem(), opts) + b = protowire.AppendVarint(b, uint64(calculatedSize)) + before := len(b) + b, err := f.mi.marshalAppendPointer(b, p.Elem(), opts) + if measuredSize := len(b) - before; calculatedSize != measuredSize && err == nil { + return nil, errors.MismatchedSizeCalculation(calculatedSize, measuredSize) + } + return b, err } func consumeMessageInfo(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { @@ -262,14 +268,21 @@ func isInitMessageInfo(p pointer, f *coderFieldInfo) error { return f.mi.checkInitializedPointer(p.Elem()) } -func sizeMessage(m proto.Message, tagsize int, _ marshalOptions) int { - return protowire.SizeBytes(proto.Size(m)) + tagsize +func sizeMessage(m proto.Message, tagsize int, opts marshalOptions) int { + return protowire.SizeBytes(opts.Options().Size(m)) + tagsize } func appendMessage(b []byte, m proto.Message, wiretag uint64, opts marshalOptions) ([]byte, error) { + mopts := opts.Options() + calculatedSize := mopts.Size(m) b = protowire.AppendVarint(b, wiretag) - b = protowire.AppendVarint(b, uint64(proto.Size(m))) - return opts.Options().MarshalAppend(b, m) + b = protowire.AppendVarint(b, uint64(calculatedSize)) + before := len(b) + b, err := mopts.MarshalAppend(b, m) + if measuredSize := len(b) - before; calculatedSize != measuredSize && err == nil { + return nil, errors.MismatchedSizeCalculation(calculatedSize, measuredSize) + } + return b, err } func consumeMessage(b []byte, m proto.Message, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, err error) { @@ -405,8 +418,8 @@ func consumeGroupType(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInf return f.mi.unmarshalPointer(b, p.Elem(), f.num, opts) } -func sizeGroup(m proto.Message, tagsize int, _ marshalOptions) int { - return 2*tagsize + proto.Size(m) +func sizeGroup(m proto.Message, tagsize int, opts marshalOptions) int { + return 2*tagsize + opts.Options().Size(m) } func appendGroup(b []byte, m proto.Message, wiretag uint64, opts marshalOptions) ([]byte, error) { @@ -482,10 +495,14 @@ func appendMessageSliceInfo(b []byte, p pointer, f *coderFieldInfo, opts marshal b = protowire.AppendVarint(b, f.wiretag) siz := f.mi.sizePointer(v, opts) b = protowire.AppendVarint(b, uint64(siz)) + before := len(b) b, err = f.mi.marshalAppendPointer(b, v, opts) if err != nil { return b, err } + if measuredSize := len(b) - before; siz != measuredSize { + return nil, errors.MismatchedSizeCalculation(siz, measuredSize) + } } return b, nil } @@ -520,28 +537,34 @@ func isInitMessageSliceInfo(p pointer, f *coderFieldInfo) error { return nil 
} -func sizeMessageSlice(p pointer, goType reflect.Type, tagsize int, _ marshalOptions) int { +func sizeMessageSlice(p pointer, goType reflect.Type, tagsize int, opts marshalOptions) int { + mopts := opts.Options() s := p.PointerSlice() n := 0 for _, v := range s { m := asMessage(v.AsValueOf(goType.Elem())) - n += protowire.SizeBytes(proto.Size(m)) + tagsize + n += protowire.SizeBytes(mopts.Size(m)) + tagsize } return n } func appendMessageSlice(b []byte, p pointer, wiretag uint64, goType reflect.Type, opts marshalOptions) ([]byte, error) { + mopts := opts.Options() s := p.PointerSlice() var err error for _, v := range s { m := asMessage(v.AsValueOf(goType.Elem())) b = protowire.AppendVarint(b, wiretag) - siz := proto.Size(m) + siz := mopts.Size(m) b = protowire.AppendVarint(b, uint64(siz)) - b, err = opts.Options().MarshalAppend(b, m) + before := len(b) + b, err = mopts.MarshalAppend(b, m) if err != nil { return b, err } + if measuredSize := len(b) - before; siz != measuredSize { + return nil, errors.MismatchedSizeCalculation(siz, measuredSize) + } } return b, nil } @@ -582,11 +605,12 @@ func isInitMessageSlice(p pointer, goType reflect.Type) error { // Slices of messages func sizeMessageSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) int { + mopts := opts.Options() list := listv.List() n := 0 for i, llen := 0, list.Len(); i < llen; i++ { m := list.Get(i).Message().Interface() - n += protowire.SizeBytes(proto.Size(m)) + tagsize + n += protowire.SizeBytes(mopts.Size(m)) + tagsize } return n } @@ -597,13 +621,17 @@ func appendMessageSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, for i, llen := 0, list.Len(); i < llen; i++ { m := list.Get(i).Message().Interface() b = protowire.AppendVarint(b, wiretag) - siz := proto.Size(m) + siz := mopts.Size(m) b = protowire.AppendVarint(b, uint64(siz)) + before := len(b) var err error b, err = mopts.MarshalAppend(b, m) if err != nil { return b, err } + if measuredSize := len(b) - before; siz != measuredSize { + return nil, errors.MismatchedSizeCalculation(siz, measuredSize) + } } return b, nil } @@ -651,11 +679,12 @@ var coderMessageSliceValue = valueCoderFuncs{ } func sizeGroupSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) int { + mopts := opts.Options() list := listv.List() n := 0 for i, llen := 0, list.Len(); i < llen; i++ { m := list.Get(i).Message().Interface() - n += 2*tagsize + proto.Size(m) + n += 2*tagsize + mopts.Size(m) } return n } @@ -738,12 +767,13 @@ func makeGroupSliceFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) } } -func sizeGroupSlice(p pointer, messageType reflect.Type, tagsize int, _ marshalOptions) int { +func sizeGroupSlice(p pointer, messageType reflect.Type, tagsize int, opts marshalOptions) int { + mopts := opts.Options() s := p.PointerSlice() n := 0 for _, v := range s { m := asMessage(v.AsValueOf(messageType.Elem())) - n += 2*tagsize + proto.Size(m) + n += 2*tagsize + mopts.Size(m) } return n } diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go b/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go index 1a509b63..f55dc01e 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go @@ -162,11 +162,20 @@ func appendBoolSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions func consumeBoolSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.BoolSlice() 
if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growBoolSlice(count) + } + s := *sp for len(b) > 0 { var v uint64 var n int @@ -732,11 +741,20 @@ func appendInt32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOption func consumeInt32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int32Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growInt32Slice(count) + } + s := *sp for len(b) > 0 { var v uint64 var n int @@ -1138,11 +1156,20 @@ func appendSint32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio func consumeSint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int32Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growInt32Slice(count) + } + s := *sp for len(b) > 0 { var v uint64 var n int @@ -1544,11 +1571,20 @@ func appendUint32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio func consumeUint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Uint32Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growUint32Slice(count) + } + s := *sp for len(b) > 0 { var v uint64 var n int @@ -1950,11 +1986,20 @@ func appendInt64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOption func consumeInt64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int64Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growInt64Slice(count) + } + s := *sp for len(b) > 0 { var v uint64 var n int @@ -2356,11 +2401,20 @@ func appendSint64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio func consumeSint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int64Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growInt64Slice(count) + } + s := *sp for len(b) > 0 { var v uint64 var n int @@ -2762,11 +2816,20 @@ func appendUint64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio func consumeUint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Uint64Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growUint64Slice(count) + } + s := *sp for 
len(b) > 0 { var v uint64 var n int @@ -3145,11 +3208,15 @@ func appendSfixed32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOpt func consumeSfixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int32Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := len(b) / protowire.SizeFixed32() + if count > 0 { + p.growInt32Slice(count) + } + s := *sp for len(b) > 0 { v, n := protowire.ConsumeFixed32(b) if n < 0 { @@ -3461,11 +3528,15 @@ func appendFixed32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOpti func consumeFixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Uint32Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := len(b) / protowire.SizeFixed32() + if count > 0 { + p.growUint32Slice(count) + } + s := *sp for len(b) > 0 { v, n := protowire.ConsumeFixed32(b) if n < 0 { @@ -3777,11 +3848,15 @@ func appendFloatSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOption func consumeFloatSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Float32Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := len(b) / protowire.SizeFixed32() + if count > 0 { + p.growFloat32Slice(count) + } + s := *sp for len(b) > 0 { v, n := protowire.ConsumeFixed32(b) if n < 0 { @@ -4093,11 +4168,15 @@ func appendSfixed64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOpt func consumeSfixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int64Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := len(b) / protowire.SizeFixed64() + if count > 0 { + p.growInt64Slice(count) + } + s := *sp for len(b) > 0 { v, n := protowire.ConsumeFixed64(b) if n < 0 { @@ -4409,11 +4488,15 @@ func appendFixed64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOpti func consumeFixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Uint64Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := len(b) / protowire.SizeFixed64() + if count > 0 { + p.growUint64Slice(count) + } + s := *sp for len(b) > 0 { v, n := protowire.ConsumeFixed64(b) if n < 0 { @@ -4725,11 +4808,15 @@ func appendDoubleSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio func consumeDoubleSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Float64Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := len(b) / protowire.SizeFixed64() + if count > 0 { + p.growFloat64Slice(count) + } + s := *sp for len(b) > 0 { v, n := protowire.ConsumeFixed64(b) if n < 0 { diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go index 111b9d16..fb35f0ba 100644 --- 
a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go @@ -9,6 +9,7 @@ import ( "sort" "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/reflect/protoreflect" ) @@ -240,11 +241,16 @@ func appendMapItem(b []byte, keyrv, valrv reflect.Value, mapi *mapInfo, f *coder size += mapi.keyFuncs.size(key.Value(), mapKeyTagSize, opts) size += mapi.valFuncs.size(val, mapValTagSize, opts) b = protowire.AppendVarint(b, uint64(size)) + before := len(b) b, err := mapi.keyFuncs.marshal(b, key.Value(), mapi.keyWiretag, opts) if err != nil { return nil, err } - return mapi.valFuncs.marshal(b, val, mapi.valWiretag, opts) + b, err = mapi.valFuncs.marshal(b, val, mapi.valWiretag, opts) + if measuredSize := len(b) - before; size != measuredSize && err == nil { + return nil, errors.MismatchedSizeCalculation(size, measuredSize) + } + return b, err } else { key := mapi.conv.keyConv.PBValueOf(keyrv).MapKey() val := pointerOfValue(valrv) @@ -259,7 +265,12 @@ func appendMapItem(b []byte, keyrv, valrv reflect.Value, mapi *mapInfo, f *coder } b = protowire.AppendVarint(b, mapi.valWiretag) b = protowire.AppendVarint(b, uint64(valSize)) - return f.mi.marshalAppendPointer(b, val, opts) + before := len(b) + b, err = f.mi.marshalAppendPointer(b, val, opts) + if measuredSize := len(b) - before; valSize != measuredSize && err == nil { + return nil, errors.MismatchedSizeCalculation(valSize, measuredSize) + } + return b, err } } diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go index b7a23faf..7a16ec13 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go @@ -26,6 +26,15 @@ func sizeMessageSet(mi *MessageInfo, p pointer, opts marshalOptions) (size int) } num, _ := protowire.DecodeTag(xi.wiretag) size += messageset.SizeField(num) + if fullyLazyExtensions(opts) { + // Don't expand the extension, instead use the buffer to calculate size + if lb := x.lazyBuffer(); lb != nil { + // We got hold of the buffer, so it's still lazy. + // Don't count the tag size in the extension buffer, it's already added. + size += protowire.SizeTag(messageset.FieldMessage) + len(lb) - xi.tagsize + continue + } + } size += xi.funcs.size(x.Value(), protowire.SizeTag(messageset.FieldMessage), opts) } @@ -85,6 +94,19 @@ func marshalMessageSetField(mi *MessageInfo, b []byte, x ExtensionField, opts ma xi := getExtensionFieldInfo(x.Type()) num, _ := protowire.DecodeTag(xi.wiretag) b = messageset.AppendFieldStart(b, num) + + if fullyLazyExtensions(opts) { + // Don't expand the extension if it's still in wire format, instead use the buffer content. + if lb := x.lazyBuffer(); lb != nil { + // The tag inside the lazy buffer is a different tag (the extension + // number), but what we need here is the tag for FieldMessage: + b = protowire.AppendVarint(b, protowire.EncodeTag(messageset.FieldMessage, protowire.BytesType)) + b = append(b, lb[xi.tagsize:]...) 
+ b = messageset.AppendFieldEnd(b) + return b, nil + } + } + b, err := xi.funcs.marshal(b, x.Value(), protowire.EncodeTag(messageset.FieldMessage, protowire.BytesType), opts) if err != nil { return b, err diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go b/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go index 576dcf3a..13077751 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go @@ -197,7 +197,7 @@ func fieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) (*MessageInfo, return getMessageInfo(ft), makeMessageFieldCoder(fd, ft) case fd.Kind() == protoreflect.GroupKind: return getMessageInfo(ft), makeGroupFieldCoder(fd, ft) - case fd.Syntax() == protoreflect.Proto3 && fd.ContainingOneof() == nil: + case !fd.HasPresence() && fd.ContainingOneof() == nil: // Populated oneof fields always encode even if set to the zero value, // which normally are not encoded in proto3. switch fd.Kind() { diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert.go b/vendor/google.golang.org/protobuf/internal/impl/convert.go index 185ef2ef..e06ece55 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert.go @@ -14,7 +14,7 @@ import ( // unwrapper unwraps the value to the underlying value. // This is implemented by List and Map. type unwrapper interface { - protoUnwrap() interface{} + protoUnwrap() any } // A Converter coverts to/from Go reflect.Value types and protobuf protoreflect.Value types. diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_list.go b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go index f8913651..18cb96fd 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert_list.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go @@ -136,6 +136,6 @@ func (ls *listReflect) NewElement() protoreflect.Value { func (ls *listReflect) IsValid() bool { return !ls.v.IsNil() } -func (ls *listReflect) protoUnwrap() interface{} { +func (ls *listReflect) protoUnwrap() any { return ls.v.Interface() } diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go index f30b0a05..304244a6 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go @@ -116,6 +116,6 @@ func (ms *mapReflect) NewValue() protoreflect.Value { func (ms *mapReflect) IsValid() bool { return !ms.v.IsNil() } -func (ms *mapReflect) protoUnwrap() interface{} { +func (ms *mapReflect) protoUnwrap() any { return ms.v.Interface() } diff --git a/vendor/google.golang.org/protobuf/internal/impl/encode.go b/vendor/google.golang.org/protobuf/internal/impl/encode.go index 845c67d6..febd2122 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/encode.go +++ b/vendor/google.golang.org/protobuf/internal/impl/encode.go @@ -49,8 +49,11 @@ func (mi *MessageInfo) sizePointer(p pointer, opts marshalOptions) (size int) { return 0 } if opts.UseCachedSize() && mi.sizecacheOffset.IsValid() { - if size := atomic.LoadInt32(p.Apply(mi.sizecacheOffset).Int32()); size >= 0 { - return int(size) + // The size cache contains the size + 1, to allow the + // zero value to be invalid, while also allowing for a + // 0 size to be cached. 
+ if size := atomic.LoadInt32(p.Apply(mi.sizecacheOffset).Int32()); size > 0 { + return int(size - 1) } } return mi.sizePointerSlow(p, opts) @@ -60,7 +63,7 @@ func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int if flags.ProtoLegacy && mi.isMessageSet { size = sizeMessageSet(mi, p, opts) if mi.sizecacheOffset.IsValid() { - atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size)) + atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size+1)) } return size } @@ -84,13 +87,16 @@ func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int } } if mi.sizecacheOffset.IsValid() { - if size > math.MaxInt32 { + if size > (math.MaxInt32 - 1) { // The size is too large for the int32 sizecache field. // We will need to recompute the size when encoding; // unfortunately expensive, but better than invalid output. - atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), -1) + atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), 0) } else { - atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size)) + // The size cache contains the size + 1, to allow the + // zero value to be invalid, while also allowing for a + // 0 size to be cached. + atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size+1)) } } return size @@ -149,6 +155,14 @@ func (mi *MessageInfo) marshalAppendPointer(b []byte, p pointer, opts marshalOpt return b, nil } +// fullyLazyExtensions returns true if we should attempt to keep extensions lazy over size and marshal. +func fullyLazyExtensions(opts marshalOptions) bool { + // When deterministic marshaling is requested, force an unmarshal for lazy + // extensions to produce a deterministic result, instead of passing through + // bytes lazily that may or may not match what Go Protobuf would produce. + return opts.flags&piface.MarshalDeterministic == 0 +} + func (mi *MessageInfo) sizeExtensions(ext *map[int32]ExtensionField, opts marshalOptions) (n int) { if ext == nil { return 0 @@ -158,6 +172,14 @@ func (mi *MessageInfo) sizeExtensions(ext *map[int32]ExtensionField, opts marsha if xi.funcs.size == nil { continue } + if fullyLazyExtensions(opts) { + // Don't expand the extension, instead use the buffer to calculate size + if lb := x.lazyBuffer(); lb != nil { + // We got hold of the buffer, so it's still lazy. + n += len(lb) + continue + } + } n += xi.funcs.size(x.Value(), xi.tagsize, opts) } return n @@ -176,6 +198,13 @@ func (mi *MessageInfo) appendExtensions(b []byte, ext *map[int32]ExtensionField, var err error for _, x := range *ext { xi := getExtensionFieldInfo(x.Type()) + if fullyLazyExtensions(opts) { + // Don't expand the extension if it's still in wire format, instead use the buffer content. + if lb := x.lazyBuffer(); lb != nil { + b = append(b, lb...) + continue + } + } b, err = xi.funcs.marshal(b, x.Value(), xi.wiretag, opts) } return b, err @@ -191,6 +220,13 @@ func (mi *MessageInfo) appendExtensions(b []byte, ext *map[int32]ExtensionField, for _, k := range keys { x := (*ext)[int32(k)] xi := getExtensionFieldInfo(x.Type()) + if fullyLazyExtensions(opts) { + // Don't expand the extension if it's still in wire format, instead use the buffer content. + if lb := x.lazyBuffer(); lb != nil { + b = append(b, lb...) 
+ continue + } + } b, err = xi.funcs.marshal(b, x.Value(), xi.wiretag, opts) if err != nil { return b, err diff --git a/vendor/google.golang.org/protobuf/internal/impl/extension.go b/vendor/google.golang.org/protobuf/internal/impl/extension.go index cb25b0ba..e31249f6 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/extension.go @@ -53,7 +53,7 @@ type ExtensionInfo struct { // type returned by InterfaceOf may not be identical. // // Deprecated: Use InterfaceOf(xt.Zero()) instead. - ExtensionType interface{} + ExtensionType any // Field is the field number of the extension. // @@ -95,16 +95,16 @@ func (xi *ExtensionInfo) New() protoreflect.Value { func (xi *ExtensionInfo) Zero() protoreflect.Value { return xi.lazyInit().Zero() } -func (xi *ExtensionInfo) ValueOf(v interface{}) protoreflect.Value { +func (xi *ExtensionInfo) ValueOf(v any) protoreflect.Value { return xi.lazyInit().PBValueOf(reflect.ValueOf(v)) } -func (xi *ExtensionInfo) InterfaceOf(v protoreflect.Value) interface{} { +func (xi *ExtensionInfo) InterfaceOf(v protoreflect.Value) any { return xi.lazyInit().GoValueOf(v).Interface() } func (xi *ExtensionInfo) IsValidValue(v protoreflect.Value) bool { return xi.lazyInit().IsValidPB(v) } -func (xi *ExtensionInfo) IsValidInterface(v interface{}) bool { +func (xi *ExtensionInfo) IsValidInterface(v any) bool { return xi.lazyInit().IsValidGo(reflect.ValueOf(v)) } func (xi *ExtensionInfo) TypeDescriptor() protoreflect.ExtensionTypeDescriptor { diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go index c2a803bb..81b2b1a7 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go @@ -97,7 +97,7 @@ func (e *legacyEnumWrapper) Number() protoreflect.EnumNumber { func (e *legacyEnumWrapper) ProtoReflect() protoreflect.Enum { return e } -func (e *legacyEnumWrapper) protoUnwrap() interface{} { +func (e *legacyEnumWrapper) protoUnwrap() any { v := reflect.New(e.goTyp).Elem() v.SetInt(int64(e.num)) return v.Interface() @@ -167,6 +167,7 @@ func aberrantLoadEnumDesc(t reflect.Type) protoreflect.EnumDescriptor { ed := &filedesc.Enum{L2: new(filedesc.EnumL2)} ed.L0.FullName = AberrantDeriveFullName(t) // e.g., github_com.user.repo.MyEnum ed.L0.ParentFile = filedesc.SurrogateProto3 + ed.L1.EditionFeatures = ed.L0.ParentFile.L1.EditionFeatures ed.L2.Values.List = append(ed.L2.Values.List, filedesc.EnumValue{}) // TODO: Use the presence of a UnmarshalJSON method to determine proto2? 
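The encode.go hunk just above introduces a size cache that stores size + 1, so a zero cache word can mean "not computed yet" while a genuine zero-byte message still gets a valid cache entry. The sketch below illustrates only that convention under illustrative names (sizeCache, store, load are not the library's internals):

```go
package main

import (
	"fmt"
	"math"
	"sync/atomic"
)

// sizeCache mimics the convention described in the patch: the cached value is
// size+1, so 0 doubles as the "nothing cached" sentinel and a real size of 0
// remains cacheable.
type sizeCache struct {
	v int32
}

func (c *sizeCache) store(size int) {
	if size > math.MaxInt32-1 {
		// Too large for the int32 cache word; leave it invalid so callers
		// recompute, as the patched encode.go does.
		atomic.StoreInt32(&c.v, 0)
		return
	}
	atomic.StoreInt32(&c.v, int32(size+1))
}

func (c *sizeCache) load() (size int, ok bool) {
	if v := atomic.LoadInt32(&c.v); v > 0 {
		return int(v - 1), true
	}
	return 0, false
}

func main() {
	var c sizeCache
	_, ok := c.load()
	fmt.Println(ok) // false: nothing cached yet
	c.store(0)      // an empty message still yields a usable cache entry
	size, ok := c.load()
	fmt.Println(size, ok) // 0 true
}
```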
diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go index 87b30d05..6e8677ee 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go @@ -118,7 +118,7 @@ func (xi *ExtensionInfo) initFromLegacy() { xd.L1.Number = protoreflect.FieldNumber(xi.Field) xd.L1.Cardinality = fd.L1.Cardinality xd.L1.Kind = fd.L1.Kind - xd.L2.IsPacked = fd.L1.IsPacked + xd.L1.EditionFeatures = fd.L1.EditionFeatures xd.L2.Default = fd.L1.Default xd.L1.Extendee = Export{}.MessageDescriptorOf(xi.ExtendedType) xd.L2.Enum = ed diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go index 9ab09108..b649f112 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go @@ -7,7 +7,7 @@ package impl import ( "bytes" "compress/gzip" - "io/ioutil" + "io" "sync" "google.golang.org/protobuf/internal/filedesc" @@ -51,7 +51,7 @@ func legacyLoadFileDesc(b []byte) protoreflect.FileDescriptor { if err != nil { panic(err) } - b2, err := ioutil.ReadAll(zr) + b2, err := io.ReadAll(zr) if err != nil { panic(err) } diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go index 61c483fa..bf0b6049 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go @@ -204,15 +204,21 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName } } + md.L1.EditionFeatures = md.L0.ParentFile.L1.EditionFeatures // Obtain a list of oneof wrapper types. 
var oneofWrappers []reflect.Type - for _, method := range []string{"XXX_OneofFuncs", "XXX_OneofWrappers"} { - if fn, ok := t.MethodByName(method); ok { - for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { - if vs, ok := v.Interface().([]interface{}); ok { - for _, v := range vs { - oneofWrappers = append(oneofWrappers, reflect.TypeOf(v)) - } + methods := make([]reflect.Method, 0, 2) + if m, ok := t.MethodByName("XXX_OneofFuncs"); ok { + methods = append(methods, m) + } + if m, ok := t.MethodByName("XXX_OneofWrappers"); ok { + methods = append(methods, m) + } + for _, fn := range methods { + for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { + if vs, ok := v.Interface().([]any); ok { + for _, v := range vs { + oneofWrappers = append(oneofWrappers, reflect.TypeOf(v)) } } } @@ -245,6 +251,7 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName od := &md.L2.Oneofs.List[n] od.L0.FullName = md.FullName().Append(protoreflect.Name(tag)) od.L0.ParentFile = md.L0.ParentFile + od.L1.EditionFeatures = md.L1.EditionFeatures od.L0.Parent = md od.L0.Index = n @@ -255,6 +262,7 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName aberrantAppendField(md, f.Type, tag, "", "") fd := &md.L2.Fields.List[len(md.L2.Fields.List)-1] fd.L1.ContainingOneof = od + fd.L1.EditionFeatures = od.L1.EditionFeatures od.L1.Fields.List = append(od.L1.Fields.List, fd) } } @@ -302,14 +310,14 @@ func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey, fd.L0.Parent = md fd.L0.Index = n - if fd.L1.IsWeak || fd.L1.HasPacked { + if fd.L1.IsWeak || fd.L1.EditionFeatures.IsPacked { fd.L1.Options = func() protoreflect.ProtoMessage { opts := descopts.Field.ProtoReflect().New() if fd.L1.IsWeak { opts.Set(opts.Descriptor().Fields().ByName("weak"), protoreflect.ValueOfBool(true)) } - if fd.L1.HasPacked { - opts.Set(opts.Descriptor().Fields().ByName("packed"), protoreflect.ValueOfBool(fd.L1.IsPacked)) + if fd.L1.EditionFeatures.IsPacked { + opts.Set(opts.Descriptor().Fields().ByName("packed"), protoreflect.ValueOfBool(fd.L1.EditionFeatures.IsPacked)) } return opts.Interface() } @@ -339,6 +347,7 @@ func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey, md2.L0.ParentFile = md.L0.ParentFile md2.L0.Parent = md md2.L0.Index = n + md2.L1.EditionFeatures = md.L1.EditionFeatures md2.L1.IsMapEntry = true md2.L2.Options = func() protoreflect.ProtoMessage { @@ -558,6 +567,6 @@ func (m aberrantMessage) IsValid() bool { func (m aberrantMessage) ProtoMethods() *protoiface.Methods { return aberrantProtoMethods } -func (m aberrantMessage) protoUnwrap() interface{} { +func (m aberrantMessage) protoUnwrap() any { return m.v.Interface() } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go index 4f5fb67a..019399d4 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message.go @@ -35,7 +35,7 @@ type MessageInfo struct { Exporter exporter // OneofWrappers is list of pointers to oneof wrapper struct types. - OneofWrappers []interface{} + OneofWrappers []any initMu sync.Mutex // protects all unexported fields initDone uint32 @@ -47,7 +47,7 @@ type MessageInfo struct { // exporter is a function that returns a reference to the ith field of v, // where v is a pointer to a struct. 
It returns nil if it does not support // exporting the requested field (e.g., already exported). -type exporter func(v interface{}, i int) interface{} +type exporter func(v any, i int) any // getMessageInfo returns the MessageInfo for any message type that // is generated by our implementation of protoc-gen-go (for v2 and on). @@ -192,12 +192,17 @@ fieldLoop: // Derive a mapping of oneof wrappers to fields. oneofWrappers := mi.OneofWrappers - for _, method := range []string{"XXX_OneofFuncs", "XXX_OneofWrappers"} { - if fn, ok := reflect.PtrTo(t).MethodByName(method); ok { - for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { - if vs, ok := v.Interface().([]interface{}); ok { - oneofWrappers = vs - } + methods := make([]reflect.Method, 0, 2) + if m, ok := reflect.PtrTo(t).MethodByName("XXX_OneofFuncs"); ok { + methods = append(methods, m) + } + if m, ok := reflect.PtrTo(t).MethodByName("XXX_OneofWrappers"); ok { + methods = append(methods, m) + } + for _, fn := range methods { + for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { + if vs, ok := v.Interface().([]any); ok { + oneofWrappers = vs } } } @@ -251,7 +256,7 @@ func (mi *MessageInfo) Message(i int) protoreflect.MessageType { type mapEntryType struct { desc protoreflect.MessageDescriptor - valType interface{} // zero value of enum or message type + valType any // zero value of enum or message type } func (mt mapEntryType) New() protoreflect.Message { diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go index d9ea010b..ecb4623d 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go @@ -20,7 +20,7 @@ type reflectMessageInfo struct { // fieldTypes contains the zero value of an enum or message field. // For lists, it contains the element type. // For maps, it contains the entry value type. - fieldTypes map[protoreflect.FieldNumber]interface{} + fieldTypes map[protoreflect.FieldNumber]any // denseFields is a subset of fields where: // 0 < fieldDesc.Number() < len(denseFields) @@ -28,7 +28,7 @@ type reflectMessageInfo struct { denseFields []*fieldInfo // rangeInfos is a list of all fields (not belonging to a oneof) and oneofs. - rangeInfos []interface{} // either *fieldInfo or *oneofInfo + rangeInfos []any // either *fieldInfo or *oneofInfo getUnknown func(pointer) protoreflect.RawFields setUnknown func(pointer, protoreflect.RawFields) @@ -224,7 +224,7 @@ func (mi *MessageInfo) makeFieldTypes(si structInfo) { } if ft != nil { if mi.fieldTypes == nil { - mi.fieldTypes = make(map[protoreflect.FieldNumber]interface{}) + mi.fieldTypes = make(map[protoreflect.FieldNumber]any) } mi.fieldTypes[fd.Number()] = reflect.Zero(ft).Interface() } @@ -247,39 +247,39 @@ func (m *extensionMap) Range(f func(protoreflect.FieldDescriptor, protoreflect.V } } } -func (m *extensionMap) Has(xt protoreflect.ExtensionType) (ok bool) { +func (m *extensionMap) Has(xd protoreflect.ExtensionTypeDescriptor) (ok bool) { if m == nil { return false } - xd := xt.TypeDescriptor() x, ok := (*m)[int32(xd.Number())] if !ok { return false } + if x.isUnexpandedLazy() { + // Avoid calling x.Value(), which triggers a lazy unmarshal. 
+ return true + } switch { case xd.IsList(): return x.Value().List().Len() > 0 case xd.IsMap(): return x.Value().Map().Len() > 0 - case xd.Message() != nil: - return x.Value().Message().IsValid() } return true } -func (m *extensionMap) Clear(xt protoreflect.ExtensionType) { - delete(*m, int32(xt.TypeDescriptor().Number())) +func (m *extensionMap) Clear(xd protoreflect.ExtensionTypeDescriptor) { + delete(*m, int32(xd.Number())) } -func (m *extensionMap) Get(xt protoreflect.ExtensionType) protoreflect.Value { - xd := xt.TypeDescriptor() +func (m *extensionMap) Get(xd protoreflect.ExtensionTypeDescriptor) protoreflect.Value { if m != nil { if x, ok := (*m)[int32(xd.Number())]; ok { return x.Value() } } - return xt.Zero() + return xd.Type().Zero() } -func (m *extensionMap) Set(xt protoreflect.ExtensionType, v protoreflect.Value) { - xd := xt.TypeDescriptor() +func (m *extensionMap) Set(xd protoreflect.ExtensionTypeDescriptor, v protoreflect.Value) { + xt := xd.Type() isValid := true switch { case !xt.IsValidValue(v): @@ -292,7 +292,7 @@ func (m *extensionMap) Set(xt protoreflect.ExtensionType, v protoreflect.Value) isValid = v.Message().IsValid() } if !isValid { - panic(fmt.Sprintf("%v: assigning invalid value", xt.TypeDescriptor().FullName())) + panic(fmt.Sprintf("%v: assigning invalid value", xd.FullName())) } if *m == nil { @@ -302,16 +302,15 @@ func (m *extensionMap) Set(xt protoreflect.ExtensionType, v protoreflect.Value) x.Set(xt, v) (*m)[int32(xd.Number())] = x } -func (m *extensionMap) Mutable(xt protoreflect.ExtensionType) protoreflect.Value { - xd := xt.TypeDescriptor() +func (m *extensionMap) Mutable(xd protoreflect.ExtensionTypeDescriptor) protoreflect.Value { if xd.Kind() != protoreflect.MessageKind && xd.Kind() != protoreflect.GroupKind && !xd.IsList() && !xd.IsMap() { panic("invalid Mutable on field with non-composite type") } if x, ok := (*m)[int32(xd.Number())]; ok { return x.Value() } - v := xt.New() - m.Set(xt, v) + v := xd.Type().New() + m.Set(xd, v) return v } @@ -394,7 +393,7 @@ var ( // MessageOf returns a reflective view over a message. The input must be a // pointer to a named Go struct. If the provided type has a ProtoReflect method, // it must be implemented by calling this method. -func (mi *MessageInfo) MessageOf(m interface{}) protoreflect.Message { +func (mi *MessageInfo) MessageOf(m any) protoreflect.Message { if reflect.TypeOf(m) != mi.GoReflectType { panic(fmt.Sprintf("type mismatch: got %T, want %v", m, mi.GoReflectType)) } @@ -422,13 +421,13 @@ func (m *messageIfaceWrapper) Reset() { func (m *messageIfaceWrapper) ProtoReflect() protoreflect.Message { return (*messageReflectWrapper)(m) } -func (m *messageIfaceWrapper) protoUnwrap() interface{} { +func (m *messageIfaceWrapper) protoUnwrap() any { return m.p.AsIfaceOf(m.mi.GoReflectType.Elem()) } // checkField verifies that the provided field descriptor is valid. // Exactly one of the returned values is populated. 
-func (mi *MessageInfo) checkField(fd protoreflect.FieldDescriptor) (*fieldInfo, protoreflect.ExtensionType) { +func (mi *MessageInfo) checkField(fd protoreflect.FieldDescriptor) (*fieldInfo, protoreflect.ExtensionTypeDescriptor) { var fi *fieldInfo if n := fd.Number(); 0 < n && int(n) < len(mi.denseFields) { fi = mi.denseFields[n] @@ -457,7 +456,7 @@ func (mi *MessageInfo) checkField(fd protoreflect.FieldDescriptor) (*fieldInfo, if !ok { panic(fmt.Sprintf("extension %v does not implement protoreflect.ExtensionTypeDescriptor", fd.FullName())) } - return nil, xtd.Type() + return nil, xtd } panic(fmt.Sprintf("field %v is invalid", fd.FullName())) } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go index 5e736c60..986322b1 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go @@ -538,6 +538,6 @@ func isZero(v reflect.Value) bool { } return true default: - panic(&reflect.ValueError{"reflect.Value.IsZero", v.Kind()}) + panic(&reflect.ValueError{Method: "reflect.Value.IsZero", Kind: v.Kind()}) } } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go index 741d6e5b..99dc23c6 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go @@ -23,12 +23,13 @@ func (m *messageState) New() protoreflect.Message { func (m *messageState) Interface() protoreflect.ProtoMessage { return m.protoUnwrap().(protoreflect.ProtoMessage) } -func (m *messageState) protoUnwrap() interface{} { +func (m *messageState) protoUnwrap() any { return m.pointer().AsIfaceOf(m.messageInfo().GoReflectType.Elem()) } func (m *messageState) ProtoMethods() *protoiface.Methods { - m.messageInfo().init() - return &m.messageInfo().methods + mi := m.messageInfo() + mi.init() + return &mi.methods } // ProtoMessageInfo is a pseudo-internal API for allowing the v1 code @@ -41,8 +42,9 @@ func (m *messageState) ProtoMessageInfo() *MessageInfo { } func (m *messageState) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { - m.messageInfo().init() - for _, ri := range m.messageInfo().rangeInfos { + mi := m.messageInfo() + mi.init() + for _, ri := range mi.rangeInfos { switch ri := ri.(type) { case *fieldInfo: if ri.has(m.pointer()) { @@ -52,77 +54,86 @@ func (m *messageState) Range(f func(protoreflect.FieldDescriptor, protoreflect.V } case *oneofInfo: if n := ri.which(m.pointer()); n > 0 { - fi := m.messageInfo().fields[n] + fi := mi.fields[n] if !f(fi.fieldDesc, fi.get(m.pointer())) { return } } } } - m.messageInfo().extensionMap(m.pointer()).Range(f) + mi.extensionMap(m.pointer()).Range(f) } func (m *messageState) Has(fd protoreflect.FieldDescriptor) bool { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { return fi.has(m.pointer()) } else { - return m.messageInfo().extensionMap(m.pointer()).Has(xt) + return mi.extensionMap(m.pointer()).Has(xd) } } func (m *messageState) Clear(fd protoreflect.FieldDescriptor) { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { 
fi.clear(m.pointer()) } else { - m.messageInfo().extensionMap(m.pointer()).Clear(xt) + mi.extensionMap(m.pointer()).Clear(xd) } } func (m *messageState) Get(fd protoreflect.FieldDescriptor) protoreflect.Value { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { return fi.get(m.pointer()) } else { - return m.messageInfo().extensionMap(m.pointer()).Get(xt) + return mi.extensionMap(m.pointer()).Get(xd) } } func (m *messageState) Set(fd protoreflect.FieldDescriptor, v protoreflect.Value) { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { fi.set(m.pointer(), v) } else { - m.messageInfo().extensionMap(m.pointer()).Set(xt, v) + mi.extensionMap(m.pointer()).Set(xd, v) } } func (m *messageState) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { return fi.mutable(m.pointer()) } else { - return m.messageInfo().extensionMap(m.pointer()).Mutable(xt) + return mi.extensionMap(m.pointer()).Mutable(xd) } } func (m *messageState) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { return fi.newField() } else { - return xt.New() + return xd.Type().New() } } func (m *messageState) WhichOneof(od protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { - m.messageInfo().init() - if oi := m.messageInfo().oneofs[od.Name()]; oi != nil && oi.oneofDesc == od { + mi := m.messageInfo() + mi.init() + if oi := mi.oneofs[od.Name()]; oi != nil && oi.oneofDesc == od { return od.Fields().ByNumber(oi.which(m.pointer())) } panic("invalid oneof descriptor " + string(od.FullName()) + " for message " + string(m.Descriptor().FullName())) } func (m *messageState) GetUnknown() protoreflect.RawFields { - m.messageInfo().init() - return m.messageInfo().getUnknown(m.pointer()) + mi := m.messageInfo() + mi.init() + return mi.getUnknown(m.pointer()) } func (m *messageState) SetUnknown(b protoreflect.RawFields) { - m.messageInfo().init() - m.messageInfo().setUnknown(m.pointer(), b) + mi := m.messageInfo() + mi.init() + mi.setUnknown(m.pointer(), b) } func (m *messageState) IsValid() bool { return !m.pointer().IsNil() @@ -143,12 +154,13 @@ func (m *messageReflectWrapper) Interface() protoreflect.ProtoMessage { } return (*messageIfaceWrapper)(m) } -func (m *messageReflectWrapper) protoUnwrap() interface{} { +func (m *messageReflectWrapper) protoUnwrap() any { return m.pointer().AsIfaceOf(m.messageInfo().GoReflectType.Elem()) } func (m *messageReflectWrapper) ProtoMethods() *protoiface.Methods { - m.messageInfo().init() - return &m.messageInfo().methods + mi := m.messageInfo() + mi.init() + return &mi.methods } // ProtoMessageInfo is a pseudo-internal API for allowing the v1 code @@ -161,8 +173,9 @@ func (m *messageReflectWrapper) ProtoMessageInfo() *MessageInfo { } func (m *messageReflectWrapper) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { - m.messageInfo().init() - for _, ri := range m.messageInfo().rangeInfos { + mi := m.messageInfo() + mi.init() + for _, ri := range mi.rangeInfos { switch ri := ri.(type) { case *fieldInfo: if 
ri.has(m.pointer()) { @@ -172,77 +185,86 @@ func (m *messageReflectWrapper) Range(f func(protoreflect.FieldDescriptor, proto } case *oneofInfo: if n := ri.which(m.pointer()); n > 0 { - fi := m.messageInfo().fields[n] + fi := mi.fields[n] if !f(fi.fieldDesc, fi.get(m.pointer())) { return } } } } - m.messageInfo().extensionMap(m.pointer()).Range(f) + mi.extensionMap(m.pointer()).Range(f) } func (m *messageReflectWrapper) Has(fd protoreflect.FieldDescriptor) bool { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { return fi.has(m.pointer()) } else { - return m.messageInfo().extensionMap(m.pointer()).Has(xt) + return mi.extensionMap(m.pointer()).Has(xd) } } func (m *messageReflectWrapper) Clear(fd protoreflect.FieldDescriptor) { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { fi.clear(m.pointer()) } else { - m.messageInfo().extensionMap(m.pointer()).Clear(xt) + mi.extensionMap(m.pointer()).Clear(xd) } } func (m *messageReflectWrapper) Get(fd protoreflect.FieldDescriptor) protoreflect.Value { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { return fi.get(m.pointer()) } else { - return m.messageInfo().extensionMap(m.pointer()).Get(xt) + return mi.extensionMap(m.pointer()).Get(xd) } } func (m *messageReflectWrapper) Set(fd protoreflect.FieldDescriptor, v protoreflect.Value) { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { fi.set(m.pointer(), v) } else { - m.messageInfo().extensionMap(m.pointer()).Set(xt, v) + mi.extensionMap(m.pointer()).Set(xd, v) } } func (m *messageReflectWrapper) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { return fi.mutable(m.pointer()) } else { - return m.messageInfo().extensionMap(m.pointer()).Mutable(xt) + return mi.extensionMap(m.pointer()).Mutable(xd) } } func (m *messageReflectWrapper) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { return fi.newField() } else { - return xt.New() + return xd.Type().New() } } func (m *messageReflectWrapper) WhichOneof(od protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { - m.messageInfo().init() - if oi := m.messageInfo().oneofs[od.Name()]; oi != nil && oi.oneofDesc == od { + mi := m.messageInfo() + mi.init() + if oi := mi.oneofs[od.Name()]; oi != nil && oi.oneofDesc == od { return od.Fields().ByNumber(oi.which(m.pointer())) } panic("invalid oneof descriptor " + string(od.FullName()) + " for message " + string(m.Descriptor().FullName())) } func (m *messageReflectWrapper) GetUnknown() protoreflect.RawFields { - m.messageInfo().init() - return m.messageInfo().getUnknown(m.pointer()) + mi := m.messageInfo() + mi.init() + return mi.getUnknown(m.pointer()) } func (m *messageReflectWrapper) SetUnknown(b protoreflect.RawFields) { - m.messageInfo().init() - m.messageInfo().setUnknown(m.pointer(), b) + mi := 
m.messageInfo() + mi.init() + mi.setUnknown(m.pointer(), b) } func (m *messageReflectWrapper) IsValid() bool { return !m.pointer().IsNil() diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go index 4c491bdf..da685e8a 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go @@ -16,7 +16,7 @@ import ( const UnsafeEnabled = false // Pointer is an opaque pointer type. -type Pointer interface{} +type Pointer any // offset represents the offset to a struct field, accessible from a pointer. // The offset is the field index into a struct. @@ -62,7 +62,7 @@ func pointerOfValue(v reflect.Value) pointer { } // pointerOfIface returns the pointer portion of an interface. -func pointerOfIface(v interface{}) pointer { +func pointerOfIface(v any) pointer { return pointer{v: reflect.ValueOf(v)} } @@ -93,7 +93,7 @@ func (p pointer) AsValueOf(t reflect.Type) reflect.Value { // AsIfaceOf treats p as a pointer to an object of type t and returns the value. // It is equivalent to p.AsValueOf(t).Interface() -func (p pointer) AsIfaceOf(t reflect.Type) interface{} { +func (p pointer) AsIfaceOf(t reflect.Type) any { return p.AsValueOf(t).Interface() } @@ -159,6 +159,42 @@ func (p pointer) SetPointer(v pointer) { p.v.Elem().Set(v.v) } +func growSlice(p pointer, addCap int) { + // TODO: Once we only support Go 1.20 and newer, use reflect.Grow. + in := p.v.Elem() + out := reflect.MakeSlice(in.Type(), in.Len(), in.Len()+addCap) + reflect.Copy(out, in) + p.v.Elem().Set(out) +} + +func (p pointer) growBoolSlice(addCap int) { + growSlice(p, addCap) +} + +func (p pointer) growInt32Slice(addCap int) { + growSlice(p, addCap) +} + +func (p pointer) growUint32Slice(addCap int) { + growSlice(p, addCap) +} + +func (p pointer) growInt64Slice(addCap int) { + growSlice(p, addCap) +} + +func (p pointer) growUint64Slice(addCap int) { + growSlice(p, addCap) +} + +func (p pointer) growFloat64Slice(addCap int) { + growSlice(p, addCap) +} + +func (p pointer) growFloat32Slice(addCap int) { + growSlice(p, addCap) +} + func (Export) MessageStateOf(p Pointer) *messageState { panic("not supported") } func (ms *messageState) pointer() pointer { panic("not supported") } func (ms *messageState) messageInfo() *MessageInfo { panic("not supported") } diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go index ee0e0573..5f20ca5d 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go @@ -50,7 +50,7 @@ func pointerOfValue(v reflect.Value) pointer { } // pointerOfIface returns the pointer portion of an interface. -func pointerOfIface(v interface{}) pointer { +func pointerOfIface(v any) pointer { type ifaceHeader struct { Type unsafe.Pointer Data unsafe.Pointer @@ -80,7 +80,7 @@ func (p pointer) AsValueOf(t reflect.Type) reflect.Value { // AsIfaceOf treats p as a pointer to an object of type t and returns the value. // It is equivalent to p.AsValueOf(t).Interface() -func (p pointer) AsIfaceOf(t reflect.Type) interface{} { +func (p pointer) AsIfaceOf(t reflect.Type) any { // TODO: Use tricky unsafe magic to directly create ifaceHeader. 
return p.AsValueOf(t).Interface() } @@ -138,6 +138,46 @@ func (p pointer) SetPointer(v pointer) { *(*unsafe.Pointer)(p.p) = (unsafe.Pointer)(v.p) } +func (p pointer) growBoolSlice(addCap int) { + sp := p.BoolSlice() + s := make([]bool, 0, addCap+len(*sp)) + s = s[:len(*sp)] + copy(s, *sp) + *sp = s +} + +func (p pointer) growInt32Slice(addCap int) { + sp := p.Int32Slice() + s := make([]int32, 0, addCap+len(*sp)) + s = s[:len(*sp)] + copy(s, *sp) + *sp = s +} + +func (p pointer) growUint32Slice(addCap int) { + p.growInt32Slice(addCap) +} + +func (p pointer) growFloat32Slice(addCap int) { + p.growInt32Slice(addCap) +} + +func (p pointer) growInt64Slice(addCap int) { + sp := p.Int64Slice() + s := make([]int64, 0, addCap+len(*sp)) + s = s[:len(*sp)] + copy(s, *sp) + *sp = s +} + +func (p pointer) growUint64Slice(addCap int) { + p.growInt64Slice(addCap) +} + +func (p pointer) growFloat64Slice(addCap int) { + p.growInt64Slice(addCap) +} + // Static check that MessageState does not exceed the size of a pointer. const _ = uint(unsafe.Sizeof(unsafe.Pointer(nil)) - unsafe.Sizeof(MessageState{})) diff --git a/vendor/google.golang.org/protobuf/internal/order/range.go b/vendor/google.golang.org/protobuf/internal/order/range.go index 1665a68e..a1f09162 100644 --- a/vendor/google.golang.org/protobuf/internal/order/range.go +++ b/vendor/google.golang.org/protobuf/internal/order/range.go @@ -18,7 +18,7 @@ type messageField struct { } var messageFieldPool = sync.Pool{ - New: func() interface{} { return new([]messageField) }, + New: func() any { return new([]messageField) }, } type ( @@ -69,7 +69,7 @@ type mapEntry struct { } var mapEntryPool = sync.Pool{ - New: func() interface{} { return new([]mapEntry) }, + New: func() any { return new([]mapEntry) }, } type ( diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings.go b/vendor/google.golang.org/protobuf/internal/strs/strings.go index 0b74e765..a6e7df24 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings.go @@ -17,7 +17,7 @@ import ( // EnforceUTF8 reports whether to enforce strict UTF-8 validation. func EnforceUTF8(fd protoreflect.FieldDescriptor) bool { - if flags.ProtoLegacy { + if flags.ProtoLegacy || fd.Syntax() == protoreflect.Editions { if fd, ok := fd.(interface{ EnforceUTF8() bool }); ok { return fd.EnforceUTF8() } diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go similarity index 96% rename from vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go rename to vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go index 61a84d34..a008acd0 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine -// +build !purego,!appengine +//go:build !purego && !appengine && !go1.21 +// +build !purego,!appengine,!go1.21 package strs diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go new file mode 100644 index 00000000..60166f2b --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go @@ -0,0 +1,74 @@ +// Copyright 2018 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !purego && !appengine && go1.21 +// +build !purego,!appengine,go1.21 + +package strs + +import ( + "unsafe" + + "google.golang.org/protobuf/reflect/protoreflect" +) + +// UnsafeString returns an unsafe string reference of b. +// The caller must treat the input slice as immutable. +// +// WARNING: Use carefully. The returned result must not leak to the end user +// unless the input slice is provably immutable. +func UnsafeString(b []byte) string { + return unsafe.String(unsafe.SliceData(b), len(b)) +} + +// UnsafeBytes returns an unsafe bytes slice reference of s. +// The caller must treat returned slice as immutable. +// +// WARNING: Use carefully. The returned result must not leak to the end user. +func UnsafeBytes(s string) []byte { + return unsafe.Slice(unsafe.StringData(s), len(s)) +} + +// Builder builds a set of strings with shared lifetime. +// This differs from strings.Builder, which is for building a single string. +type Builder struct { + buf []byte +} + +// AppendFullName is equivalent to protoreflect.FullName.Append, +// but optimized for large batches where each name has a shared lifetime. +func (sb *Builder) AppendFullName(prefix protoreflect.FullName, name protoreflect.Name) protoreflect.FullName { + n := len(prefix) + len(".") + len(name) + if len(prefix) == 0 { + n -= len(".") + } + sb.grow(n) + sb.buf = append(sb.buf, prefix...) + sb.buf = append(sb.buf, '.') + sb.buf = append(sb.buf, name...) + return protoreflect.FullName(sb.last(n)) +} + +// MakeString is equivalent to string(b), but optimized for large batches +// with a shared lifetime. +func (sb *Builder) MakeString(b []byte) string { + sb.grow(len(b)) + sb.buf = append(sb.buf, b...) + return sb.last(len(b)) +} + +func (sb *Builder) grow(n int) { + if cap(sb.buf)-len(sb.buf) >= n { + return + } + + // Unlike strings.Builder, we do not need to copy over the contents + // of the old buffer since our builder provides no API for + // retrieving previously created strings. + sb.buf = make([]byte, 0, 2*(cap(sb.buf)+n)) +} + +func (sb *Builder) last(n int) string { + return UnsafeString(sb.buf[len(sb.buf)-n:]) +} diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index 0999f29d..dbbf1f68 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -51,8 +51,8 @@ import ( // 10. Send out the CL for review and submit it. const ( Major = 1 - Minor = 31 - Patch = 0 + Minor = 34 + Patch = 2 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go index 48d47946..d75a6534 100644 --- a/vendor/google.golang.org/protobuf/proto/decode.go +++ b/vendor/google.golang.org/protobuf/proto/decode.go @@ -51,6 +51,8 @@ type UnmarshalOptions struct { // Unmarshal parses the wire-format message in b and places the result in m. // The provided message must be mutable (e.g., a non-nil pointer to a message). +// +// See the [UnmarshalOptions] type if you need more control. 
func Unmarshal(b []byte, m Message) error { _, err := UnmarshalOptions{RecursionLimit: protowire.DefaultRecursionLimit}.unmarshal(b, m.ProtoReflect()) return err @@ -69,7 +71,7 @@ func (o UnmarshalOptions) Unmarshal(b []byte, m Message) error { // UnmarshalState parses a wire-format message and places the result in m. // // This method permits fine-grained control over the unmarshaler. -// Most users should use Unmarshal instead. +// Most users should use [Unmarshal] instead. func (o UnmarshalOptions) UnmarshalState(in protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) { if o.RecursionLimit == 0 { o.RecursionLimit = protowire.DefaultRecursionLimit diff --git a/vendor/google.golang.org/protobuf/proto/doc.go b/vendor/google.golang.org/protobuf/proto/doc.go index ec71e717..80ed16a0 100644 --- a/vendor/google.golang.org/protobuf/proto/doc.go +++ b/vendor/google.golang.org/protobuf/proto/doc.go @@ -18,27 +18,27 @@ // This package contains functions to convert to and from the wire format, // an efficient binary serialization of protocol buffers. // -// • Size reports the size of a message in the wire format. +// - [Size] reports the size of a message in the wire format. // -// • Marshal converts a message to the wire format. -// The MarshalOptions type provides more control over wire marshaling. +// - [Marshal] converts a message to the wire format. +// The [MarshalOptions] type provides more control over wire marshaling. // -// • Unmarshal converts a message from the wire format. -// The UnmarshalOptions type provides more control over wire unmarshaling. +// - [Unmarshal] converts a message from the wire format. +// The [UnmarshalOptions] type provides more control over wire unmarshaling. // // # Basic message operations // -// • Clone makes a deep copy of a message. +// - [Clone] makes a deep copy of a message. // -// • Merge merges the content of a message into another. +// - [Merge] merges the content of a message into another. // -// • Equal compares two messages. For more control over comparisons -// and detailed reporting of differences, see package -// "google.golang.org/protobuf/testing/protocmp". +// - [Equal] compares two messages. For more control over comparisons +// and detailed reporting of differences, see package +// [google.golang.org/protobuf/testing/protocmp]. // -// • Reset clears the content of a message. +// - [Reset] clears the content of a message. // -// • CheckInitialized reports whether all required fields in a message are set. +// - [CheckInitialized] reports whether all required fields in a message are set. // // # Optional scalar constructors // @@ -46,9 +46,9 @@ // as pointers to a value. For example, an optional string field has the // Go type *string. // -// • Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, and String -// take a value and return a pointer to a new instance of it, -// to simplify construction of optional field values. +// - [Bool], [Int32], [Int64], [Uint32], [Uint64], [Float32], [Float64], and [String] +// take a value and return a pointer to a new instance of it, +// to simplify construction of optional field values. // // Generated enum types usually have an Enum method which performs the // same operation. @@ -57,29 +57,29 @@ // // # Extension accessors // -// • HasExtension, GetExtension, SetExtension, and ClearExtension -// access extension field values in a protocol buffer message. +// - [HasExtension], [GetExtension], [SetExtension], and [ClearExtension] +// access extension field values in a protocol buffer message. 
// // Extension fields are only supported in proto2. // // # Related packages // -// • Package "google.golang.org/protobuf/encoding/protojson" converts messages to -// and from JSON. +// - Package [google.golang.org/protobuf/encoding/protojson] converts messages to +// and from JSON. // -// • Package "google.golang.org/protobuf/encoding/prototext" converts messages to -// and from the text format. +// - Package [google.golang.org/protobuf/encoding/prototext] converts messages to +// and from the text format. // -// • Package "google.golang.org/protobuf/reflect/protoreflect" provides a -// reflection interface for protocol buffer data types. +// - Package [google.golang.org/protobuf/reflect/protoreflect] provides a +// reflection interface for protocol buffer data types. // -// • Package "google.golang.org/protobuf/testing/protocmp" provides features -// to compare protocol buffer messages with the "github.com/google/go-cmp/cmp" -// package. +// - Package [google.golang.org/protobuf/testing/protocmp] provides features +// to compare protocol buffer messages with the [github.com/google/go-cmp/cmp] +// package. // -// • Package "google.golang.org/protobuf/types/dynamicpb" provides a dynamic -// message type, suitable for working with messages where the protocol buffer -// type is only known at runtime. +// - Package [google.golang.org/protobuf/types/dynamicpb] provides a dynamic +// message type, suitable for working with messages where the protocol buffer +// type is only known at runtime. // // This module contains additional packages for more specialized use cases. // Consult the individual package documentation for details. diff --git a/vendor/google.golang.org/protobuf/proto/encode.go b/vendor/google.golang.org/protobuf/proto/encode.go index bf7f816d..1f847bcc 100644 --- a/vendor/google.golang.org/protobuf/proto/encode.go +++ b/vendor/google.golang.org/protobuf/proto/encode.go @@ -5,12 +5,17 @@ package proto import ( + "errors" + "fmt" + "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/order" "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/runtime/protoiface" + + protoerrors "google.golang.org/protobuf/internal/errors" ) // MarshalOptions configures the marshaler. @@ -70,7 +75,32 @@ type MarshalOptions struct { UseCachedSize bool } +// flags turns the specified MarshalOptions (user-facing) into +// protoiface.MarshalInputFlags (used internally by the marshaler). +// +// See impl.marshalOptions.Options for the inverse operation. +func (o MarshalOptions) flags() protoiface.MarshalInputFlags { + var flags protoiface.MarshalInputFlags + + // Note: o.AllowPartial is always forced to true by MarshalOptions.marshal, + // which is why it is not a part of MarshalInputFlags. + + if o.Deterministic { + flags |= protoiface.MarshalDeterministic + } + + if o.UseCachedSize { + flags |= protoiface.MarshalUseCachedSize + } + + return flags +} + // Marshal returns the wire-format encoding of m. +// +// This is the most common entry point for encoding a Protobuf message. +// +// See the [MarshalOptions] type if you need more control. func Marshal(m Message) ([]byte, error) { // Treat nil message interface as an empty message; nothing to output. if m == nil { @@ -116,6 +146,9 @@ func emptyBytesForMessage(m Message) []byte { // MarshalAppend appends the wire-format encoding of m to b, // returning the result. 
+// +// This is a less common entry point than [Marshal], which is only needed if you +// need to supply your own buffers for performance reasons. func (o MarshalOptions) MarshalAppend(b []byte, m Message) ([]byte, error) { // Treat nil message interface as an empty message; nothing to append. if m == nil { @@ -129,7 +162,7 @@ func (o MarshalOptions) MarshalAppend(b []byte, m Message) ([]byte, error) { // MarshalState returns the wire-format encoding of a message. // // This method permits fine-grained control over the marshaler. -// Most users should use Marshal instead. +// Most users should use [Marshal] instead. func (o MarshalOptions) MarshalState(in protoiface.MarshalInput) (protoiface.MarshalOutput, error) { return o.marshal(in.Buf, in.Message) } @@ -145,12 +178,7 @@ func (o MarshalOptions) marshal(b []byte, m protoreflect.Message) (out protoifac in := protoiface.MarshalInput{ Message: m, Buf: b, - } - if o.Deterministic { - in.Flags |= protoiface.MarshalDeterministic - } - if o.UseCachedSize { - in.Flags |= protoiface.MarshalUseCachedSize + Flags: o.flags(), } if methods.Size != nil { sout := methods.Size(protoiface.SizeInput{ @@ -168,6 +196,10 @@ func (o MarshalOptions) marshal(b []byte, m protoreflect.Message) (out protoifac out.Buf, err = o.marshalMessageSlow(b, m) } if err != nil { + var mismatch *protoerrors.SizeMismatchError + if errors.As(err, &mismatch) { + return out, fmt.Errorf("marshaling %s: %v", string(m.Descriptor().FullName()), err) + } return out, err } if allowPartial { diff --git a/vendor/google.golang.org/protobuf/proto/extension.go b/vendor/google.golang.org/protobuf/proto/extension.go index 5f293cda..d248f292 100644 --- a/vendor/google.golang.org/protobuf/proto/extension.go +++ b/vendor/google.golang.org/protobuf/proto/extension.go @@ -11,22 +11,25 @@ import ( // HasExtension reports whether an extension field is populated. // It returns false if m is invalid or if xt does not extend m. func HasExtension(m Message, xt protoreflect.ExtensionType) bool { - // Treat nil message interface as an empty message; no populated fields. - if m == nil { + // Treat nil message interface or descriptor as an empty message; no populated + // fields. + if m == nil || xt == nil { return false } // As a special-case, we reports invalid or mismatching descriptors // as always not being populated (since they aren't). - if xt == nil || m.ProtoReflect().Descriptor() != xt.TypeDescriptor().ContainingMessage() { + mr := m.ProtoReflect() + xd := xt.TypeDescriptor() + if mr.Descriptor() != xd.ContainingMessage() { return false } - return m.ProtoReflect().Has(xt.TypeDescriptor()) + return mr.Has(xd) } // ClearExtension clears an extension field such that subsequent -// HasExtension calls return false. +// [HasExtension] calls return false. // It panics if m is invalid or if xt does not extend m. func ClearExtension(m Message, xt protoreflect.ExtensionType) { m.ProtoReflect().Clear(xt.TypeDescriptor()) @@ -36,7 +39,7 @@ func ClearExtension(m Message, xt protoreflect.ExtensionType) { // If the field is unpopulated, it returns the default value for // scalars and an immutable, empty value for lists or messages. // It panics if xt does not extend m. -func GetExtension(m Message, xt protoreflect.ExtensionType) interface{} { +func GetExtension(m Message, xt protoreflect.ExtensionType) any { // Treat nil message interface as an empty message; return the default. 
if m == nil { return xt.InterfaceOf(xt.Zero()) @@ -48,7 +51,7 @@ func GetExtension(m Message, xt protoreflect.ExtensionType) interface{} { // SetExtension stores the value of an extension field. // It panics if m is invalid, xt does not extend m, or if type of v // is invalid for the specified extension field. -func SetExtension(m Message, xt protoreflect.ExtensionType, v interface{}) { +func SetExtension(m Message, xt protoreflect.ExtensionType, v any) { xd := xt.TypeDescriptor() pv := xt.ValueOf(v) @@ -75,7 +78,7 @@ func SetExtension(m Message, xt protoreflect.ExtensionType, v interface{}) { // It returns immediately if f returns false. // While iterating, mutating operations may only be performed // on the current extension field. -func RangeExtensions(m Message, f func(protoreflect.ExtensionType, interface{}) bool) { +func RangeExtensions(m Message, f func(protoreflect.ExtensionType, any) bool) { // Treat nil message interface as an empty message; nothing to range over. if m == nil { return diff --git a/vendor/google.golang.org/protobuf/proto/merge.go b/vendor/google.golang.org/protobuf/proto/merge.go index d761ab33..3c6fe578 100644 --- a/vendor/google.golang.org/protobuf/proto/merge.go +++ b/vendor/google.golang.org/protobuf/proto/merge.go @@ -21,7 +21,7 @@ import ( // The unknown fields of src are appended to the unknown fields of dst. // // It is semantically equivalent to unmarshaling the encoded form of src -// into dst with the UnmarshalOptions.Merge option specified. +// into dst with the [UnmarshalOptions.Merge] option specified. func Merge(dst, src Message) { // TODO: Should nil src be treated as semantically equivalent to a // untyped, read-only, empty message? What about a nil dst? diff --git a/vendor/google.golang.org/protobuf/proto/messageset.go b/vendor/google.golang.org/protobuf/proto/messageset.go index 312d5d45..575d1483 100644 --- a/vendor/google.golang.org/protobuf/proto/messageset.go +++ b/vendor/google.golang.org/protobuf/proto/messageset.go @@ -47,11 +47,16 @@ func (o MarshalOptions) marshalMessageSet(b []byte, m protoreflect.Message) ([]b func (o MarshalOptions) marshalMessageSetField(b []byte, fd protoreflect.FieldDescriptor, value protoreflect.Value) ([]byte, error) { b = messageset.AppendFieldStart(b, fd.Number()) b = protowire.AppendTag(b, messageset.FieldMessage, protowire.BytesType) - b = protowire.AppendVarint(b, uint64(o.Size(value.Message().Interface()))) + calculatedSize := o.Size(value.Message().Interface()) + b = protowire.AppendVarint(b, uint64(calculatedSize)) + before := len(b) b, err := o.marshalMessage(b, value.Message()) if err != nil { return b, err } + if measuredSize := len(b) - before; calculatedSize != measuredSize { + return nil, errors.MismatchedSizeCalculation(calculatedSize, measuredSize) + } b = messageset.AppendFieldEnd(b) return b, nil } diff --git a/vendor/google.golang.org/protobuf/proto/proto.go b/vendor/google.golang.org/protobuf/proto/proto.go index 1f0d183b..7543ee6b 100644 --- a/vendor/google.golang.org/protobuf/proto/proto.go +++ b/vendor/google.golang.org/protobuf/proto/proto.go @@ -15,18 +15,20 @@ import ( // protobuf module that accept a Message, except where otherwise specified. // // This is the v2 interface definition for protobuf messages. -// The v1 interface definition is "github.com/golang/protobuf/proto".Message. +// The v1 interface definition is [github.com/golang/protobuf/proto.Message]. // -// To convert a v1 message to a v2 message, -// use "github.com/golang/protobuf/proto".MessageV2. 
-// To convert a v2 message to a v1 message, -// use "github.com/golang/protobuf/proto".MessageV1. +// - To convert a v1 message to a v2 message, +// use [google.golang.org/protobuf/protoadapt.MessageV2Of]. +// - To convert a v2 message to a v1 message, +// use [google.golang.org/protobuf/protoadapt.MessageV1Of]. type Message = protoreflect.ProtoMessage -// Error matches all errors produced by packages in the protobuf module. +// Error matches all errors produced by packages in the protobuf module +// according to [errors.Is]. // -// That is, errors.Is(err, Error) reports whether an error is produced -// by this module. +// Example usage: +// +// if errors.Is(err, proto.Error) { ... } var Error error func init() { diff --git a/vendor/google.golang.org/protobuf/proto/size.go b/vendor/google.golang.org/protobuf/proto/size.go index f1692b49..052fb5ae 100644 --- a/vendor/google.golang.org/protobuf/proto/size.go +++ b/vendor/google.golang.org/protobuf/proto/size.go @@ -34,6 +34,7 @@ func (o MarshalOptions) size(m protoreflect.Message) (size int) { if methods != nil && methods.Size != nil { out := methods.Size(protoiface.SizeInput{ Message: m, + Flags: o.flags(), }) return out.Size } @@ -42,6 +43,7 @@ func (o MarshalOptions) size(m protoreflect.Message) (size int) { // This case is mainly used for legacy types with a Marshal method. out, _ := methods.Marshal(protoiface.MarshalInput{ Message: m, + Flags: o.flags(), }) return len(out.Buf) } diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go index e4dfb120..8fbecb4f 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go @@ -3,16 +3,17 @@ // license that can be found in the LICENSE file. // Package protodesc provides functionality for converting -// FileDescriptorProto messages to/from protoreflect.FileDescriptor values. +// FileDescriptorProto messages to/from [protoreflect.FileDescriptor] values. // // The google.protobuf.FileDescriptorProto is a protobuf message that describes // the type information for a .proto file in a form that is easily serializable. -// The protoreflect.FileDescriptor is a more structured representation of +// The [protoreflect.FileDescriptor] is a more structured representation of // the FileDescriptorProto message where references and remote dependencies // can be directly followed. package protodesc import ( + "google.golang.org/protobuf/internal/editionssupport" "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/internal/filedesc" "google.golang.org/protobuf/internal/pragma" @@ -24,11 +25,11 @@ import ( "google.golang.org/protobuf/types/descriptorpb" ) -// Resolver is the resolver used by NewFile to resolve dependencies. +// Resolver is the resolver used by [NewFile] to resolve dependencies. // The enums and messages provided must belong to some parent file, // which is also registered. // -// It is implemented by protoregistry.Files. +// It is implemented by [protoregistry.Files]. type Resolver interface { FindFileByPath(string) (protoreflect.FileDescriptor, error) FindDescriptorByName(protoreflect.FullName) (protoreflect.Descriptor, error) @@ -61,19 +62,19 @@ type FileOptions struct { AllowUnresolvable bool } -// NewFile creates a new protoreflect.FileDescriptor from the provided -// file descriptor message. See FileOptions.New for more information. 
+// NewFile creates a new [protoreflect.FileDescriptor] from the provided +// file descriptor message. See [FileOptions.New] for more information. func NewFile(fd *descriptorpb.FileDescriptorProto, r Resolver) (protoreflect.FileDescriptor, error) { return FileOptions{}.New(fd, r) } -// NewFiles creates a new protoregistry.Files from the provided -// FileDescriptorSet message. See FileOptions.NewFiles for more information. +// NewFiles creates a new [protoregistry.Files] from the provided +// FileDescriptorSet message. See [FileOptions.NewFiles] for more information. func NewFiles(fd *descriptorpb.FileDescriptorSet) (*protoregistry.Files, error) { return FileOptions{}.NewFiles(fd) } -// New creates a new protoreflect.FileDescriptor from the provided +// New creates a new [protoreflect.FileDescriptor] from the provided // file descriptor message. The file must represent a valid proto file according // to protobuf semantics. The returned descriptor is a deep copy of the input. // @@ -91,11 +92,19 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot switch fd.GetSyntax() { case "proto2", "": f.L1.Syntax = protoreflect.Proto2 + f.L1.Edition = filedesc.EditionProto2 case "proto3": f.L1.Syntax = protoreflect.Proto3 + f.L1.Edition = filedesc.EditionProto3 + case "editions": + f.L1.Syntax = protoreflect.Editions + f.L1.Edition = fromEditionProto(fd.GetEdition()) default: return nil, errors.New("invalid syntax: %q", fd.GetSyntax()) } + if f.L1.Syntax == protoreflect.Editions && (fd.GetEdition() < editionssupport.Minimum || fd.GetEdition() > editionssupport.Maximum) { + return nil, errors.New("use of edition %v not yet supported by the Go Protobuf runtime", fd.GetEdition()) + } f.L1.Path = fd.GetName() if f.L1.Path == "" { return nil, errors.New("file path must be populated") @@ -108,6 +117,7 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot opts = proto.Clone(opts).(*descriptorpb.FileOptions) f.L2.Options = func() protoreflect.ProtoMessage { return opts } } + initFileDescFromFeatureSet(f, fd.GetOptions().GetFeatures()) f.L2.Imports = make(filedesc.FileImports, len(fd.GetDependency())) for _, i := range fd.GetPublicDependency() { @@ -210,10 +220,10 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot if err := validateEnumDeclarations(f.L1.Enums.List, fd.GetEnumType()); err != nil { return nil, err } - if err := validateMessageDeclarations(f.L1.Messages.List, fd.GetMessageType()); err != nil { + if err := validateMessageDeclarations(f, f.L1.Messages.List, fd.GetMessageType()); err != nil { return nil, err } - if err := validateExtensionDeclarations(f.L1.Extensions.List, fd.GetExtension()); err != nil { + if err := validateExtensionDeclarations(f, f.L1.Extensions.List, fd.GetExtension()); err != nil { return nil, err } @@ -231,7 +241,7 @@ func (is importSet) importPublic(imps protoreflect.FileImports) { } } -// NewFiles creates a new protoregistry.Files from the provided +// NewFiles creates a new [protoregistry.Files] from the provided // FileDescriptorSet message. The descriptor set must include only // valid files according to protobuf semantics. The returned descriptors // are a deep copy of the input. 
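Illustrative sketch, not part of the vendored patch: the hunks above update the doc comments for proto.Marshal/Unmarshal, MarshalOptions/UnmarshalOptions and protodesc.NewFile, and route the Deterministic/UseCachedSize options through the new MarshalOptions.flags() helper. The short program below shows those public entry points in use; durationpb is used purely as a stand-in message type and is an assumption of this sketch, not something introduced by the patch.

// Illustrative only — a minimal sketch of the proto and protodesc entry points
// documented in the hunks above; durationpb is an arbitrary stand-in message.
package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protodesc"
	"google.golang.org/protobuf/reflect/protoregistry"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	msg := durationpb.New(90 * time.Second)

	// Marshal is the common entry point; MarshalOptions gives more control,
	// e.g. deterministic map ordering (carried as protoiface.MarshalDeterministic).
	b, err := proto.MarshalOptions{Deterministic: true}.Marshal(msg)
	if err != nil {
		panic(err)
	}

	// Unmarshal into a fresh message; UnmarshalOptions exposes knobs such as
	// DiscardUnknown beyond what the plain Unmarshal helper sets up.
	var out durationpb.Duration
	if err := (proto.UnmarshalOptions{DiscardUnknown: true}).Unmarshal(b, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.AsDuration()) // 1m30s

	// protodesc converts between FileDescriptorProto messages and
	// protoreflect.FileDescriptor values; the global registry acts as the resolver.
	fdp := protodesc.ToFileDescriptorProto(msg.ProtoReflect().Descriptor().ParentFile())
	if _, err := protodesc.NewFile(fdp, protoregistry.GlobalFiles); err != nil {
		panic(err)
	}
}

As the encode.go hunk above shows, the Deterministic and UseCachedSize options used here are now translated into protoiface.MarshalInputFlags by MarshalOptions.flags(), and size.go passes the same flags when sizing a message.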
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go index 37efda1a..85617554 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go @@ -28,6 +28,7 @@ func (r descsByName) initEnumDeclarations(eds []*descriptorpb.EnumDescriptorProt opts = proto.Clone(opts).(*descriptorpb.EnumOptions) e.L2.Options = func() protoreflect.ProtoMessage { return opts } } + e.L1.EditionFeatures = mergeEditionFeatures(parent, ed.GetOptions().GetFeatures()) for _, s := range ed.GetReservedName() { e.L2.ReservedNames.List = append(e.L2.ReservedNames.List, protoreflect.Name(s)) } @@ -68,6 +69,7 @@ func (r descsByName) initMessagesDeclarations(mds []*descriptorpb.DescriptorProt if m.L0, err = r.makeBase(m, parent, md.GetName(), i, sb); err != nil { return nil, err } + m.L1.EditionFeatures = mergeEditionFeatures(parent, md.GetOptions().GetFeatures()) if opts := md.GetOptions(); opts != nil { opts = proto.Clone(opts).(*descriptorpb.MessageOptions) m.L2.Options = func() protoreflect.ProtoMessage { return opts } @@ -114,6 +116,27 @@ func (r descsByName) initMessagesDeclarations(mds []*descriptorpb.DescriptorProt return ms, nil } +// canBePacked returns whether the field can use packed encoding: +// https://protobuf.dev/programming-guides/encoding/#packed +func canBePacked(fd *descriptorpb.FieldDescriptorProto) bool { + if fd.GetLabel() != descriptorpb.FieldDescriptorProto_LABEL_REPEATED { + return false // not a repeated field + } + + switch protoreflect.Kind(fd.GetType()) { + case protoreflect.MessageKind, protoreflect.GroupKind: + return false // not a scalar type field + + case protoreflect.StringKind, protoreflect.BytesKind: + // string and bytes can explicitly not be declared as packed, + // see https://protobuf.dev/programming-guides/encoding/#packed + return false + + default: + return true + } +} + func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (fs []filedesc.Field, err error) { fs = make([]filedesc.Field, len(fds)) // allocate up-front to ensure stable pointers for i, fd := range fds { @@ -121,13 +144,15 @@ func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDesc if f.L0, err = r.makeBase(f, parent, fd.GetName(), i, sb); err != nil { return nil, err } + f.L1.EditionFeatures = mergeEditionFeatures(parent, fd.GetOptions().GetFeatures()) f.L1.IsProto3Optional = fd.GetProto3Optional() if opts := fd.GetOptions(); opts != nil { opts = proto.Clone(opts).(*descriptorpb.FieldOptions) f.L1.Options = func() protoreflect.ProtoMessage { return opts } f.L1.IsWeak = opts.GetWeak() - f.L1.HasPacked = opts.Packed != nil - f.L1.IsPacked = opts.GetPacked() + if opts.Packed != nil { + f.L1.EditionFeatures.IsPacked = opts.GetPacked() + } } f.L1.Number = protoreflect.FieldNumber(fd.GetNumber()) f.L1.Cardinality = protoreflect.Cardinality(fd.GetLabel()) @@ -137,6 +162,14 @@ func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDesc if fd.JsonName != nil { f.L1.StringName.InitJSON(fd.GetJsonName()) } + + if f.L1.EditionFeatures.IsLegacyRequired { + f.L1.Cardinality = protoreflect.Required + } + + if f.L1.Kind == protoreflect.MessageKind && f.L1.EditionFeatures.IsDelimitedEncoded { + f.L1.Kind = protoreflect.GroupKind + } } return fs, nil } @@ -148,6 +181,7 @@ func (r descsByName) initOneofsFromDescriptorProto(ods 
[]*descriptorpb.OneofDesc if o.L0, err = r.makeBase(o, parent, od.GetName(), i, sb); err != nil { return nil, err } + o.L1.EditionFeatures = mergeEditionFeatures(parent, od.GetOptions().GetFeatures()) if opts := od.GetOptions(); opts != nil { opts = proto.Clone(opts).(*descriptorpb.OneofOptions) o.L1.Options = func() protoreflect.ProtoMessage { return opts } @@ -164,10 +198,13 @@ func (r descsByName) initExtensionDeclarations(xds []*descriptorpb.FieldDescript if x.L0, err = r.makeBase(x, parent, xd.GetName(), i, sb); err != nil { return nil, err } + x.L1.EditionFeatures = mergeEditionFeatures(parent, xd.GetOptions().GetFeatures()) if opts := xd.GetOptions(); opts != nil { opts = proto.Clone(opts).(*descriptorpb.FieldOptions) x.L2.Options = func() protoreflect.ProtoMessage { return opts } - x.L2.IsPacked = opts.GetPacked() + if opts.Packed != nil { + x.L1.EditionFeatures.IsPacked = opts.GetPacked() + } } x.L1.Number = protoreflect.FieldNumber(xd.GetNumber()) x.L1.Cardinality = protoreflect.Cardinality(xd.GetLabel()) diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go index 27d7e350..f3cebab2 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go @@ -46,6 +46,11 @@ func (r *resolver) resolveMessageDependencies(ms []filedesc.Message, mds []*desc if f.L1.Kind, f.L1.Enum, f.L1.Message, err = r.findTarget(f.Kind(), f.Parent().FullName(), partialName(fd.GetTypeName()), f.IsWeak()); err != nil { return errors.New("message field %q cannot resolve type: %v", f.FullName(), err) } + if f.L1.Kind == protoreflect.GroupKind && (f.IsMap() || f.IsMapEntry()) { + // A map field might inherit delimited encoding from a file-wide default feature. + // But maps never actually use delimited encoding. (At least for now...) 
+ f.L1.Kind = protoreflect.MessageKind + } if fd.DefaultValue != nil { v, ev, err := unmarshalDefault(fd.GetDefaultValue(), f, r.allowUnresolvable) if err != nil { @@ -276,8 +281,8 @@ func unmarshalDefault(s string, fd protoreflect.FieldDescriptor, allowUnresolvab } else if err != nil { return v, ev, err } - if fd.Syntax() == protoreflect.Proto3 { - return v, ev, errors.New("cannot be specified under proto3 semantics") + if !fd.HasPresence() { + return v, ev, errors.New("cannot be specified with implicit field presence") } if fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind || fd.Cardinality() == protoreflect.Repeated { return v, ev, errors.New("cannot be specified on composite types") diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go index 9af1d564..6de31c2e 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go @@ -45,11 +45,11 @@ func validateEnumDeclarations(es []filedesc.Enum, eds []*descriptorpb.EnumDescri if allowAlias && !foundAlias { return errors.New("enum %q allows aliases, but none were found", e.FullName()) } - if e.Syntax() == protoreflect.Proto3 { + if !e.IsClosed() { if v := e.Values().Get(0); v.Number() != 0 { - return errors.New("enum %q using proto3 semantics must have zero number for the first value", v.FullName()) + return errors.New("enum %q using open semantics must have zero number for the first value", v.FullName()) } - // Verify that value names in proto3 do not conflict if the + // Verify that value names in open enums do not conflict if the // case-insensitive prefix is removed. // See protoc v3.8.0: src/google/protobuf/descriptor.cc:4991-5055 names := map[string]protoreflect.EnumValueDescriptor{} @@ -58,7 +58,7 @@ func validateEnumDeclarations(es []filedesc.Enum, eds []*descriptorpb.EnumDescri v1 := e.Values().Get(i) s := strs.EnumValueName(strs.TrimEnumPrefix(string(v1.Name()), prefix)) if v2, ok := names[s]; ok && v1.Number() != v2.Number() { - return errors.New("enum %q using proto3 semantics has conflict: %q with %q", e.FullName(), v1.Name(), v2.Name()) + return errors.New("enum %q using open semantics has conflict: %q with %q", e.FullName(), v1.Name(), v2.Name()) } names[s] = v1 } @@ -80,7 +80,9 @@ func validateEnumDeclarations(es []filedesc.Enum, eds []*descriptorpb.EnumDescri return nil } -func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.DescriptorProto) error { +func validateMessageDeclarations(file *filedesc.File, ms []filedesc.Message, mds []*descriptorpb.DescriptorProto) error { + // There are a few limited exceptions only for proto3 + isProto3 := file.L1.Edition == fromEditionProto(descriptorpb.Edition_EDITION_PROTO3) for i, md := range mds { m := &ms[i] @@ -107,25 +109,13 @@ func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.Desc if isMessageSet && !flags.ProtoLegacy { return errors.New("message %q is a MessageSet, which is a legacy proto1 feature that is no longer supported", m.FullName()) } - if isMessageSet && (m.Syntax() != protoreflect.Proto2 || m.Fields().Len() > 0 || m.ExtensionRanges().Len() == 0) { + if isMessageSet && (isProto3 || m.Fields().Len() > 0 || m.ExtensionRanges().Len() == 0) { return errors.New("message %q is an invalid proto1 MessageSet", m.FullName()) } - if m.Syntax() == protoreflect.Proto3 { + if isProto3 { if m.ExtensionRanges().Len() > 0 { 
return errors.New("message %q using proto3 semantics cannot have extension ranges", m.FullName()) } - // Verify that field names in proto3 do not conflict if lowercased - // with all underscores removed. - // See protoc v3.8.0: src/google/protobuf/descriptor.cc:5830-5847 - names := map[string]protoreflect.FieldDescriptor{} - for i := 0; i < m.Fields().Len(); i++ { - f1 := m.Fields().Get(i) - s := strings.Replace(strings.ToLower(string(f1.Name())), "_", "", -1) - if f2, ok := names[s]; ok { - return errors.New("message %q using proto3 semantics has conflict: %q with %q", m.FullName(), f1.Name(), f2.Name()) - } - names[s] = f1 - } } for j, fd := range md.GetField() { @@ -149,7 +139,7 @@ func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.Desc return errors.New("message field %q may not have extendee: %q", f.FullName(), fd.GetExtendee()) } if f.L1.IsProto3Optional { - if f.Syntax() != protoreflect.Proto3 { + if !isProto3 { return errors.New("message field %q under proto3 optional semantics must be specified in the proto3 syntax", f.FullName()) } if f.Cardinality() != protoreflect.Optional { @@ -162,26 +152,29 @@ func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.Desc if f.IsWeak() && !flags.ProtoLegacy { return errors.New("message field %q is a weak field, which is a legacy proto1 feature that is no longer supported", f.FullName()) } - if f.IsWeak() && (f.Syntax() != protoreflect.Proto2 || !isOptionalMessage(f) || f.ContainingOneof() != nil) { + if f.IsWeak() && (!f.HasPresence() || !isOptionalMessage(f) || f.ContainingOneof() != nil) { return errors.New("message field %q may only be weak for an optional message", f.FullName()) } if f.IsPacked() && !isPackable(f) { return errors.New("message field %q is not packable", f.FullName()) } - if err := checkValidGroup(f); err != nil { + if err := checkValidGroup(file, f); err != nil { return errors.New("message field %q is an invalid group: %v", f.FullName(), err) } if err := checkValidMap(f); err != nil { return errors.New("message field %q is an invalid map: %v", f.FullName(), err) } - if f.Syntax() == protoreflect.Proto3 { + if isProto3 { if f.Cardinality() == protoreflect.Required { return errors.New("message field %q using proto3 semantics cannot be required", f.FullName()) } - if f.Enum() != nil && !f.Enum().IsPlaceholder() && f.Enum().Syntax() != protoreflect.Proto3 { - return errors.New("message field %q using proto3 semantics may only depend on a proto3 enum", f.FullName()) + if f.Enum() != nil && !f.Enum().IsPlaceholder() && f.Enum().IsClosed() { + return errors.New("message field %q using proto3 semantics may only depend on open enums", f.FullName()) } } + if f.Cardinality() == protoreflect.Optional && !f.HasPresence() && f.Enum() != nil && !f.Enum().IsPlaceholder() && f.Enum().IsClosed() { + return errors.New("message field %q with implicit presence may only use open enums", f.FullName()) + } } seenSynthetic := false // synthetic oneofs for proto3 optional must come after real oneofs for j := range md.GetOneofDecl() { @@ -215,17 +208,17 @@ func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.Desc if err := validateEnumDeclarations(m.L1.Enums.List, md.GetEnumType()); err != nil { return err } - if err := validateMessageDeclarations(m.L1.Messages.List, md.GetNestedType()); err != nil { + if err := validateMessageDeclarations(file, m.L1.Messages.List, md.GetNestedType()); err != nil { return err } - if err := validateExtensionDeclarations(m.L1.Extensions.List, 
md.GetExtension()); err != nil { + if err := validateExtensionDeclarations(file, m.L1.Extensions.List, md.GetExtension()); err != nil { return err } } return nil } -func validateExtensionDeclarations(xs []filedesc.Extension, xds []*descriptorpb.FieldDescriptorProto) error { +func validateExtensionDeclarations(f *filedesc.File, xs []filedesc.Extension, xds []*descriptorpb.FieldDescriptorProto) error { for i, xd := range xds { x := &xs[i] // NOTE: Avoid using the IsValid method since extensions to MessageSet @@ -267,13 +260,13 @@ func validateExtensionDeclarations(xs []filedesc.Extension, xds []*descriptorpb. if x.IsPacked() && !isPackable(x) { return errors.New("extension field %q is not packable", x.FullName()) } - if err := checkValidGroup(x); err != nil { + if err := checkValidGroup(f, x); err != nil { return errors.New("extension field %q is an invalid group: %v", x.FullName(), err) } if md := x.Message(); md != nil && md.IsMapEntry() { return errors.New("extension field %q cannot be a map entry", x.FullName()) } - if x.Syntax() == protoreflect.Proto3 { + if f.L1.Edition == fromEditionProto(descriptorpb.Edition_EDITION_PROTO3) { switch x.ContainingMessage().FullName() { case (*descriptorpb.FileOptions)(nil).ProtoReflect().Descriptor().FullName(): case (*descriptorpb.EnumOptions)(nil).ProtoReflect().Descriptor().FullName(): @@ -309,21 +302,25 @@ func isPackable(fd protoreflect.FieldDescriptor) bool { // checkValidGroup reports whether fd is a valid group according to the same // rules that protoc imposes. -func checkValidGroup(fd protoreflect.FieldDescriptor) error { +func checkValidGroup(f *filedesc.File, fd protoreflect.FieldDescriptor) error { md := fd.Message() switch { case fd.Kind() != protoreflect.GroupKind: return nil - case fd.Syntax() != protoreflect.Proto2: - return errors.New("invalid under proto2 semantics") + case f.L1.Edition == fromEditionProto(descriptorpb.Edition_EDITION_PROTO3): + return errors.New("invalid under proto3 semantics") case md == nil || md.IsPlaceholder(): return errors.New("message must be resolvable") - case fd.FullName().Parent() != md.FullName().Parent(): - return errors.New("message and field must be declared in the same scope") - case !unicode.IsUpper(rune(md.Name()[0])): - return errors.New("message name must start with an uppercase") - case fd.Name() != protoreflect.Name(strings.ToLower(string(md.Name()))): - return errors.New("field name must be lowercased form of the message name") + } + if f.L1.Edition < fromEditionProto(descriptorpb.Edition_EDITION_2023) { + switch { + case fd.FullName().Parent() != md.FullName().Parent(): + return errors.New("message and field must be declared in the same scope") + case !unicode.IsUpper(rune(md.Name()[0])): + return errors.New("message name must start with an uppercase") + case fd.Name() != protoreflect.Name(strings.ToLower(string(md.Name()))): + return errors.New("field name must be lowercased form of the message name") + } } return nil } diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go new file mode 100644 index 00000000..804830ed --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go @@ -0,0 +1,145 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
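Illustrative aside (not part of the vendored change above): the desc_validate.go hunks replace syntax-based checks (proto2 vs. proto3) with semantic queries such as FieldDescriptor.HasPresence and the EnumDescriptor.IsClosed accessor introduced by this protobuf update. A minimal sketch of what those accessors report on a well-known proto2 message, assuming the vendored protobuf version from this change:

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	// descriptor.proto is a proto2 file, so its optional fields track
	// explicit presence and its enums use closed semantics.
	md := (&descriptorpb.FieldDescriptorProto{}).ProtoReflect().Descriptor()

	fd := md.Fields().ByName("type")  // optional enum field FieldDescriptorProto.type
	fmt.Println(fd.HasPresence())     // expected: true (explicit field presence)
	fmt.Println(fd.Enum().IsClosed()) // expected: true (proto2 enums are closed)
}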
+ +package protodesc + +import ( + "fmt" + "os" + "sync" + + "google.golang.org/protobuf/internal/editiondefaults" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/types/descriptorpb" + gofeaturespb "google.golang.org/protobuf/types/gofeaturespb" +) + +var defaults = &descriptorpb.FeatureSetDefaults{} +var defaultsCacheMu sync.Mutex +var defaultsCache = make(map[filedesc.Edition]*descriptorpb.FeatureSet) + +func init() { + err := proto.Unmarshal(editiondefaults.Defaults, defaults) + if err != nil { + fmt.Fprintf(os.Stderr, "unmarshal editions defaults: %v\n", err) + os.Exit(1) + } +} + +func fromEditionProto(epb descriptorpb.Edition) filedesc.Edition { + return filedesc.Edition(epb) +} + +func toEditionProto(ed filedesc.Edition) descriptorpb.Edition { + switch ed { + case filedesc.EditionUnknown: + return descriptorpb.Edition_EDITION_UNKNOWN + case filedesc.EditionProto2: + return descriptorpb.Edition_EDITION_PROTO2 + case filedesc.EditionProto3: + return descriptorpb.Edition_EDITION_PROTO3 + case filedesc.Edition2023: + return descriptorpb.Edition_EDITION_2023 + default: + panic(fmt.Sprintf("unknown value for edition: %v", ed)) + } +} + +func getFeatureSetFor(ed filedesc.Edition) *descriptorpb.FeatureSet { + defaultsCacheMu.Lock() + defer defaultsCacheMu.Unlock() + if def, ok := defaultsCache[ed]; ok { + return def + } + edpb := toEditionProto(ed) + if defaults.GetMinimumEdition() > edpb || defaults.GetMaximumEdition() < edpb { + // This should never happen protodesc.(FileOptions).New would fail when + // initializing the file descriptor. + // This most likely means the embedded defaults were not updated. + fmt.Fprintf(os.Stderr, "internal error: unsupported edition %v (did you forget to update the embedded defaults (i.e. the bootstrap descriptor proto)?)\n", edpb) + os.Exit(1) + } + fsed := defaults.GetDefaults()[0] + // Using a linear search for now. + // Editions are guaranteed to be sorted and thus we could use a binary search. + // Given that there are only a handful of editions (with one more per year) + // there is not much reason to use a binary search. + for _, def := range defaults.GetDefaults() { + if def.GetEdition() <= edpb { + fsed = def + } else { + break + } + } + fs := proto.Clone(fsed.GetFixedFeatures()).(*descriptorpb.FeatureSet) + proto.Merge(fs, fsed.GetOverridableFeatures()) + defaultsCache[ed] = fs + return fs +} + +// mergeEditionFeatures merges the parent and child feature sets. This function +// should be used when initializing Go descriptors from descriptor protos which +// is why the parent is a filedesc.EditionsFeatures (Go representation) while +// the child is a descriptorproto.FeatureSet (protoc representation). +// Any feature set by the child overwrites what is set by the parent. 
+func mergeEditionFeatures(parentDesc protoreflect.Descriptor, child *descriptorpb.FeatureSet) filedesc.EditionFeatures { + var parentFS filedesc.EditionFeatures + switch p := parentDesc.(type) { + case *filedesc.File: + parentFS = p.L1.EditionFeatures + case *filedesc.Message: + parentFS = p.L1.EditionFeatures + default: + panic(fmt.Sprintf("unknown parent type %T", parentDesc)) + } + if child == nil { + return parentFS + } + if fp := child.FieldPresence; fp != nil { + parentFS.IsFieldPresence = *fp == descriptorpb.FeatureSet_LEGACY_REQUIRED || + *fp == descriptorpb.FeatureSet_EXPLICIT + parentFS.IsLegacyRequired = *fp == descriptorpb.FeatureSet_LEGACY_REQUIRED + } + if et := child.EnumType; et != nil { + parentFS.IsOpenEnum = *et == descriptorpb.FeatureSet_OPEN + } + + if rfe := child.RepeatedFieldEncoding; rfe != nil { + parentFS.IsPacked = *rfe == descriptorpb.FeatureSet_PACKED + } + + if utf8val := child.Utf8Validation; utf8val != nil { + parentFS.IsUTF8Validated = *utf8val == descriptorpb.FeatureSet_VERIFY + } + + if me := child.MessageEncoding; me != nil { + parentFS.IsDelimitedEncoded = *me == descriptorpb.FeatureSet_DELIMITED + } + + if jf := child.JsonFormat; jf != nil { + parentFS.IsJSONCompliant = *jf == descriptorpb.FeatureSet_ALLOW + } + + if goFeatures, ok := proto.GetExtension(child, gofeaturespb.E_Go).(*gofeaturespb.GoFeatures); ok && goFeatures != nil { + if luje := goFeatures.LegacyUnmarshalJsonEnum; luje != nil { + parentFS.GenerateLegacyUnmarshalJSON = *luje + } + } + + return parentFS +} + +// initFileDescFromFeatureSet initializes editions related fields in fd based +// on fs. If fs is nil it is assumed to be an empty featureset and all fields +// will be initialized with the appropriate default. fd.L1.Edition must be set +// before calling this function. +func initFileDescFromFeatureSet(fd *filedesc.File, fs *descriptorpb.FeatureSet) { + dfs := getFeatureSetFor(fd.L1.Edition) + // initialize the featureset with the defaults + fd.L1.EditionFeatures = mergeEditionFeatures(fd, dfs) + // overwrite any options explicitly specified + fd.L1.EditionFeatures = mergeEditionFeatures(fd, fs) +} diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go index a7c5ceff..a5de8d40 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go @@ -16,7 +16,7 @@ import ( "google.golang.org/protobuf/types/descriptorpb" ) -// ToFileDescriptorProto copies a protoreflect.FileDescriptor into a +// ToFileDescriptorProto copies a [protoreflect.FileDescriptor] into a // google.protobuf.FileDescriptorProto message. 
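Illustrative aside (not part of the vendored change): mergeEditionFeatures and initFileDescFromFeatureSet above resolve a descriptor's features in two passes, starting from the edition's defaults and then letting any explicitly set child feature override the inherited value. The sketch below mirrors only that override pattern with toy types, since the real code uses the internal filedesc.EditionFeatures type, which cannot be imported outside the protobuf module; all names here are hypothetical.

package main

import "fmt"

// Toy stand-ins for the resolved (Go) and wire (proto) feature representations.
type features struct{ FieldPresenceExplicit, PackedRepeated bool }
type featureSet struct{ FieldPresence, RepeatedFieldEncoding *string }

// merge mirrors the shape of mergeEditionFeatures: a feature set on the child
// overwrites the value inherited from the parent; unset fields fall through.
func merge(parent features, child *featureSet) features {
	out := parent
	if child == nil {
		return out
	}
	if fp := child.FieldPresence; fp != nil {
		out.FieldPresenceExplicit = *fp == "EXPLICIT" || *fp == "LEGACY_REQUIRED"
	}
	if rfe := child.RepeatedFieldEncoding; rfe != nil {
		out.PackedRepeated = *rfe == "PACKED"
	}
	return out
}

func main() {
	editionDefaults := features{FieldPresenceExplicit: true, PackedRepeated: true}
	implicit := "IMPLICIT"
	fileLevel := &featureSet{FieldPresence: &implicit} // explicit file-level override
	fmt.Printf("%+v\n", merge(editionDefaults, fileLevel))
	// {FieldPresenceExplicit:false PackedRepeated:true}
}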
func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileDescriptorProto { p := &descriptorpb.FileDescriptorProto{ @@ -70,13 +70,23 @@ func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileD for i, exts := 0, file.Extensions(); i < exts.Len(); i++ { p.Extension = append(p.Extension, ToFieldDescriptorProto(exts.Get(i))) } - if syntax := file.Syntax(); syntax != protoreflect.Proto2 { + if syntax := file.Syntax(); syntax != protoreflect.Proto2 && syntax.IsValid() { p.Syntax = proto.String(file.Syntax().String()) } + if file.Syntax() == protoreflect.Editions { + desc := file + if fileImportDesc, ok := file.(protoreflect.FileImport); ok { + desc = fileImportDesc.FileDescriptor + } + + if editionsInterface, ok := desc.(interface{ Edition() int32 }); ok { + p.Edition = descriptorpb.Edition(editionsInterface.Edition()).Enum() + } + } return p } -// ToDescriptorProto copies a protoreflect.MessageDescriptor into a +// ToDescriptorProto copies a [protoreflect.MessageDescriptor] into a // google.protobuf.DescriptorProto message. func ToDescriptorProto(message protoreflect.MessageDescriptor) *descriptorpb.DescriptorProto { p := &descriptorpb.DescriptorProto{ @@ -119,7 +129,7 @@ func ToDescriptorProto(message protoreflect.MessageDescriptor) *descriptorpb.Des return p } -// ToFieldDescriptorProto copies a protoreflect.FieldDescriptor into a +// ToFieldDescriptorProto copies a [protoreflect.FieldDescriptor] into a // google.protobuf.FieldDescriptorProto message. func ToFieldDescriptorProto(field protoreflect.FieldDescriptor) *descriptorpb.FieldDescriptorProto { p := &descriptorpb.FieldDescriptorProto{ @@ -153,6 +163,18 @@ func ToFieldDescriptorProto(field protoreflect.FieldDescriptor) *descriptorpb.Fi if field.Syntax() == protoreflect.Proto3 && field.HasOptionalKeyword() { p.Proto3Optional = proto.Bool(true) } + if field.Syntax() == protoreflect.Editions { + // Editions have no group keyword, this type is only set so that downstream users continue + // treating this as delimited encoding. + if p.GetType() == descriptorpb.FieldDescriptorProto_TYPE_GROUP { + p.Type = descriptorpb.FieldDescriptorProto_TYPE_MESSAGE.Enum() + } + // Editions have no required keyword, this label is only set so that downstream users continue + // treating it as required. + if p.GetLabel() == descriptorpb.FieldDescriptorProto_LABEL_REQUIRED { + p.Label = descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL.Enum() + } + } if field.HasDefault() { def, err := defval.Marshal(field.Default(), field.DefaultEnumValue(), field.Kind(), defval.Descriptor) if err != nil && field.DefaultEnumValue() != nil { @@ -168,7 +190,7 @@ func ToFieldDescriptorProto(field protoreflect.FieldDescriptor) *descriptorpb.Fi return p } -// ToOneofDescriptorProto copies a protoreflect.OneofDescriptor into a +// ToOneofDescriptorProto copies a [protoreflect.OneofDescriptor] into a // google.protobuf.OneofDescriptorProto message. func ToOneofDescriptorProto(oneof protoreflect.OneofDescriptor) *descriptorpb.OneofDescriptorProto { return &descriptorpb.OneofDescriptorProto{ @@ -177,7 +199,7 @@ func ToOneofDescriptorProto(oneof protoreflect.OneofDescriptor) *descriptorpb.On } } -// ToEnumDescriptorProto copies a protoreflect.EnumDescriptor into a +// ToEnumDescriptorProto copies a [protoreflect.EnumDescriptor] into a // google.protobuf.EnumDescriptorProto message. 
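Illustrative aside (not part of the vendored change): the To*DescriptorProto helpers above convert reflective descriptors back into descriptor.proto messages, with editions-era group and required fields remapped so downstream consumers keep working. A short sketch of that conversion on a well-known file, assuming the durationpb well-known type is available:

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protodesc"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	md := (&durationpb.Duration{}).ProtoReflect().Descriptor()

	// Message descriptor -> google.protobuf.DescriptorProto
	dp := protodesc.ToDescriptorProto(md)
	fmt.Println(dp.GetName()) // "Duration"

	// File descriptor -> google.protobuf.FileDescriptorProto
	fdp := protodesc.ToFileDescriptorProto(md.ParentFile())
	fmt.Println(fdp.GetName(), fdp.GetSyntax()) // "google/protobuf/duration.proto proto3"
}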
func ToEnumDescriptorProto(enum protoreflect.EnumDescriptor) *descriptorpb.EnumDescriptorProto { p := &descriptorpb.EnumDescriptorProto{ @@ -200,7 +222,7 @@ func ToEnumDescriptorProto(enum protoreflect.EnumDescriptor) *descriptorpb.EnumD return p } -// ToEnumValueDescriptorProto copies a protoreflect.EnumValueDescriptor into a +// ToEnumValueDescriptorProto copies a [protoreflect.EnumValueDescriptor] into a // google.protobuf.EnumValueDescriptorProto message. func ToEnumValueDescriptorProto(value protoreflect.EnumValueDescriptor) *descriptorpb.EnumValueDescriptorProto { return &descriptorpb.EnumValueDescriptorProto{ @@ -210,7 +232,7 @@ func ToEnumValueDescriptorProto(value protoreflect.EnumValueDescriptor) *descrip } } -// ToServiceDescriptorProto copies a protoreflect.ServiceDescriptor into a +// ToServiceDescriptorProto copies a [protoreflect.ServiceDescriptor] into a // google.protobuf.ServiceDescriptorProto message. func ToServiceDescriptorProto(service protoreflect.ServiceDescriptor) *descriptorpb.ServiceDescriptorProto { p := &descriptorpb.ServiceDescriptorProto{ @@ -223,7 +245,7 @@ func ToServiceDescriptorProto(service protoreflect.ServiceDescriptor) *descripto return p } -// ToMethodDescriptorProto copies a protoreflect.MethodDescriptor into a +// ToMethodDescriptorProto copies a [protoreflect.MethodDescriptor] into a // google.protobuf.MethodDescriptorProto message. func ToMethodDescriptorProto(method protoreflect.MethodDescriptor) *descriptorpb.MethodDescriptorProto { p := &descriptorpb.MethodDescriptorProto{ diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go index 55aa1492..c85bfaa5 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go @@ -10,46 +10,46 @@ // // # Protocol Buffer Descriptors // -// Protobuf descriptors (e.g., EnumDescriptor or MessageDescriptor) +// Protobuf descriptors (e.g., [EnumDescriptor] or [MessageDescriptor]) // are immutable objects that represent protobuf type information. // They are wrappers around the messages declared in descriptor.proto. // Protobuf descriptors alone lack any information regarding Go types. // -// Enums and messages generated by this module implement Enum and ProtoMessage, +// Enums and messages generated by this module implement [Enum] and [ProtoMessage], // where the Descriptor and ProtoReflect.Descriptor accessors respectively // return the protobuf descriptor for the values. // // The protobuf descriptor interfaces are not meant to be implemented by // user code since they might need to be extended in the future to support // additions to the protobuf language. -// The "google.golang.org/protobuf/reflect/protodesc" package converts between +// The [google.golang.org/protobuf/reflect/protodesc] package converts between // google.protobuf.DescriptorProto messages and protobuf descriptors. // // # Go Type Descriptors // -// A type descriptor (e.g., EnumType or MessageType) is a constructor for +// A type descriptor (e.g., [EnumType] or [MessageType]) is a constructor for // a concrete Go type that represents the associated protobuf descriptor. // There is commonly a one-to-one relationship between protobuf descriptors and // Go type descriptors, but it can potentially be a one-to-many relationship. 
// -// Enums and messages generated by this module implement Enum and ProtoMessage, +// Enums and messages generated by this module implement [Enum] and [ProtoMessage], // where the Type and ProtoReflect.Type accessors respectively // return the protobuf descriptor for the values. // -// The "google.golang.org/protobuf/types/dynamicpb" package can be used to +// The [google.golang.org/protobuf/types/dynamicpb] package can be used to // create Go type descriptors from protobuf descriptors. // // # Value Interfaces // -// The Enum and Message interfaces provide a reflective view over an +// The [Enum] and [Message] interfaces provide a reflective view over an // enum or message instance. For enums, it provides the ability to retrieve // the enum value number for any concrete enum type. For messages, it provides // the ability to access or manipulate fields of the message. // -// To convert a proto.Message to a protoreflect.Message, use the +// To convert a [google.golang.org/protobuf/proto.Message] to a [protoreflect.Message], use the // former's ProtoReflect method. Since the ProtoReflect method is new to the // v2 message interface, it may not be present on older message implementations. -// The "github.com/golang/protobuf/proto".MessageReflect function can be used +// The [github.com/golang/protobuf/proto.MessageReflect] function can be used // to obtain a reflective view on older messages. // // # Relationships @@ -71,12 +71,12 @@ // │ │ // └────────────────── Type() ───────┘ // -// • An EnumType describes a concrete Go enum type. +// • An [EnumType] describes a concrete Go enum type. // It has an EnumDescriptor and can construct an Enum instance. // -// • An EnumDescriptor describes an abstract protobuf enum type. +// • An [EnumDescriptor] describes an abstract protobuf enum type. // -// • An Enum is a concrete enum instance. Generated enums implement Enum. +// • An [Enum] is a concrete enum instance. Generated enums implement Enum. // // ┌──────────────── New() ─────────────────┐ // │ │ @@ -90,24 +90,26 @@ // │ │ // └─────────────────── Type() ─────────┘ // -// • A MessageType describes a concrete Go message type. -// It has a MessageDescriptor and can construct a Message instance. -// Just as how Go's reflect.Type is a reflective description of a Go type, -// a MessageType is a reflective description of a Go type for a protobuf message. +// • A [MessageType] describes a concrete Go message type. +// It has a [MessageDescriptor] and can construct a [Message] instance. +// Just as how Go's [reflect.Type] is a reflective description of a Go type, +// a [MessageType] is a reflective description of a Go type for a protobuf message. // -// • A MessageDescriptor describes an abstract protobuf message type. -// It has no understanding of Go types. In order to construct a MessageType -// from just a MessageDescriptor, you can consider looking up the message type -// in the global registry using protoregistry.GlobalTypes.FindMessageByName -// or constructing a dynamic MessageType using dynamicpb.NewMessageType. +// • A [MessageDescriptor] describes an abstract protobuf message type. +// It has no understanding of Go types. In order to construct a [MessageType] +// from just a [MessageDescriptor], you can consider looking up the message type +// in the global registry using the FindMessageByName method on +// [google.golang.org/protobuf/reflect/protoregistry.GlobalTypes] +// or constructing a dynamic [MessageType] using +// [google.golang.org/protobuf/types/dynamicpb.NewMessageType]. 
// -// • A Message is a reflective view over a concrete message instance. -// Generated messages implement ProtoMessage, which can convert to a Message. -// Just as how Go's reflect.Value is a reflective view over a Go value, -// a Message is a reflective view over a concrete protobuf message instance. -// Using Go reflection as an analogy, the ProtoReflect method is similar to -// calling reflect.ValueOf, and the Message.Interface method is similar to -// calling reflect.Value.Interface. +// • A [Message] is a reflective view over a concrete message instance. +// Generated messages implement [ProtoMessage], which can convert to a [Message]. +// Just as how Go's [reflect.Value] is a reflective view over a Go value, +// a [Message] is a reflective view over a concrete protobuf message instance. +// Using Go reflection as an analogy, the [ProtoMessage.ProtoReflect] method is similar to +// calling [reflect.ValueOf], and the [Message.Interface] method is similar to +// calling [reflect.Value.Interface]. // // ┌── TypeDescriptor() ──┐ ┌───── Descriptor() ─────┐ // │ V │ V @@ -119,15 +121,15 @@ // │ │ // └────── implements ────────┘ // -// • An ExtensionType describes a concrete Go implementation of an extension. -// It has an ExtensionTypeDescriptor and can convert to/from -// abstract Values and Go values. +// • An [ExtensionType] describes a concrete Go implementation of an extension. +// It has an [ExtensionTypeDescriptor] and can convert to/from +// an abstract [Value] and a Go value. // -// • An ExtensionTypeDescriptor is an ExtensionDescriptor -// which also has an ExtensionType. +// • An [ExtensionTypeDescriptor] is an [ExtensionDescriptor] +// which also has an [ExtensionType]. // -// • An ExtensionDescriptor describes an abstract protobuf extension field and -// may not always be an ExtensionTypeDescriptor. +// • An [ExtensionDescriptor] describes an abstract protobuf extension field and +// may not always be an [ExtensionTypeDescriptor]. package protoreflect import ( @@ -142,7 +144,7 @@ type doNotImplement pragma.DoNotImplement // ProtoMessage is the top-level interface that all proto messages implement. // This is declared in the protoreflect package to avoid a cyclic dependency; -// use the proto.Message type instead, which aliases this type. +// use the [google.golang.org/protobuf/proto.Message] type instead, which aliases this type. type ProtoMessage interface{ ProtoReflect() Message } // Syntax is the language version of the proto file. @@ -151,14 +153,15 @@ type Syntax syntax type syntax int8 // keep exact type opaque as the int type may change const ( - Proto2 Syntax = 2 - Proto3 Syntax = 3 + Proto2 Syntax = 2 + Proto3 Syntax = 3 + Editions Syntax = 4 ) // IsValid reports whether the syntax is valid. func (s Syntax) IsValid() bool { switch s { - case Proto2, Proto3: + case Proto2, Proto3, Editions: return true default: return false @@ -172,6 +175,8 @@ func (s Syntax) String() string { return "proto2" case Proto3: return "proto3" + case Editions: + return "editions" default: return fmt.Sprintf("", s) } @@ -436,7 +441,7 @@ type Names interface { // FullName is a qualified name that uniquely identifies a proto declaration. // A qualified name is the concatenation of the proto package along with the // fully-declared name (i.e., name of parent preceding the name of the child), -// with a '.' delimiter placed between each Name. +// with a '.' delimiter placed between each [Name]. // // This should not have any leading or trailing dots. 
type FullName string // e.g., "google.protobuf.Field.Kind" @@ -480,7 +485,7 @@ func isLetterDigit(c byte) bool { } // Name returns the short name, which is the last identifier segment. -// A single segment FullName is the Name itself. +// A single segment FullName is the [Name] itself. func (n FullName) Name() Name { if i := strings.LastIndexByte(string(n), '.'); i >= 0 { return Name(n[i+1:]) diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go index 717b106f..ea154eec 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go @@ -35,7 +35,7 @@ func (p *SourcePath) appendFileDescriptorProto(b []byte) []byte { b = p.appendSingularField(b, "source_code_info", (*SourcePath).appendSourceCodeInfo) case 12: b = p.appendSingularField(b, "syntax", nil) - case 13: + case 14: b = p.appendSingularField(b, "edition", nil) } return b @@ -160,8 +160,6 @@ func (p *SourcePath) appendFileOptions(b []byte) []byte { b = p.appendSingularField(b, "java_generic_services", nil) case 18: b = p.appendSingularField(b, "py_generic_services", nil) - case 42: - b = p.appendSingularField(b, "php_generic_services", nil) case 23: b = p.appendSingularField(b, "deprecated", nil) case 31: @@ -180,6 +178,8 @@ func (p *SourcePath) appendFileOptions(b []byte) []byte { b = p.appendSingularField(b, "php_metadata_namespace", nil) case 45: b = p.appendSingularField(b, "ruby_package", nil) + case 50: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -240,6 +240,8 @@ func (p *SourcePath) appendMessageOptions(b []byte) []byte { b = p.appendSingularField(b, "map_entry", nil) case 11: b = p.appendSingularField(b, "deprecated_legacy_json_field_conflicts", nil) + case 12: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -285,6 +287,8 @@ func (p *SourcePath) appendEnumOptions(b []byte) []byte { b = p.appendSingularField(b, "deprecated", nil) case 6: b = p.appendSingularField(b, "deprecated_legacy_json_field_conflicts", nil) + case 7: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -330,6 +334,8 @@ func (p *SourcePath) appendServiceOptions(b []byte) []byte { return b } switch (*p)[0] { + case 34: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 33: b = p.appendSingularField(b, "deprecated", nil) case 999: @@ -361,16 +367,41 @@ func (p *SourcePath) appendFieldOptions(b []byte) []byte { b = p.appendSingularField(b, "debug_redact", nil) case 17: b = p.appendSingularField(b, "retention", nil) - case 18: - b = p.appendSingularField(b, "target", nil) case 19: b = p.appendRepeatedField(b, "targets", nil) + case 20: + b = p.appendRepeatedField(b, "edition_defaults", (*SourcePath).appendFieldOptions_EditionDefault) + case 21: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) + case 22: + b = p.appendSingularField(b, "feature_support", (*SourcePath).appendFieldOptions_FeatureSupport) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } return b } 
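Illustrative aside (not part of the vendored change): FullName, documented just above, is a plain string type with helpers for splitting and joining qualified names. A quick illustration:

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
)

func main() {
	fn := protoreflect.FullName("google.protobuf.Field.Kind")
	fmt.Println(fn.Name())            // "Kind" (last identifier segment)
	fmt.Println(fn.Parent())          // "google.protobuf.Field"
	fmt.Println(fn.Append("UNKNOWN")) // "google.protobuf.Field.Kind.UNKNOWN"
	fmt.Println(fn.IsValid())         // true
}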
+func (p *SourcePath) appendFeatureSet(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "field_presence", nil) + case 2: + b = p.appendSingularField(b, "enum_type", nil) + case 3: + b = p.appendSingularField(b, "repeated_field_encoding", nil) + case 4: + b = p.appendSingularField(b, "utf8_validation", nil) + case 5: + b = p.appendSingularField(b, "message_encoding", nil) + case 6: + b = p.appendSingularField(b, "json_format", nil) + } + return b +} + func (p *SourcePath) appendUninterpretedOption(b []byte) []byte { if len(*p) == 0 { return b @@ -422,6 +453,8 @@ func (p *SourcePath) appendExtensionRangeOptions(b []byte) []byte { b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) case 2: b = p.appendRepeatedField(b, "declaration", (*SourcePath).appendExtensionRangeOptions_Declaration) + case 50: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 3: b = p.appendSingularField(b, "verification", nil) } @@ -433,6 +466,8 @@ func (p *SourcePath) appendOneofOptions(b []byte) []byte { return b } switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -446,6 +481,12 @@ func (p *SourcePath) appendEnumValueOptions(b []byte) []byte { switch (*p)[0] { case 1: b = p.appendSingularField(b, "deprecated", nil) + case 2: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) + case 3: + b = p.appendSingularField(b, "debug_redact", nil) + case 4: + b = p.appendSingularField(b, "feature_support", (*SourcePath).appendFieldOptions_FeatureSupport) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -461,12 +502,44 @@ func (p *SourcePath) appendMethodOptions(b []byte) []byte { b = p.appendSingularField(b, "deprecated", nil) case 34: b = p.appendSingularField(b, "idempotency_level", nil) + case 35: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } return b } +func (p *SourcePath) appendFieldOptions_EditionDefault(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 3: + b = p.appendSingularField(b, "edition", nil) + case 2: + b = p.appendSingularField(b, "value", nil) + } + return b +} + +func (p *SourcePath) appendFieldOptions_FeatureSupport(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "edition_introduced", nil) + case 2: + b = p.appendSingularField(b, "edition_deprecated", nil) + case 3: + b = p.appendSingularField(b, "deprecation_warning", nil) + case 4: + b = p.appendSingularField(b, "edition_removed", nil) + } + return b +} + func (p *SourcePath) appendUninterpretedOption_NamePart(b []byte) []byte { if len(*p) == 0 { return b @@ -491,8 +564,6 @@ func (p *SourcePath) appendExtensionRangeOptions_Declaration(b []byte) []byte { b = p.appendSingularField(b, "full_name", nil) case 3: b = p.appendSingularField(b, "type", nil) - case 4: - b = p.appendSingularField(b, "is_repeated", nil) case 5: b = p.appendSingularField(b, "reserved", nil) case 6: diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go index 3867470d..cd8fadba 100644 --- 
a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go @@ -12,7 +12,7 @@ package protoreflect // exactly identical. However, it is possible for the same semantically // identical proto type to be represented by multiple type descriptors. // -// For example, suppose we have t1 and t2 which are both MessageDescriptors. +// For example, suppose we have t1 and t2 which are both an [MessageDescriptor]. // If t1 == t2, then the types are definitely equal and all accessors return // the same information. However, if t1 != t2, then it is still possible that // they still represent the same proto type (e.g., t1.FullName == t2.FullName). @@ -115,7 +115,7 @@ type Descriptor interface { // corresponds with the google.protobuf.FileDescriptorProto message. // // Top-level declarations: -// EnumDescriptor, MessageDescriptor, FieldDescriptor, and/or ServiceDescriptor. +// [EnumDescriptor], [MessageDescriptor], [FieldDescriptor], and/or [ServiceDescriptor]. type FileDescriptor interface { Descriptor // Descriptor.FullName is identical to Package @@ -180,8 +180,8 @@ type FileImport struct { // corresponds with the google.protobuf.DescriptorProto message. // // Nested declarations: -// FieldDescriptor, OneofDescriptor, FieldDescriptor, EnumDescriptor, -// and/or MessageDescriptor. +// [FieldDescriptor], [OneofDescriptor], [FieldDescriptor], [EnumDescriptor], +// and/or [MessageDescriptor]. type MessageDescriptor interface { Descriptor @@ -214,7 +214,7 @@ type MessageDescriptor interface { ExtensionRanges() FieldRanges // ExtensionRangeOptions returns the ith extension range options. // - // To avoid a dependency cycle, this method returns a proto.Message value, + // To avoid a dependency cycle, this method returns a proto.Message] value, // which always contains a google.protobuf.ExtensionRangeOptions message. // This method returns a typed nil-pointer if no options are present. // The caller must import the descriptorpb package to use this. @@ -231,9 +231,9 @@ type MessageDescriptor interface { } type isMessageDescriptor interface{ ProtoType(MessageDescriptor) } -// MessageType encapsulates a MessageDescriptor with a concrete Go implementation. +// MessageType encapsulates a [MessageDescriptor] with a concrete Go implementation. // It is recommended that implementations of this interface also implement the -// MessageFieldTypes interface. +// [MessageFieldTypes] interface. type MessageType interface { // New returns a newly allocated empty message. // It may return nil for synthetic messages representing a map entry. @@ -249,19 +249,19 @@ type MessageType interface { Descriptor() MessageDescriptor } -// MessageFieldTypes extends a MessageType by providing type information +// MessageFieldTypes extends a [MessageType] by providing type information // regarding enums and messages referenced by the message fields. type MessageFieldTypes interface { MessageType - // Enum returns the EnumType for the ith field in Descriptor.Fields. + // Enum returns the EnumType for the ith field in MessageDescriptor.Fields. // It returns nil if the ith field is not an enum kind. // It panics if out of bounds. // // Invariant: mt.Enum(i).Descriptor() == mt.Descriptor().Fields(i).Enum() Enum(i int) EnumType - // Message returns the MessageType for the ith field in Descriptor.Fields. + // Message returns the MessageType for the ith field in MessageDescriptor.Fields. // It returns nil if the ith field is not a message or group kind. 
// It panics if out of bounds. // @@ -286,8 +286,8 @@ type MessageDescriptors interface { // corresponds with the google.protobuf.FieldDescriptorProto message. // // It is used for both normal fields defined within the parent message -// (e.g., MessageDescriptor.Fields) and fields that extend some remote message -// (e.g., FileDescriptor.Extensions or MessageDescriptor.Extensions). +// (e.g., [MessageDescriptor.Fields]) and fields that extend some remote message +// (e.g., [FileDescriptor.Extensions] or [MessageDescriptor.Extensions]). type FieldDescriptor interface { Descriptor @@ -344,7 +344,7 @@ type FieldDescriptor interface { // IsMap reports whether this field represents a map, // where the value type for the associated field is a Map. // It is equivalent to checking whether Cardinality is Repeated, - // that the Kind is MessageKind, and that Message.IsMapEntry reports true. + // that the Kind is MessageKind, and that MessageDescriptor.IsMapEntry reports true. IsMap() bool // MapKey returns the field descriptor for the key in the map entry. @@ -419,7 +419,7 @@ type OneofDescriptor interface { // IsSynthetic reports whether this is a synthetic oneof created to support // proto3 optional semantics. If true, Fields contains exactly one field - // with HasOptionalKeyword specified. + // with FieldDescriptor.HasOptionalKeyword specified. IsSynthetic() bool // Fields is a list of fields belonging to this oneof. @@ -442,10 +442,10 @@ type OneofDescriptors interface { doNotImplement } -// ExtensionDescriptor is an alias of FieldDescriptor for documentation. +// ExtensionDescriptor is an alias of [FieldDescriptor] for documentation. type ExtensionDescriptor = FieldDescriptor -// ExtensionTypeDescriptor is an ExtensionDescriptor with an associated ExtensionType. +// ExtensionTypeDescriptor is an [ExtensionDescriptor] with an associated [ExtensionType]. type ExtensionTypeDescriptor interface { ExtensionDescriptor @@ -470,12 +470,12 @@ type ExtensionDescriptors interface { doNotImplement } -// ExtensionType encapsulates an ExtensionDescriptor with a concrete +// ExtensionType encapsulates an [ExtensionDescriptor] with a concrete // Go implementation. The nested field descriptor must be for a extension field. // // While a normal field is a member of the parent message that it is declared -// within (see Descriptor.Parent), an extension field is a member of some other -// target message (see ExtensionDescriptor.Extendee) and may have no +// within (see [Descriptor.Parent]), an extension field is a member of some other +// target message (see [FieldDescriptor.ContainingMessage]) and may have no // relationship with the parent. However, the full name of an extension field is // relative to the parent that it is declared within. // @@ -510,7 +510,7 @@ type ExtensionType interface { // // ValueOf is more extensive than protoreflect.ValueOf for a given field's // value as it has more type information available. - ValueOf(interface{}) Value + ValueOf(any) Value // InterfaceOf completely unwraps the Value to the underlying Go type. // InterfaceOf panics if the input is nil or does not represent the @@ -519,20 +519,20 @@ type ExtensionType interface { // // InterfaceOf is able to unwrap the Value further than Value.Interface // as it has more type information available. - InterfaceOf(Value) interface{} + InterfaceOf(Value) any // IsValidValue reports whether the Value is valid to assign to the field. IsValidValue(Value) bool // IsValidInterface reports whether the input is valid to assign to the field. 
- IsValidInterface(interface{}) bool + IsValidInterface(any) bool } // EnumDescriptor describes an enum and // corresponds with the google.protobuf.EnumDescriptorProto message. // // Nested declarations: -// EnumValueDescriptor. +// [EnumValueDescriptor]. type EnumDescriptor interface { Descriptor @@ -544,11 +544,17 @@ type EnumDescriptor interface { // ReservedRanges is a list of reserved ranges of enum numbers. ReservedRanges() EnumRanges + // IsClosed reports whether this enum uses closed semantics. + // See https://protobuf.dev/programming-guides/enum/#definitions. + // Note: the Go protobuf implementation is not spec compliant and treats + // all enums as open enums. + IsClosed() bool + isEnumDescriptor } type isEnumDescriptor interface{ ProtoType(EnumDescriptor) } -// EnumType encapsulates an EnumDescriptor with a concrete Go implementation. +// EnumType encapsulates an [EnumDescriptor] with a concrete Go implementation. type EnumType interface { // New returns an instance of this enum type with its value set to n. New(n EnumNumber) Enum @@ -610,7 +616,7 @@ type EnumValueDescriptors interface { // ServiceDescriptor describes a service and // corresponds with the google.protobuf.ServiceDescriptorProto message. // -// Nested declarations: MethodDescriptor. +// Nested declarations: [MethodDescriptor]. type ServiceDescriptor interface { Descriptor diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go index 37601b78..a7b0d06f 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go @@ -27,16 +27,16 @@ type Enum interface { // Message is a reflective interface for a concrete message value, // encapsulating both type and value information for the message. // -// Accessor/mutators for individual fields are keyed by FieldDescriptor. +// Accessor/mutators for individual fields are keyed by [FieldDescriptor]. // For non-extension fields, the descriptor must exactly match the // field known by the parent message. -// For extension fields, the descriptor must implement ExtensionTypeDescriptor, -// extend the parent message (i.e., have the same message FullName), and +// For extension fields, the descriptor must implement [ExtensionTypeDescriptor], +// extend the parent message (i.e., have the same message [FullName]), and // be within the parent's extension range. // -// Each field Value can be a scalar or a composite type (Message, List, or Map). -// See Value for the Go types associated with a FieldDescriptor. -// Providing a Value that is invalid or of an incorrect type panics. +// Each field [Value] can be a scalar or a composite type ([Message], [List], or [Map]). +// See [Value] for the Go types associated with a [FieldDescriptor]. +// Providing a [Value] that is invalid or of an incorrect type panics. type Message interface { // Descriptor returns message descriptor, which contains only the protobuf // type information for the message. @@ -152,7 +152,7 @@ type Message interface { // This method may return nil. // // The returned methods type is identical to - // "google.golang.org/protobuf/runtime/protoiface".Methods. + // google.golang.org/protobuf/runtime/protoiface.Methods. // Consult the protoiface package documentation for details. ProtoMethods() *methods } @@ -175,8 +175,8 @@ func (b RawFields) IsValid() bool { } // List is a zero-indexed, ordered list. 
-// The element Value type is determined by FieldDescriptor.Kind. -// Providing a Value that is invalid or of an incorrect type panics. +// The element [Value] type is determined by [FieldDescriptor.Kind]. +// Providing a [Value] that is invalid or of an incorrect type panics. type List interface { // Len reports the number of entries in the List. // Get, Set, and Truncate panic with out of bound indexes. @@ -226,9 +226,9 @@ type List interface { } // Map is an unordered, associative map. -// The entry MapKey type is determined by FieldDescriptor.MapKey.Kind. -// The entry Value type is determined by FieldDescriptor.MapValue.Kind. -// Providing a MapKey or Value that is invalid or of an incorrect type panics. +// The entry [MapKey] type is determined by [FieldDescriptor.MapKey].Kind. +// The entry [Value] type is determined by [FieldDescriptor.MapValue].Kind. +// Providing a [MapKey] or [Value] that is invalid or of an incorrect type panics. type Map interface { // Len reports the number of elements in the map. Len() int diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go index 59165254..654599d4 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go @@ -24,19 +24,19 @@ import ( // Unlike the == operator, a NaN is equal to another NaN. // // - Enums are equal if they contain the same number. -// Since Value does not contain an enum descriptor, +// Since [Value] does not contain an enum descriptor, // enum values do not consider the type of the enum. // // - Other scalar values are equal if they contain the same value. // -// - Message values are equal if they belong to the same message descriptor, +// - [Message] values are equal if they belong to the same message descriptor, // have the same set of populated known and extension field values, // and the same set of unknown fields values. // -// - Lists are equal if they are the same length and +// - [List] values are equal if they are the same length and // each corresponding element is equal. // -// - Maps are equal if they have the same set of keys and +// - [Map] values are equal if they have the same set of keys and // the corresponding value for each key is equal. 
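Illustrative aside (not part of the vendored change): the Value.Equal rules documented above differ from Go's == operator in a few places, notably that NaN compares equal to NaN and enum values compare only by number. A tiny sketch:

package main

import (
	"fmt"
	"math"

	"google.golang.org/protobuf/reflect/protoreflect"
)

func main() {
	a := protoreflect.ValueOfFloat64(math.NaN())
	b := protoreflect.ValueOfFloat64(math.NaN())
	fmt.Println(a.Equal(b)) // true: unlike ==, NaN is equal to another NaN

	x := protoreflect.ValueOfEnum(2)
	y := protoreflect.ValueOfEnum(2)
	fmt.Println(x.Equal(y)) // true: enum values compare by number only
}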
func (v1 Value) Equal(v2 Value) bool { return equalValue(v1, v2) diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go index 7ced876f..75f83a2a 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go @@ -32,11 +32,11 @@ const ( type value struct { pragma.DoNotCompare // 0B - typ valueType // 8B - num uint64 // 8B - str string // 16B - bin []byte // 24B - iface interface{} // 16B + typ valueType // 8B + num uint64 // 8B + str string // 16B + bin []byte // 24B + iface any // 16B } func valueOfString(v string) Value { @@ -45,7 +45,7 @@ func valueOfString(v string) Value { func valueOfBytes(v []byte) Value { return Value{typ: bytesType, bin: v} } -func valueOfIface(v interface{}) Value { +func valueOfIface(v any) Value { return Value{typ: ifaceType, iface: v} } @@ -55,6 +55,6 @@ func (v Value) getString() string { func (v Value) getBytes() []byte { return v.bin } -func (v Value) getIface() interface{} { +func (v Value) getIface() any { return v.iface } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go index 08e5ef73..9fe83cef 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go @@ -11,7 +11,7 @@ import ( // Value is a union where only one Go type may be set at a time. // The Value is used to represent all possible values a field may take. -// The following shows which Go type is used to represent each proto Kind: +// The following shows which Go type is used to represent each proto [Kind]: // // ╔════════════╤═════════════════════════════════════╗ // ║ Go type │ Protobuf kind ║ @@ -31,22 +31,22 @@ import ( // // Multiple protobuf Kinds may be represented by a single Go type if the type // can losslessly represent the information for the proto kind. For example, -// Int64Kind, Sint64Kind, and Sfixed64Kind are all represented by int64, +// [Int64Kind], [Sint64Kind], and [Sfixed64Kind] are all represented by int64, // but use different integer encoding methods. // -// The List or Map types are used if the field cardinality is repeated. -// A field is a List if FieldDescriptor.IsList reports true. -// A field is a Map if FieldDescriptor.IsMap reports true. +// The [List] or [Map] types are used if the field cardinality is repeated. +// A field is a [List] if [FieldDescriptor.IsList] reports true. +// A field is a [Map] if [FieldDescriptor.IsMap] reports true. // // Converting to/from a Value and a concrete Go value panics on type mismatch. -// For example, ValueOf("hello").Int() panics because this attempts to +// For example, [ValueOf]("hello").Int() panics because this attempts to // retrieve an int64 from a string. // -// List, Map, and Message Values are called "composite" values. +// [List], [Map], and [Message] Values are called "composite" values. // // A composite Value may alias (reference) memory at some location, // such that changes to the Value updates the that location. -// A composite value acquired with a Mutable method, such as Message.Mutable, +// A composite value acquired with a Mutable method, such as [Message.Mutable], // always references the source object. // // For example: @@ -65,12 +65,12 @@ import ( // // appending to the List here may or may not modify the message. 
// list.Append(protoreflect.ValueOfInt32(0)) // -// Some operations, such as Message.Get, may return an "empty, read-only" +// Some operations, such as [Message.Get], may return an "empty, read-only" // composite Value. Modifying an empty, read-only value panics. type Value value -// The protoreflect API uses a custom Value union type instead of interface{} -// to keep the future open for performance optimizations. Using an interface{} +// The protoreflect API uses a custom Value union type instead of any +// to keep the future open for performance optimizations. Using an any // always incurs an allocation for primitives (e.g., int64) since it needs to // be boxed on the heap (as interfaces can only contain pointers natively). // Instead, we represent the Value union as a flat struct that internally keeps @@ -85,7 +85,7 @@ type Value value // ValueOf returns a Value initialized with the concrete value stored in v. // This panics if the type does not match one of the allowed types in the // Value union. -func ValueOf(v interface{}) Value { +func ValueOf(v any) Value { switch v := v.(type) { case nil: return Value{} @@ -192,10 +192,10 @@ func (v Value) IsValid() bool { return v.typ != nilType } -// Interface returns v as an interface{}. +// Interface returns v as an any. // // Invariant: v == ValueOf(v).Interface() -func (v Value) Interface() interface{} { +func (v Value) Interface() any { switch v.typ { case nilType: return nil @@ -306,7 +306,7 @@ func (v Value) Float() float64 { } } -// String returns v as a string. Since this method implements fmt.Stringer, +// String returns v as a string. Since this method implements [fmt.Stringer], // this returns the formatted string value for any non-string type. func (v Value) String() string { switch v.typ { @@ -327,7 +327,7 @@ func (v Value) Bytes() []byte { } } -// Enum returns v as a EnumNumber and panics if the type is not a EnumNumber. +// Enum returns v as a [EnumNumber] and panics if the type is not a [EnumNumber]. func (v Value) Enum() EnumNumber { switch v.typ { case enumType: @@ -337,7 +337,7 @@ func (v Value) Enum() EnumNumber { } } -// Message returns v as a Message and panics if the type is not a Message. +// Message returns v as a [Message] and panics if the type is not a [Message]. func (v Value) Message() Message { switch vi := v.getIface().(type) { case Message: @@ -347,7 +347,7 @@ func (v Value) Message() Message { } } -// List returns v as a List and panics if the type is not a List. +// List returns v as a [List] and panics if the type is not a [List]. func (v Value) List() List { switch vi := v.getIface().(type) { case List: @@ -357,7 +357,7 @@ func (v Value) List() List { } } -// Map returns v as a Map and panics if the type is not a Map. +// Map returns v as a [Map] and panics if the type is not a [Map]. func (v Value) Map() Map { switch vi := v.getIface().(type) { case Map: @@ -367,7 +367,7 @@ func (v Value) Map() Map { } } -// MapKey returns v as a MapKey and panics for invalid MapKey types. +// MapKey returns v as a [MapKey] and panics for invalid [MapKey] types. func (v Value) MapKey() MapKey { switch v.typ { case boolType, int32Type, int64Type, uint32Type, uint64Type, stringType: @@ -378,8 +378,8 @@ func (v Value) MapKey() MapKey { } // MapKey is used to index maps, where the Go type of the MapKey must match -// the specified key Kind (see MessageDescriptor.IsMapEntry). -// The following shows what Go type is used to represent each proto Kind: +// the specified key [Kind] (see [MessageDescriptor.IsMapEntry]). 
+// The following shows what Go type is used to represent each proto [Kind]: // // ╔═════════╤═════════════════════════════════════╗ // ║ Go type │ Protobuf kind ║ @@ -392,13 +392,13 @@ func (v Value) MapKey() MapKey { // ║ string │ StringKind ║ // ╚═════════╧═════════════════════════════════════╝ // -// A MapKey is constructed and accessed through a Value: +// A MapKey is constructed and accessed through a [Value]: // // k := ValueOf("hash").MapKey() // convert string to MapKey // s := k.String() // convert MapKey to string // -// The MapKey is a strict subset of valid types used in Value; -// converting a Value to a MapKey with an invalid type panics. +// The MapKey is a strict subset of valid types used in [Value]; +// converting a [Value] to a MapKey with an invalid type panics. type MapKey value // IsValid reports whether k is populated with a value. @@ -406,8 +406,8 @@ func (k MapKey) IsValid() bool { return Value(k).IsValid() } -// Interface returns k as an interface{}. -func (k MapKey) Interface() interface{} { +// Interface returns k as an any. +func (k MapKey) Interface() any { return Value(k).Interface() } @@ -426,13 +426,13 @@ func (k MapKey) Uint() uint64 { return Value(k).Uint() } -// String returns k as a string. Since this method implements fmt.Stringer, +// String returns k as a string. Since this method implements [fmt.Stringer], // this returns the formatted string value for any non-string type. func (k MapKey) String() string { return Value(k).String() } -// Value returns k as a Value. +// Value returns k as a [Value]. func (k MapKey) Value() Value { return Value(k) } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go similarity index 93% rename from vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go rename to vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go index 702ddf22..7f3583ea 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine -// +build !purego,!appengine +//go:build !purego && !appengine && !go1.21 +// +build !purego,!appengine,!go1.21 package protoreflect @@ -45,7 +45,7 @@ var ( // typeOf returns a pointer to the Go type information. // The pointer is comparable and equal if and only if the types are identical. 
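Illustrative aside (not part of the vendored change): the Value and MapKey documentation above describes a union type whose accessors panic on kind mismatch, and map keys that are derived from values of a valid key kind. A short sketch exercising both, using the structpb well-known type (google.protobuf.Struct carries a map<string, Value> field named "fields"):

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	// Scalar values: the concrete Go type must match the kind table above,
	// otherwise ValueOf (and the typed accessors) panic.
	v := protoreflect.ValueOfInt32(42)
	fmt.Println(v.Int()) // 42

	// A MapKey is derived from a Value of a valid key kind.
	k := protoreflect.ValueOfString("answer").MapKey()

	// Composite values: read an entry out of the Struct's map field.
	s, err := structpb.NewStruct(map[string]any{"answer": 42.0})
	if err != nil {
		panic(err)
	}
	m := s.ProtoReflect()
	fieldsFD := m.Descriptor().Fields().ByName("fields")
	fmt.Println(fieldsFD.IsMap()) // true
	entry := m.Get(fieldsFD).Map().Get(k)
	fmt.Println(entry.Message().Interface()) // the wrapped number value (42)
}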
-func typeOf(t interface{}) unsafe.Pointer { +func typeOf(t any) unsafe.Pointer { return (*ifaceHeader)(unsafe.Pointer(&t)).Type } @@ -80,7 +80,7 @@ func valueOfBytes(v []byte) Value { p := (*sliceHeader)(unsafe.Pointer(&v)) return Value{typ: bytesType, ptr: p.Data, num: uint64(len(v))} } -func valueOfIface(v interface{}) Value { +func valueOfIface(v any) Value { p := (*ifaceHeader)(unsafe.Pointer(&v)) return Value{typ: p.Type, ptr: p.Data} } @@ -93,7 +93,7 @@ func (v Value) getBytes() (x []byte) { *(*sliceHeader)(unsafe.Pointer(&x)) = sliceHeader{Data: v.ptr, Len: int(v.num), Cap: int(v.num)} return x } -func (v Value) getIface() (x interface{}) { +func (v Value) getIface() (x any) { *(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr} return x } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go new file mode 100644 index 00000000..f7d38699 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go @@ -0,0 +1,87 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !purego && !appengine && go1.21 +// +build !purego,!appengine,go1.21 + +package protoreflect + +import ( + "unsafe" + + "google.golang.org/protobuf/internal/pragma" +) + +type ( + ifaceHeader struct { + _ [0]any // if interfaces have greater alignment than unsafe.Pointer, this will enforce it. + Type unsafe.Pointer + Data unsafe.Pointer + } +) + +var ( + nilType = typeOf(nil) + boolType = typeOf(*new(bool)) + int32Type = typeOf(*new(int32)) + int64Type = typeOf(*new(int64)) + uint32Type = typeOf(*new(uint32)) + uint64Type = typeOf(*new(uint64)) + float32Type = typeOf(*new(float32)) + float64Type = typeOf(*new(float64)) + stringType = typeOf(*new(string)) + bytesType = typeOf(*new([]byte)) + enumType = typeOf(*new(EnumNumber)) +) + +// typeOf returns a pointer to the Go type information. +// The pointer is comparable and equal if and only if the types are identical. +func typeOf(t any) unsafe.Pointer { + return (*ifaceHeader)(unsafe.Pointer(&t)).Type +} + +// value is a union where only one type can be represented at a time. +// The struct is 24B large on 64-bit systems and requires the minimum storage +// necessary to represent each possible type. +// +// The Go GC needs to be able to scan variables containing pointers. +// As such, pointers and non-pointers cannot be intermixed. +type value struct { + pragma.DoNotCompare // 0B + + // typ stores the type of the value as a pointer to the Go type. + typ unsafe.Pointer // 8B + + // ptr stores the data pointer for a String, Bytes, or interface value. + ptr unsafe.Pointer // 8B + + // num stores a Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, or + // Enum value as a raw uint64. + // + // It is also used to store the length of a String or Bytes value; + // the capacity is ignored. 
+ num uint64 // 8B +} + +func valueOfString(v string) Value { + return Value{typ: stringType, ptr: unsafe.Pointer(unsafe.StringData(v)), num: uint64(len(v))} +} +func valueOfBytes(v []byte) Value { + return Value{typ: bytesType, ptr: unsafe.Pointer(unsafe.SliceData(v)), num: uint64(len(v))} +} +func valueOfIface(v any) Value { + p := (*ifaceHeader)(unsafe.Pointer(&v)) + return Value{typ: p.Type, ptr: p.Data} +} + +func (v Value) getString() string { + return unsafe.String((*byte)(v.ptr), v.num) +} +func (v Value) getBytes() []byte { + return unsafe.Slice((*byte)(v.ptr), v.num) +} +func (v Value) getIface() (x any) { + *(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr} + return x +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go index aeb55977..de177733 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go +++ b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go @@ -5,12 +5,12 @@ // Package protoregistry provides data structures to register and lookup // protobuf descriptor types. // -// The Files registry contains file descriptors and provides the ability +// The [Files] registry contains file descriptors and provides the ability // to iterate over the files or lookup a specific descriptor within the files. -// Files only contains protobuf descriptors and has no understanding of Go +// [Files] only contains protobuf descriptors and has no understanding of Go // type information that may be associated with each descriptor. // -// The Types registry contains descriptor types for which there is a known +// The [Types] registry contains descriptor types for which there is a known // Go type associated with that descriptor. It provides the ability to iterate // over the registered types or lookup a type by name. package protoregistry @@ -95,7 +95,7 @@ type Files struct { // multiple files. Only top-level declarations are registered. // Note that enum values are in the top-level since that are in the same // scope as the parent enum. - descsByName map[protoreflect.FullName]interface{} + descsByName map[protoreflect.FullName]any filesByPath map[string][]protoreflect.FileDescriptor numFiles int } @@ -117,7 +117,7 @@ func (r *Files) RegisterFile(file protoreflect.FileDescriptor) error { defer globalMutex.Unlock() } if r.descsByName == nil { - r.descsByName = map[protoreflect.FullName]interface{}{ + r.descsByName = map[protoreflect.FullName]any{ "": &packageDescriptor{}, } r.filesByPath = make(map[string][]protoreflect.FileDescriptor) @@ -218,7 +218,7 @@ func (r *Files) checkGenProtoConflict(path string) { // FindDescriptorByName looks up a descriptor by the full name. // -// This returns (nil, NotFound) if not found. +// This returns (nil, [NotFound]) if not found. func (r *Files) FindDescriptorByName(name protoreflect.FullName) (protoreflect.Descriptor, error) { if r == nil { return nil, NotFound @@ -310,7 +310,7 @@ func (s *nameSuffix) Pop() (name protoreflect.Name) { // FindFileByPath looks up a file by the path. // -// This returns (nil, NotFound) if not found. +// This returns (nil, [NotFound]) if not found. // This returns an error if multiple files have the same path. 
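The new value_unsafe_go121.go hunk above rebuilds strings and byte slices from the stored (ptr, num) pair using the standard library's unsafe.String/StringData and unsafe.Slice/SliceData helpers. A standalone sketch of that round trip, independent of the patch:

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	s := "protobuf"

	// StringData exposes the pointer to the string's backing bytes; String
	// rebuilds a string from a pointer and length, mirroring how getString
	// reconstitutes the value from (ptr, num).
	p := unsafe.StringData(s)
	fmt.Println(unsafe.String(p, len(s)) == s) // true

	// The slice analogue used by valueOfBytes/getBytes.
	b := []byte{1, 2, 3}
	q := unsafe.SliceData(b)
	fmt.Println(unsafe.Slice(q, len(b))) // [1 2 3]
}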
func (r *Files) FindFileByPath(path string) (protoreflect.FileDescriptor, error) { if r == nil { @@ -431,7 +431,7 @@ func rangeTopLevelDescriptors(fd protoreflect.FileDescriptor, f func(protoreflec // A compliant implementation must deterministically return the same type // if no error is encountered. // -// The Types type implements this interface. +// The [Types] type implements this interface. type MessageTypeResolver interface { // FindMessageByName looks up a message by its full name. // E.g., "google.protobuf.Any" @@ -451,7 +451,7 @@ type MessageTypeResolver interface { // A compliant implementation must deterministically return the same type // if no error is encountered. // -// The Types type implements this interface. +// The [Types] type implements this interface. type ExtensionTypeResolver interface { // FindExtensionByName looks up a extension field by the field's full name. // Note that this is the full name of the field as determined by @@ -485,7 +485,7 @@ type Types struct { } type ( - typesByName map[protoreflect.FullName]interface{} + typesByName map[protoreflect.FullName]any extensionsByMessage map[protoreflect.FullName]extensionsByNumber extensionsByNumber map[protoreflect.FieldNumber]protoreflect.ExtensionType ) @@ -570,7 +570,7 @@ func (r *Types) RegisterExtension(xt protoreflect.ExtensionType) error { return nil } -func (r *Types) register(kind string, desc protoreflect.Descriptor, typ interface{}) error { +func (r *Types) register(kind string, desc protoreflect.Descriptor, typ any) error { name := desc.FullName() prev := r.typesByName[name] if prev != nil { @@ -590,7 +590,7 @@ func (r *Types) register(kind string, desc protoreflect.Descriptor, typ interfac // FindEnumByName looks up an enum by its full name. // E.g., "google.protobuf.Field.Kind". // -// This returns (nil, NotFound) if not found. +// This returns (nil, [NotFound]) if not found. func (r *Types) FindEnumByName(enum protoreflect.FullName) (protoreflect.EnumType, error) { if r == nil { return nil, NotFound @@ -611,7 +611,7 @@ func (r *Types) FindEnumByName(enum protoreflect.FullName) (protoreflect.EnumTyp // FindMessageByName looks up a message by its full name, // e.g. "google.protobuf.Any". // -// This returns (nil, NotFound) if not found. +// This returns (nil, [NotFound]) if not found. func (r *Types) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) { if r == nil { return nil, NotFound @@ -632,7 +632,7 @@ func (r *Types) FindMessageByName(message protoreflect.FullName) (protoreflect.M // FindMessageByURL looks up a message by a URL identifier. // See documentation on google.protobuf.Any.type_url for the URL format. // -// This returns (nil, NotFound) if not found. +// This returns (nil, [NotFound]) if not found. func (r *Types) FindMessageByURL(url string) (protoreflect.MessageType, error) { // This function is similar to FindMessageByName but // truncates anything before and including '/' in the URL. @@ -662,7 +662,7 @@ func (r *Types) FindMessageByURL(url string) (protoreflect.MessageType, error) { // where the extension is declared and is unrelated to the full name of the // message being extended. // -// This returns (nil, NotFound) if not found. +// This returns (nil, [NotFound]) if not found. 
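The registry semantics described in this hunk (lookups returning (nil, NotFound), URL-based message resolution via the Types registry) can be exercised against the global registries. A small sketch, assuming the generated anypb package is linked in so that google.protobuf.Any is registered at init:

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoregistry"
	_ "google.golang.org/protobuf/types/known/anypb" // registers google.protobuf.Any in GlobalTypes
)

func main() {
	mt, err := protoregistry.GlobalTypes.FindMessageByName("google.protobuf.Any")
	if err != nil {
		fmt.Println(err) // protoregistry.NotFound when nothing is registered under that name
		return
	}
	fmt.Println(mt.Descriptor().FullName()) // google.protobuf.Any

	// The same type resolved through an Any-style type URL; everything up to
	// and including the last '/' is ignored.
	if mt2, err := protoregistry.GlobalTypes.FindMessageByURL("type.googleapis.com/google.protobuf.Any"); err == nil {
		fmt.Println(mt2.Descriptor().FullName())
	}
}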
func (r *Types) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { if r == nil { return nil, NotFound @@ -703,7 +703,7 @@ func (r *Types) FindExtensionByName(field protoreflect.FullName) (protoreflect.E // FindExtensionByNumber looks up a extension field by the field number // within some parent message, identified by full name. // -// This returns (nil, NotFound) if not found. +// This returns (nil, [NotFound]) if not found. func (r *Types) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { if r == nil { return nil, NotFound @@ -841,7 +841,7 @@ func (r *Types) RangeExtensionsByMessage(message protoreflect.FullName, f func(p } } -func typeName(t interface{}) string { +func typeName(t any) string { switch t.(type) { case protoreflect.EnumType: return "enum" @@ -854,7 +854,7 @@ func typeName(t interface{}) string { } } -func amendErrorWithCaller(err error, prev, curr interface{}) error { +func amendErrorWithCaller(err error, prev, curr any) error { prevPkg := goPackage(prev) currPkg := goPackage(curr) if prevPkg == "" || currPkg == "" || prevPkg == currPkg { @@ -863,7 +863,7 @@ func amendErrorWithCaller(err error, prev, curr interface{}) error { return errors.New("%s\n\tpreviously from: %q\n\tcurrently from: %q", err, prevPkg, currPkg) } -func goPackage(v interface{}) string { +func goPackage(v any) string { switch d := v.(type) { case protoreflect.EnumType: v = d.Descriptor() diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go index 04c00f73..9403eb07 100644 --- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -48,6 +48,108 @@ import ( sync "sync" ) +// The full set of known editions. +type Edition int32 + +const ( + // A placeholder for an unknown edition value. + Edition_EDITION_UNKNOWN Edition = 0 + // A placeholder edition for specifying default behaviors *before* a feature + // was first introduced. This is effectively an "infinite past". + Edition_EDITION_LEGACY Edition = 900 + // Legacy syntax "editions". These pre-date editions, but behave much like + // distinct editions. These can't be used to specify the edition of proto + // files, but feature definitions must supply proto2/proto3 defaults for + // backwards compatibility. + Edition_EDITION_PROTO2 Edition = 998 + Edition_EDITION_PROTO3 Edition = 999 + // Editions that have been released. The specific values are arbitrary and + // should not be depended on, but they will always be time-ordered for easy + // comparison. + Edition_EDITION_2023 Edition = 1000 + Edition_EDITION_2024 Edition = 1001 + // Placeholder editions for testing feature resolution. These should not be + // used or relyed on outside of tests. + Edition_EDITION_1_TEST_ONLY Edition = 1 + Edition_EDITION_2_TEST_ONLY Edition = 2 + Edition_EDITION_99997_TEST_ONLY Edition = 99997 + Edition_EDITION_99998_TEST_ONLY Edition = 99998 + Edition_EDITION_99999_TEST_ONLY Edition = 99999 + // Placeholder for specifying unbounded edition support. This should only + // ever be used by plugins that can expect to never require any changes to + // support a new edition. + Edition_EDITION_MAX Edition = 2147483647 +) + +// Enum value maps for Edition. 
+var ( + Edition_name = map[int32]string{ + 0: "EDITION_UNKNOWN", + 900: "EDITION_LEGACY", + 998: "EDITION_PROTO2", + 999: "EDITION_PROTO3", + 1000: "EDITION_2023", + 1001: "EDITION_2024", + 1: "EDITION_1_TEST_ONLY", + 2: "EDITION_2_TEST_ONLY", + 99997: "EDITION_99997_TEST_ONLY", + 99998: "EDITION_99998_TEST_ONLY", + 99999: "EDITION_99999_TEST_ONLY", + 2147483647: "EDITION_MAX", + } + Edition_value = map[string]int32{ + "EDITION_UNKNOWN": 0, + "EDITION_LEGACY": 900, + "EDITION_PROTO2": 998, + "EDITION_PROTO3": 999, + "EDITION_2023": 1000, + "EDITION_2024": 1001, + "EDITION_1_TEST_ONLY": 1, + "EDITION_2_TEST_ONLY": 2, + "EDITION_99997_TEST_ONLY": 99997, + "EDITION_99998_TEST_ONLY": 99998, + "EDITION_99999_TEST_ONLY": 99999, + "EDITION_MAX": 2147483647, + } +) + +func (x Edition) Enum() *Edition { + p := new(Edition) + *p = x + return p +} + +func (x Edition) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Edition) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[0].Descriptor() +} + +func (Edition) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[0] +} + +func (x Edition) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *Edition) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = Edition(num) + return nil +} + +// Deprecated: Use Edition.Descriptor instead. +func (Edition) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{0} +} + // The verification state of the extension range. type ExtensionRangeOptions_VerificationState int32 @@ -80,11 +182,11 @@ func (x ExtensionRangeOptions_VerificationState) String() string { } func (ExtensionRangeOptions_VerificationState) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[0].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor() } func (ExtensionRangeOptions_VerificationState) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[0] + return &file_google_protobuf_descriptor_proto_enumTypes[1] } func (x ExtensionRangeOptions_VerificationState) Number() protoreflect.EnumNumber { @@ -125,9 +227,10 @@ const ( FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 // Tag-delimited aggregate. - // Group type is deprecated and not supported in proto3. However, Proto3 + // Group type is deprecated and not supported after google.protobuf. However, Proto3 // implementations should still be able to parse the group wire format and - // treat group fields as unknown fields. + // treat group fields as unknown fields. In Editions, the group wire format + // can be enabled via the `message_encoding` feature. FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 // Length-delimited aggregate. // New in version 2. 
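The Edition enum introduced at the top of this file behaves like any other generated proto2-style enum; a brief sketch of how its names, numbers, and ordering line up (using the descriptorpb package this file belongs to):

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	e := descriptorpb.Edition_EDITION_2023
	fmt.Println(e.String(), e.Number()) // EDITION_2023 1000

	// Released editions are time-ordered by numeric value, so plain comparison works.
	fmt.Println(descriptorpb.Edition_EDITION_2023 < descriptorpb.Edition_EDITION_2024) // true

	// The generated name/value maps mirror the enum definition above.
	fmt.Println(descriptorpb.Edition_name[1001]) // EDITION_2024
}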
@@ -195,11 +298,11 @@ func (x FieldDescriptorProto_Type) String() string { } func (FieldDescriptorProto_Type) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor() } func (FieldDescriptorProto_Type) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[1] + return &file_google_protobuf_descriptor_proto_enumTypes[2] } func (x FieldDescriptorProto_Type) Number() protoreflect.EnumNumber { @@ -226,21 +329,24 @@ type FieldDescriptorProto_Label int32 const ( // 0 is reserved for errors FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 - FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 + // The required label is only allowed in google.protobuf. In proto3 and Editions + // it's explicitly prohibited. In Editions, the `field_presence` feature + // can be used to get this behavior. + FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 ) // Enum value maps for FieldDescriptorProto_Label. var ( FieldDescriptorProto_Label_name = map[int32]string{ 1: "LABEL_OPTIONAL", - 2: "LABEL_REQUIRED", 3: "LABEL_REPEATED", + 2: "LABEL_REQUIRED", } FieldDescriptorProto_Label_value = map[string]int32{ "LABEL_OPTIONAL": 1, - "LABEL_REQUIRED": 2, "LABEL_REPEATED": 3, + "LABEL_REQUIRED": 2, } ) @@ -255,11 +361,11 @@ func (x FieldDescriptorProto_Label) String() string { } func (FieldDescriptorProto_Label) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor() } func (FieldDescriptorProto_Label) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[2] + return &file_google_protobuf_descriptor_proto_enumTypes[3] } func (x FieldDescriptorProto_Label) Number() protoreflect.EnumNumber { @@ -316,11 +422,11 @@ func (x FileOptions_OptimizeMode) String() string { } func (FileOptions_OptimizeMode) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor() } func (FileOptions_OptimizeMode) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[3] + return &file_google_protobuf_descriptor_proto_enumTypes[4] } func (x FileOptions_OptimizeMode) Number() protoreflect.EnumNumber { @@ -382,11 +488,11 @@ func (x FieldOptions_CType) String() string { } func (FieldOptions_CType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor() } func (FieldOptions_CType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[4] + return &file_google_protobuf_descriptor_proto_enumTypes[5] } func (x FieldOptions_CType) Number() protoreflect.EnumNumber { @@ -444,11 +550,11 @@ func (x FieldOptions_JSType) String() string { } func (FieldOptions_JSType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor() } func (FieldOptions_JSType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[5] + return 
&file_google_protobuf_descriptor_proto_enumTypes[6] } func (x FieldOptions_JSType) Number() protoreflect.EnumNumber { @@ -506,11 +612,11 @@ func (x FieldOptions_OptionRetention) String() string { } func (FieldOptions_OptionRetention) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor() } func (FieldOptions_OptionRetention) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[6] + return &file_google_protobuf_descriptor_proto_enumTypes[7] } func (x FieldOptions_OptionRetention) Number() protoreflect.EnumNumber { @@ -590,11 +696,11 @@ func (x FieldOptions_OptionTargetType) String() string { } func (FieldOptions_OptionTargetType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor() } func (FieldOptions_OptionTargetType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[7] + return &file_google_protobuf_descriptor_proto_enumTypes[8] } func (x FieldOptions_OptionTargetType) Number() protoreflect.EnumNumber { @@ -652,11 +758,11 @@ func (x MethodOptions_IdempotencyLevel) String() string { } func (MethodOptions_IdempotencyLevel) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[9].Descriptor() } func (MethodOptions_IdempotencyLevel) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[8] + return &file_google_protobuf_descriptor_proto_enumTypes[9] } func (x MethodOptions_IdempotencyLevel) Number() protoreflect.EnumNumber { @@ -678,6 +784,363 @@ func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{17, 0} } +type FeatureSet_FieldPresence int32 + +const ( + FeatureSet_FIELD_PRESENCE_UNKNOWN FeatureSet_FieldPresence = 0 + FeatureSet_EXPLICIT FeatureSet_FieldPresence = 1 + FeatureSet_IMPLICIT FeatureSet_FieldPresence = 2 + FeatureSet_LEGACY_REQUIRED FeatureSet_FieldPresence = 3 +) + +// Enum value maps for FeatureSet_FieldPresence. +var ( + FeatureSet_FieldPresence_name = map[int32]string{ + 0: "FIELD_PRESENCE_UNKNOWN", + 1: "EXPLICIT", + 2: "IMPLICIT", + 3: "LEGACY_REQUIRED", + } + FeatureSet_FieldPresence_value = map[string]int32{ + "FIELD_PRESENCE_UNKNOWN": 0, + "EXPLICIT": 1, + "IMPLICIT": 2, + "LEGACY_REQUIRED": 3, + } +) + +func (x FeatureSet_FieldPresence) Enum() *FeatureSet_FieldPresence { + p := new(FeatureSet_FieldPresence) + *p = x + return p +} + +func (x FeatureSet_FieldPresence) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_FieldPresence) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[10].Descriptor() +} + +func (FeatureSet_FieldPresence) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[10] +} + +func (x FeatureSet_FieldPresence) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. 
+func (x *FeatureSet_FieldPresence) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_FieldPresence(num) + return nil +} + +// Deprecated: Use FeatureSet_FieldPresence.Descriptor instead. +func (FeatureSet_FieldPresence) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0} +} + +type FeatureSet_EnumType int32 + +const ( + FeatureSet_ENUM_TYPE_UNKNOWN FeatureSet_EnumType = 0 + FeatureSet_OPEN FeatureSet_EnumType = 1 + FeatureSet_CLOSED FeatureSet_EnumType = 2 +) + +// Enum value maps for FeatureSet_EnumType. +var ( + FeatureSet_EnumType_name = map[int32]string{ + 0: "ENUM_TYPE_UNKNOWN", + 1: "OPEN", + 2: "CLOSED", + } + FeatureSet_EnumType_value = map[string]int32{ + "ENUM_TYPE_UNKNOWN": 0, + "OPEN": 1, + "CLOSED": 2, + } +) + +func (x FeatureSet_EnumType) Enum() *FeatureSet_EnumType { + p := new(FeatureSet_EnumType) + *p = x + return p +} + +func (x FeatureSet_EnumType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_EnumType) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[11].Descriptor() +} + +func (FeatureSet_EnumType) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[11] +} + +func (x FeatureSet_EnumType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FeatureSet_EnumType) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_EnumType(num) + return nil +} + +// Deprecated: Use FeatureSet_EnumType.Descriptor instead. +func (FeatureSet_EnumType) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 1} +} + +type FeatureSet_RepeatedFieldEncoding int32 + +const ( + FeatureSet_REPEATED_FIELD_ENCODING_UNKNOWN FeatureSet_RepeatedFieldEncoding = 0 + FeatureSet_PACKED FeatureSet_RepeatedFieldEncoding = 1 + FeatureSet_EXPANDED FeatureSet_RepeatedFieldEncoding = 2 +) + +// Enum value maps for FeatureSet_RepeatedFieldEncoding. +var ( + FeatureSet_RepeatedFieldEncoding_name = map[int32]string{ + 0: "REPEATED_FIELD_ENCODING_UNKNOWN", + 1: "PACKED", + 2: "EXPANDED", + } + FeatureSet_RepeatedFieldEncoding_value = map[string]int32{ + "REPEATED_FIELD_ENCODING_UNKNOWN": 0, + "PACKED": 1, + "EXPANDED": 2, + } +) + +func (x FeatureSet_RepeatedFieldEncoding) Enum() *FeatureSet_RepeatedFieldEncoding { + p := new(FeatureSet_RepeatedFieldEncoding) + *p = x + return p +} + +func (x FeatureSet_RepeatedFieldEncoding) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_RepeatedFieldEncoding) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[12].Descriptor() +} + +func (FeatureSet_RepeatedFieldEncoding) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[12] +} + +func (x FeatureSet_RepeatedFieldEncoding) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. 
+func (x *FeatureSet_RepeatedFieldEncoding) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_RepeatedFieldEncoding(num) + return nil +} + +// Deprecated: Use FeatureSet_RepeatedFieldEncoding.Descriptor instead. +func (FeatureSet_RepeatedFieldEncoding) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 2} +} + +type FeatureSet_Utf8Validation int32 + +const ( + FeatureSet_UTF8_VALIDATION_UNKNOWN FeatureSet_Utf8Validation = 0 + FeatureSet_VERIFY FeatureSet_Utf8Validation = 2 + FeatureSet_NONE FeatureSet_Utf8Validation = 3 +) + +// Enum value maps for FeatureSet_Utf8Validation. +var ( + FeatureSet_Utf8Validation_name = map[int32]string{ + 0: "UTF8_VALIDATION_UNKNOWN", + 2: "VERIFY", + 3: "NONE", + } + FeatureSet_Utf8Validation_value = map[string]int32{ + "UTF8_VALIDATION_UNKNOWN": 0, + "VERIFY": 2, + "NONE": 3, + } +) + +func (x FeatureSet_Utf8Validation) Enum() *FeatureSet_Utf8Validation { + p := new(FeatureSet_Utf8Validation) + *p = x + return p +} + +func (x FeatureSet_Utf8Validation) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_Utf8Validation) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[13].Descriptor() +} + +func (FeatureSet_Utf8Validation) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[13] +} + +func (x FeatureSet_Utf8Validation) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FeatureSet_Utf8Validation) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_Utf8Validation(num) + return nil +} + +// Deprecated: Use FeatureSet_Utf8Validation.Descriptor instead. +func (FeatureSet_Utf8Validation) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 3} +} + +type FeatureSet_MessageEncoding int32 + +const ( + FeatureSet_MESSAGE_ENCODING_UNKNOWN FeatureSet_MessageEncoding = 0 + FeatureSet_LENGTH_PREFIXED FeatureSet_MessageEncoding = 1 + FeatureSet_DELIMITED FeatureSet_MessageEncoding = 2 +) + +// Enum value maps for FeatureSet_MessageEncoding. +var ( + FeatureSet_MessageEncoding_name = map[int32]string{ + 0: "MESSAGE_ENCODING_UNKNOWN", + 1: "LENGTH_PREFIXED", + 2: "DELIMITED", + } + FeatureSet_MessageEncoding_value = map[string]int32{ + "MESSAGE_ENCODING_UNKNOWN": 0, + "LENGTH_PREFIXED": 1, + "DELIMITED": 2, + } +) + +func (x FeatureSet_MessageEncoding) Enum() *FeatureSet_MessageEncoding { + p := new(FeatureSet_MessageEncoding) + *p = x + return p +} + +func (x FeatureSet_MessageEncoding) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_MessageEncoding) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[14].Descriptor() +} + +func (FeatureSet_MessageEncoding) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[14] +} + +func (x FeatureSet_MessageEncoding) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. 
+func (x *FeatureSet_MessageEncoding) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_MessageEncoding(num) + return nil +} + +// Deprecated: Use FeatureSet_MessageEncoding.Descriptor instead. +func (FeatureSet_MessageEncoding) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 4} +} + +type FeatureSet_JsonFormat int32 + +const ( + FeatureSet_JSON_FORMAT_UNKNOWN FeatureSet_JsonFormat = 0 + FeatureSet_ALLOW FeatureSet_JsonFormat = 1 + FeatureSet_LEGACY_BEST_EFFORT FeatureSet_JsonFormat = 2 +) + +// Enum value maps for FeatureSet_JsonFormat. +var ( + FeatureSet_JsonFormat_name = map[int32]string{ + 0: "JSON_FORMAT_UNKNOWN", + 1: "ALLOW", + 2: "LEGACY_BEST_EFFORT", + } + FeatureSet_JsonFormat_value = map[string]int32{ + "JSON_FORMAT_UNKNOWN": 0, + "ALLOW": 1, + "LEGACY_BEST_EFFORT": 2, + } +) + +func (x FeatureSet_JsonFormat) Enum() *FeatureSet_JsonFormat { + p := new(FeatureSet_JsonFormat) + *p = x + return p +} + +func (x FeatureSet_JsonFormat) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_JsonFormat) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[15].Descriptor() +} + +func (FeatureSet_JsonFormat) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[15] +} + +func (x FeatureSet_JsonFormat) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FeatureSet_JsonFormat) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_JsonFormat(num) + return nil +} + +// Deprecated: Use FeatureSet_JsonFormat.Descriptor instead. +func (FeatureSet_JsonFormat) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 5} +} + // Represents the identified object's effect on the element in the original // .proto file. type GeneratedCodeInfo_Annotation_Semantic int32 @@ -716,11 +1179,11 @@ func (x GeneratedCodeInfo_Annotation_Semantic) String() string { } func (GeneratedCodeInfo_Annotation_Semantic) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[9].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[16].Descriptor() } func (GeneratedCodeInfo_Annotation_Semantic) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[9] + return &file_google_protobuf_descriptor_proto_enumTypes[16] } func (x GeneratedCodeInfo_Annotation_Semantic) Number() protoreflect.EnumNumber { @@ -739,7 +1202,7 @@ func (x *GeneratedCodeInfo_Annotation_Semantic) UnmarshalJSON(b []byte) error { // Deprecated: Use GeneratedCodeInfo_Annotation_Semantic.Descriptor instead. func (GeneratedCodeInfo_Annotation_Semantic) EnumDescriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20, 0, 0} + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{22, 0, 0} } // The protocol compiler can output a FileDescriptorSet containing the .proto @@ -822,8 +1285,8 @@ type FileDescriptorProto struct { // // If `edition` is present, this value must be "editions". Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` - // The edition of the proto file, which is an opaque string. 
- Edition *string `protobuf:"bytes,13,opt,name=edition" json:"edition,omitempty"` + // The edition of the proto file. + Edition *Edition `protobuf:"varint,14,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` } func (x *FileDescriptorProto) Reset() { @@ -942,11 +1405,11 @@ func (x *FileDescriptorProto) GetSyntax() string { return "" } -func (x *FileDescriptorProto) GetEdition() string { +func (x *FileDescriptorProto) GetEdition() Edition { if x != nil && x.Edition != nil { return *x.Edition } - return "" + return Edition_EDITION_UNKNOWN } // Describes a message type. @@ -1079,13 +1542,14 @@ type ExtensionRangeOptions struct { // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - // go/protobuf-stripping-extension-declarations - // Like Metadata, but we use a repeated field to hold all extension - // declarations. This should avoid the size increases of transforming a large - // extension range into small ranges in generated binaries. + // For external users: DO NOT USE. We are in the process of open sourcing + // extension declaration and executing internal cleanups before it can be + // used externally. Declaration []*ExtensionRangeOptions_Declaration `protobuf:"bytes,2,rep,name=declaration" json:"declaration,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,50,opt,name=features" json:"features,omitempty"` // The verification state of the range. - // TODO(b/278783756): flip the default to DECLARATION once all empty ranges + // TODO: flip the default to DECLARATION once all empty ranges // are marked as UNVERIFIED. Verification *ExtensionRangeOptions_VerificationState `protobuf:"varint,3,opt,name=verification,enum=google.protobuf.ExtensionRangeOptions_VerificationState,def=1" json:"verification,omitempty"` } @@ -1141,6 +1605,13 @@ func (x *ExtensionRangeOptions) GetDeclaration() []*ExtensionRangeOptions_Declar return nil } +func (x *ExtensionRangeOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *ExtensionRangeOptions) GetVerification() ExtensionRangeOptions_VerificationState { if x != nil && x.Verification != nil { return *x.Verification @@ -1186,12 +1657,12 @@ type FieldDescriptorProto struct { // If true, this is a proto3 "optional". When a proto3 field is optional, it // tracks presence regardless of field type. // - // When proto3_optional is true, this field must be belong to a oneof to - // signal to old proto3 clients that presence is tracked for this field. This - // oneof is known as a "synthetic" oneof, and this field must be its sole - // member (each proto3 optional field gets its own synthetic oneof). Synthetic - // oneofs exist in the descriptor only, and do not generate any API. Synthetic - // oneofs must be ordered after all "real" oneofs. + // When proto3_optional is true, this field must belong to a oneof to signal + // to old proto3 clients that presence is tracked for this field. This oneof + // is known as a "synthetic" oneof, and this field must be its sole member + // (each proto3 optional field gets its own synthetic oneof). Synthetic oneofs + // exist in the descriptor only, and do not generate any API. Synthetic oneofs + // must be ordered after all "real" oneofs. 
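With FileDescriptorProto.edition now typed as an Edition enum (field 14) instead of an opaque string, callers read it through an enum-valued getter. A sketch using a well-known descriptor, assuming protodesc and durationpb are available alongside this vendor tree:

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protodesc"
	"google.golang.org/protobuf/types/descriptorpb"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// Round-trip a compiled file descriptor back to its proto form.
	fdp := protodesc.ToFileDescriptorProto(durationpb.File_google_protobuf_duration_proto)
	fmt.Println(fdp.GetSyntax())  // "proto3"
	fmt.Println(fdp.GetEdition()) // EDITION_UNKNOWN for a non-editions file

	// An editions file would carry a concrete edition value instead.
	fdp.Edition = descriptorpb.Edition_EDITION_2023.Enum()
	fmt.Println(fdp.GetEdition()) // EDITION_2023
}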
// // For message fields, proto3_optional doesn't create any semantic change, // since non-repeated message fields always track presence. However it still @@ -1711,12 +2182,16 @@ type FileOptions struct { // // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` - // If set true, then the Java2 code generator will generate code that - // throws an exception whenever an attempt is made to assign a non-UTF-8 - // byte sequence to a string field. - // Message reflection will do the same. - // However, an extension field still accepts non-UTF-8 byte sequences. - // This option has no effect on when used with the lite runtime. + // A proto2 file can set this to true to opt in to UTF-8 checking for Java, + // which will throw an exception if invalid UTF-8 is parsed from the wire or + // assigned to a string field. + // + // TODO: clarify exactly what kinds of field types this option + // applies to, and update these docs accordingly. + // + // Proto3 files already perform these checks. Setting the option explicitly to + // false has no effect: it cannot be used to opt proto3 files out of UTF-8 + // checks. JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"` OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"` // Sets the Go package where structs generated from this .proto will be @@ -1738,7 +2213,6 @@ type FileOptions struct { CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"` JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"` PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"` - PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"` // Is this file deprecated? // Depending on the target platform, this can emit Deprecated annotations // for everything in the file, or it will be completely ignored; in the very @@ -1772,6 +2246,8 @@ type FileOptions struct { // is empty. When this option is not set, the package name will be used for // determining the ruby package. RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,50,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. // See the documentation for the "Options" section above. 
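The synthetic-oneof rule spelled out above for proto3_optional means a descriptor for such a field always carries both proto3_optional and an oneof_index. A hand-built sketch (the field name "foo" is hypothetical):

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	// A proto3 `optional string foo = 1;` as the compiler would describe it:
	// proto3_optional is set and the field is the sole member of a synthetic oneof.
	f := &descriptorpb.FieldDescriptorProto{
		Name:           proto.String("foo"),
		Number:         proto.Int32(1),
		Label:          descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL.Enum(),
		Type:           descriptorpb.FieldDescriptorProto_TYPE_STRING.Enum(),
		Proto3Optional: proto.Bool(true),
		OneofIndex:     proto.Int32(0), // index of its synthetic "_foo" oneof
	}
	fmt.Println(f.GetProto3Optional(), f.GetOneofIndex()) // true 0
}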
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` @@ -1785,7 +2261,6 @@ const ( Default_FileOptions_CcGenericServices = bool(false) Default_FileOptions_JavaGenericServices = bool(false) Default_FileOptions_PyGenericServices = bool(false) - Default_FileOptions_PhpGenericServices = bool(false) Default_FileOptions_Deprecated = bool(false) Default_FileOptions_CcEnableArenas = bool(true) ) @@ -1893,13 +2368,6 @@ func (x *FileOptions) GetPyGenericServices() bool { return Default_FileOptions_PyGenericServices } -func (x *FileOptions) GetPhpGenericServices() bool { - if x != nil && x.PhpGenericServices != nil { - return *x.PhpGenericServices - } - return Default_FileOptions_PhpGenericServices -} - func (x *FileOptions) GetDeprecated() bool { if x != nil && x.Deprecated != nil { return *x.Deprecated @@ -1963,6 +2431,13 @@ func (x *FileOptions) GetRubyPackage() string { return "" } +func (x *FileOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *FileOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2006,10 +2481,6 @@ type MessageOptions struct { // for the message, or it will be completely ignored; in the very least, // this is a formalization for deprecating messages. Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // NOTE: Do not set the option in .proto files. Always use the maps syntax - // instead. The option should only be implicitly set by the proto compiler - // parser. - // // Whether the message is an automatically generated map entry type for the // maps field. // @@ -2030,6 +2501,10 @@ type MessageOptions struct { // use a native map in the target language to hold the keys and values. // The reflection APIs in such implementations still need to work as // if the field is a repeated message field. + // + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` // Enable the legacy handling of JSON field name conflicts. This lowercases // and strips underscored from the fields before comparison in proto3 only. @@ -2039,11 +2514,13 @@ type MessageOptions struct { // This should only be used as a temporary measure against broken builds due // to the change in behavior for JSON field name conflicts. // - // TODO(b/261750190) This is legacy behavior we plan to remove once downstream + // TODO This is legacy behavior we plan to remove once downstream // teams have had time to migrate. // // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,11,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,12,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. 
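The map_entry expansion described in the MessageOptions comment above is visible through reflection: a generated map field points at a synthetic *Entry message whose map_entry option is set. A sketch using google.protobuf.Struct, which declares a map<string, Value> field named "fields":

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	fields := (&structpb.Struct{}).ProtoReflect().Descriptor().Fields().ByName("fields")

	fmt.Println(fields.IsMap())              // true
	fmt.Println(fields.Message().FullName()) // google.protobuf.Struct.FieldsEntry

	// The synthetic entry message carries the implicit key/value fields.
	entry := fields.Message()
	fmt.Println(entry.IsMapEntry())                        // true
	fmt.Println(entry.Fields().ByName("key").FullName())   // ...FieldsEntry.key
	fmt.Println(entry.Fields().ByName("value").FullName()) // ...FieldsEntry.value
}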
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -2123,6 +2600,13 @@ func (x *MessageOptions) GetDeprecatedLegacyJsonFieldConflicts() bool { return false } +func (x *MessageOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2147,7 +2631,9 @@ type FieldOptions struct { // a more efficient representation on the wire. Rather than repeatedly // writing the tag and type for each element, the entire array is encoded as // a single length-delimited blob. In proto3, only explicit setting it to - // false will avoid using packed encoding. + // false will avoid using packed encoding. This option is prohibited in + // Editions, but the `repeated_field_encoding` feature can be used to control + // the behavior. Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` // The jstype option determines the JavaScript type used for values of the // field. The option is permitted only for 64 bit integral and fixed types @@ -2178,19 +2664,11 @@ type FieldOptions struct { // call from multiple threads concurrently, while non-const methods continue // to require exclusive access. // - // Note that implementations may choose not to check required fields within - // a lazy sub-message. That is, calling IsInitialized() on the outer message - // may return true even if the inner message has missing required fields. - // This is necessary because otherwise the inner message would have to be - // parsed in order to perform the check, defeating the purpose of lazy - // parsing. An implementation which chooses not to check required fields - // must be consistent about it. That is, for any particular sub-message, the - // implementation must either *always* check its required fields, or *never* - // check its required fields, regardless of whether or not the message has - // been parsed. - // - // As of May 2022, lazy verifies the contents of the byte stream during - // parsing. An invalid byte stream will cause the overall parsing to fail. + // Note that lazy message fields are still eagerly verified to check + // ill-formed wireformat or missing required fields. Calling IsInitialized() + // on the outer message would fail if the inner message has missing required + // fields. Failed verification would result in parsing failure (except when + // uninitialized messages are acceptable). Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` // unverified_lazy does no correctness checks on the byte stream. This should // only be used where lazy with verification is prohibitive for performance @@ -2205,11 +2683,13 @@ type FieldOptions struct { Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` // Indicate that the field value should not be printed out when using debug // formats, e.g. when the field contains sensitive credentials. - DebugRedact *bool `protobuf:"varint,16,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"` - Retention *FieldOptions_OptionRetention `protobuf:"varint,17,opt,name=retention,enum=google.protobuf.FieldOptions_OptionRetention" json:"retention,omitempty"` - // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. 
- Target *FieldOptions_OptionTargetType `protobuf:"varint,18,opt,name=target,enum=google.protobuf.FieldOptions_OptionTargetType" json:"target,omitempty"` - Targets []FieldOptions_OptionTargetType `protobuf:"varint,19,rep,name=targets,enum=google.protobuf.FieldOptions_OptionTargetType" json:"targets,omitempty"` + DebugRedact *bool `protobuf:"varint,16,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"` + Retention *FieldOptions_OptionRetention `protobuf:"varint,17,opt,name=retention,enum=google.protobuf.FieldOptions_OptionRetention" json:"retention,omitempty"` + Targets []FieldOptions_OptionTargetType `protobuf:"varint,19,rep,name=targets,enum=google.protobuf.FieldOptions_OptionTargetType" json:"targets,omitempty"` + EditionDefaults []*FieldOptions_EditionDefault `protobuf:"bytes,20,rep,name=edition_defaults,json=editionDefaults" json:"edition_defaults,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,21,opt,name=features" json:"features,omitempty"` + FeatureSupport *FieldOptions_FeatureSupport `protobuf:"bytes,22,opt,name=feature_support,json=featureSupport" json:"feature_support,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -2320,17 +2800,30 @@ func (x *FieldOptions) GetRetention() FieldOptions_OptionRetention { return FieldOptions_RETENTION_UNKNOWN } -// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. -func (x *FieldOptions) GetTarget() FieldOptions_OptionTargetType { - if x != nil && x.Target != nil { - return *x.Target +func (x *FieldOptions) GetTargets() []FieldOptions_OptionTargetType { + if x != nil { + return x.Targets } - return FieldOptions_TARGET_TYPE_UNKNOWN + return nil } -func (x *FieldOptions) GetTargets() []FieldOptions_OptionTargetType { +func (x *FieldOptions) GetEditionDefaults() []*FieldOptions_EditionDefault { if x != nil { - return x.Targets + return x.EditionDefaults + } + return nil +} + +func (x *FieldOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + +func (x *FieldOptions) GetFeatureSupport() *FieldOptions_FeatureSupport { + if x != nil { + return x.FeatureSupport } return nil } @@ -2348,6 +2841,8 @@ type OneofOptions struct { unknownFields protoimpl.UnknownFields extensionFields protoimpl.ExtensionFields + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,1,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -2384,6 +2879,13 @@ func (*OneofOptions) Descriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{13} } +func (x *OneofOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2409,11 +2911,13 @@ type EnumOptions struct { // and strips underscored from the fields before comparison in proto3 only. // The new behavior takes `json_name` into account and applies to proto2 as // well. 
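FieldOptions now couples the classic wire-level knobs (packed, lazy, debug_redact, retention) with the editions machinery (edition_defaults, features, feature_support). A brief sketch of setting and reading a few of them; the values chosen are illustrative only:

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	fo := &descriptorpb.FieldOptions{
		Packed:      proto.Bool(false), // proto3: explicitly opt out of packed encoding
		Lazy:        proto.Bool(true),  // lazily parse, but still eagerly verified
		DebugRedact: proto.Bool(true),
		Retention:   descriptorpb.FieldOptions_RETENTION_SOURCE.Enum(),
	}
	fmt.Println(fo.GetPacked(), fo.GetLazy(), fo.GetDebugRedact()) // false true true
	fmt.Println(fo.GetRetention())                                 // RETENTION_SOURCE

	// The new editions-related fields default to empty/nil when unset.
	fmt.Println(fo.GetEditionDefaults(), fo.GetFeatures(), fo.GetFeatureSupport()) // [] <nil> <nil>
}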
- // TODO(b/261750190) Remove this legacy behavior once downstream teams have + // TODO Remove this legacy behavior once downstream teams have // had time to migrate. // // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,6,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,7,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -2477,6 +2981,13 @@ func (x *EnumOptions) GetDeprecatedLegacyJsonFieldConflicts() bool { return false } +func (x *EnumOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2495,13 +3006,22 @@ type EnumValueOptions struct { // for the enum value, or it will be completely ignored; in the very least, // this is a formalization for deprecating enum values. Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,2,opt,name=features" json:"features,omitempty"` + // Indicate that fields annotated with this enum value should not be printed + // out when using debug formats, e.g. when the field contains sensitive + // credentials. + DebugRedact *bool `protobuf:"varint,3,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"` + // Information about the support window of a feature value. + FeatureSupport *FieldOptions_FeatureSupport `protobuf:"bytes,4,opt,name=feature_support,json=featureSupport" json:"feature_support,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } // Default values for EnumValueOptions fields. const ( - Default_EnumValueOptions_Deprecated = bool(false) + Default_EnumValueOptions_Deprecated = bool(false) + Default_EnumValueOptions_DebugRedact = bool(false) ) func (x *EnumValueOptions) Reset() { @@ -2543,6 +3063,27 @@ func (x *EnumValueOptions) GetDeprecated() bool { return Default_EnumValueOptions_Deprecated } +func (x *EnumValueOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + +func (x *EnumValueOptions) GetDebugRedact() bool { + if x != nil && x.DebugRedact != nil { + return *x.DebugRedact + } + return Default_EnumValueOptions_DebugRedact +} + +func (x *EnumValueOptions) GetFeatureSupport() *FieldOptions_FeatureSupport { + if x != nil { + return x.FeatureSupport + } + return nil +} + func (x *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2556,6 +3097,8 @@ type ServiceOptions struct { unknownFields protoimpl.UnknownFields extensionFields protoimpl.ExtensionFields + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,34,opt,name=features" json:"features,omitempty"` // Is this service deprecated? 
// Depending on the target platform, this can emit Deprecated annotations // for the service, or it will be completely ignored; in the very least, @@ -2602,6 +3145,13 @@ func (*ServiceOptions) Descriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{16} } +func (x *ServiceOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *ServiceOptions) GetDeprecated() bool { if x != nil && x.Deprecated != nil { return *x.Deprecated @@ -2628,6 +3178,8 @@ type MethodOptions struct { // this is a formalization for deprecating methods. Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,35,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -2684,6 +3236,13 @@ func (x *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel { return Default_MethodOptions_IdempotencyLevel } +func (x *MethodOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2794,6 +3353,171 @@ func (x *UninterpretedOption) GetAggregateValue() string { return "" } +// TODO Enums in C++ gencode (and potentially other languages) are +// not well scoped. This means that each of the feature enums below can clash +// with each other. The short names we've chosen maximize call-site +// readability, but leave us very open to this scenario. A future feature will +// be designed and implemented to handle this, hopefully before we ever hit a +// conflict here. 
+type FeatureSet struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + FieldPresence *FeatureSet_FieldPresence `protobuf:"varint,1,opt,name=field_presence,json=fieldPresence,enum=google.protobuf.FeatureSet_FieldPresence" json:"field_presence,omitempty"` + EnumType *FeatureSet_EnumType `protobuf:"varint,2,opt,name=enum_type,json=enumType,enum=google.protobuf.FeatureSet_EnumType" json:"enum_type,omitempty"` + RepeatedFieldEncoding *FeatureSet_RepeatedFieldEncoding `protobuf:"varint,3,opt,name=repeated_field_encoding,json=repeatedFieldEncoding,enum=google.protobuf.FeatureSet_RepeatedFieldEncoding" json:"repeated_field_encoding,omitempty"` + Utf8Validation *FeatureSet_Utf8Validation `protobuf:"varint,4,opt,name=utf8_validation,json=utf8Validation,enum=google.protobuf.FeatureSet_Utf8Validation" json:"utf8_validation,omitempty"` + MessageEncoding *FeatureSet_MessageEncoding `protobuf:"varint,5,opt,name=message_encoding,json=messageEncoding,enum=google.protobuf.FeatureSet_MessageEncoding" json:"message_encoding,omitempty"` + JsonFormat *FeatureSet_JsonFormat `protobuf:"varint,6,opt,name=json_format,json=jsonFormat,enum=google.protobuf.FeatureSet_JsonFormat" json:"json_format,omitempty"` +} + +func (x *FeatureSet) Reset() { + *x = FeatureSet{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FeatureSet) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FeatureSet) ProtoMessage() {} + +func (x *FeatureSet) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FeatureSet.ProtoReflect.Descriptor instead. +func (*FeatureSet) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19} +} + +func (x *FeatureSet) GetFieldPresence() FeatureSet_FieldPresence { + if x != nil && x.FieldPresence != nil { + return *x.FieldPresence + } + return FeatureSet_FIELD_PRESENCE_UNKNOWN +} + +func (x *FeatureSet) GetEnumType() FeatureSet_EnumType { + if x != nil && x.EnumType != nil { + return *x.EnumType + } + return FeatureSet_ENUM_TYPE_UNKNOWN +} + +func (x *FeatureSet) GetRepeatedFieldEncoding() FeatureSet_RepeatedFieldEncoding { + if x != nil && x.RepeatedFieldEncoding != nil { + return *x.RepeatedFieldEncoding + } + return FeatureSet_REPEATED_FIELD_ENCODING_UNKNOWN +} + +func (x *FeatureSet) GetUtf8Validation() FeatureSet_Utf8Validation { + if x != nil && x.Utf8Validation != nil { + return *x.Utf8Validation + } + return FeatureSet_UTF8_VALIDATION_UNKNOWN +} + +func (x *FeatureSet) GetMessageEncoding() FeatureSet_MessageEncoding { + if x != nil && x.MessageEncoding != nil { + return *x.MessageEncoding + } + return FeatureSet_MESSAGE_ENCODING_UNKNOWN +} + +func (x *FeatureSet) GetJsonFormat() FeatureSet_JsonFormat { + if x != nil && x.JsonFormat != nil { + return *x.JsonFormat + } + return FeatureSet_JSON_FORMAT_UNKNOWN +} + +// A compiled specification for the defaults of a set of features. These +// messages are generated from FeatureSet extensions and can be used to seed +// feature resolution. 
The resolution with this object becomes a simple search +// for the closest matching edition, followed by proto merges. +type FeatureSetDefaults struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Defaults []*FeatureSetDefaults_FeatureSetEditionDefault `protobuf:"bytes,1,rep,name=defaults" json:"defaults,omitempty"` + // The minimum supported edition (inclusive) when this was constructed. + // Editions before this will not have defaults. + MinimumEdition *Edition `protobuf:"varint,4,opt,name=minimum_edition,json=minimumEdition,enum=google.protobuf.Edition" json:"minimum_edition,omitempty"` + // The maximum known edition (inclusive) when this was constructed. Editions + // after this will not have reliable defaults. + MaximumEdition *Edition `protobuf:"varint,5,opt,name=maximum_edition,json=maximumEdition,enum=google.protobuf.Edition" json:"maximum_edition,omitempty"` +} + +func (x *FeatureSetDefaults) Reset() { + *x = FeatureSetDefaults{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FeatureSetDefaults) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FeatureSetDefaults) ProtoMessage() {} + +func (x *FeatureSetDefaults) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FeatureSetDefaults.ProtoReflect.Descriptor instead. +func (*FeatureSetDefaults) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20} +} + +func (x *FeatureSetDefaults) GetDefaults() []*FeatureSetDefaults_FeatureSetEditionDefault { + if x != nil { + return x.Defaults + } + return nil +} + +func (x *FeatureSetDefaults) GetMinimumEdition() Edition { + if x != nil && x.MinimumEdition != nil { + return *x.MinimumEdition + } + return Edition_EDITION_UNKNOWN +} + +func (x *FeatureSetDefaults) GetMaximumEdition() Edition { + if x != nil && x.MaximumEdition != nil { + return *x.MaximumEdition + } + return Edition_EDITION_UNKNOWN +} + // Encapsulates information about the original source file from which a // FileDescriptorProto was generated. type SourceCodeInfo struct { @@ -2855,7 +3579,7 @@ type SourceCodeInfo struct { func (x *SourceCodeInfo) Reset() { *x = SourceCodeInfo{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[19] + mi := &file_google_protobuf_descriptor_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2868,7 +3592,7 @@ func (x *SourceCodeInfo) String() string { func (*SourceCodeInfo) ProtoMessage() {} func (x *SourceCodeInfo) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[19] + mi := &file_google_protobuf_descriptor_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2881,7 +3605,7 @@ func (x *SourceCodeInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use SourceCodeInfo.ProtoReflect.Descriptor instead. 
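Feature resolution is described above as finding the closest matching edition default and then applying proto merges; a small sketch of that merge step (the concrete feature values used here are illustrative, not the real edition defaults):

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	// Hypothetical defaults for the selected edition.
	defaults := &descriptorpb.FeatureSet{
		FieldPresence:         descriptorpb.FeatureSet_EXPLICIT.Enum(),
		RepeatedFieldEncoding: descriptorpb.FeatureSet_PACKED.Enum(),
	}
	// Features set explicitly on the element being resolved.
	overrides := &descriptorpb.FeatureSet{
		FieldPresence: descriptorpb.FeatureSet_IMPLICIT.Enum(),
	}

	resolved := proto.Clone(defaults).(*descriptorpb.FeatureSet)
	proto.Merge(resolved, overrides) // explicitly set values win over the defaults

	fmt.Println(resolved.GetFieldPresence())         // IMPLICIT
	fmt.Println(resolved.GetRepeatedFieldEncoding()) // PACKED
	fmt.Println(resolved.GetEnumType())              // ENUM_TYPE_UNKNOWN (never set)
}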
func (*SourceCodeInfo) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19} + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{21} } func (x *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { @@ -2907,7 +3631,7 @@ type GeneratedCodeInfo struct { func (x *GeneratedCodeInfo) Reset() { *x = GeneratedCodeInfo{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[20] + mi := &file_google_protobuf_descriptor_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2920,7 +3644,7 @@ func (x *GeneratedCodeInfo) String() string { func (*GeneratedCodeInfo) ProtoMessage() {} func (x *GeneratedCodeInfo) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[20] + mi := &file_google_protobuf_descriptor_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2933,7 +3657,7 @@ func (x *GeneratedCodeInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use GeneratedCodeInfo.ProtoReflect.Descriptor instead. func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20} + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{22} } func (x *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { @@ -2956,7 +3680,7 @@ type DescriptorProto_ExtensionRange struct { func (x *DescriptorProto_ExtensionRange) Reset() { *x = DescriptorProto_ExtensionRange{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[21] + mi := &file_google_protobuf_descriptor_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2969,7 +3693,7 @@ func (x *DescriptorProto_ExtensionRange) String() string { func (*DescriptorProto_ExtensionRange) ProtoMessage() {} func (x *DescriptorProto_ExtensionRange) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[21] + mi := &file_google_protobuf_descriptor_proto_msgTypes[23] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3021,7 +3745,7 @@ type DescriptorProto_ReservedRange struct { func (x *DescriptorProto_ReservedRange) Reset() { *x = DescriptorProto_ReservedRange{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[22] + mi := &file_google_protobuf_descriptor_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3034,7 +3758,7 @@ func (x *DescriptorProto_ReservedRange) String() string { func (*DescriptorProto_ReservedRange) ProtoMessage() {} func (x *DescriptorProto_ReservedRange) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[22] + mi := &file_google_protobuf_descriptor_proto_msgTypes[24] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3078,10 +3802,6 @@ type ExtensionRangeOptions_Declaration struct { // Metadata.type, Declaration.type must have a leading dot for messages // and enums. Type *string `protobuf:"bytes,3,opt,name=type" json:"type,omitempty"` - // Deprecated. Please use "repeated". - // - // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. 
- IsRepeated *bool `protobuf:"varint,4,opt,name=is_repeated,json=isRepeated" json:"is_repeated,omitempty"` // If true, indicates that the number is reserved in the extension range, // and any extension field with the number will fail to compile. Set this // when a declared extension field is deleted. @@ -3094,7 +3814,7 @@ type ExtensionRangeOptions_Declaration struct { func (x *ExtensionRangeOptions_Declaration) Reset() { *x = ExtensionRangeOptions_Declaration{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + mi := &file_google_protobuf_descriptor_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3107,7 +3827,7 @@ func (x *ExtensionRangeOptions_Declaration) String() string { func (*ExtensionRangeOptions_Declaration) ProtoMessage() {} func (x *ExtensionRangeOptions_Declaration) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + mi := &file_google_protobuf_descriptor_proto_msgTypes[25] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3144,14 +3864,6 @@ func (x *ExtensionRangeOptions_Declaration) GetType() string { return "" } -// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. -func (x *ExtensionRangeOptions_Declaration) GetIsRepeated() bool { - if x != nil && x.IsRepeated != nil { - return *x.IsRepeated - } - return false -} - func (x *ExtensionRangeOptions_Declaration) GetReserved() bool { if x != nil && x.Reserved != nil { return *x.Reserved @@ -3184,7 +3896,7 @@ type EnumDescriptorProto_EnumReservedRange struct { func (x *EnumDescriptorProto_EnumReservedRange) Reset() { *x = EnumDescriptorProto_EnumReservedRange{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[24] + mi := &file_google_protobuf_descriptor_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3197,7 +3909,7 @@ func (x *EnumDescriptorProto_EnumReservedRange) String() string { func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} func (x *EnumDescriptorProto_EnumReservedRange) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[24] + mi := &file_google_protobuf_descriptor_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3227,6 +3939,143 @@ func (x *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { return 0 } +type FieldOptions_EditionDefault struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` + Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` // Textproto value. 
+} + +func (x *FieldOptions_EditionDefault) Reset() { + *x = FieldOptions_EditionDefault{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FieldOptions_EditionDefault) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldOptions_EditionDefault) ProtoMessage() {} + +func (x *FieldOptions_EditionDefault) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FieldOptions_EditionDefault.ProtoReflect.Descriptor instead. +func (*FieldOptions_EditionDefault) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 0} +} + +func (x *FieldOptions_EditionDefault) GetEdition() Edition { + if x != nil && x.Edition != nil { + return *x.Edition + } + return Edition_EDITION_UNKNOWN +} + +func (x *FieldOptions_EditionDefault) GetValue() string { + if x != nil && x.Value != nil { + return *x.Value + } + return "" +} + +// Information about the support window of a feature. +type FieldOptions_FeatureSupport struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The edition that this feature was first available in. In editions + // earlier than this one, the default assigned to EDITION_LEGACY will be + // used, and proto files will not be able to override it. + EditionIntroduced *Edition `protobuf:"varint,1,opt,name=edition_introduced,json=editionIntroduced,enum=google.protobuf.Edition" json:"edition_introduced,omitempty"` + // The edition this feature becomes deprecated in. Using this after this + // edition may trigger warnings. + EditionDeprecated *Edition `protobuf:"varint,2,opt,name=edition_deprecated,json=editionDeprecated,enum=google.protobuf.Edition" json:"edition_deprecated,omitempty"` + // The deprecation warning text if this feature is used after the edition it + // was marked deprecated in. + DeprecationWarning *string `protobuf:"bytes,3,opt,name=deprecation_warning,json=deprecationWarning" json:"deprecation_warning,omitempty"` + // The edition this feature is no longer available in. In editions after + // this one, the last default assigned will be used, and proto files will + // not be able to override it. 
+ EditionRemoved *Edition `protobuf:"varint,4,opt,name=edition_removed,json=editionRemoved,enum=google.protobuf.Edition" json:"edition_removed,omitempty"` +} + +func (x *FieldOptions_FeatureSupport) Reset() { + *x = FieldOptions_FeatureSupport{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FieldOptions_FeatureSupport) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldOptions_FeatureSupport) ProtoMessage() {} + +func (x *FieldOptions_FeatureSupport) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[28] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FieldOptions_FeatureSupport.ProtoReflect.Descriptor instead. +func (*FieldOptions_FeatureSupport) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 1} +} + +func (x *FieldOptions_FeatureSupport) GetEditionIntroduced() Edition { + if x != nil && x.EditionIntroduced != nil { + return *x.EditionIntroduced + } + return Edition_EDITION_UNKNOWN +} + +func (x *FieldOptions_FeatureSupport) GetEditionDeprecated() Edition { + if x != nil && x.EditionDeprecated != nil { + return *x.EditionDeprecated + } + return Edition_EDITION_UNKNOWN +} + +func (x *FieldOptions_FeatureSupport) GetDeprecationWarning() string { + if x != nil && x.DeprecationWarning != nil { + return *x.DeprecationWarning + } + return "" +} + +func (x *FieldOptions_FeatureSupport) GetEditionRemoved() Edition { + if x != nil && x.EditionRemoved != nil { + return *x.EditionRemoved + } + return Edition_EDITION_UNKNOWN +} + // The name of the uninterpreted option. Each string represents a segment in // a dot-separated name. is_extension is true iff a segment represents an // extension (denoted with parentheses in options specs in .proto files). @@ -3244,7 +4093,7 @@ type UninterpretedOption_NamePart struct { func (x *UninterpretedOption_NamePart) Reset() { *x = UninterpretedOption_NamePart{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[25] + mi := &file_google_protobuf_descriptor_proto_msgTypes[29] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3257,7 +4106,7 @@ func (x *UninterpretedOption_NamePart) String() string { func (*UninterpretedOption_NamePart) ProtoMessage() {} func (x *UninterpretedOption_NamePart) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[25] + mi := &file_google_protobuf_descriptor_proto_msgTypes[29] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3287,6 +4136,75 @@ func (x *UninterpretedOption_NamePart) GetIsExtension() bool { return false } +// A map from every known edition with a unique set of defaults to its +// defaults. Not all editions may be contained here. For a given edition, +// the defaults at the closest matching edition ordered at or before it should +// be used. This field must be in strict ascending order by edition. 
+type FeatureSetDefaults_FeatureSetEditionDefault struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` + // Defaults of features that can be overridden in this edition. + OverridableFeatures *FeatureSet `protobuf:"bytes,4,opt,name=overridable_features,json=overridableFeatures" json:"overridable_features,omitempty"` + // Defaults of features that can't be overridden in this edition. + FixedFeatures *FeatureSet `protobuf:"bytes,5,opt,name=fixed_features,json=fixedFeatures" json:"fixed_features,omitempty"` +} + +func (x *FeatureSetDefaults_FeatureSetEditionDefault) Reset() { + *x = FeatureSetDefaults_FeatureSetEditionDefault{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FeatureSetDefaults_FeatureSetEditionDefault) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FeatureSetDefaults_FeatureSetEditionDefault) ProtoMessage() {} + +func (x *FeatureSetDefaults_FeatureSetEditionDefault) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[30] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FeatureSetDefaults_FeatureSetEditionDefault.ProtoReflect.Descriptor instead. +func (*FeatureSetDefaults_FeatureSetEditionDefault) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20, 0} +} + +func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetEdition() Edition { + if x != nil && x.Edition != nil { + return *x.Edition + } + return Edition_EDITION_UNKNOWN +} + +func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetOverridableFeatures() *FeatureSet { + if x != nil { + return x.OverridableFeatures + } + return nil +} + +func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetFixedFeatures() *FeatureSet { + if x != nil { + return x.FixedFeatures + } + return nil +} + type SourceCodeInfo_Location struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -3296,7 +4214,7 @@ type SourceCodeInfo_Location struct { // location. // // Each element is a field number or an index. They form a path from - // the root FileDescriptorProto to the place where the definition occurs. + // the root FileDescriptorProto to the place where the definition appears. 
// For example, this path: // // [ 4, 3, 2, 7, 1 ] @@ -3388,7 +4306,7 @@ type SourceCodeInfo_Location struct { func (x *SourceCodeInfo_Location) Reset() { *x = SourceCodeInfo_Location{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[26] + mi := &file_google_protobuf_descriptor_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3401,7 +4319,7 @@ func (x *SourceCodeInfo_Location) String() string { func (*SourceCodeInfo_Location) ProtoMessage() {} func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[26] + mi := &file_google_protobuf_descriptor_proto_msgTypes[31] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3414,7 +4332,7 @@ func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message { // Deprecated: Use SourceCodeInfo_Location.ProtoReflect.Descriptor instead. func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0} + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{21, 0} } func (x *SourceCodeInfo_Location) GetPath() []int32 { @@ -3475,7 +4393,7 @@ type GeneratedCodeInfo_Annotation struct { func (x *GeneratedCodeInfo_Annotation) Reset() { *x = GeneratedCodeInfo_Annotation{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[27] + mi := &file_google_protobuf_descriptor_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3488,7 +4406,7 @@ func (x *GeneratedCodeInfo_Annotation) String() string { func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[27] + mi := &file_google_protobuf_descriptor_proto_msgTypes[32] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3501,7 +4419,7 @@ func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message { // Deprecated: Use GeneratedCodeInfo_Annotation.ProtoReflect.Descriptor instead. 
func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20, 0} + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{22, 0} } func (x *GeneratedCodeInfo_Annotation) GetPath() []int32 { @@ -3550,7 +4468,7 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x04, 0x66, 0x69, - 0x6c, 0x65, 0x22, 0xfe, 0x04, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x6c, 0x65, 0x22, 0x98, 0x05, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, @@ -3588,250 +4506,250 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x0c, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x64, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x06, 0x0a, 0x0f, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, - 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, - 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, - 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 
0x6d, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, + 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x06, + 0x0a, 0x0f, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, + 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, - 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x64, 0x65, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, - 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, - 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, - 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, + 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, + 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x45, 0x6e, 
0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, + 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, + 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, + 0x5f, 0x64, 0x65, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, + 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x52, 0x09, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, + 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, + 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, + 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, + 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, + 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, + 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, + 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x1a, 0x37, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0xcc, 0x04, 0x0a, 0x15, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 
0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a, + 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, + 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x61, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0x88, 0x01, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x63, + 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x73, 0x12, 0x6d, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x3a, 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x42, 0x03, 0x88, + 0x01, 0x02, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x1a, 0x94, 0x01, 0x0a, 0x0b, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c, + 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, + 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, + 0x64, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x34, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, + 0x44, 0x45, 0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, + 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08, + 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xc1, 
0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, + 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, + 0x12, 0x3e, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, + 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, + 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, + 0x0a, 0x0b, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, + 0x1b, 0x0a, 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, + 0x02, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 
0x0d, 0x0a, + 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, + 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, + 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, + 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, + 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, + 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, + 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, + 0x10, 0x0f, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, + 0x44, 0x36, 0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, + 0x4e, 0x54, 0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, + 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, + 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, + 0x41, 0x4c, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, + 0x50, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, + 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x22, 0x63, 0x0a, 0x14, + 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, + 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x22, 0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, + 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, + 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 
0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, - 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, - 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, - 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x37, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, - 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, - 0xad, 0x04, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, - 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, - 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a, 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0x88, 0x01, - 0x02, 0x52, 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x68, - 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x56, 0x65, 0x72, - 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x3a, 0x0a, - 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xb3, 0x01, 0x0a, 0x0b, 0x44, 0x65, 0x63, - 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 
0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, - 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, - 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, - 0x65, 0x12, 0x23, 0x0a, 0x0b, 0x69, 0x73, 0x5f, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0a, 0x69, 0x73, 0x52, 0x65, - 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x22, 0x34, - 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x45, 0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49, - 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, - 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, - 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, - 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, - 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, - 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70, - 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, - 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, - 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, - 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, - 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 
0x6a, 0x73, 0x6f, 0x6e, 0x4e, - 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, - 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, - 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, - 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, - 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, - 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, - 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, - 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, - 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, - 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, - 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, - 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, - 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, - 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, - 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, - 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, - 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, - 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, - 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, - 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, - 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c, - 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x12, - 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, 0x45, 0x41, 0x54, 0x45, - 0x44, 0x10, 0x03, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 
0x52, - 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75, - 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, - 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, - 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d, - 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, - 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, - 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, - 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, - 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83, - 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, - 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 
0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74, - 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, - 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, + 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, + 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, + 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x12, 0x3b, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, + 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, - 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, - 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, + 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, + 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 
0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, - 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x22, 0x91, 0x09, 0x0a, 0x0b, 0x46, - 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, - 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, - 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, - 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76, - 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, - 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, - 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, - 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, 0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61, - 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, - 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45, - 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, 0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16, - 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63, - 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, - 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43, - 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12, 0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69, - 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, - 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, - 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, - 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13, - 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61, 0x76, 0x61, 0x5f, 
0x67, 0x65, 0x6e, 0x65, - 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, - 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47, - 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35, - 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, - 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x14, 0x70, 0x68, 0x70, 0x5f, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x2a, 0x20, - 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x12, 0x70, 0x68, 0x70, 0x47, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, + 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, + 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0x30, 0x0a, 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, + 0x52, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, + 0x67, 0x12, 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, + 0x73, 0x65, 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x69, 0x6e, 0x67, 0x22, 0xad, 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, + 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, + 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, + 0x75, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, + 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, + 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, + 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, + 
0x76, 0x61, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, + 0x44, 0x0a, 0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, + 0x5f, 0x65, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, + 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, + 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, + 0x64, 0x48, 0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, + 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, + 0x1b, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, + 0x76, 0x61, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, + 0x38, 0x12, 0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, + 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, + 0x64, 0x65, 0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, + 0x69, 0x7a, 0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, + 0x6b, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, + 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, + 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, + 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, + 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, + 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, @@ -3856,88 +4774,130 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x70, 0x68, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x62, 0x79, - 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, + 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 
0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, + 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, + 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, + 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, + 0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, + 0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, + 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, + 0x02, 0x4a, 0x04, 0x08, 0x2a, 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x52, 0x14, 0x70, + 0x68, 0x70, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x22, 0xf4, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, + 0x72, 0x6d, 0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, + 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, + 0x61, 0x6c, 0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x6f, 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, + 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, + 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, + 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, + 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, + 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, + 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, + 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 
0x74, 0x52, 0x08, 0x66, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, - 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, - 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, - 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, - 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x22, 0xbb, - 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, - 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, - 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x5f, 0x64, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, - 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, - 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x25, 0x0a, - 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, - 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, 0x6e, 0x74, 0x72, - 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, - 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, - 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, - 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, - 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, - 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 
0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, - 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, - 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0x85, 0x09, 0x0a, - 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, - 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, - 0x65, 0x3a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, - 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, - 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, + 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, + 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0x9d, 0x0d, 0x0a, 0x0c, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, + 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, + 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, + 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, + 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, + 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, + 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, + 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, + 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, + 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, + 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, + 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, + 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, + 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, + 0x52, 0x65, 0x64, 0x61, 0x63, 
0x74, 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, + 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x48, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, + 0x20, 0x03, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a, + 0x10, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x73, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, + 0x55, 0x0a, 0x0f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, + 0x72, 0x74, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, - 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, - 0x65, 0x12, 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, - 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, - 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, - 0x0f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, - 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, - 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, - 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, - 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, - 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, - 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, - 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 
0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, - 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4a, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, - 0x12, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, - 0x74, 0x54, 0x79, 0x70, 0x65, 0x42, 0x02, 0x18, 0x01, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, - 0x74, 0x12, 0x48, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, 0x20, 0x03, - 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, - 0x70, 0x65, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, - 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, + 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, + 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, + 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, + 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, + 0x1a, 0x5a, 0x0a, 0x0e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x96, 0x02, 0x0a, + 0x0e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, + 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x72, 0x6f, + 0x64, 0x75, 0x63, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 
0x2e, 0x45, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, + 0x74, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, + 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x12, 0x2f, 0x0a, 0x13, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x77, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, + 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x72, 0x6e, 0x69, + 0x6e, 0x67, 0x12, 0x41, 0x0a, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, + 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45, 0x43, 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, @@ -3967,14 +4927,18 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x10, 0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, - 0x08, 0x04, 0x10, 0x05, 0x22, 0x73, 0x0a, 0x0c, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, + 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f, + 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, - 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x98, 0x02, 0x0a, 0x0b, 0x45, 0x6e, + 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 
0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, @@ -3985,130 +4949,284 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, - 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, - 0x08, 0x05, 0x10, 0x06, 0x22, 0x9e, 0x01, 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, - 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, - 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, - 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, - 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, - 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9c, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, - 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, - 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, - 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, - 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, + 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, + 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 
0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, + 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, + 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0xd8, 0x02, + 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, + 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, + 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x55, 0x0a, 0x0f, + 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, + 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, + 0x6f, 0x72, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, + 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, + 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, + 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75, + 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, + 0x6e, 0x74, 0x65, 
0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, + 0x22, 0x99, 0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, 0x69, 0x64, 0x65, + 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x22, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, + 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, + 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, 0x64, 0x65, 0x6d, + 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x37, 0x0a, 0x08, + 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, - 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, - 0x80, 0x80, 0x80, 0x02, 0x22, 0xe0, 0x02, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, - 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, - 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, - 0x11, 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, - 0x65, 0x6c, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, - 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, - 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, - 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, - 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, - 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, - 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 
0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, + 0x50, 0x0a, 0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, + 0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, + 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, + 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, + 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, + 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, + 0x13, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, + 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, + 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, + 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, + 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, + 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, + 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, + 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, + 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, + 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, + 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, + 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x0a, 0x0a, 0x0a, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x91, 0x01, 0x0a, 0x0e, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 
0x18, 0x01, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x3f, 0x88, 0x01, + 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, + 0x49, 0x43, 0x49, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49, 0x4d, 0x50, 0x4c, + 0x49, 0x43, 0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, + 0x49, 0x43, 0x49, 0x54, 0x18, 0xe8, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0d, 0x66, + 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x6c, 0x0a, 0x09, + 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x50, 0x0a, 0x10, 0x49, 0x64, - 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x17, - 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, - 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4e, 0x4f, 0x5f, 0x53, 0x49, - 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, - 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, - 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, 0x13, 0x55, 0x6e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, + 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x75, + 0x6d, 0x54, 0x79, 0x70, 0x65, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, + 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, + 0x09, 0x12, 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, + 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x98, 0x01, 0x0a, 0x17, 0x72, + 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x65, 0x6e, + 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, + 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, + 0x2d, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, + 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x50, + 0x41, 0x43, 0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x15, + 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, + 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x7e, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x76, 
0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, 0x74, 0x66, 0x38, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, + 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x18, 0xe6, + 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, 0xe7, 0x07, 0xb2, + 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0e, 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7e, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x26, 0x88, 0x01, + 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, 0x45, 0x4e, 0x47, + 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xb2, 0x01, + 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, + 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x82, 0x01, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x42, 0x39, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x06, 0x98, 0x01, + 0x01, 0xa2, 0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, + 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, + 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0a, + 0x6a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, 0x0d, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x46, + 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, 0x5f, 0x55, 0x4e, + 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, + 0x43, 0x49, 0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, + 0x54, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x52, 0x45, + 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, 0x6e, 0x75, 0x6d, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4f, + 0x50, 0x45, 0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, + 0x02, 0x22, 0x56, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45, + 0x50, 0x45, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x45, 0x4e, 0x43, + 0x4f, 0x44, 0x49, 
0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, + 0x0a, 0x0a, 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x45, + 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x49, 0x0a, 0x0e, 0x55, 0x74, 0x66, + 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, + 0x54, 0x46, 0x38, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, + 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x56, 0x45, 0x52, 0x49, + 0x46, 0x59, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x03, 0x22, 0x04, + 0x08, 0x01, 0x10, 0x01, 0x22, 0x53, 0x0a, 0x0f, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, + 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, + 0x47, 0x45, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, + 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, + 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, + 0x4c, 0x49, 0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, 0x02, 0x22, 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, + 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x17, 0x0a, 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, + 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, + 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, + 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, + 0x54, 0x10, 0x02, 0x2a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0x8b, 0x4e, 0x2a, 0x06, 0x08, 0x8b, 0x4e, + 0x10, 0x90, 0x4e, 0x2a, 0x06, 0x08, 0x90, 0x4e, 0x10, 0x91, 0x4e, 0x4a, 0x06, 0x08, 0xe7, 0x07, + 0x10, 0xe8, 0x07, 0x22, 0xef, 0x03, 0x0a, 0x12, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, + 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x08, 0x64, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x08, 0x64, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, + 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x52, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, - 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, - 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, - 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, - 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 
0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e, - 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, - 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75, - 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, - 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, - 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, - 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, - 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, - 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72, - 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, - 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, - 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, - 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, - 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, - 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, - 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, - 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, - 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, - 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, - 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, - 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, - 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, - 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd0, - 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, - 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 
0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, - 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, - 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, - 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, - 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, - 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, - 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, 0x73, - 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, - 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, - 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, - 0x02, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, - 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, + 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, + 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, + 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, + 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xf8, 0x01, 0x0a, 0x18, 0x46, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 
0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, 0x14, 0x6f, + 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x13, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61, + 0x62, 0x6c, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x42, 0x0a, 0x0e, 0x66, + 0x69, 0x78, 0x65, 0x64, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, + 0x52, 0x0d, 0x66, 0x69, 0x78, 0x65, 0x64, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x4a, + 0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, + 0x01, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, + 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, + 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, + 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, + 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, + 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, + 0x6e, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, + 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, + 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, + 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, + 0xd0, 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, + 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 
0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x62, 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, + 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x03, 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, + 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, + 0x6e, 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, + 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, + 0x10, 0x02, 0x2a, 0xa7, 0x02, 0x0a, 0x07, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, + 0x0a, 0x0f, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, + 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4c, + 0x45, 0x47, 0x41, 0x43, 0x59, 0x10, 0x84, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, 0x13, 0x0a, + 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x10, + 0xe7, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, + 0x32, 0x33, 0x10, 0xe8, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, + 0x5f, 0x32, 0x30, 0x32, 0x34, 0x10, 0xe9, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, + 0x01, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x5f, 0x54, + 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, + 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, 0x53, 0x54, + 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, + 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, + 0x4e, 0x4c, 0x59, 0x10, 0x9f, 0x8d, 0x06, 0x12, 0x13, 0x0a, 0x0b, 0x45, 0x44, 0x49, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x10, 0xff, 0xff, 0xff, 0xff, 0x07, 0x42, 0x7e, 0x0a, 0x13, + 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 
0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, + 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, } var ( @@ -4123,103 +5241,143 @@ func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte { return file_google_protobuf_descriptor_proto_rawDescData } -var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 10) -var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 28) -var file_google_protobuf_descriptor_proto_goTypes = []interface{}{ - (ExtensionRangeOptions_VerificationState)(0), // 0: google.protobuf.ExtensionRangeOptions.VerificationState - (FieldDescriptorProto_Type)(0), // 1: google.protobuf.FieldDescriptorProto.Type - (FieldDescriptorProto_Label)(0), // 2: google.protobuf.FieldDescriptorProto.Label - (FileOptions_OptimizeMode)(0), // 3: google.protobuf.FileOptions.OptimizeMode - (FieldOptions_CType)(0), // 4: google.protobuf.FieldOptions.CType - (FieldOptions_JSType)(0), // 5: google.protobuf.FieldOptions.JSType - (FieldOptions_OptionRetention)(0), // 6: google.protobuf.FieldOptions.OptionRetention - (FieldOptions_OptionTargetType)(0), // 7: google.protobuf.FieldOptions.OptionTargetType - (MethodOptions_IdempotencyLevel)(0), // 8: google.protobuf.MethodOptions.IdempotencyLevel - (GeneratedCodeInfo_Annotation_Semantic)(0), // 9: google.protobuf.GeneratedCodeInfo.Annotation.Semantic - (*FileDescriptorSet)(nil), // 10: google.protobuf.FileDescriptorSet - (*FileDescriptorProto)(nil), // 11: google.protobuf.FileDescriptorProto - (*DescriptorProto)(nil), // 12: google.protobuf.DescriptorProto - (*ExtensionRangeOptions)(nil), // 13: google.protobuf.ExtensionRangeOptions - (*FieldDescriptorProto)(nil), // 14: google.protobuf.FieldDescriptorProto - (*OneofDescriptorProto)(nil), // 15: google.protobuf.OneofDescriptorProto - (*EnumDescriptorProto)(nil), // 16: google.protobuf.EnumDescriptorProto - (*EnumValueDescriptorProto)(nil), // 17: google.protobuf.EnumValueDescriptorProto - (*ServiceDescriptorProto)(nil), // 18: google.protobuf.ServiceDescriptorProto - (*MethodDescriptorProto)(nil), // 19: google.protobuf.MethodDescriptorProto - (*FileOptions)(nil), // 20: google.protobuf.FileOptions - (*MessageOptions)(nil), // 21: google.protobuf.MessageOptions - (*FieldOptions)(nil), // 22: google.protobuf.FieldOptions - (*OneofOptions)(nil), // 23: google.protobuf.OneofOptions - (*EnumOptions)(nil), // 24: google.protobuf.EnumOptions - (*EnumValueOptions)(nil), // 25: google.protobuf.EnumValueOptions - (*ServiceOptions)(nil), // 26: google.protobuf.ServiceOptions - (*MethodOptions)(nil), // 27: google.protobuf.MethodOptions - (*UninterpretedOption)(nil), // 28: google.protobuf.UninterpretedOption - (*SourceCodeInfo)(nil), // 29: google.protobuf.SourceCodeInfo - (*GeneratedCodeInfo)(nil), // 30: google.protobuf.GeneratedCodeInfo - (*DescriptorProto_ExtensionRange)(nil), // 31: google.protobuf.DescriptorProto.ExtensionRange - (*DescriptorProto_ReservedRange)(nil), // 32: google.protobuf.DescriptorProto.ReservedRange - (*ExtensionRangeOptions_Declaration)(nil), // 33: google.protobuf.ExtensionRangeOptions.Declaration - 
(*EnumDescriptorProto_EnumReservedRange)(nil), // 34: google.protobuf.EnumDescriptorProto.EnumReservedRange - (*UninterpretedOption_NamePart)(nil), // 35: google.protobuf.UninterpretedOption.NamePart - (*SourceCodeInfo_Location)(nil), // 36: google.protobuf.SourceCodeInfo.Location - (*GeneratedCodeInfo_Annotation)(nil), // 37: google.protobuf.GeneratedCodeInfo.Annotation +var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 17) +var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 33) +var file_google_protobuf_descriptor_proto_goTypes = []any{ + (Edition)(0), // 0: google.protobuf.Edition + (ExtensionRangeOptions_VerificationState)(0), // 1: google.protobuf.ExtensionRangeOptions.VerificationState + (FieldDescriptorProto_Type)(0), // 2: google.protobuf.FieldDescriptorProto.Type + (FieldDescriptorProto_Label)(0), // 3: google.protobuf.FieldDescriptorProto.Label + (FileOptions_OptimizeMode)(0), // 4: google.protobuf.FileOptions.OptimizeMode + (FieldOptions_CType)(0), // 5: google.protobuf.FieldOptions.CType + (FieldOptions_JSType)(0), // 6: google.protobuf.FieldOptions.JSType + (FieldOptions_OptionRetention)(0), // 7: google.protobuf.FieldOptions.OptionRetention + (FieldOptions_OptionTargetType)(0), // 8: google.protobuf.FieldOptions.OptionTargetType + (MethodOptions_IdempotencyLevel)(0), // 9: google.protobuf.MethodOptions.IdempotencyLevel + (FeatureSet_FieldPresence)(0), // 10: google.protobuf.FeatureSet.FieldPresence + (FeatureSet_EnumType)(0), // 11: google.protobuf.FeatureSet.EnumType + (FeatureSet_RepeatedFieldEncoding)(0), // 12: google.protobuf.FeatureSet.RepeatedFieldEncoding + (FeatureSet_Utf8Validation)(0), // 13: google.protobuf.FeatureSet.Utf8Validation + (FeatureSet_MessageEncoding)(0), // 14: google.protobuf.FeatureSet.MessageEncoding + (FeatureSet_JsonFormat)(0), // 15: google.protobuf.FeatureSet.JsonFormat + (GeneratedCodeInfo_Annotation_Semantic)(0), // 16: google.protobuf.GeneratedCodeInfo.Annotation.Semantic + (*FileDescriptorSet)(nil), // 17: google.protobuf.FileDescriptorSet + (*FileDescriptorProto)(nil), // 18: google.protobuf.FileDescriptorProto + (*DescriptorProto)(nil), // 19: google.protobuf.DescriptorProto + (*ExtensionRangeOptions)(nil), // 20: google.protobuf.ExtensionRangeOptions + (*FieldDescriptorProto)(nil), // 21: google.protobuf.FieldDescriptorProto + (*OneofDescriptorProto)(nil), // 22: google.protobuf.OneofDescriptorProto + (*EnumDescriptorProto)(nil), // 23: google.protobuf.EnumDescriptorProto + (*EnumValueDescriptorProto)(nil), // 24: google.protobuf.EnumValueDescriptorProto + (*ServiceDescriptorProto)(nil), // 25: google.protobuf.ServiceDescriptorProto + (*MethodDescriptorProto)(nil), // 26: google.protobuf.MethodDescriptorProto + (*FileOptions)(nil), // 27: google.protobuf.FileOptions + (*MessageOptions)(nil), // 28: google.protobuf.MessageOptions + (*FieldOptions)(nil), // 29: google.protobuf.FieldOptions + (*OneofOptions)(nil), // 30: google.protobuf.OneofOptions + (*EnumOptions)(nil), // 31: google.protobuf.EnumOptions + (*EnumValueOptions)(nil), // 32: google.protobuf.EnumValueOptions + (*ServiceOptions)(nil), // 33: google.protobuf.ServiceOptions + (*MethodOptions)(nil), // 34: google.protobuf.MethodOptions + (*UninterpretedOption)(nil), // 35: google.protobuf.UninterpretedOption + (*FeatureSet)(nil), // 36: google.protobuf.FeatureSet + (*FeatureSetDefaults)(nil), // 37: google.protobuf.FeatureSetDefaults + (*SourceCodeInfo)(nil), // 38: google.protobuf.SourceCodeInfo + 
(*GeneratedCodeInfo)(nil), // 39: google.protobuf.GeneratedCodeInfo + (*DescriptorProto_ExtensionRange)(nil), // 40: google.protobuf.DescriptorProto.ExtensionRange + (*DescriptorProto_ReservedRange)(nil), // 41: google.protobuf.DescriptorProto.ReservedRange + (*ExtensionRangeOptions_Declaration)(nil), // 42: google.protobuf.ExtensionRangeOptions.Declaration + (*EnumDescriptorProto_EnumReservedRange)(nil), // 43: google.protobuf.EnumDescriptorProto.EnumReservedRange + (*FieldOptions_EditionDefault)(nil), // 44: google.protobuf.FieldOptions.EditionDefault + (*FieldOptions_FeatureSupport)(nil), // 45: google.protobuf.FieldOptions.FeatureSupport + (*UninterpretedOption_NamePart)(nil), // 46: google.protobuf.UninterpretedOption.NamePart + (*FeatureSetDefaults_FeatureSetEditionDefault)(nil), // 47: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault + (*SourceCodeInfo_Location)(nil), // 48: google.protobuf.SourceCodeInfo.Location + (*GeneratedCodeInfo_Annotation)(nil), // 49: google.protobuf.GeneratedCodeInfo.Annotation } var file_google_protobuf_descriptor_proto_depIdxs = []int32{ - 11, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto - 12, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto - 16, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto - 18, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto - 14, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto - 20, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions - 29, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo - 14, // 7: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto - 14, // 8: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto - 12, // 9: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto - 16, // 10: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto - 31, // 11: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange - 15, // 12: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto - 21, // 13: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions - 32, // 14: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange - 28, // 15: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 33, // 16: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration - 0, // 17: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState - 2, // 18: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label - 1, // 19: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type - 22, // 20: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions - 23, // 21: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions - 17, // 22: google.protobuf.EnumDescriptorProto.value:type_name -> 
google.protobuf.EnumValueDescriptorProto - 24, // 23: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions - 34, // 24: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange - 25, // 25: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions - 19, // 26: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto - 26, // 27: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions - 27, // 28: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions - 3, // 29: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode - 28, // 30: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 28, // 31: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 4, // 32: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType - 5, // 33: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType - 6, // 34: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention - 7, // 35: google.protobuf.FieldOptions.target:type_name -> google.protobuf.FieldOptions.OptionTargetType - 7, // 36: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType - 28, // 37: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 28, // 38: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 28, // 39: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 28, // 40: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 28, // 41: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 8, // 42: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel - 28, // 43: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 35, // 44: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart - 36, // 45: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location - 37, // 46: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation - 13, // 47: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions - 9, // 48: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic - 49, // [49:49] is the sub-list for method output_type - 49, // [49:49] is the sub-list for method input_type - 49, // [49:49] is the sub-list for extension type_name - 49, // [49:49] is the sub-list for extension extendee - 0, // [0:49] is the sub-list for field type_name + 18, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto + 19, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto + 23, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 25, // 3: 
google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto + 21, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 27, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions + 38, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo + 0, // 7: google.protobuf.FileDescriptorProto.edition:type_name -> google.protobuf.Edition + 21, // 8: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto + 21, // 9: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 19, // 10: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto + 23, // 11: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 40, // 12: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange + 22, // 13: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto + 28, // 14: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions + 41, // 15: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange + 35, // 16: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 42, // 17: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration + 36, // 18: google.protobuf.ExtensionRangeOptions.features:type_name -> google.protobuf.FeatureSet + 1, // 19: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState + 3, // 20: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label + 2, // 21: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type + 29, // 22: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions + 30, // 23: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions + 24, // 24: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto + 31, // 25: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions + 43, // 26: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange + 32, // 27: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions + 26, // 28: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto + 33, // 29: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions + 34, // 30: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions + 4, // 31: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode + 36, // 32: google.protobuf.FileOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 33: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 34: google.protobuf.MessageOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 35: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 5, // 36: 
google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType + 6, // 37: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType + 7, // 38: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention + 8, // 39: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType + 44, // 40: google.protobuf.FieldOptions.edition_defaults:type_name -> google.protobuf.FieldOptions.EditionDefault + 36, // 41: google.protobuf.FieldOptions.features:type_name -> google.protobuf.FeatureSet + 45, // 42: google.protobuf.FieldOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport + 35, // 43: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 44: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 45: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 46: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 47: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 48: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet + 45, // 49: google.protobuf.EnumValueOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport + 35, // 50: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 51: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 52: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 9, // 53: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel + 36, // 54: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 55: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 46, // 56: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart + 10, // 57: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence + 11, // 58: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType + 12, // 59: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding + 13, // 60: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation + 14, // 61: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding + 15, // 62: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat + 47, // 63: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault + 0, // 64: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition + 0, // 65: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition + 48, // 66: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location + 49, // 67: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation + 20, // 68: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions + 0, // 69: 
google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition + 0, // 70: google.protobuf.FieldOptions.FeatureSupport.edition_introduced:type_name -> google.protobuf.Edition + 0, // 71: google.protobuf.FieldOptions.FeatureSupport.edition_deprecated:type_name -> google.protobuf.Edition + 0, // 72: google.protobuf.FieldOptions.FeatureSupport.edition_removed:type_name -> google.protobuf.Edition + 0, // 73: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition + 36, // 74: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features:type_name -> google.protobuf.FeatureSet + 36, // 75: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features:type_name -> google.protobuf.FeatureSet + 16, // 76: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic + 77, // [77:77] is the sub-list for method output_type + 77, // [77:77] is the sub-list for method input_type + 77, // [77:77] is the sub-list for extension type_name + 77, // [77:77] is the sub-list for extension extendee + 0, // [0:77] is the sub-list for field type_name } func init() { file_google_protobuf_descriptor_proto_init() } @@ -4228,7 +5386,7 @@ func file_google_protobuf_descriptor_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_descriptor_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*FileDescriptorSet); i { case 0: return &v.state @@ -4240,7 +5398,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*FileDescriptorProto); i { case 0: return &v.state @@ -4252,7 +5410,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*DescriptorProto); i { case 0: return &v.state @@ -4264,7 +5422,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*ExtensionRangeOptions); i { case 0: return &v.state @@ -4278,7 +5436,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*FieldDescriptorProto); i { case 0: return &v.state @@ -4290,7 +5448,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*OneofDescriptorProto); i { case 0: return &v.state @@ -4302,7 +5460,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[6].Exporter = func(v interface{}, i 
int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*EnumDescriptorProto); i { case 0: return &v.state @@ -4314,7 +5472,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*EnumValueDescriptorProto); i { case 0: return &v.state @@ -4326,7 +5484,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*ServiceDescriptorProto); i { case 0: return &v.state @@ -4338,7 +5496,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := v.(*MethodDescriptorProto); i { case 0: return &v.state @@ -4350,7 +5508,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[10].Exporter = func(v any, i int) any { switch v := v.(*FileOptions); i { case 0: return &v.state @@ -4364,7 +5522,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[11].Exporter = func(v any, i int) any { switch v := v.(*MessageOptions); i { case 0: return &v.state @@ -4378,7 +5536,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[12].Exporter = func(v any, i int) any { switch v := v.(*FieldOptions); i { case 0: return &v.state @@ -4392,7 +5550,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[13].Exporter = func(v any, i int) any { switch v := v.(*OneofOptions); i { case 0: return &v.state @@ -4406,7 +5564,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[14].Exporter = func(v any, i int) any { switch v := v.(*EnumOptions); i { case 0: return &v.state @@ -4420,7 +5578,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[15].Exporter = func(v any, i int) any { switch v := v.(*EnumValueOptions); i { case 0: return &v.state @@ -4434,7 +5592,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[16].Exporter = func(v any, i 
int) any { switch v := v.(*ServiceOptions); i { case 0: return &v.state @@ -4448,7 +5606,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[17].Exporter = func(v any, i int) any { switch v := v.(*MethodOptions); i { case 0: return &v.state @@ -4462,7 +5620,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[18].Exporter = func(v any, i int) any { switch v := v.(*UninterpretedOption); i { case 0: return &v.state @@ -4474,7 +5632,33 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v any, i int) any { + switch v := v.(*FeatureSet); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v any, i int) any { + switch v := v.(*FeatureSetDefaults); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v any, i int) any { switch v := v.(*SourceCodeInfo); i { case 0: return &v.state @@ -4486,7 +5670,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v any, i int) any { switch v := v.(*GeneratedCodeInfo); i { case 0: return &v.state @@ -4498,7 +5682,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v any, i int) any { switch v := v.(*DescriptorProto_ExtensionRange); i { case 0: return &v.state @@ -4510,7 +5694,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v any, i int) any { switch v := v.(*DescriptorProto_ReservedRange); i { case 0: return &v.state @@ -4522,7 +5706,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v any, i int) any { switch v := v.(*ExtensionRangeOptions_Declaration); i { case 0: return &v.state @@ -4534,7 +5718,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v any, i int) any { switch v := v.(*EnumDescriptorProto_EnumReservedRange); i { case 0: return &v.state @@ -4546,7 +5730,31 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - 
file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[27].Exporter = func(v any, i int) any { + switch v := v.(*FieldOptions_EditionDefault); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[28].Exporter = func(v any, i int) any { + switch v := v.(*FieldOptions_FeatureSupport); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[29].Exporter = func(v any, i int) any { switch v := v.(*UninterpretedOption_NamePart); i { case 0: return &v.state @@ -4558,7 +5766,19 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[30].Exporter = func(v any, i int) any { + switch v := v.(*FeatureSetDefaults_FeatureSetEditionDefault); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[31].Exporter = func(v any, i int) any { switch v := v.(*SourceCodeInfo_Location); i { case 0: return &v.state @@ -4570,7 +5790,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[32].Exporter = func(v any, i int) any { switch v := v.(*GeneratedCodeInfo_Annotation); i { case 0: return &v.state @@ -4588,8 +5808,8 @@ func file_google_protobuf_descriptor_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_protobuf_descriptor_proto_rawDesc, - NumEnums: 10, - NumMessages: 28, + NumEnums: 17, + NumMessages: 33, NumExtensions: 0, NumServices: 0, }, diff --git a/vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go b/vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go new file mode 100644 index 00000000..1ba1dfa5 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go @@ -0,0 +1,718 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package dynamicpb creates protocol buffer messages using runtime type information. +package dynamicpb + +import ( + "math" + + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/runtime/protoimpl" +) + +// enum is a dynamic protoreflect.Enum. +type enum struct { + num protoreflect.EnumNumber + typ protoreflect.EnumType +} + +func (e enum) Descriptor() protoreflect.EnumDescriptor { return e.typ.Descriptor() } +func (e enum) Type() protoreflect.EnumType { return e.typ } +func (e enum) Number() protoreflect.EnumNumber { return e.num } + +// enumType is a dynamic protoreflect.EnumType. +type enumType struct { + desc protoreflect.EnumDescriptor +} + +// NewEnumType creates a new EnumType with the provided descriptor. +// +// EnumTypes created by this package are equal if their descriptors are equal. 
+// That is, if ed1 == ed2, then NewEnumType(ed1) == NewEnumType(ed2). +// +// Enum values created by the EnumType are equal if their numbers are equal. +func NewEnumType(desc protoreflect.EnumDescriptor) protoreflect.EnumType { + return enumType{desc} +} + +func (et enumType) New(n protoreflect.EnumNumber) protoreflect.Enum { return enum{n, et} } +func (et enumType) Descriptor() protoreflect.EnumDescriptor { return et.desc } + +// extensionType is a dynamic protoreflect.ExtensionType. +type extensionType struct { + desc extensionTypeDescriptor +} + +// A Message is a dynamically constructed protocol buffer message. +// +// Message implements the [google.golang.org/protobuf/proto.Message] interface, +// and may be used with all standard proto package functions +// such as Marshal, Unmarshal, and so forth. +// +// Message also implements the [protoreflect.Message] interface. +// See the [protoreflect] package documentation for that interface for how to +// get and set fields and otherwise interact with the contents of a Message. +// +// Reflection API functions which construct messages, such as NewField, +// return new dynamic messages of the appropriate type. Functions which take +// messages, such as Set for a message-value field, will accept any message +// with a compatible type. +// +// Operations which modify a Message are not safe for concurrent use. +type Message struct { + typ messageType + known map[protoreflect.FieldNumber]protoreflect.Value + ext map[protoreflect.FieldNumber]protoreflect.FieldDescriptor + unknown protoreflect.RawFields +} + +var ( + _ protoreflect.Message = (*Message)(nil) + _ protoreflect.ProtoMessage = (*Message)(nil) + _ protoiface.MessageV1 = (*Message)(nil) +) + +// NewMessage creates a new message with the provided descriptor. +func NewMessage(desc protoreflect.MessageDescriptor) *Message { + return &Message{ + typ: messageType{desc}, + known: make(map[protoreflect.FieldNumber]protoreflect.Value), + ext: make(map[protoreflect.FieldNumber]protoreflect.FieldDescriptor), + } +} + +// ProtoMessage implements the legacy message interface. +func (m *Message) ProtoMessage() {} + +// ProtoReflect implements the [protoreflect.ProtoMessage] interface. +func (m *Message) ProtoReflect() protoreflect.Message { + return m +} + +// String returns a string representation of a message. +func (m *Message) String() string { + return protoimpl.X.MessageStringOf(m) +} + +// Reset clears the message to be empty, but preserves the dynamic message type. +func (m *Message) Reset() { + m.known = make(map[protoreflect.FieldNumber]protoreflect.Value) + m.ext = make(map[protoreflect.FieldNumber]protoreflect.FieldDescriptor) + m.unknown = nil +} + +// Descriptor returns the message descriptor. +func (m *Message) Descriptor() protoreflect.MessageDescriptor { + return m.typ.desc +} + +// Type returns the message type. +func (m *Message) Type() protoreflect.MessageType { + return m.typ +} + +// New returns a newly allocated empty message with the same descriptor. +// See [protoreflect.Message] for details. +func (m *Message) New() protoreflect.Message { + return m.Type().New() +} + +// Interface returns the message. +// See [protoreflect.Message] for details. +func (m *Message) Interface() protoreflect.ProtoMessage { + return m +} + +// ProtoMethods is an internal detail of the [protoreflect.Message] interface. +// Users should never call this directly. +func (m *Message) ProtoMethods() *protoiface.Methods { + return nil +} + +// Range visits every populated field in undefined order. 
+// See [protoreflect.Message] for details. +func (m *Message) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { + for num, v := range m.known { + fd := m.ext[num] + if fd == nil { + fd = m.Descriptor().Fields().ByNumber(num) + } + if !isSet(fd, v) { + continue + } + if !f(fd, v) { + return + } + } +} + +// Has reports whether a field is populated. +// See [protoreflect.Message] for details. +func (m *Message) Has(fd protoreflect.FieldDescriptor) bool { + m.checkField(fd) + if fd.IsExtension() && m.ext[fd.Number()] != fd { + return false + } + v, ok := m.known[fd.Number()] + if !ok { + return false + } + return isSet(fd, v) +} + +// Clear clears a field. +// See [protoreflect.Message] for details. +func (m *Message) Clear(fd protoreflect.FieldDescriptor) { + m.checkField(fd) + num := fd.Number() + delete(m.known, num) + delete(m.ext, num) +} + +// Get returns the value of a field. +// See [protoreflect.Message] for details. +func (m *Message) Get(fd protoreflect.FieldDescriptor) protoreflect.Value { + m.checkField(fd) + num := fd.Number() + if fd.IsExtension() { + if fd != m.ext[num] { + return fd.(protoreflect.ExtensionTypeDescriptor).Type().Zero() + } + return m.known[num] + } + if v, ok := m.known[num]; ok { + switch { + case fd.IsMap(): + if v.Map().Len() > 0 { + return v + } + case fd.IsList(): + if v.List().Len() > 0 { + return v + } + default: + return v + } + } + switch { + case fd.IsMap(): + return protoreflect.ValueOfMap(&dynamicMap{desc: fd}) + case fd.IsList(): + return protoreflect.ValueOfList(emptyList{desc: fd}) + case fd.Message() != nil: + return protoreflect.ValueOfMessage(&Message{typ: messageType{fd.Message()}}) + case fd.Kind() == protoreflect.BytesKind: + return protoreflect.ValueOfBytes(append([]byte(nil), fd.Default().Bytes()...)) + default: + return fd.Default() + } +} + +// Mutable returns a mutable reference to a repeated, map, or message field. +// See [protoreflect.Message] for details. +func (m *Message) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { + m.checkField(fd) + if !fd.IsMap() && !fd.IsList() && fd.Message() == nil { + panic(errors.New("%v: getting mutable reference to non-composite type", fd.FullName())) + } + if m.known == nil { + panic(errors.New("%v: modification of read-only message", fd.FullName())) + } + num := fd.Number() + if fd.IsExtension() { + if fd != m.ext[num] { + m.ext[num] = fd + m.known[num] = fd.(protoreflect.ExtensionTypeDescriptor).Type().New() + } + return m.known[num] + } + if v, ok := m.known[num]; ok { + return v + } + m.clearOtherOneofFields(fd) + m.known[num] = m.NewField(fd) + if fd.IsExtension() { + m.ext[num] = fd + } + return m.known[num] +} + +// Set stores a value in a field. +// See [protoreflect.Message] for details. 
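// A minimal usage sketch (illustration only, not part of the vendored
// dynamicpb source): given a protoreflect.MessageDescriptor md resolved
// elsewhere (for example from a compiled .proto), the Get/Set/Mutable
// accessors above are driven entirely by field descriptors:
//
//	m := dynamicpb.NewMessage(md)                       // md: assumed descriptor with a string field "name"
//	nameFD := md.Fields().ByName("name")                // nil if no such field exists
//	m.Set(nameFD, protoreflect.ValueOfString("probe"))  // Set panics on a type mismatch
//	_ = m.Get(nameFD).String()                          // "probe"
//	data, err := proto.Marshal(m)                       // *Message satisfies proto.Message
//	_, _ = data, err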
+func (m *Message) Set(fd protoreflect.FieldDescriptor, v protoreflect.Value) { + m.checkField(fd) + if m.known == nil { + panic(errors.New("%v: modification of read-only message", fd.FullName())) + } + if fd.IsExtension() { + isValid := true + switch { + case !fd.(protoreflect.ExtensionTypeDescriptor).Type().IsValidValue(v): + isValid = false + case fd.IsList(): + isValid = v.List().IsValid() + case fd.IsMap(): + isValid = v.Map().IsValid() + case fd.Message() != nil: + isValid = v.Message().IsValid() + } + if !isValid { + panic(errors.New("%v: assigning invalid type %T", fd.FullName(), v.Interface())) + } + m.ext[fd.Number()] = fd + } else { + typecheck(fd, v) + } + m.clearOtherOneofFields(fd) + m.known[fd.Number()] = v +} + +func (m *Message) clearOtherOneofFields(fd protoreflect.FieldDescriptor) { + od := fd.ContainingOneof() + if od == nil { + return + } + num := fd.Number() + for i := 0; i < od.Fields().Len(); i++ { + if n := od.Fields().Get(i).Number(); n != num { + delete(m.known, n) + } + } +} + +// NewField returns a new value for assignable to the field of a given descriptor. +// See [protoreflect.Message] for details. +func (m *Message) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { + m.checkField(fd) + switch { + case fd.IsExtension(): + return fd.(protoreflect.ExtensionTypeDescriptor).Type().New() + case fd.IsMap(): + return protoreflect.ValueOfMap(&dynamicMap{ + desc: fd, + mapv: make(map[any]protoreflect.Value), + }) + case fd.IsList(): + return protoreflect.ValueOfList(&dynamicList{desc: fd}) + case fd.Message() != nil: + return protoreflect.ValueOfMessage(NewMessage(fd.Message()).ProtoReflect()) + default: + return fd.Default() + } +} + +// WhichOneof reports which field in a oneof is populated, returning nil if none are populated. +// See [protoreflect.Message] for details. +func (m *Message) WhichOneof(od protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { + for i := 0; i < od.Fields().Len(); i++ { + fd := od.Fields().Get(i) + if m.Has(fd) { + return fd + } + } + return nil +} + +// GetUnknown returns the raw unknown fields. +// See [protoreflect.Message] for details. +func (m *Message) GetUnknown() protoreflect.RawFields { + return m.unknown +} + +// SetUnknown sets the raw unknown fields. +// See [protoreflect.Message] for details. +func (m *Message) SetUnknown(r protoreflect.RawFields) { + if m.known == nil { + panic(errors.New("%v: modification of read-only message", m.typ.desc.FullName())) + } + m.unknown = r +} + +// IsValid reports whether the message is valid. +// See [protoreflect.Message] for details. +func (m *Message) IsValid() bool { + return m.known != nil +} + +func (m *Message) checkField(fd protoreflect.FieldDescriptor) { + if fd.IsExtension() && fd.ContainingMessage().FullName() == m.Descriptor().FullName() { + if _, ok := fd.(protoreflect.ExtensionTypeDescriptor); !ok { + panic(errors.New("%v: extension field descriptor does not implement ExtensionTypeDescriptor", fd.FullName())) + } + return + } + if fd.Parent() == m.Descriptor() { + return + } + fields := m.Descriptor().Fields() + index := fd.Index() + if index >= fields.Len() || fields.Get(index) != fd { + panic(errors.New("%v: field descriptor does not belong to this message", fd.FullName())) + } +} + +type messageType struct { + desc protoreflect.MessageDescriptor +} + +// NewMessageType creates a new MessageType with the provided descriptor. +// +// MessageTypes created by this package are equal if their descriptors are equal. 
+// That is, if md1 == md2, then NewMessageType(md1) == NewMessageType(md2). +func NewMessageType(desc protoreflect.MessageDescriptor) protoreflect.MessageType { + return messageType{desc} +} + +func (mt messageType) New() protoreflect.Message { return NewMessage(mt.desc) } +func (mt messageType) Zero() protoreflect.Message { return &Message{typ: messageType{mt.desc}} } +func (mt messageType) Descriptor() protoreflect.MessageDescriptor { return mt.desc } +func (mt messageType) Enum(i int) protoreflect.EnumType { + if ed := mt.desc.Fields().Get(i).Enum(); ed != nil { + return NewEnumType(ed) + } + return nil +} +func (mt messageType) Message(i int) protoreflect.MessageType { + if md := mt.desc.Fields().Get(i).Message(); md != nil { + return NewMessageType(md) + } + return nil +} + +type emptyList struct { + desc protoreflect.FieldDescriptor +} + +func (x emptyList) Len() int { return 0 } +func (x emptyList) Get(n int) protoreflect.Value { panic(errors.New("out of range")) } +func (x emptyList) Set(n int, v protoreflect.Value) { + panic(errors.New("modification of immutable list")) +} +func (x emptyList) Append(v protoreflect.Value) { panic(errors.New("modification of immutable list")) } +func (x emptyList) AppendMutable() protoreflect.Value { + panic(errors.New("modification of immutable list")) +} +func (x emptyList) Truncate(n int) { panic(errors.New("modification of immutable list")) } +func (x emptyList) NewElement() protoreflect.Value { return newListEntry(x.desc) } +func (x emptyList) IsValid() bool { return false } + +type dynamicList struct { + desc protoreflect.FieldDescriptor + list []protoreflect.Value +} + +func (x *dynamicList) Len() int { + return len(x.list) +} + +func (x *dynamicList) Get(n int) protoreflect.Value { + return x.list[n] +} + +func (x *dynamicList) Set(n int, v protoreflect.Value) { + typecheckSingular(x.desc, v) + x.list[n] = v +} + +func (x *dynamicList) Append(v protoreflect.Value) { + typecheckSingular(x.desc, v) + x.list = append(x.list, v) +} + +func (x *dynamicList) AppendMutable() protoreflect.Value { + if x.desc.Message() == nil { + panic(errors.New("%v: invalid AppendMutable on list with non-message type", x.desc.FullName())) + } + v := x.NewElement() + x.Append(v) + return v +} + +func (x *dynamicList) Truncate(n int) { + // Zero truncated elements to avoid keeping data live. 
+ for i := n; i < len(x.list); i++ { + x.list[i] = protoreflect.Value{} + } + x.list = x.list[:n] +} + +func (x *dynamicList) NewElement() protoreflect.Value { + return newListEntry(x.desc) +} + +func (x *dynamicList) IsValid() bool { + return true +} + +type dynamicMap struct { + desc protoreflect.FieldDescriptor + mapv map[any]protoreflect.Value +} + +func (x *dynamicMap) Get(k protoreflect.MapKey) protoreflect.Value { return x.mapv[k.Interface()] } +func (x *dynamicMap) Set(k protoreflect.MapKey, v protoreflect.Value) { + typecheckSingular(x.desc.MapKey(), k.Value()) + typecheckSingular(x.desc.MapValue(), v) + x.mapv[k.Interface()] = v +} +func (x *dynamicMap) Has(k protoreflect.MapKey) bool { return x.Get(k).IsValid() } +func (x *dynamicMap) Clear(k protoreflect.MapKey) { delete(x.mapv, k.Interface()) } +func (x *dynamicMap) Mutable(k protoreflect.MapKey) protoreflect.Value { + if x.desc.MapValue().Message() == nil { + panic(errors.New("%v: invalid Mutable on map with non-message value type", x.desc.FullName())) + } + v := x.Get(k) + if !v.IsValid() { + v = x.NewValue() + x.Set(k, v) + } + return v +} +func (x *dynamicMap) Len() int { return len(x.mapv) } +func (x *dynamicMap) NewValue() protoreflect.Value { + if md := x.desc.MapValue().Message(); md != nil { + return protoreflect.ValueOfMessage(NewMessage(md).ProtoReflect()) + } + return x.desc.MapValue().Default() +} +func (x *dynamicMap) IsValid() bool { + return x.mapv != nil +} + +func (x *dynamicMap) Range(f func(protoreflect.MapKey, protoreflect.Value) bool) { + for k, v := range x.mapv { + if !f(protoreflect.ValueOf(k).MapKey(), v) { + return + } + } +} + +func isSet(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + switch { + case fd.IsMap(): + return v.Map().Len() > 0 + case fd.IsList(): + return v.List().Len() > 0 + case fd.ContainingOneof() != nil: + return true + case !fd.HasPresence() && !fd.IsExtension(): + switch fd.Kind() { + case protoreflect.BoolKind: + return v.Bool() + case protoreflect.EnumKind: + return v.Enum() != 0 + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed32Kind, protoreflect.Sfixed64Kind: + return v.Int() != 0 + case protoreflect.Uint32Kind, protoreflect.Uint64Kind, protoreflect.Fixed32Kind, protoreflect.Fixed64Kind: + return v.Uint() != 0 + case protoreflect.FloatKind, protoreflect.DoubleKind: + return v.Float() != 0 || math.Signbit(v.Float()) + case protoreflect.StringKind: + return v.String() != "" + case protoreflect.BytesKind: + return len(v.Bytes()) > 0 + } + } + return true +} + +func typecheck(fd protoreflect.FieldDescriptor, v protoreflect.Value) { + if err := typeIsValid(fd, v); err != nil { + panic(err) + } +} + +func typeIsValid(fd protoreflect.FieldDescriptor, v protoreflect.Value) error { + switch { + case !v.IsValid(): + return errors.New("%v: assigning invalid value", fd.FullName()) + case fd.IsMap(): + if mapv, ok := v.Interface().(*dynamicMap); !ok || mapv.desc != fd || !mapv.IsValid() { + return errors.New("%v: assigning invalid type %T", fd.FullName(), v.Interface()) + } + return nil + case fd.IsList(): + switch list := v.Interface().(type) { + case *dynamicList: + if list.desc == fd && list.IsValid() { + return nil + } + case emptyList: + if list.desc == fd && list.IsValid() { + return nil + } + } + return errors.New("%v: assigning invalid type %T", fd.FullName(), v.Interface()) + default: + return singularTypeIsValid(fd, v) + } +} + +func typecheckSingular(fd protoreflect.FieldDescriptor, v 
protoreflect.Value) { + if err := singularTypeIsValid(fd, v); err != nil { + panic(err) + } +} + +func singularTypeIsValid(fd protoreflect.FieldDescriptor, v protoreflect.Value) error { + vi := v.Interface() + var ok bool + switch fd.Kind() { + case protoreflect.BoolKind: + _, ok = vi.(bool) + case protoreflect.EnumKind: + // We could check against the valid set of enum values, but do not. + _, ok = vi.(protoreflect.EnumNumber) + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + _, ok = vi.(int32) + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + _, ok = vi.(uint32) + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + _, ok = vi.(int64) + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + _, ok = vi.(uint64) + case protoreflect.FloatKind: + _, ok = vi.(float32) + case protoreflect.DoubleKind: + _, ok = vi.(float64) + case protoreflect.StringKind: + _, ok = vi.(string) + case protoreflect.BytesKind: + _, ok = vi.([]byte) + case protoreflect.MessageKind, protoreflect.GroupKind: + var m protoreflect.Message + m, ok = vi.(protoreflect.Message) + if ok && m.Descriptor().FullName() != fd.Message().FullName() { + return errors.New("%v: assigning invalid message type %v", fd.FullName(), m.Descriptor().FullName()) + } + if dm, ok := vi.(*Message); ok && dm.known == nil { + return errors.New("%v: assigning invalid zero-value message", fd.FullName()) + } + } + if !ok { + return errors.New("%v: assigning invalid type %T", fd.FullName(), v.Interface()) + } + return nil +} + +func newListEntry(fd protoreflect.FieldDescriptor) protoreflect.Value { + switch fd.Kind() { + case protoreflect.BoolKind: + return protoreflect.ValueOfBool(false) + case protoreflect.EnumKind: + return protoreflect.ValueOfEnum(fd.Enum().Values().Get(0).Number()) + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + return protoreflect.ValueOfInt32(0) + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + return protoreflect.ValueOfUint32(0) + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + return protoreflect.ValueOfInt64(0) + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + return protoreflect.ValueOfUint64(0) + case protoreflect.FloatKind: + return protoreflect.ValueOfFloat32(0) + case protoreflect.DoubleKind: + return protoreflect.ValueOfFloat64(0) + case protoreflect.StringKind: + return protoreflect.ValueOfString("") + case protoreflect.BytesKind: + return protoreflect.ValueOfBytes(nil) + case protoreflect.MessageKind, protoreflect.GroupKind: + return protoreflect.ValueOfMessage(NewMessage(fd.Message()).ProtoReflect()) + } + panic(errors.New("%v: unknown kind %v", fd.FullName(), fd.Kind())) +} + +// NewExtensionType creates a new ExtensionType with the provided descriptor. +// +// Dynamic ExtensionTypes with the same descriptor compare as equal. That is, +// if xd1 == xd2, then NewExtensionType(xd1) == NewExtensionType(xd2). +// +// The InterfaceOf and ValueOf methods of the extension type are defined as: +// +// func (xt extensionType) ValueOf(iv any) protoreflect.Value { +// return protoreflect.ValueOf(iv) +// } +// +// func (xt extensionType) InterfaceOf(v protoreflect.Value) any { +// return v.Interface() +// } +// +// The Go type used by the proto.GetExtension and proto.SetExtension functions +// is determined by these methods, and is therefore equivalent to the Go type +// used to represent a protoreflect.Value. 
See the protoreflect.Value +// documentation for more details. +func NewExtensionType(desc protoreflect.ExtensionDescriptor) protoreflect.ExtensionType { + if xt, ok := desc.(protoreflect.ExtensionTypeDescriptor); ok { + desc = xt.Descriptor() + } + return extensionType{extensionTypeDescriptor{desc}} +} + +func (xt extensionType) New() protoreflect.Value { + switch { + case xt.desc.IsMap(): + return protoreflect.ValueOfMap(&dynamicMap{ + desc: xt.desc, + mapv: make(map[any]protoreflect.Value), + }) + case xt.desc.IsList(): + return protoreflect.ValueOfList(&dynamicList{desc: xt.desc}) + case xt.desc.Message() != nil: + return protoreflect.ValueOfMessage(NewMessage(xt.desc.Message())) + default: + return xt.desc.Default() + } +} + +func (xt extensionType) Zero() protoreflect.Value { + switch { + case xt.desc.IsMap(): + return protoreflect.ValueOfMap(&dynamicMap{desc: xt.desc}) + case xt.desc.Cardinality() == protoreflect.Repeated: + return protoreflect.ValueOfList(emptyList{desc: xt.desc}) + case xt.desc.Message() != nil: + return protoreflect.ValueOfMessage(&Message{typ: messageType{xt.desc.Message()}}) + default: + return xt.desc.Default() + } +} + +func (xt extensionType) TypeDescriptor() protoreflect.ExtensionTypeDescriptor { + return xt.desc +} + +func (xt extensionType) ValueOf(iv any) protoreflect.Value { + v := protoreflect.ValueOf(iv) + typecheck(xt.desc, v) + return v +} + +func (xt extensionType) InterfaceOf(v protoreflect.Value) any { + typecheck(xt.desc, v) + return v.Interface() +} + +func (xt extensionType) IsValidInterface(iv any) bool { + return typeIsValid(xt.desc, protoreflect.ValueOf(iv)) == nil +} + +func (xt extensionType) IsValidValue(v protoreflect.Value) bool { + return typeIsValid(xt.desc, v) == nil +} + +type extensionTypeDescriptor struct { + protoreflect.ExtensionDescriptor +} + +func (xt extensionTypeDescriptor) Type() protoreflect.ExtensionType { + return extensionType{xt} +} + +func (xt extensionTypeDescriptor) Descriptor() protoreflect.ExtensionDescriptor { + return xt.ExtensionDescriptor +} diff --git a/vendor/google.golang.org/protobuf/types/dynamicpb/types.go b/vendor/google.golang.org/protobuf/types/dynamicpb/types.go new file mode 100644 index 00000000..c432817b --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/dynamicpb/types.go @@ -0,0 +1,184 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package dynamicpb + +import ( + "fmt" + "strings" + "sync" + "sync/atomic" + + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +type extField struct { + name protoreflect.FullName + number protoreflect.FieldNumber +} + +// A Types is a collection of dynamically constructed descriptors. +// Its methods are safe for concurrent use. +// +// Types implements [protoregistry.MessageTypeResolver] and [protoregistry.ExtensionTypeResolver]. +// A Types may be used as a [google.golang.org/protobuf/proto.UnmarshalOptions.Resolver]. +type Types struct { + // atomicExtFiles is used with sync/atomic and hence must be the first word + // of the struct to guarantee 64-bit alignment. + // + // TODO(stapelberg): once we only support Go 1.19 and newer, switch this + // field to be of type atomic.Uint64 to guarantee alignment on + // stack-allocated values, too. 
+ atomicExtFiles uint64 + extMu sync.Mutex + + files *protoregistry.Files + + extensionsByMessage map[extField]protoreflect.ExtensionDescriptor +} + +// NewTypes creates a new Types registry with the provided files. +// The Files registry is retained, and changes to Files will be reflected in Types. +// It is not safe to concurrently change the Files while calling Types methods. +func NewTypes(f *protoregistry.Files) *Types { + return &Types{ + files: f, + } +} + +// FindEnumByName looks up an enum by its full name; +// e.g., "google.protobuf.Field.Kind". +// +// This returns (nil, [protoregistry.NotFound]) if not found. +func (t *Types) FindEnumByName(name protoreflect.FullName) (protoreflect.EnumType, error) { + d, err := t.files.FindDescriptorByName(name) + if err != nil { + return nil, err + } + ed, ok := d.(protoreflect.EnumDescriptor) + if !ok { + return nil, errors.New("found wrong type: got %v, want enum", descName(d)) + } + return NewEnumType(ed), nil +} + +// FindExtensionByName looks up an extension field by the field's full name. +// Note that this is the full name of the field as determined by +// where the extension is declared and is unrelated to the full name of the +// message being extended. +// +// This returns (nil, [protoregistry.NotFound]) if not found. +func (t *Types) FindExtensionByName(name protoreflect.FullName) (protoreflect.ExtensionType, error) { + d, err := t.files.FindDescriptorByName(name) + if err != nil { + return nil, err + } + xd, ok := d.(protoreflect.ExtensionDescriptor) + if !ok { + return nil, errors.New("found wrong type: got %v, want extension", descName(d)) + } + return NewExtensionType(xd), nil +} + +// FindExtensionByNumber looks up an extension field by the field number +// within some parent message, identified by full name. +// +// This returns (nil, [protoregistry.NotFound]) if not found. +func (t *Types) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { + // Construct the extension number map lazily, since not every user will need it. + // Update the map if new files are added to the registry. + if atomic.LoadUint64(&t.atomicExtFiles) != uint64(t.files.NumFiles()) { + t.updateExtensions() + } + xd := t.extensionsByMessage[extField{message, field}] + if xd == nil { + return nil, protoregistry.NotFound + } + return NewExtensionType(xd), nil +} + +// FindMessageByName looks up a message by its full name; +// e.g. "google.protobuf.Any". +// +// This returns (nil, [protoregistry.NotFound]) if not found. +func (t *Types) FindMessageByName(name protoreflect.FullName) (protoreflect.MessageType, error) { + d, err := t.files.FindDescriptorByName(name) + if err != nil { + return nil, err + } + md, ok := d.(protoreflect.MessageDescriptor) + if !ok { + return nil, errors.New("found wrong type: got %v, want message", descName(d)) + } + return NewMessageType(md), nil +} + +// FindMessageByURL looks up a message by a URL identifier. +// See documentation on google.protobuf.Any.type_url for the URL format. +// +// This returns (nil, [protoregistry.NotFound]) if not found. +func (t *Types) FindMessageByURL(url string) (protoreflect.MessageType, error) { + // This function is similar to FindMessageByName but + // truncates anything before and including '/' in the URL. 
+ message := protoreflect.FullName(url) + if i := strings.LastIndexByte(url, '/'); i >= 0 { + message = message[i+len("/"):] + } + return t.FindMessageByName(message) +} + +func (t *Types) updateExtensions() { + t.extMu.Lock() + defer t.extMu.Unlock() + if atomic.LoadUint64(&t.atomicExtFiles) == uint64(t.files.NumFiles()) { + return + } + defer atomic.StoreUint64(&t.atomicExtFiles, uint64(t.files.NumFiles())) + t.files.RangeFiles(func(fd protoreflect.FileDescriptor) bool { + t.registerExtensions(fd.Extensions()) + t.registerExtensionsInMessages(fd.Messages()) + return true + }) +} + +func (t *Types) registerExtensionsInMessages(mds protoreflect.MessageDescriptors) { + count := mds.Len() + for i := 0; i < count; i++ { + md := mds.Get(i) + t.registerExtensions(md.Extensions()) + t.registerExtensionsInMessages(md.Messages()) + } +} + +func (t *Types) registerExtensions(xds protoreflect.ExtensionDescriptors) { + count := xds.Len() + for i := 0; i < count; i++ { + xd := xds.Get(i) + field := xd.Number() + message := xd.ContainingMessage().FullName() + if t.extensionsByMessage == nil { + t.extensionsByMessage = make(map[extField]protoreflect.ExtensionDescriptor) + } + t.extensionsByMessage[extField{message, field}] = xd + } +} + +func descName(d protoreflect.Descriptor) string { + switch d.(type) { + case protoreflect.EnumDescriptor: + return "enum" + case protoreflect.EnumValueDescriptor: + return "enum value" + case protoreflect.MessageDescriptor: + return "message" + case protoreflect.ExtensionDescriptor: + return "extension" + case protoreflect.ServiceDescriptor: + return "service" + default: + return fmt.Sprintf("%T", d) + } +} diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go new file mode 100644 index 00000000..a2ca940c --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go @@ -0,0 +1,181 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2023 Google Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/go_features.proto + +package gofeaturespb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" + sync "sync" +) + +type GoFeatures struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Whether or not to generate the deprecated UnmarshalJSON method for enums. 
+ LegacyUnmarshalJsonEnum *bool `protobuf:"varint,1,opt,name=legacy_unmarshal_json_enum,json=legacyUnmarshalJsonEnum" json:"legacy_unmarshal_json_enum,omitempty"` +} + +func (x *GoFeatures) Reset() { + *x = GoFeatures{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_go_features_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GoFeatures) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GoFeatures) ProtoMessage() {} + +func (x *GoFeatures) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_go_features_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GoFeatures.ProtoReflect.Descriptor instead. +func (*GoFeatures) Descriptor() ([]byte, []int) { + return file_google_protobuf_go_features_proto_rawDescGZIP(), []int{0} +} + +func (x *GoFeatures) GetLegacyUnmarshalJsonEnum() bool { + if x != nil && x.LegacyUnmarshalJsonEnum != nil { + return *x.LegacyUnmarshalJsonEnum + } + return false +} + +var file_google_protobuf_go_features_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.FeatureSet)(nil), + ExtensionType: (*GoFeatures)(nil), + Field: 1002, + Name: "pb.go", + Tag: "bytes,1002,opt,name=go", + Filename: "google/protobuf/go_features.proto", + }, +} + +// Extension fields to descriptorpb.FeatureSet. +var ( + // optional pb.GoFeatures go = 1002; + E_Go = &file_google_protobuf_go_features_proto_extTypes[0] +) + +var File_google_protobuf_go_features_proto protoreflect.FileDescriptor + +var file_google_protobuf_go_features_proto_rawDesc = []byte{ + 0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x67, 0x6f, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xcd, 0x01, 0x0a, 0x0a, 0x47, 0x6f, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0xbe, 0x01, 0x0a, 0x1a, 0x6c, 0x65, 0x67, + 0x61, 0x63, 0x79, 0x5f, 0x75, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, 0x5f, 0x6a, 0x73, + 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x42, 0x80, 0x01, + 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x74, 0x72, + 0x75, 0x65, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x18, + 0xe7, 0x07, 0xb2, 0x01, 0x5b, 0x08, 0xe8, 0x07, 0x10, 0xe8, 0x07, 0x1a, 0x53, 0x54, 0x68, 0x65, + 0x20, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x20, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, + 0x6c, 0x4a, 0x53, 0x4f, 0x4e, 0x20, 0x41, 0x50, 0x49, 0x20, 0x69, 0x73, 0x20, 0x64, 0x65, 0x70, + 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x77, 0x69, 0x6c, 0x6c, + 0x20, 0x62, 0x65, 0x20, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x20, 0x61, + 0x20, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65, 0x20, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x52, 0x17, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, + 0x6c, 0x4a, 0x73, 0x6f, 0x6e, 0x45, 0x6e, 0x75, 0x6d, 0x3a, 0x3c, 0x0a, 
0x02, 0x67, 0x6f, 0x12, + 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18, 0xea, 0x07, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x73, 0x52, 0x02, 0x67, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x67, 0x6f, 0x66, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x70, 0x62, +} + +var ( + file_google_protobuf_go_features_proto_rawDescOnce sync.Once + file_google_protobuf_go_features_proto_rawDescData = file_google_protobuf_go_features_proto_rawDesc +) + +func file_google_protobuf_go_features_proto_rawDescGZIP() []byte { + file_google_protobuf_go_features_proto_rawDescOnce.Do(func() { + file_google_protobuf_go_features_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_go_features_proto_rawDescData) + }) + return file_google_protobuf_go_features_proto_rawDescData +} + +var file_google_protobuf_go_features_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_protobuf_go_features_proto_goTypes = []any{ + (*GoFeatures)(nil), // 0: pb.GoFeatures + (*descriptorpb.FeatureSet)(nil), // 1: google.protobuf.FeatureSet +} +var file_google_protobuf_go_features_proto_depIdxs = []int32{ + 1, // 0: pb.go:extendee -> google.protobuf.FeatureSet + 0, // 1: pb.go:type_name -> pb.GoFeatures + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 1, // [1:2] is the sub-list for extension type_name + 0, // [0:1] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_protobuf_go_features_proto_init() } +func file_google_protobuf_go_features_proto_init() { + if File_google_protobuf_go_features_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_protobuf_go_features_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*GoFeatures); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_go_features_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 1, + NumServices: 0, + }, + GoTypes: file_google_protobuf_go_features_proto_goTypes, + DependencyIndexes: file_google_protobuf_go_features_proto_depIdxs, + MessageInfos: file_google_protobuf_go_features_proto_msgTypes, + ExtensionInfos: file_google_protobuf_go_features_proto_extTypes, + }.Build() + File_google_protobuf_go_features_proto = out.File + file_google_protobuf_go_features_proto_rawDesc = nil + file_google_protobuf_go_features_proto_goTypes = nil + file_google_protobuf_go_features_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go index 580b232f..7172b43d 100644 --- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go @@ -237,7 +237,8 @@ type Any struct { // // Note: this functionality is not currently available in the official // protobuf release, and it is 
not used for type URLs beginning with - // type.googleapis.com. + // type.googleapis.com. As of May 2023, there are no widely used type server + // implementations and no plans to implement one. // // Schemes other than `http`, `https` (or the empty scheme) might be // used with implementation specific semantics. @@ -444,7 +445,7 @@ func file_google_protobuf_any_proto_rawDescGZIP() []byte { } var file_google_protobuf_any_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_any_proto_goTypes = []interface{}{ +var file_google_protobuf_any_proto_goTypes = []any{ (*Any)(nil), // 0: google.protobuf.Any } var file_google_protobuf_any_proto_depIdxs = []int32{ @@ -461,7 +462,7 @@ func file_google_protobuf_any_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_any_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_any_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Any); i { case 0: return &v.state diff --git a/vendor/google.golang.org/protobuf/types/known/apipb/api.pb.go b/vendor/google.golang.org/protobuf/types/known/apipb/api.pb.go new file mode 100644 index 00000000..4f2fe89e --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/known/apipb/api.pb.go @@ -0,0 +1,575 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/api.proto + +package apipb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + sourcecontextpb "google.golang.org/protobuf/types/known/sourcecontextpb" + typepb "google.golang.org/protobuf/types/known/typepb" + reflect "reflect" + sync "sync" +) + +// Api is a light-weight descriptor for an API Interface. 
+// +// Interfaces are also described as "protocol buffer services" in some contexts, +// such as by the "service" keyword in a .proto file, but they are different +// from API Services, which represent a concrete implementation of an interface +// as opposed to simply a description of methods and bindings. They are also +// sometimes simply referred to as "APIs" in other contexts, such as the name of +// this message itself. See https://cloud.google.com/apis/design/glossary for +// detailed terminology. +type Api struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The fully qualified name of this interface, including package name + // followed by the interface's simple name. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The methods of this interface, in unspecified order. + Methods []*Method `protobuf:"bytes,2,rep,name=methods,proto3" json:"methods,omitempty"` + // Any metadata attached to the interface. + Options []*typepb.Option `protobuf:"bytes,3,rep,name=options,proto3" json:"options,omitempty"` + // A version string for this interface. If specified, must have the form + // `major-version.minor-version`, as in `1.10`. If the minor version is + // omitted, it defaults to zero. If the entire version field is empty, the + // major version is derived from the package name, as outlined below. If the + // field is not empty, the version in the package name will be verified to be + // consistent with what is provided here. + // + // The versioning schema uses [semantic + // versioning](http://semver.org) where the major version number + // indicates a breaking change and the minor version an additive, + // non-breaking change. Both version numbers are signals to users + // what to expect from different versions, and should be carefully + // chosen based on the product plan. + // + // The major version is also reflected in the package name of the + // interface, which must end in `v`, as in + // `google.feature.v1`. For major versions 0 and 1, the suffix can + // be omitted. Zero major versions must only be used for + // experimental, non-GA interfaces. + Version string `protobuf:"bytes,4,opt,name=version,proto3" json:"version,omitempty"` + // Source context for the protocol buffer service represented by this + // message. + SourceContext *sourcecontextpb.SourceContext `protobuf:"bytes,5,opt,name=source_context,json=sourceContext,proto3" json:"source_context,omitempty"` + // Included interfaces. See [Mixin][]. + Mixins []*Mixin `protobuf:"bytes,6,rep,name=mixins,proto3" json:"mixins,omitempty"` + // The source syntax of the service. + Syntax typepb.Syntax `protobuf:"varint,7,opt,name=syntax,proto3,enum=google.protobuf.Syntax" json:"syntax,omitempty"` +} + +func (x *Api) Reset() { + *x = Api{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_api_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Api) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Api) ProtoMessage() {} + +func (x *Api) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_api_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Api.ProtoReflect.Descriptor instead. 
+func (*Api) Descriptor() ([]byte, []int) { + return file_google_protobuf_api_proto_rawDescGZIP(), []int{0} +} + +func (x *Api) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Api) GetMethods() []*Method { + if x != nil { + return x.Methods + } + return nil +} + +func (x *Api) GetOptions() []*typepb.Option { + if x != nil { + return x.Options + } + return nil +} + +func (x *Api) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *Api) GetSourceContext() *sourcecontextpb.SourceContext { + if x != nil { + return x.SourceContext + } + return nil +} + +func (x *Api) GetMixins() []*Mixin { + if x != nil { + return x.Mixins + } + return nil +} + +func (x *Api) GetSyntax() typepb.Syntax { + if x != nil { + return x.Syntax + } + return typepb.Syntax(0) +} + +// Method represents a method of an API interface. +type Method struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The simple name of this method. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // A URL of the input message type. + RequestTypeUrl string `protobuf:"bytes,2,opt,name=request_type_url,json=requestTypeUrl,proto3" json:"request_type_url,omitempty"` + // If true, the request is streamed. + RequestStreaming bool `protobuf:"varint,3,opt,name=request_streaming,json=requestStreaming,proto3" json:"request_streaming,omitempty"` + // The URL of the output message type. + ResponseTypeUrl string `protobuf:"bytes,4,opt,name=response_type_url,json=responseTypeUrl,proto3" json:"response_type_url,omitempty"` + // If true, the response is streamed. + ResponseStreaming bool `protobuf:"varint,5,opt,name=response_streaming,json=responseStreaming,proto3" json:"response_streaming,omitempty"` + // Any metadata attached to the method. + Options []*typepb.Option `protobuf:"bytes,6,rep,name=options,proto3" json:"options,omitempty"` + // The source syntax of this method. + Syntax typepb.Syntax `protobuf:"varint,7,opt,name=syntax,proto3,enum=google.protobuf.Syntax" json:"syntax,omitempty"` +} + +func (x *Method) Reset() { + *x = Method{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_api_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Method) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Method) ProtoMessage() {} + +func (x *Method) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_api_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Method.ProtoReflect.Descriptor instead. 
+func (*Method) Descriptor() ([]byte, []int) { + return file_google_protobuf_api_proto_rawDescGZIP(), []int{1} +} + +func (x *Method) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Method) GetRequestTypeUrl() string { + if x != nil { + return x.RequestTypeUrl + } + return "" +} + +func (x *Method) GetRequestStreaming() bool { + if x != nil { + return x.RequestStreaming + } + return false +} + +func (x *Method) GetResponseTypeUrl() string { + if x != nil { + return x.ResponseTypeUrl + } + return "" +} + +func (x *Method) GetResponseStreaming() bool { + if x != nil { + return x.ResponseStreaming + } + return false +} + +func (x *Method) GetOptions() []*typepb.Option { + if x != nil { + return x.Options + } + return nil +} + +func (x *Method) GetSyntax() typepb.Syntax { + if x != nil { + return x.Syntax + } + return typepb.Syntax(0) +} + +// Declares an API Interface to be included in this interface. The including +// interface must redeclare all the methods from the included interface, but +// documentation and options are inherited as follows: +// +// - If after comment and whitespace stripping, the documentation +// string of the redeclared method is empty, it will be inherited +// from the original method. +// +// - Each annotation belonging to the service config (http, +// visibility) which is not set in the redeclared method will be +// inherited. +// +// - If an http annotation is inherited, the path pattern will be +// modified as follows. Any version prefix will be replaced by the +// version of the including interface plus the [root][] path if +// specified. +// +// Example of a simple mixin: +// +// package google.acl.v1; +// service AccessControl { +// // Get the underlying ACL object. +// rpc GetAcl(GetAclRequest) returns (Acl) { +// option (google.api.http).get = "/v1/{resource=**}:getAcl"; +// } +// } +// +// package google.storage.v2; +// service Storage { +// rpc GetAcl(GetAclRequest) returns (Acl); +// +// // Get a data record. +// rpc GetData(GetDataRequest) returns (Data) { +// option (google.api.http).get = "/v2/{resource=**}"; +// } +// } +// +// Example of a mixin configuration: +// +// apis: +// - name: google.storage.v2.Storage +// mixins: +// - name: google.acl.v1.AccessControl +// +// The mixin construct implies that all methods in `AccessControl` are +// also declared with same name and request/response types in +// `Storage`. A documentation generator or annotation processor will +// see the effective `Storage.GetAcl` method after inherting +// documentation and annotations as follows: +// +// service Storage { +// // Get the underlying ACL object. +// rpc GetAcl(GetAclRequest) returns (Acl) { +// option (google.api.http).get = "/v2/{resource=**}:getAcl"; +// } +// ... +// } +// +// Note how the version in the path pattern changed from `v1` to `v2`. +// +// If the `root` field in the mixin is specified, it should be a +// relative path under which inherited HTTP paths are placed. Example: +// +// apis: +// - name: google.storage.v2.Storage +// mixins: +// - name: google.acl.v1.AccessControl +// root: acls +// +// This implies the following inherited HTTP annotation: +// +// service Storage { +// // Get the underlying ACL object. +// rpc GetAcl(GetAclRequest) returns (Acl) { +// option (google.api.http).get = "/v2/acls/{resource=**}:getAcl"; +// } +// ... 
+// } +type Mixin struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The fully qualified name of the interface which is included. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // If non-empty specifies a path under which inherited HTTP paths + // are rooted. + Root string `protobuf:"bytes,2,opt,name=root,proto3" json:"root,omitempty"` +} + +func (x *Mixin) Reset() { + *x = Mixin{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_api_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Mixin) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Mixin) ProtoMessage() {} + +func (x *Mixin) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_api_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Mixin.ProtoReflect.Descriptor instead. +func (*Mixin) Descriptor() ([]byte, []int) { + return file_google_protobuf_api_proto_rawDescGZIP(), []int{2} +} + +func (x *Mixin) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Mixin) GetRoot() string { + if x != nil { + return x.Root + } + return "" +} + +var File_google_protobuf_api_proto protoreflect.FileDescriptor + +var file_google_protobuf_api_proto_rawDesc = []byte{ + 0x0a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x1a, 0x24, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc1, + 0x02, 0x0a, 0x03, 0x41, 0x70, 0x69, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x31, 0x0a, 0x07, 0x6d, 0x65, + 0x74, 0x68, 0x6f, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, + 0x74, 0x68, 0x6f, 0x64, 0x52, 0x07, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, 0x12, 0x31, 0x0a, + 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x45, 0x0a, 0x0e, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, + 0x78, 0x74, 0x52, 0x0d, 
0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x12, 0x2e, 0x0a, 0x06, 0x6d, 0x69, 0x78, 0x69, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x69, 0x78, 0x69, 0x6e, 0x52, 0x06, 0x6d, 0x69, 0x78, 0x69, 0x6e, + 0x73, 0x12, 0x2f, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x53, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, + 0x61, 0x78, 0x22, 0xb2, 0x02, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x28, 0x0a, 0x10, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x54, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x2b, 0x0a, 0x11, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x53, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x2a, 0x0a, 0x11, 0x72, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x54, 0x79, 0x70, + 0x65, 0x55, 0x72, 0x6c, 0x12, 0x2d, 0x0a, 0x12, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x11, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x69, 0x6e, 0x67, 0x12, 0x31, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2f, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x52, + 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x22, 0x2f, 0x0a, 0x05, 0x4d, 0x69, 0x78, 0x69, 0x6e, + 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x42, 0x76, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, + 0x08, 0x41, 0x70, 0x69, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, + 0x6f, 0x77, 0x6e, 0x2f, 0x61, 0x70, 0x69, 0x70, 0x62, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, + 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 
0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_protobuf_api_proto_rawDescOnce sync.Once + file_google_protobuf_api_proto_rawDescData = file_google_protobuf_api_proto_rawDesc +) + +func file_google_protobuf_api_proto_rawDescGZIP() []byte { + file_google_protobuf_api_proto_rawDescOnce.Do(func() { + file_google_protobuf_api_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_api_proto_rawDescData) + }) + return file_google_protobuf_api_proto_rawDescData +} + +var file_google_protobuf_api_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_google_protobuf_api_proto_goTypes = []any{ + (*Api)(nil), // 0: google.protobuf.Api + (*Method)(nil), // 1: google.protobuf.Method + (*Mixin)(nil), // 2: google.protobuf.Mixin + (*typepb.Option)(nil), // 3: google.protobuf.Option + (*sourcecontextpb.SourceContext)(nil), // 4: google.protobuf.SourceContext + (typepb.Syntax)(0), // 5: google.protobuf.Syntax +} +var file_google_protobuf_api_proto_depIdxs = []int32{ + 1, // 0: google.protobuf.Api.methods:type_name -> google.protobuf.Method + 3, // 1: google.protobuf.Api.options:type_name -> google.protobuf.Option + 4, // 2: google.protobuf.Api.source_context:type_name -> google.protobuf.SourceContext + 2, // 3: google.protobuf.Api.mixins:type_name -> google.protobuf.Mixin + 5, // 4: google.protobuf.Api.syntax:type_name -> google.protobuf.Syntax + 3, // 5: google.protobuf.Method.options:type_name -> google.protobuf.Option + 5, // 6: google.protobuf.Method.syntax:type_name -> google.protobuf.Syntax + 7, // [7:7] is the sub-list for method output_type + 7, // [7:7] is the sub-list for method input_type + 7, // [7:7] is the sub-list for extension type_name + 7, // [7:7] is the sub-list for extension extendee + 0, // [0:7] is the sub-list for field type_name +} + +func init() { file_google_protobuf_api_proto_init() } +func file_google_protobuf_api_proto_init() { + if File_google_protobuf_api_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_protobuf_api_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*Api); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_api_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*Method); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_api_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*Mixin); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_api_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_protobuf_api_proto_goTypes, + DependencyIndexes: file_google_protobuf_api_proto_depIdxs, + MessageInfos: file_google_protobuf_api_proto_msgTypes, + }.Build() + File_google_protobuf_api_proto = out.File + file_google_protobuf_api_proto_rawDesc = nil + file_google_protobuf_api_proto_goTypes = nil + file_google_protobuf_api_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go 
b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go new file mode 100644 index 00000000..1b71bcd9 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go @@ -0,0 +1,374 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/duration.proto + +// Package durationpb contains generated types for google/protobuf/duration.proto. +// +// The Duration message represents a signed span of time. +// +// # Conversion to a Go Duration +// +// The AsDuration method can be used to convert a Duration message to a +// standard Go time.Duration value: +// +// d := dur.AsDuration() +// ... // make use of d as a time.Duration +// +// Converting to a time.Duration is a common operation so that the extensive +// set of time-based operations provided by the time package can be leveraged. +// See https://golang.org/pkg/time for more information. +// +// The AsDuration method performs the conversion on a best-effort basis. +// Durations with denormal values (e.g., nanoseconds beyond -99999999 and +// +99999999, inclusive; or seconds and nanoseconds with opposite signs) +// are normalized during the conversion to a time.Duration. To manually check for +// invalid Duration per the documented limitations in duration.proto, +// additionally call the CheckValid method: +// +// if err := dur.CheckValid(); err != nil { +// ... // handle error +// } +// +// Note that the documented limitations in duration.proto does not protect a +// Duration from overflowing the representable range of a time.Duration in Go. +// The AsDuration method uses saturation arithmetic such that an overflow clamps +// the resulting value to the closest representable value (e.g., math.MaxInt64 +// for positive overflow and math.MinInt64 for negative overflow). 
+// +// # Conversion from a Go Duration +// +// The durationpb.New function can be used to construct a Duration message +// from a standard Go time.Duration value: +// +// dur := durationpb.New(d) +// ... // make use of d as a *durationpb.Duration +package durationpb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + math "math" + reflect "reflect" + sync "sync" + time "time" +) + +// A Duration represents a signed, fixed-length span of time represented +// as a count of seconds and fractions of seconds at nanosecond +// resolution. It is independent of any calendar and concepts like "day" +// or "month". It is related to Timestamp in that the difference between +// two Timestamp values is a Duration and it can be added or subtracted +// from a Timestamp. Range is approximately +-10,000 years. +// +// # Examples +// +// Example 1: Compute Duration from two Timestamps in pseudo code. +// +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; +// +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; +// +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (duration.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } +// +// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. +// +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; +// +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; +// +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } +// +// Example 3: Compute Duration from datetime.timedelta in Python. +// +// td = datetime.timedelta(days=3, minutes=10) +// duration = Duration() +// duration.FromTimedelta(td) +// +// # JSON Mapping +// +// In JSON format, the Duration type is encoded as a string rather than an +// object, where the string ends in the suffix "s" (indicating seconds) and +// is preceded by the number of seconds, with nanoseconds expressed as +// fractional seconds. For example, 3 seconds with 0 nanoseconds should be +// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should +// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 +// microsecond should be expressed in JSON format as "3.000001s". +type Duration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. Note: these bounds are computed from: + // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. 
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` +} + +// New constructs a new Duration from the provided time.Duration. +func New(d time.Duration) *Duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &Duration{Seconds: int64(secs), Nanos: int32(nanos)} +} + +// AsDuration converts x to a time.Duration, +// returning the closest duration value in the event of overflow. +func (x *Duration) AsDuration() time.Duration { + secs := x.GetSeconds() + nanos := x.GetNanos() + d := time.Duration(secs) * time.Second + overflow := d/time.Second != time.Duration(secs) + d += time.Duration(nanos) * time.Nanosecond + overflow = overflow || (secs < 0 && nanos < 0 && d > 0) + overflow = overflow || (secs > 0 && nanos > 0 && d < 0) + if overflow { + switch { + case secs < 0: + return time.Duration(math.MinInt64) + case secs > 0: + return time.Duration(math.MaxInt64) + } + } + return d +} + +// IsValid reports whether the duration is valid. +// It is equivalent to CheckValid == nil. +func (x *Duration) IsValid() bool { + return x.check() == 0 +} + +// CheckValid returns an error if the duration is invalid. +// In particular, it checks whether the value is within the range of +// -10000 years to +10000 years inclusive. +// An error is reported for a nil Duration. +func (x *Duration) CheckValid() error { + switch x.check() { + case invalidNil: + return protoimpl.X.NewError("invalid nil Duration") + case invalidUnderflow: + return protoimpl.X.NewError("duration (%v) exceeds -10000 years", x) + case invalidOverflow: + return protoimpl.X.NewError("duration (%v) exceeds +10000 years", x) + case invalidNanosRange: + return protoimpl.X.NewError("duration (%v) has out-of-range nanos", x) + case invalidNanosSign: + return protoimpl.X.NewError("duration (%v) has seconds and nanos with different signs", x) + default: + return nil + } +} + +const ( + _ = iota + invalidNil + invalidUnderflow + invalidOverflow + invalidNanosRange + invalidNanosSign +) + +func (x *Duration) check() uint { + const absDuration = 315576000000 // 10000yr * 365.25day/yr * 24hr/day * 60min/hr * 60sec/min + secs := x.GetSeconds() + nanos := x.GetNanos() + switch { + case x == nil: + return invalidNil + case secs < -absDuration: + return invalidUnderflow + case secs > +absDuration: + return invalidOverflow + case nanos <= -1e9 || nanos >= +1e9: + return invalidNanosRange + case (secs > 0 && nanos < 0) || (secs < 0 && nanos > 0): + return invalidNanosSign + default: + return 0 + } +} + +func (x *Duration) Reset() { + *x = Duration{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_duration_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Duration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Duration) ProtoMessage() {} + +func (x *Duration) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_duration_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Duration.ProtoReflect.Descriptor instead. 
+func (*Duration) Descriptor() ([]byte, []int) { + return file_google_protobuf_duration_proto_rawDescGZIP(), []int{0} +} + +func (x *Duration) GetSeconds() int64 { + if x != nil { + return x.Seconds + } + return 0 +} + +func (x *Duration) GetNanos() int32 { + if x != nil { + return x.Nanos + } + return 0 +} + +var File_google_protobuf_duration_proto protoreflect.FileDescriptor + +var file_google_protobuf_duration_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x22, 0x3a, 0x0a, 0x08, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, + 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, + 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42, 0x83, 0x01, + 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x31, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, + 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x64, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, + 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, + 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_protobuf_duration_proto_rawDescOnce sync.Once + file_google_protobuf_duration_proto_rawDescData = file_google_protobuf_duration_proto_rawDesc +) + +func file_google_protobuf_duration_proto_rawDescGZIP() []byte { + file_google_protobuf_duration_proto_rawDescOnce.Do(func() { + file_google_protobuf_duration_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_duration_proto_rawDescData) + }) + return file_google_protobuf_duration_proto_rawDescData +} + +var file_google_protobuf_duration_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_protobuf_duration_proto_goTypes = []any{ + (*Duration)(nil), // 0: google.protobuf.Duration +} +var file_google_protobuf_duration_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_protobuf_duration_proto_init() } +func file_google_protobuf_duration_proto_init() { + if File_google_protobuf_duration_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_protobuf_duration_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*Duration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: 
reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_duration_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_protobuf_duration_proto_goTypes, + DependencyIndexes: file_google_protobuf_duration_proto_depIdxs, + MessageInfos: file_google_protobuf_duration_proto_msgTypes, + }.Build() + File_google_protobuf_duration_proto = out.File + file_google_protobuf_duration_proto_rawDesc = nil + file_google_protobuf_duration_proto_goTypes = nil + file_google_protobuf_duration_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go new file mode 100644 index 00000000..d87b4fb8 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go @@ -0,0 +1,166 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/empty.proto + +package emptypb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +// A generic empty message that you can re-use to avoid defining duplicated +// empty messages in your APIs. A typical example is to use it as the request +// or the response type of an API method. 
For instance: +// +// service Foo { +// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); +// } +type Empty struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Empty) Reset() { + *x = Empty{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_empty_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Empty) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Empty) ProtoMessage() {} + +func (x *Empty) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_empty_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Empty.ProtoReflect.Descriptor instead. +func (*Empty) Descriptor() ([]byte, []int) { + return file_google_protobuf_empty_proto_rawDescGZIP(), []int{0} +} + +var File_google_protobuf_empty_proto protoreflect.FileDescriptor + +var file_google_protobuf_empty_proto_rawDesc = []byte{ + 0x0a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x07, + 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x7d, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0a, + 0x45, 0x6d, 0x70, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, + 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, + 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, + 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_protobuf_empty_proto_rawDescOnce sync.Once + file_google_protobuf_empty_proto_rawDescData = file_google_protobuf_empty_proto_rawDesc +) + +func file_google_protobuf_empty_proto_rawDescGZIP() []byte { + file_google_protobuf_empty_proto_rawDescOnce.Do(func() { + file_google_protobuf_empty_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_empty_proto_rawDescData) + }) + return file_google_protobuf_empty_proto_rawDescData +} + +var file_google_protobuf_empty_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_protobuf_empty_proto_goTypes = []any{ + (*Empty)(nil), // 0: google.protobuf.Empty +} +var file_google_protobuf_empty_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_protobuf_empty_proto_init() } +func file_google_protobuf_empty_proto_init() { + if File_google_protobuf_empty_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + 
file_google_protobuf_empty_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*Empty); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_empty_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_protobuf_empty_proto_goTypes, + DependencyIndexes: file_google_protobuf_empty_proto_depIdxs, + MessageInfos: file_google_protobuf_empty_proto_msgTypes, + }.Build() + File_google_protobuf_empty_proto = out.File + file_google_protobuf_empty_proto_rawDesc = nil + file_google_protobuf_empty_proto_goTypes = nil + file_google_protobuf_empty_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go new file mode 100644 index 00000000..ac1e91bb --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go @@ -0,0 +1,588 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/field_mask.proto + +// Package fieldmaskpb contains generated types for google/protobuf/field_mask.proto. +// +// The FieldMask message represents a set of symbolic field paths. +// The paths are specific to some target message type, +// which is not stored within the FieldMask message itself. +// +// # Constructing a FieldMask +// +// The New function is used construct a FieldMask: +// +// var messageType *descriptorpb.DescriptorProto +// fm, err := fieldmaskpb.New(messageType, "field.name", "field.number") +// if err != nil { +// ... // handle error +// } +// ... 
// make use of fm +// +// The "field.name" and "field.number" paths are valid paths according to the +// google.protobuf.DescriptorProto message. Use of a path that does not correlate +// to valid fields reachable from DescriptorProto would result in an error. +// +// Once a FieldMask message has been constructed, +// the Append method can be used to insert additional paths to the path set: +// +// var messageType *descriptorpb.DescriptorProto +// if err := fm.Append(messageType, "options"); err != nil { +// ... // handle error +// } +// +// # Type checking a FieldMask +// +// In order to verify that a FieldMask represents a set of fields that are +// reachable from some target message type, use the IsValid method: +// +// var messageType *descriptorpb.DescriptorProto +// if fm.IsValid(messageType) { +// ... // make use of fm +// } +// +// IsValid needs to be passed the target message type as an input since the +// FieldMask message itself does not store the message type that the set of paths +// are for. +package fieldmaskpb + +import ( + proto "google.golang.org/protobuf/proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sort "sort" + strings "strings" + sync "sync" +) + +// `FieldMask` represents a set of symbolic field paths, for example: +// +// paths: "f.a" +// paths: "f.b.d" +// +// Here `f` represents a field in some root message, `a` and `b` +// fields in the message found in `f`, and `d` a field found in the +// message in `f.b`. +// +// Field masks are used to specify a subset of fields that should be +// returned by a get operation or modified by an update operation. +// Field masks also have a custom JSON encoding (see below). +// +// # Field Masks in Projections +// +// When used in the context of a projection, a response message or +// sub-message is filtered by the API to only contain those fields as +// specified in the mask. For example, if the mask in the previous +// example is applied to a response message as follows: +// +// f { +// a : 22 +// b { +// d : 1 +// x : 2 +// } +// y : 13 +// } +// z: 8 +// +// The result will not contain specific values for fields x,y and z +// (their value will be set to the default, and omitted in proto text +// output): +// +// f { +// a : 22 +// b { +// d : 1 +// } +// } +// +// A repeated field is not allowed except at the last position of a +// paths string. +// +// If a FieldMask object is not present in a get operation, the +// operation applies to all fields (as if a FieldMask of all fields +// had been specified). +// +// Note that a field mask does not necessarily apply to the +// top-level response message. In case of a REST get operation, the +// field mask applies directly to the response, but in case of a REST +// list operation, the mask instead applies to each individual message +// in the returned resource list. In case of a REST custom method, +// other definitions may be used. Where the mask applies will be +// clearly documented together with its declaration in the API. In +// any case, the effect on the returned resource/resources is required +// behavior for APIs. +// +// # Field Masks in Update Operations +// +// A field mask in update operations specifies which fields of the +// targeted resource are going to be updated. The API is required +// to only change the values of the fields as specified in the mask +// and leave the others untouched. 
If a resource is passed in to +// describe the updated values, the API ignores the values of all +// fields not covered by the mask. +// +// If a repeated field is specified for an update operation, new values will +// be appended to the existing repeated field in the target resource. Note that +// a repeated field is only allowed in the last position of a `paths` string. +// +// If a sub-message is specified in the last position of the field mask for an +// update operation, then new value will be merged into the existing sub-message +// in the target resource. +// +// For example, given the target message: +// +// f { +// b { +// d: 1 +// x: 2 +// } +// c: [1] +// } +// +// And an update message: +// +// f { +// b { +// d: 10 +// } +// c: [2] +// } +// +// then if the field mask is: +// +// paths: ["f.b", "f.c"] +// +// then the result will be: +// +// f { +// b { +// d: 10 +// x: 2 +// } +// c: [1, 2] +// } +// +// An implementation may provide options to override this default behavior for +// repeated and message fields. +// +// In order to reset a field's value to the default, the field must +// be in the mask and set to the default value in the provided resource. +// Hence, in order to reset all fields of a resource, provide a default +// instance of the resource and set all fields in the mask, or do +// not provide a mask as described below. +// +// If a field mask is not present on update, the operation applies to +// all fields (as if a field mask of all fields has been specified). +// Note that in the presence of schema evolution, this may mean that +// fields the client does not know and has therefore not filled into +// the request will be reset to their default. If this is unwanted +// behavior, a specific service may require a client to always specify +// a field mask, producing an error if not. +// +// As with get operations, the location of the resource which +// describes the updated values in the request message depends on the +// operation kind. In any case, the effect of the field mask is +// required to be honored by the API. +// +// ## Considerations for HTTP REST +// +// The HTTP kind of an update operation which uses a field mask must +// be set to PATCH instead of PUT in order to satisfy HTTP semantics +// (PUT must only be used for full updates). +// +// # JSON Encoding of Field Masks +// +// In JSON, a field mask is encoded as a single string where paths are +// separated by a comma. Fields name in each path are converted +// to/from lower-camel naming conventions. +// +// As an example, consider the following message declarations: +// +// message Profile { +// User user = 1; +// Photo photo = 2; +// } +// message User { +// string display_name = 1; +// string address = 2; +// } +// +// In proto a field mask for `Profile` may look as such: +// +// mask { +// paths: "user.display_name" +// paths: "photo" +// } +// +// In JSON, the same mask is represented as below: +// +// { +// mask: "user.displayName,photo" +// } +// +// # Field Masks and Oneof Fields +// +// Field masks treat fields in oneofs just as regular fields. Consider the +// following message: +// +// message SampleMessage { +// oneof test_oneof { +// string name = 4; +// SubMessage sub_message = 9; +// } +// } +// +// The field mask can be: +// +// mask { +// paths: "name" +// } +// +// Or: +// +// mask { +// paths: "sub_message" +// } +// +// Note that oneof type names ("test_oneof" in this case) cannot be used in +// paths. 
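+//
+// Paths can also be combined programmatically. A minimal sketch, reusing the
+// Profile example above (illustrative only):
+//
+// a := &fieldmaskpb.FieldMask{Paths: []string{"user.display_name"}}
+// b := &fieldmaskpb.FieldMask{Paths: []string{"photo", "user"}}
+// m := fieldmaskpb.Union(a, b)
+// // m.Paths is ["photo", "user"]: Union sorts the result and drops
+// // "user.display_name" because it is already covered by "user".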
+// +// ## Field Mask Verification +// +// The implementation of any API method which has a FieldMask type field in the +// request should verify the included field paths, and return an +// `INVALID_ARGUMENT` error if any path is unmappable. +type FieldMask struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The set of field mask paths. + Paths []string `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"` +} + +// New constructs a field mask from a list of paths and verifies that +// each one is valid according to the specified message type. +func New(m proto.Message, paths ...string) (*FieldMask, error) { + x := new(FieldMask) + return x, x.Append(m, paths...) +} + +// Union returns the union of all the paths in the input field masks. +func Union(mx *FieldMask, my *FieldMask, ms ...*FieldMask) *FieldMask { + var out []string + out = append(out, mx.GetPaths()...) + out = append(out, my.GetPaths()...) + for _, m := range ms { + out = append(out, m.GetPaths()...) + } + return &FieldMask{Paths: normalizePaths(out)} +} + +// Intersect returns the intersection of all the paths in the input field masks. +func Intersect(mx *FieldMask, my *FieldMask, ms ...*FieldMask) *FieldMask { + var ss1, ss2 []string // reused buffers for performance + intersect := func(out, in []string) []string { + ss1 = normalizePaths(append(ss1[:0], in...)) + ss2 = normalizePaths(append(ss2[:0], out...)) + out = out[:0] + for i1, i2 := 0, 0; i1 < len(ss1) && i2 < len(ss2); { + switch s1, s2 := ss1[i1], ss2[i2]; { + case hasPathPrefix(s1, s2): + out = append(out, s1) + i1++ + case hasPathPrefix(s2, s1): + out = append(out, s2) + i2++ + case lessPath(s1, s2): + i1++ + case lessPath(s2, s1): + i2++ + } + } + return out + } + + out := Union(mx, my, ms...).GetPaths() + out = intersect(out, mx.GetPaths()) + out = intersect(out, my.GetPaths()) + for _, m := range ms { + out = intersect(out, m.GetPaths()) + } + return &FieldMask{Paths: normalizePaths(out)} +} + +// IsValid reports whether all the paths are syntactically valid and +// refer to known fields in the specified message type. +// It reports false for a nil FieldMask. +func (x *FieldMask) IsValid(m proto.Message) bool { + paths := x.GetPaths() + return x != nil && numValidPaths(m, paths) == len(paths) +} + +// Append appends a list of paths to the mask and verifies that each one +// is valid according to the specified message type. +// An invalid path is not appended and breaks insertion of subsequent paths. +func (x *FieldMask) Append(m proto.Message, paths ...string) error { + numValid := numValidPaths(m, paths) + x.Paths = append(x.Paths, paths[:numValid]...) + paths = paths[numValid:] + if len(paths) > 0 { + name := m.ProtoReflect().Descriptor().FullName() + return protoimpl.X.NewError("invalid path %q for message %q", paths[0], name) + } + return nil +} + +func numValidPaths(m proto.Message, paths []string) int { + md0 := m.ProtoReflect().Descriptor() + for i, path := range paths { + md := md0 + if !rangeFields(path, func(field string) bool { + // Search the field within the message. + if md == nil { + return false // not within a message + } + fd := md.Fields().ByName(protoreflect.Name(field)) + // The real field name of a group is the message name. 
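+ // A group is addressed in the path by its message name, while the field
+ // descriptor is registered under the lowercased field name, so the retry
+ // below lowercases the path element and accepts the match only when the
+ // group's message name equals the original element; conversely, a direct
+ // hit on a group field is rejected unless the element matches that name.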
+ if fd == nil { + gd := md.Fields().ByName(protoreflect.Name(strings.ToLower(field))) + if gd != nil && gd.Kind() == protoreflect.GroupKind && string(gd.Message().Name()) == field { + fd = gd + } + } else if fd.Kind() == protoreflect.GroupKind && string(fd.Message().Name()) != field { + fd = nil + } + if fd == nil { + return false // message has does not have this field + } + + // Identify the next message to search within. + md = fd.Message() // may be nil + + // Repeated fields are only allowed at the last position. + if fd.IsList() || fd.IsMap() { + md = nil + } + + return true + }) { + return i + } + } + return len(paths) +} + +// Normalize converts the mask to its canonical form where all paths are sorted +// and redundant paths are removed. +func (x *FieldMask) Normalize() { + x.Paths = normalizePaths(x.Paths) +} + +func normalizePaths(paths []string) []string { + sort.Slice(paths, func(i, j int) bool { + return lessPath(paths[i], paths[j]) + }) + + // Elide any path that is a prefix match on the previous. + out := paths[:0] + for _, path := range paths { + if len(out) > 0 && hasPathPrefix(path, out[len(out)-1]) { + continue + } + out = append(out, path) + } + return out +} + +// hasPathPrefix is like strings.HasPrefix, but further checks for either +// an exact matche or that the prefix is delimited by a dot. +func hasPathPrefix(path, prefix string) bool { + return strings.HasPrefix(path, prefix) && (len(path) == len(prefix) || path[len(prefix)] == '.') +} + +// lessPath is a lexicographical comparison where dot is specially treated +// as the smallest symbol. +func lessPath(x, y string) bool { + for i := 0; i < len(x) && i < len(y); i++ { + if x[i] != y[i] { + return (x[i] - '.') < (y[i] - '.') + } + } + return len(x) < len(y) +} + +// rangeFields is like strings.Split(path, "."), but avoids allocations by +// iterating over each field in place and calling a iterator function. +func rangeFields(path string, f func(field string) bool) bool { + for { + var field string + if i := strings.IndexByte(path, '.'); i >= 0 { + field, path = path[:i], path[i:] + } else { + field, path = path, "" + } + + if !f(field) { + return false + } + + if len(path) == 0 { + return true + } + path = strings.TrimPrefix(path, ".") + } +} + +func (x *FieldMask) Reset() { + *x = FieldMask{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_field_mask_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FieldMask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldMask) ProtoMessage() {} + +func (x *FieldMask) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_field_mask_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FieldMask.ProtoReflect.Descriptor instead. 
+func (*FieldMask) Descriptor() ([]byte, []int) { + return file_google_protobuf_field_mask_proto_rawDescGZIP(), []int{0} +} + +func (x *FieldMask) GetPaths() []string { + if x != nil { + return x.Paths + } + return nil +} + +var File_google_protobuf_field_mask_proto protoreflect.FileDescriptor + +var file_google_protobuf_field_mask_proto_rawDesc = []byte{ + 0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x22, 0x21, 0x0a, 0x09, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, + 0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x42, 0x85, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, + 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, + 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x6d, 0x61, + 0x73, 0x6b, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_protobuf_field_mask_proto_rawDescOnce sync.Once + file_google_protobuf_field_mask_proto_rawDescData = file_google_protobuf_field_mask_proto_rawDesc +) + +func file_google_protobuf_field_mask_proto_rawDescGZIP() []byte { + file_google_protobuf_field_mask_proto_rawDescOnce.Do(func() { + file_google_protobuf_field_mask_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_field_mask_proto_rawDescData) + }) + return file_google_protobuf_field_mask_proto_rawDescData +} + +var file_google_protobuf_field_mask_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_protobuf_field_mask_proto_goTypes = []any{ + (*FieldMask)(nil), // 0: google.protobuf.FieldMask +} +var file_google_protobuf_field_mask_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_protobuf_field_mask_proto_init() } +func file_google_protobuf_field_mask_proto_init() { + if File_google_protobuf_field_mask_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_protobuf_field_mask_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*FieldMask); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_field_mask_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: 
file_google_protobuf_field_mask_proto_goTypes, + DependencyIndexes: file_google_protobuf_field_mask_proto_depIdxs, + MessageInfos: file_google_protobuf_field_mask_proto_msgTypes, + }.Build() + File_google_protobuf_field_mask_proto = out.File + file_google_protobuf_field_mask_proto_rawDesc = nil + file_google_protobuf_field_mask_proto_goTypes = nil + file_google_protobuf_field_mask_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/protobuf/types/known/sourcecontextpb/source_context.pb.go b/vendor/google.golang.org/protobuf/types/known/sourcecontextpb/source_context.pb.go new file mode 100644 index 00000000..fa185780 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/known/sourcecontextpb/source_context.pb.go @@ -0,0 +1,176 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/source_context.proto + +package sourcecontextpb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +// `SourceContext` represents information about the source of a +// protobuf element, like the file in which it is defined. +type SourceContext struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The path-qualified name of the .proto file that contained the associated + // protobuf element. For example: `"google/protobuf/source_context.proto"`. 
+ FileName string `protobuf:"bytes,1,opt,name=file_name,json=fileName,proto3" json:"file_name,omitempty"` +} + +func (x *SourceContext) Reset() { + *x = SourceContext{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_source_context_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SourceContext) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SourceContext) ProtoMessage() {} + +func (x *SourceContext) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_source_context_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SourceContext.ProtoReflect.Descriptor instead. +func (*SourceContext) Descriptor() ([]byte, []int) { + return file_google_protobuf_source_context_proto_rawDescGZIP(), []int{0} +} + +func (x *SourceContext) GetFileName() string { + if x != nil { + return x.FileName + } + return "" +} + +var File_google_protobuf_source_context_proto protoreflect.FileDescriptor + +var file_google_protobuf_source_context_proto_rawDesc = []byte{ + 0x0a, 0x24, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x2c, 0x0a, 0x0d, 0x53, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x69, 0x6c, 0x65, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x69, 0x6c, + 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x42, 0x8a, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x12, 0x53, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x36, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, + 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, + 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x70, 0x62, 0xa2, 0x02, 0x03, 0x47, 0x50, + 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, + 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_protobuf_source_context_proto_rawDescOnce sync.Once + file_google_protobuf_source_context_proto_rawDescData = file_google_protobuf_source_context_proto_rawDesc +) + +func file_google_protobuf_source_context_proto_rawDescGZIP() []byte { + file_google_protobuf_source_context_proto_rawDescOnce.Do(func() { + file_google_protobuf_source_context_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_source_context_proto_rawDescData) + }) + return file_google_protobuf_source_context_proto_rawDescData +} + +var file_google_protobuf_source_context_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_protobuf_source_context_proto_goTypes = []any{ + (*SourceContext)(nil), // 0: 
google.protobuf.SourceContext +} +var file_google_protobuf_source_context_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_protobuf_source_context_proto_init() } +func file_google_protobuf_source_context_proto_init() { + if File_google_protobuf_source_context_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_protobuf_source_context_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*SourceContext); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_source_context_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_protobuf_source_context_proto_goTypes, + DependencyIndexes: file_google_protobuf_source_context_proto_depIdxs, + MessageInfos: file_google_protobuf_source_context_proto_msgTypes, + }.Build() + File_google_protobuf_source_context_proto = out.File + file_google_protobuf_source_context_proto_rawDesc = nil + file_google_protobuf_source_context_proto_goTypes = nil + file_google_protobuf_source_context_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go new file mode 100644 index 00000000..d45361cb --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go @@ -0,0 +1,810 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/struct.proto + +// Package structpb contains generated types for google/protobuf/struct.proto. +// +// The messages (i.e., Value, Struct, and ListValue) defined in struct.proto are +// used to represent arbitrary JSON. The Value message represents a JSON value, +// the Struct message represents a JSON object, and the ListValue message +// represents a JSON array. See https://json.org for more information. +// +// The Value, Struct, and ListValue types have generated MarshalJSON and +// UnmarshalJSON methods such that they serialize JSON equivalent to what the +// messages themselves represent. Use of these types with the +// "google.golang.org/protobuf/encoding/protojson" package +// ensures that they will be serialized as their JSON equivalent. +// +// # Conversion to and from a Go interface +// +// The standard Go "encoding/json" package has functionality to serialize +// arbitrary types to a large degree. The Value.AsInterface, Struct.AsMap, and +// ListValue.AsSlice methods can convert the protobuf message representation into +// a form represented by any, map[string]any, and []any. +// This form can be used with other packages that operate on such data structures +// and also directly with the standard json package. +// +// In order to convert the any, map[string]any, and []any +// forms back as Value, Struct, and ListValue messages, use the NewStruct, +// NewList, and NewValue constructor functions. +// +// # Example usage +// +// Consider the following example JSON object: +// +// { +// "firstName": "John", +// "lastName": "Smith", +// "isAlive": true, +// "age": 27, +// "address": { +// "streetAddress": "21 2nd Street", +// "city": "New York", +// "state": "NY", +// "postalCode": "10021-3100" +// }, +// "phoneNumbers": [ +// { +// "type": "home", +// "number": "212 555-1234" +// }, +// { +// "type": "office", +// "number": "646 555-4567" +// } +// ], +// "children": [], +// "spouse": null +// } +// +// To construct a Value message representing the above JSON object: +// +// m, err := structpb.NewValue(map[string]any{ +// "firstName": "John", +// "lastName": "Smith", +// "isAlive": true, +// "age": 27, +// "address": map[string]any{ +// "streetAddress": "21 2nd Street", +// "city": "New York", +// "state": "NY", +// "postalCode": "10021-3100", +// }, +// "phoneNumbers": []any{ +// map[string]any{ +// "type": "home", +// "number": "212 555-1234", +// }, +// map[string]any{ +// "type": "office", +// "number": "646 555-4567", +// }, +// }, +// "children": []any{}, +// "spouse": nil, +// }) +// if err != nil { +// ... // handle error +// } +// ... // make use of m as a *structpb.Value +package structpb + +import ( + base64 "encoding/base64" + protojson "google.golang.org/protobuf/encoding/protojson" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + math "math" + reflect "reflect" + sync "sync" + utf8 "unicode/utf8" +) + +// `NullValue` is a singleton enumeration to represent the null value for the +// `Value` type union. +// +// The JSON representation for `NullValue` is JSON `null`. +type NullValue int32 + +const ( + // Null value. + NullValue_NULL_VALUE NullValue = 0 +) + +// Enum value maps for NullValue. 
+var ( + NullValue_name = map[int32]string{ + 0: "NULL_VALUE", + } + NullValue_value = map[string]int32{ + "NULL_VALUE": 0, + } +) + +func (x NullValue) Enum() *NullValue { + p := new(NullValue) + *p = x + return p +} + +func (x NullValue) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (NullValue) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_struct_proto_enumTypes[0].Descriptor() +} + +func (NullValue) Type() protoreflect.EnumType { + return &file_google_protobuf_struct_proto_enumTypes[0] +} + +func (x NullValue) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use NullValue.Descriptor instead. +func (NullValue) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_struct_proto_rawDescGZIP(), []int{0} +} + +// `Struct` represents a structured data value, consisting of fields +// which map to dynamically typed values. In some languages, `Struct` +// might be supported by a native representation. For example, in +// scripting languages like JS a struct is represented as an +// object. The details of that representation are described together +// with the proto support for the language. +// +// The JSON representation for `Struct` is JSON object. +type Struct struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Unordered map of dynamically typed values. + Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +// NewStruct constructs a Struct from a general-purpose Go map. +// The map keys must be valid UTF-8. +// The map values are converted using NewValue. +func NewStruct(v map[string]any) (*Struct, error) { + x := &Struct{Fields: make(map[string]*Value, len(v))} + for k, v := range v { + if !utf8.ValidString(k) { + return nil, protoimpl.X.NewError("invalid UTF-8 in string: %q", k) + } + var err error + x.Fields[k], err = NewValue(v) + if err != nil { + return nil, err + } + } + return x, nil +} + +// AsMap converts x to a general-purpose Go map. +// The map values are converted by calling Value.AsInterface. +func (x *Struct) AsMap() map[string]any { + f := x.GetFields() + vs := make(map[string]any, len(f)) + for k, v := range f { + vs[k] = v.AsInterface() + } + return vs +} + +func (x *Struct) MarshalJSON() ([]byte, error) { + return protojson.Marshal(x) +} + +func (x *Struct) UnmarshalJSON(b []byte) error { + return protojson.Unmarshal(b, x) +} + +func (x *Struct) Reset() { + *x = Struct{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_struct_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Struct) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Struct) ProtoMessage() {} + +func (x *Struct) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_struct_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Struct.ProtoReflect.Descriptor instead. 
+func (*Struct) Descriptor() ([]byte, []int) { + return file_google_protobuf_struct_proto_rawDescGZIP(), []int{0} +} + +func (x *Struct) GetFields() map[string]*Value { + if x != nil { + return x.Fields + } + return nil +} + +// `Value` represents a dynamically typed value which can be either +// null, a number, a string, a boolean, a recursive struct value, or a +// list of values. A producer of value is expected to set one of these +// variants. Absence of any variant indicates an error. +// +// The JSON representation for `Value` is JSON value. +type Value struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The kind of value. + // + // Types that are assignable to Kind: + // + // *Value_NullValue + // *Value_NumberValue + // *Value_StringValue + // *Value_BoolValue + // *Value_StructValue + // *Value_ListValue + Kind isValue_Kind `protobuf_oneof:"kind"` +} + +// NewValue constructs a Value from a general-purpose Go interface. +// +// ╔════════════════════════╤════════════════════════════════════════════╗ +// ║ Go type │ Conversion ║ +// ╠════════════════════════╪════════════════════════════════════════════╣ +// ║ nil │ stored as NullValue ║ +// ║ bool │ stored as BoolValue ║ +// ║ int, int32, int64 │ stored as NumberValue ║ +// ║ uint, uint32, uint64 │ stored as NumberValue ║ +// ║ float32, float64 │ stored as NumberValue ║ +// ║ string │ stored as StringValue; must be valid UTF-8 ║ +// ║ []byte │ stored as StringValue; base64-encoded ║ +// ║ map[string]any │ stored as StructValue ║ +// ║ []any │ stored as ListValue ║ +// ╚════════════════════════╧════════════════════════════════════════════╝ +// +// When converting an int64 or uint64 to a NumberValue, numeric precision loss +// is possible since they are stored as a float64. +func NewValue(v any) (*Value, error) { + switch v := v.(type) { + case nil: + return NewNullValue(), nil + case bool: + return NewBoolValue(v), nil + case int: + return NewNumberValue(float64(v)), nil + case int32: + return NewNumberValue(float64(v)), nil + case int64: + return NewNumberValue(float64(v)), nil + case uint: + return NewNumberValue(float64(v)), nil + case uint32: + return NewNumberValue(float64(v)), nil + case uint64: + return NewNumberValue(float64(v)), nil + case float32: + return NewNumberValue(float64(v)), nil + case float64: + return NewNumberValue(float64(v)), nil + case string: + if !utf8.ValidString(v) { + return nil, protoimpl.X.NewError("invalid UTF-8 in string: %q", v) + } + return NewStringValue(v), nil + case []byte: + s := base64.StdEncoding.EncodeToString(v) + return NewStringValue(s), nil + case map[string]any: + v2, err := NewStruct(v) + if err != nil { + return nil, err + } + return NewStructValue(v2), nil + case []any: + v2, err := NewList(v) + if err != nil { + return nil, err + } + return NewListValue(v2), nil + default: + return nil, protoimpl.X.NewError("invalid type: %T", v) + } +} + +// NewNullValue constructs a new null Value. +func NewNullValue() *Value { + return &Value{Kind: &Value_NullValue{NullValue: NullValue_NULL_VALUE}} +} + +// NewBoolValue constructs a new boolean Value. +func NewBoolValue(v bool) *Value { + return &Value{Kind: &Value_BoolValue{BoolValue: v}} +} + +// NewNumberValue constructs a new number Value. +func NewNumberValue(v float64) *Value { + return &Value{Kind: &Value_NumberValue{NumberValue: v}} +} + +// NewStringValue constructs a new string Value. 
+func NewStringValue(v string) *Value { + return &Value{Kind: &Value_StringValue{StringValue: v}} +} + +// NewStructValue constructs a new struct Value. +func NewStructValue(v *Struct) *Value { + return &Value{Kind: &Value_StructValue{StructValue: v}} +} + +// NewListValue constructs a new list Value. +func NewListValue(v *ListValue) *Value { + return &Value{Kind: &Value_ListValue{ListValue: v}} +} + +// AsInterface converts x to a general-purpose Go interface. +// +// Calling Value.MarshalJSON and "encoding/json".Marshal on this output produce +// semantically equivalent JSON (assuming no errors occur). +// +// Floating-point values (i.e., "NaN", "Infinity", and "-Infinity") are +// converted as strings to remain compatible with MarshalJSON. +func (x *Value) AsInterface() any { + switch v := x.GetKind().(type) { + case *Value_NumberValue: + if v != nil { + switch { + case math.IsNaN(v.NumberValue): + return "NaN" + case math.IsInf(v.NumberValue, +1): + return "Infinity" + case math.IsInf(v.NumberValue, -1): + return "-Infinity" + default: + return v.NumberValue + } + } + case *Value_StringValue: + if v != nil { + return v.StringValue + } + case *Value_BoolValue: + if v != nil { + return v.BoolValue + } + case *Value_StructValue: + if v != nil { + return v.StructValue.AsMap() + } + case *Value_ListValue: + if v != nil { + return v.ListValue.AsSlice() + } + } + return nil +} + +func (x *Value) MarshalJSON() ([]byte, error) { + return protojson.Marshal(x) +} + +func (x *Value) UnmarshalJSON(b []byte) error { + return protojson.Unmarshal(b, x) +} + +func (x *Value) Reset() { + *x = Value{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_struct_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Value) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Value) ProtoMessage() {} + +func (x *Value) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_struct_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Value.ProtoReflect.Descriptor instead. +func (*Value) Descriptor() ([]byte, []int) { + return file_google_protobuf_struct_proto_rawDescGZIP(), []int{1} +} + +func (m *Value) GetKind() isValue_Kind { + if m != nil { + return m.Kind + } + return nil +} + +func (x *Value) GetNullValue() NullValue { + if x, ok := x.GetKind().(*Value_NullValue); ok { + return x.NullValue + } + return NullValue_NULL_VALUE +} + +func (x *Value) GetNumberValue() float64 { + if x, ok := x.GetKind().(*Value_NumberValue); ok { + return x.NumberValue + } + return 0 +} + +func (x *Value) GetStringValue() string { + if x, ok := x.GetKind().(*Value_StringValue); ok { + return x.StringValue + } + return "" +} + +func (x *Value) GetBoolValue() bool { + if x, ok := x.GetKind().(*Value_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (x *Value) GetStructValue() *Struct { + if x, ok := x.GetKind().(*Value_StructValue); ok { + return x.StructValue + } + return nil +} + +func (x *Value) GetListValue() *ListValue { + if x, ok := x.GetKind().(*Value_ListValue); ok { + return x.ListValue + } + return nil +} + +type isValue_Kind interface { + isValue_Kind() +} + +type Value_NullValue struct { + // Represents a null value. 
+ NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"` +} + +type Value_NumberValue struct { + // Represents a double value. + NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,proto3,oneof"` +} + +type Value_StringValue struct { + // Represents a string value. + StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,proto3,oneof"` +} + +type Value_BoolValue struct { + // Represents a boolean value. + BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,proto3,oneof"` +} + +type Value_StructValue struct { + // Represents a structured value. + StructValue *Struct `protobuf:"bytes,5,opt,name=struct_value,json=structValue,proto3,oneof"` +} + +type Value_ListValue struct { + // Represents a repeated `Value`. + ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,proto3,oneof"` +} + +func (*Value_NullValue) isValue_Kind() {} + +func (*Value_NumberValue) isValue_Kind() {} + +func (*Value_StringValue) isValue_Kind() {} + +func (*Value_BoolValue) isValue_Kind() {} + +func (*Value_StructValue) isValue_Kind() {} + +func (*Value_ListValue) isValue_Kind() {} + +// `ListValue` is a wrapper around a repeated field of values. +// +// The JSON representation for `ListValue` is JSON array. +type ListValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Repeated field of dynamically typed values. + Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` +} + +// NewList constructs a ListValue from a general-purpose Go slice. +// The slice elements are converted using NewValue. +func NewList(v []any) (*ListValue, error) { + x := &ListValue{Values: make([]*Value, len(v))} + for i, v := range v { + var err error + x.Values[i], err = NewValue(v) + if err != nil { + return nil, err + } + } + return x, nil +} + +// AsSlice converts x to a general-purpose Go slice. +// The slice elements are converted by calling Value.AsInterface. +func (x *ListValue) AsSlice() []any { + vals := x.GetValues() + vs := make([]any, len(vals)) + for i, v := range vals { + vs[i] = v.AsInterface() + } + return vs +} + +func (x *ListValue) MarshalJSON() ([]byte, error) { + return protojson.Marshal(x) +} + +func (x *ListValue) UnmarshalJSON(b []byte) error { + return protojson.Unmarshal(b, x) +} + +func (x *ListValue) Reset() { + *x = ListValue{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_struct_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListValue) ProtoMessage() {} + +func (x *ListValue) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_struct_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListValue.ProtoReflect.Descriptor instead. 
+func (*ListValue) Descriptor() ([]byte, []int) { + return file_google_protobuf_struct_proto_rawDescGZIP(), []int{2} +} + +func (x *ListValue) GetValues() []*Value { + if x != nil { + return x.Values + } + return nil +} + +var File_google_protobuf_struct_proto protoreflect.FileDescriptor + +var file_google_protobuf_struct_proto_rawDesc = []byte{ + 0x0a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, + 0x98, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x12, 0x3b, 0x0a, 0x06, 0x66, 0x69, + 0x65, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, + 0x75, 0x63, 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x51, 0x0a, 0x0b, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb2, 0x02, 0x0a, 0x05, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x6e, 0x75, 0x6d, 0x62, 0x65, + 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, + 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, + 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62, + 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x48, + 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3c, 0x0a, 0x0c, + 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0b, 0x73, + 0x74, 0x72, 0x75, 0x63, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6c, 0x69, + 0x73, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6c, 0x69, + 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x22, + 0x3b, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 
0x65, 0x12, 0x2e, 0x0a, 0x06, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x2a, 0x1b, 0x0a, 0x09, + 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x0e, 0x0a, 0x0a, 0x4e, 0x55, 0x4c, + 0x4c, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x10, 0x00, 0x42, 0x7f, 0x0a, 0x13, 0x63, 0x6f, 0x6d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x42, 0x0b, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, + 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x70, 0x62, + 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, + 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_google_protobuf_struct_proto_rawDescOnce sync.Once + file_google_protobuf_struct_proto_rawDescData = file_google_protobuf_struct_proto_rawDesc +) + +func file_google_protobuf_struct_proto_rawDescGZIP() []byte { + file_google_protobuf_struct_proto_rawDescOnce.Do(func() { + file_google_protobuf_struct_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_struct_proto_rawDescData) + }) + return file_google_protobuf_struct_proto_rawDescData +} + +var file_google_protobuf_struct_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_protobuf_struct_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_google_protobuf_struct_proto_goTypes = []any{ + (NullValue)(0), // 0: google.protobuf.NullValue + (*Struct)(nil), // 1: google.protobuf.Struct + (*Value)(nil), // 2: google.protobuf.Value + (*ListValue)(nil), // 3: google.protobuf.ListValue + nil, // 4: google.protobuf.Struct.FieldsEntry +} +var file_google_protobuf_struct_proto_depIdxs = []int32{ + 4, // 0: google.protobuf.Struct.fields:type_name -> google.protobuf.Struct.FieldsEntry + 0, // 1: google.protobuf.Value.null_value:type_name -> google.protobuf.NullValue + 1, // 2: google.protobuf.Value.struct_value:type_name -> google.protobuf.Struct + 3, // 3: google.protobuf.Value.list_value:type_name -> google.protobuf.ListValue + 2, // 4: google.protobuf.ListValue.values:type_name -> google.protobuf.Value + 2, // 5: google.protobuf.Struct.FieldsEntry.value:type_name -> google.protobuf.Value + 6, // [6:6] is the sub-list for method output_type + 6, // [6:6] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name +} + +func init() { file_google_protobuf_struct_proto_init() } +func file_google_protobuf_struct_proto_init() { + if File_google_protobuf_struct_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_protobuf_struct_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*Struct); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_google_protobuf_struct_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*Value); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_struct_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*ListValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_google_protobuf_struct_proto_msgTypes[1].OneofWrappers = []any{ + (*Value_NullValue)(nil), + (*Value_NumberValue)(nil), + (*Value_StringValue)(nil), + (*Value_BoolValue)(nil), + (*Value_StructValue)(nil), + (*Value_ListValue)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_struct_proto_rawDesc, + NumEnums: 1, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_protobuf_struct_proto_goTypes, + DependencyIndexes: file_google_protobuf_struct_proto_depIdxs, + EnumInfos: file_google_protobuf_struct_proto_enumTypes, + MessageInfos: file_google_protobuf_struct_proto_msgTypes, + }.Build() + File_google_protobuf_struct_proto = out.File + file_google_protobuf_struct_proto_rawDesc = nil + file_google_protobuf_struct_proto_goTypes = nil + file_google_protobuf_struct_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go index 81511a33..83a5a645 100644 --- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go @@ -332,7 +332,7 @@ func file_google_protobuf_timestamp_proto_rawDescGZIP() []byte { } var file_google_protobuf_timestamp_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_timestamp_proto_goTypes = []interface{}{ +var file_google_protobuf_timestamp_proto_goTypes = []any{ (*Timestamp)(nil), // 0: google.protobuf.Timestamp } var file_google_protobuf_timestamp_proto_depIdxs = []int32{ @@ -349,7 +349,7 @@ func file_google_protobuf_timestamp_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_timestamp_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_timestamp_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Timestamp); i { case 0: return &v.state diff --git a/vendor/google.golang.org/protobuf/types/known/typepb/type.pb.go b/vendor/google.golang.org/protobuf/types/known/typepb/type.pb.go new file mode 100644 index 00000000..52887fd5 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/known/typepb/type.pb.go @@ -0,0 +1,990 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. 
+// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/type.proto + +package typepb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + sourcecontextpb "google.golang.org/protobuf/types/known/sourcecontextpb" + reflect "reflect" + sync "sync" +) + +// The syntax in which a protocol buffer element is defined. +type Syntax int32 + +const ( + // Syntax `proto2`. + Syntax_SYNTAX_PROTO2 Syntax = 0 + // Syntax `proto3`. + Syntax_SYNTAX_PROTO3 Syntax = 1 + // Syntax `editions`. + Syntax_SYNTAX_EDITIONS Syntax = 2 +) + +// Enum value maps for Syntax. +var ( + Syntax_name = map[int32]string{ + 0: "SYNTAX_PROTO2", + 1: "SYNTAX_PROTO3", + 2: "SYNTAX_EDITIONS", + } + Syntax_value = map[string]int32{ + "SYNTAX_PROTO2": 0, + "SYNTAX_PROTO3": 1, + "SYNTAX_EDITIONS": 2, + } +) + +func (x Syntax) Enum() *Syntax { + p := new(Syntax) + *p = x + return p +} + +func (x Syntax) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Syntax) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_type_proto_enumTypes[0].Descriptor() +} + +func (Syntax) Type() protoreflect.EnumType { + return &file_google_protobuf_type_proto_enumTypes[0] +} + +func (x Syntax) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Syntax.Descriptor instead. +func (Syntax) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_type_proto_rawDescGZIP(), []int{0} +} + +// Basic field types. +type Field_Kind int32 + +const ( + // Field type unknown. + Field_TYPE_UNKNOWN Field_Kind = 0 + // Field type double. + Field_TYPE_DOUBLE Field_Kind = 1 + // Field type float. + Field_TYPE_FLOAT Field_Kind = 2 + // Field type int64. + Field_TYPE_INT64 Field_Kind = 3 + // Field type uint64. + Field_TYPE_UINT64 Field_Kind = 4 + // Field type int32. + Field_TYPE_INT32 Field_Kind = 5 + // Field type fixed64. + Field_TYPE_FIXED64 Field_Kind = 6 + // Field type fixed32. + Field_TYPE_FIXED32 Field_Kind = 7 + // Field type bool. + Field_TYPE_BOOL Field_Kind = 8 + // Field type string. + Field_TYPE_STRING Field_Kind = 9 + // Field type group. Proto2 syntax only, and deprecated. + Field_TYPE_GROUP Field_Kind = 10 + // Field type message. + Field_TYPE_MESSAGE Field_Kind = 11 + // Field type bytes. + Field_TYPE_BYTES Field_Kind = 12 + // Field type uint32. 
+ Field_TYPE_UINT32 Field_Kind = 13 + // Field type enum. + Field_TYPE_ENUM Field_Kind = 14 + // Field type sfixed32. + Field_TYPE_SFIXED32 Field_Kind = 15 + // Field type sfixed64. + Field_TYPE_SFIXED64 Field_Kind = 16 + // Field type sint32. + Field_TYPE_SINT32 Field_Kind = 17 + // Field type sint64. + Field_TYPE_SINT64 Field_Kind = 18 +) + +// Enum value maps for Field_Kind. +var ( + Field_Kind_name = map[int32]string{ + 0: "TYPE_UNKNOWN", + 1: "TYPE_DOUBLE", + 2: "TYPE_FLOAT", + 3: "TYPE_INT64", + 4: "TYPE_UINT64", + 5: "TYPE_INT32", + 6: "TYPE_FIXED64", + 7: "TYPE_FIXED32", + 8: "TYPE_BOOL", + 9: "TYPE_STRING", + 10: "TYPE_GROUP", + 11: "TYPE_MESSAGE", + 12: "TYPE_BYTES", + 13: "TYPE_UINT32", + 14: "TYPE_ENUM", + 15: "TYPE_SFIXED32", + 16: "TYPE_SFIXED64", + 17: "TYPE_SINT32", + 18: "TYPE_SINT64", + } + Field_Kind_value = map[string]int32{ + "TYPE_UNKNOWN": 0, + "TYPE_DOUBLE": 1, + "TYPE_FLOAT": 2, + "TYPE_INT64": 3, + "TYPE_UINT64": 4, + "TYPE_INT32": 5, + "TYPE_FIXED64": 6, + "TYPE_FIXED32": 7, + "TYPE_BOOL": 8, + "TYPE_STRING": 9, + "TYPE_GROUP": 10, + "TYPE_MESSAGE": 11, + "TYPE_BYTES": 12, + "TYPE_UINT32": 13, + "TYPE_ENUM": 14, + "TYPE_SFIXED32": 15, + "TYPE_SFIXED64": 16, + "TYPE_SINT32": 17, + "TYPE_SINT64": 18, + } +) + +func (x Field_Kind) Enum() *Field_Kind { + p := new(Field_Kind) + *p = x + return p +} + +func (x Field_Kind) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Field_Kind) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_type_proto_enumTypes[1].Descriptor() +} + +func (Field_Kind) Type() protoreflect.EnumType { + return &file_google_protobuf_type_proto_enumTypes[1] +} + +func (x Field_Kind) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Field_Kind.Descriptor instead. +func (Field_Kind) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_type_proto_rawDescGZIP(), []int{1, 0} +} + +// Whether a field is optional, required, or repeated. +type Field_Cardinality int32 + +const ( + // For fields with unknown cardinality. + Field_CARDINALITY_UNKNOWN Field_Cardinality = 0 + // For optional fields. + Field_CARDINALITY_OPTIONAL Field_Cardinality = 1 + // For required fields. Proto2 syntax only. + Field_CARDINALITY_REQUIRED Field_Cardinality = 2 + // For repeated fields. + Field_CARDINALITY_REPEATED Field_Cardinality = 3 +) + +// Enum value maps for Field_Cardinality. +var ( + Field_Cardinality_name = map[int32]string{ + 0: "CARDINALITY_UNKNOWN", + 1: "CARDINALITY_OPTIONAL", + 2: "CARDINALITY_REQUIRED", + 3: "CARDINALITY_REPEATED", + } + Field_Cardinality_value = map[string]int32{ + "CARDINALITY_UNKNOWN": 0, + "CARDINALITY_OPTIONAL": 1, + "CARDINALITY_REQUIRED": 2, + "CARDINALITY_REPEATED": 3, + } +) + +func (x Field_Cardinality) Enum() *Field_Cardinality { + p := new(Field_Cardinality) + *p = x + return p +} + +func (x Field_Cardinality) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Field_Cardinality) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_type_proto_enumTypes[2].Descriptor() +} + +func (Field_Cardinality) Type() protoreflect.EnumType { + return &file_google_protobuf_type_proto_enumTypes[2] +} + +func (x Field_Cardinality) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Field_Cardinality.Descriptor instead. 
+func (Field_Cardinality) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_type_proto_rawDescGZIP(), []int{1, 1} +} + +// A protocol buffer message type. +type Type struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The fully qualified message name. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The list of fields. + Fields []*Field `protobuf:"bytes,2,rep,name=fields,proto3" json:"fields,omitempty"` + // The list of types appearing in `oneof` definitions in this type. + Oneofs []string `protobuf:"bytes,3,rep,name=oneofs,proto3" json:"oneofs,omitempty"` + // The protocol buffer options. + Options []*Option `protobuf:"bytes,4,rep,name=options,proto3" json:"options,omitempty"` + // The source context. + SourceContext *sourcecontextpb.SourceContext `protobuf:"bytes,5,opt,name=source_context,json=sourceContext,proto3" json:"source_context,omitempty"` + // The source syntax. + Syntax Syntax `protobuf:"varint,6,opt,name=syntax,proto3,enum=google.protobuf.Syntax" json:"syntax,omitempty"` + // The source edition string, only valid when syntax is SYNTAX_EDITIONS. + Edition string `protobuf:"bytes,7,opt,name=edition,proto3" json:"edition,omitempty"` +} + +func (x *Type) Reset() { + *x = Type{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_type_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type) ProtoMessage() {} + +func (x *Type) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_type_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type.ProtoReflect.Descriptor instead. +func (*Type) Descriptor() ([]byte, []int) { + return file_google_protobuf_type_proto_rawDescGZIP(), []int{0} +} + +func (x *Type) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Type) GetFields() []*Field { + if x != nil { + return x.Fields + } + return nil +} + +func (x *Type) GetOneofs() []string { + if x != nil { + return x.Oneofs + } + return nil +} + +func (x *Type) GetOptions() []*Option { + if x != nil { + return x.Options + } + return nil +} + +func (x *Type) GetSourceContext() *sourcecontextpb.SourceContext { + if x != nil { + return x.SourceContext + } + return nil +} + +func (x *Type) GetSyntax() Syntax { + if x != nil { + return x.Syntax + } + return Syntax_SYNTAX_PROTO2 +} + +func (x *Type) GetEdition() string { + if x != nil { + return x.Edition + } + return "" +} + +// A single field of a message type. +type Field struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The field type. + Kind Field_Kind `protobuf:"varint,1,opt,name=kind,proto3,enum=google.protobuf.Field_Kind" json:"kind,omitempty"` + // The field cardinality. + Cardinality Field_Cardinality `protobuf:"varint,2,opt,name=cardinality,proto3,enum=google.protobuf.Field_Cardinality" json:"cardinality,omitempty"` + // The field number. + Number int32 `protobuf:"varint,3,opt,name=number,proto3" json:"number,omitempty"` + // The field name. 
+ Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + // The field type URL, without the scheme, for message or enumeration + // types. Example: `"type.googleapis.com/google.protobuf.Timestamp"`. + TypeUrl string `protobuf:"bytes,6,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` + // The index of the field type in `Type.oneofs`, for message or enumeration + // types. The first type has index 1; zero means the type is not in the list. + OneofIndex int32 `protobuf:"varint,7,opt,name=oneof_index,json=oneofIndex,proto3" json:"oneof_index,omitempty"` + // Whether to use alternative packed wire representation. + Packed bool `protobuf:"varint,8,opt,name=packed,proto3" json:"packed,omitempty"` + // The protocol buffer options. + Options []*Option `protobuf:"bytes,9,rep,name=options,proto3" json:"options,omitempty"` + // The field JSON name. + JsonName string `protobuf:"bytes,10,opt,name=json_name,json=jsonName,proto3" json:"json_name,omitempty"` + // The string value of the default value of this field. Proto2 syntax only. + DefaultValue string `protobuf:"bytes,11,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` +} + +func (x *Field) Reset() { + *x = Field{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_type_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Field) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Field) ProtoMessage() {} + +func (x *Field) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_type_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Field.ProtoReflect.Descriptor instead. +func (*Field) Descriptor() ([]byte, []int) { + return file_google_protobuf_type_proto_rawDescGZIP(), []int{1} +} + +func (x *Field) GetKind() Field_Kind { + if x != nil { + return x.Kind + } + return Field_TYPE_UNKNOWN +} + +func (x *Field) GetCardinality() Field_Cardinality { + if x != nil { + return x.Cardinality + } + return Field_CARDINALITY_UNKNOWN +} + +func (x *Field) GetNumber() int32 { + if x != nil { + return x.Number + } + return 0 +} + +func (x *Field) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Field) GetTypeUrl() string { + if x != nil { + return x.TypeUrl + } + return "" +} + +func (x *Field) GetOneofIndex() int32 { + if x != nil { + return x.OneofIndex + } + return 0 +} + +func (x *Field) GetPacked() bool { + if x != nil { + return x.Packed + } + return false +} + +func (x *Field) GetOptions() []*Option { + if x != nil { + return x.Options + } + return nil +} + +func (x *Field) GetJsonName() string { + if x != nil { + return x.JsonName + } + return "" +} + +func (x *Field) GetDefaultValue() string { + if x != nil { + return x.DefaultValue + } + return "" +} + +// Enum type definition. +type Enum struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Enum type name. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Enum value definitions. + Enumvalue []*EnumValue `protobuf:"bytes,2,rep,name=enumvalue,proto3" json:"enumvalue,omitempty"` + // Protocol buffer options. 
+ Options []*Option `protobuf:"bytes,3,rep,name=options,proto3" json:"options,omitempty"` + // The source context. + SourceContext *sourcecontextpb.SourceContext `protobuf:"bytes,4,opt,name=source_context,json=sourceContext,proto3" json:"source_context,omitempty"` + // The source syntax. + Syntax Syntax `protobuf:"varint,5,opt,name=syntax,proto3,enum=google.protobuf.Syntax" json:"syntax,omitempty"` + // The source edition string, only valid when syntax is SYNTAX_EDITIONS. + Edition string `protobuf:"bytes,6,opt,name=edition,proto3" json:"edition,omitempty"` +} + +func (x *Enum) Reset() { + *x = Enum{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_type_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Enum) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Enum) ProtoMessage() {} + +func (x *Enum) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_type_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Enum.ProtoReflect.Descriptor instead. +func (*Enum) Descriptor() ([]byte, []int) { + return file_google_protobuf_type_proto_rawDescGZIP(), []int{2} +} + +func (x *Enum) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Enum) GetEnumvalue() []*EnumValue { + if x != nil { + return x.Enumvalue + } + return nil +} + +func (x *Enum) GetOptions() []*Option { + if x != nil { + return x.Options + } + return nil +} + +func (x *Enum) GetSourceContext() *sourcecontextpb.SourceContext { + if x != nil { + return x.SourceContext + } + return nil +} + +func (x *Enum) GetSyntax() Syntax { + if x != nil { + return x.Syntax + } + return Syntax_SYNTAX_PROTO2 +} + +func (x *Enum) GetEdition() string { + if x != nil { + return x.Edition + } + return "" +} + +// Enum value definition. +type EnumValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Enum value name. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Enum value number. + Number int32 `protobuf:"varint,2,opt,name=number,proto3" json:"number,omitempty"` + // Protocol buffer options. + Options []*Option `protobuf:"bytes,3,rep,name=options,proto3" json:"options,omitempty"` +} + +func (x *EnumValue) Reset() { + *x = EnumValue{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_type_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EnumValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnumValue) ProtoMessage() {} + +func (x *EnumValue) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_type_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EnumValue.ProtoReflect.Descriptor instead. 
+func (*EnumValue) Descriptor() ([]byte, []int) { + return file_google_protobuf_type_proto_rawDescGZIP(), []int{3} +} + +func (x *EnumValue) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *EnumValue) GetNumber() int32 { + if x != nil { + return x.Number + } + return 0 +} + +func (x *EnumValue) GetOptions() []*Option { + if x != nil { + return x.Options + } + return nil +} + +// A protocol buffer option, which can be attached to a message, field, +// enumeration, etc. +type Option struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The option's name. For protobuf built-in options (options defined in + // descriptor.proto), this is the short name. For example, `"map_entry"`. + // For custom options, it should be the fully-qualified name. For example, + // `"google.api.http"`. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The option's value packed in an Any message. If the value is a primitive, + // the corresponding wrapper type defined in google/protobuf/wrappers.proto + // should be used. If the value is an enum, it should be stored as an int32 + // value using the google.protobuf.Int32Value type. + Value *anypb.Any `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *Option) Reset() { + *x = Option{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_type_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Option) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Option) ProtoMessage() {} + +func (x *Option) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_type_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Option.ProtoReflect.Descriptor instead. 
+func (*Option) Descriptor() ([]byte, []int) { + return file_google_protobuf_type_proto_rawDescGZIP(), []int{4} +} + +func (x *Option) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Option) GetValue() *anypb.Any { + if x != nil { + return x.Value + } + return nil +} + +var File_google_protobuf_type_proto protoreflect.FileDescriptor + +var file_google_protobuf_type_proto_rawDesc = []byte{ + 0x0a, 0x1a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x1a, 0x19, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, + 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x24, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa7, + 0x02, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x06, 0x66, + 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x6f, + 0x6e, 0x65, 0x6f, 0x66, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6f, 0x6e, 0x65, + 0x6f, 0x66, 0x73, 0x12, 0x31, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x45, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x52, 0x0d, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x2f, 0x0a, + 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x53, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x18, + 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb4, 0x06, 0x0a, 0x05, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x12, 0x2f, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x2e, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x04, 0x6b, + 0x69, 0x6e, 0x64, 0x12, 0x44, 0x0a, 0x0b, 0x63, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x69, + 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 
0x6c, 0x64, + 0x2e, 0x43, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x52, 0x0b, 0x63, 0x61, + 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, + 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, + 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, + 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, + 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, + 0x78, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x31, 0x0a, 0x07, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1b, 0x0a, 0x09, + 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xc8, + 0x02, 0x0a, 0x04, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, + 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, + 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, + 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, + 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, + 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, + 0x0b, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, + 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, + 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, + 0x0e, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, + 0x33, 0x32, 0x10, 0x0f, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, + 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x53, 0x49, 
0x4e, 0x54, 0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x74, 0x0a, 0x0b, 0x43, 0x61, 0x72, + 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x17, 0x0a, 0x13, 0x43, 0x41, 0x52, 0x44, + 0x49, 0x4e, 0x41, 0x4c, 0x49, 0x54, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, + 0x00, 0x12, 0x18, 0x0a, 0x14, 0x43, 0x41, 0x52, 0x44, 0x49, 0x4e, 0x41, 0x4c, 0x49, 0x54, 0x59, + 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x18, 0x0a, 0x14, 0x43, + 0x41, 0x52, 0x44, 0x49, 0x4e, 0x41, 0x4c, 0x49, 0x54, 0x59, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, + 0x52, 0x45, 0x44, 0x10, 0x02, 0x12, 0x18, 0x0a, 0x14, 0x43, 0x41, 0x52, 0x44, 0x49, 0x4e, 0x41, + 0x4c, 0x49, 0x54, 0x59, 0x5f, 0x52, 0x45, 0x50, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x22, + 0x99, 0x02, 0x0a, 0x04, 0x45, 0x6e, 0x75, 0x6d, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x09, + 0x65, 0x6e, 0x75, 0x6d, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x09, 0x65, 0x6e, 0x75, + 0x6d, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x31, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x45, 0x0a, 0x0e, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, + 0x12, 0x2f, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x53, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, + 0x78, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x6a, 0x0a, 0x09, 0x45, + 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, + 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x12, 0x31, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x48, 0x0a, 0x06, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x2a, 0x43, 0x0a, 0x06, 0x53, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x11, 0x0a, 0x0d, 0x53, + 0x59, 0x4e, 0x54, 0x41, 0x58, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0x00, 0x12, 0x11, + 0x0a, 0x0d, 0x53, 0x59, 0x4e, 0x54, 0x41, 0x58, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x10, + 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x59, 0x4e, 0x54, 0x41, 0x58, 0x5f, 0x45, 0x44, 0x49, 0x54, + 0x49, 0x4f, 0x4e, 0x53, 0x10, 0x02, 0x42, 0x7b, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x09, 0x54, + 0x79, 0x70, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, + 0x77, 0x6e, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, + 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, + 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_protobuf_type_proto_rawDescOnce sync.Once + file_google_protobuf_type_proto_rawDescData = file_google_protobuf_type_proto_rawDesc +) + +func file_google_protobuf_type_proto_rawDescGZIP() []byte { + file_google_protobuf_type_proto_rawDescOnce.Do(func() { + file_google_protobuf_type_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_type_proto_rawDescData) + }) + return file_google_protobuf_type_proto_rawDescData +} + +var file_google_protobuf_type_proto_enumTypes = make([]protoimpl.EnumInfo, 3) +var file_google_protobuf_type_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_google_protobuf_type_proto_goTypes = []any{ + (Syntax)(0), // 0: google.protobuf.Syntax + (Field_Kind)(0), // 1: google.protobuf.Field.Kind + (Field_Cardinality)(0), // 2: google.protobuf.Field.Cardinality + (*Type)(nil), // 3: google.protobuf.Type + (*Field)(nil), // 4: google.protobuf.Field + (*Enum)(nil), // 5: google.protobuf.Enum + (*EnumValue)(nil), // 6: google.protobuf.EnumValue + (*Option)(nil), // 7: google.protobuf.Option + (*sourcecontextpb.SourceContext)(nil), // 8: google.protobuf.SourceContext + (*anypb.Any)(nil), // 9: google.protobuf.Any +} +var file_google_protobuf_type_proto_depIdxs = []int32{ + 4, // 0: google.protobuf.Type.fields:type_name -> google.protobuf.Field + 7, // 1: google.protobuf.Type.options:type_name -> google.protobuf.Option + 8, // 2: google.protobuf.Type.source_context:type_name -> google.protobuf.SourceContext + 0, // 3: google.protobuf.Type.syntax:type_name -> google.protobuf.Syntax + 1, // 4: google.protobuf.Field.kind:type_name -> google.protobuf.Field.Kind + 2, // 5: google.protobuf.Field.cardinality:type_name -> google.protobuf.Field.Cardinality + 7, // 6: google.protobuf.Field.options:type_name -> google.protobuf.Option + 6, // 7: google.protobuf.Enum.enumvalue:type_name -> google.protobuf.EnumValue + 7, // 8: google.protobuf.Enum.options:type_name -> google.protobuf.Option + 8, // 9: google.protobuf.Enum.source_context:type_name -> google.protobuf.SourceContext + 0, // 10: google.protobuf.Enum.syntax:type_name -> google.protobuf.Syntax + 7, // 11: 
google.protobuf.EnumValue.options:type_name -> google.protobuf.Option + 9, // 12: google.protobuf.Option.value:type_name -> google.protobuf.Any + 13, // [13:13] is the sub-list for method output_type + 13, // [13:13] is the sub-list for method input_type + 13, // [13:13] is the sub-list for extension type_name + 13, // [13:13] is the sub-list for extension extendee + 0, // [0:13] is the sub-list for field type_name +} + +func init() { file_google_protobuf_type_proto_init() } +func file_google_protobuf_type_proto_init() { + if File_google_protobuf_type_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_protobuf_type_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*Type); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_type_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*Field); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_type_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*Enum); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_type_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*EnumValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_type_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*Option); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_type_proto_rawDesc, + NumEnums: 3, + NumMessages: 5, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_protobuf_type_proto_goTypes, + DependencyIndexes: file_google_protobuf_type_proto_depIdxs, + EnumInfos: file_google_protobuf_type_proto_enumTypes, + MessageInfos: file_google_protobuf_type_proto_msgTypes, + }.Build() + File_google_protobuf_type_proto = out.File + file_google_protobuf_type_proto_rawDesc = nil + file_google_protobuf_type_proto_goTypes = nil + file_google_protobuf_type_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go new file mode 100644 index 00000000..e473f826 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go @@ -0,0 +1,760 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Wrappers for primitive (non-message) types. These types are useful +// for embedding primitives in the `google.protobuf.Any` type and for places +// where we need to distinguish between the absence of a primitive +// typed field and its default value. +// +// These wrappers have no meaningful use within repeated fields as they lack +// the ability to detect presence on individual elements. +// These wrappers have no meaningful use within a map or a oneof since +// individual entries of a map or fields of a oneof can already detect presence. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/wrappers.proto + +package wrapperspb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +// Wrapper message for `double`. +// +// The JSON representation for `DoubleValue` is JSON number. +type DoubleValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The double value. + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` +} + +// Double stores v in a new DoubleValue and returns a pointer to it. +func Double(v float64) *DoubleValue { + return &DoubleValue{Value: v} +} + +func (x *DoubleValue) Reset() { + *x = DoubleValue{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_wrappers_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DoubleValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DoubleValue) ProtoMessage() {} + +func (x *DoubleValue) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_wrappers_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DoubleValue.ProtoReflect.Descriptor instead. +func (*DoubleValue) Descriptor() ([]byte, []int) { + return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{0} +} + +func (x *DoubleValue) GetValue() float64 { + if x != nil { + return x.Value + } + return 0 +} + +// Wrapper message for `float`. +// +// The JSON representation for `FloatValue` is JSON number. +type FloatValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The float value. 
+ Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` +} + +// Float stores v in a new FloatValue and returns a pointer to it. +func Float(v float32) *FloatValue { + return &FloatValue{Value: v} +} + +func (x *FloatValue) Reset() { + *x = FloatValue{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_wrappers_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FloatValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FloatValue) ProtoMessage() {} + +func (x *FloatValue) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_wrappers_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FloatValue.ProtoReflect.Descriptor instead. +func (*FloatValue) Descriptor() ([]byte, []int) { + return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{1} +} + +func (x *FloatValue) GetValue() float32 { + if x != nil { + return x.Value + } + return 0 +} + +// Wrapper message for `int64`. +// +// The JSON representation for `Int64Value` is JSON string. +type Int64Value struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The int64 value. + Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +// Int64 stores v in a new Int64Value and returns a pointer to it. +func Int64(v int64) *Int64Value { + return &Int64Value{Value: v} +} + +func (x *Int64Value) Reset() { + *x = Int64Value{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_wrappers_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Int64Value) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Int64Value) ProtoMessage() {} + +func (x *Int64Value) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_wrappers_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Int64Value.ProtoReflect.Descriptor instead. +func (*Int64Value) Descriptor() ([]byte, []int) { + return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{2} +} + +func (x *Int64Value) GetValue() int64 { + if x != nil { + return x.Value + } + return 0 +} + +// Wrapper message for `uint64`. +// +// The JSON representation for `UInt64Value` is JSON string. +type UInt64Value struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The uint64 value. + Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +// UInt64 stores v in a new UInt64Value and returns a pointer to it. 
+func UInt64(v uint64) *UInt64Value { + return &UInt64Value{Value: v} +} + +func (x *UInt64Value) Reset() { + *x = UInt64Value{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_wrappers_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UInt64Value) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UInt64Value) ProtoMessage() {} + +func (x *UInt64Value) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_wrappers_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UInt64Value.ProtoReflect.Descriptor instead. +func (*UInt64Value) Descriptor() ([]byte, []int) { + return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{3} +} + +func (x *UInt64Value) GetValue() uint64 { + if x != nil { + return x.Value + } + return 0 +} + +// Wrapper message for `int32`. +// +// The JSON representation for `Int32Value` is JSON number. +type Int32Value struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The int32 value. + Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +// Int32 stores v in a new Int32Value and returns a pointer to it. +func Int32(v int32) *Int32Value { + return &Int32Value{Value: v} +} + +func (x *Int32Value) Reset() { + *x = Int32Value{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_wrappers_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Int32Value) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Int32Value) ProtoMessage() {} + +func (x *Int32Value) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_wrappers_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Int32Value.ProtoReflect.Descriptor instead. +func (*Int32Value) Descriptor() ([]byte, []int) { + return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{4} +} + +func (x *Int32Value) GetValue() int32 { + if x != nil { + return x.Value + } + return 0 +} + +// Wrapper message for `uint32`. +// +// The JSON representation for `UInt32Value` is JSON number. +type UInt32Value struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The uint32 value. + Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +// UInt32 stores v in a new UInt32Value and returns a pointer to it. 
+func UInt32(v uint32) *UInt32Value { + return &UInt32Value{Value: v} +} + +func (x *UInt32Value) Reset() { + *x = UInt32Value{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_wrappers_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UInt32Value) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UInt32Value) ProtoMessage() {} + +func (x *UInt32Value) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_wrappers_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UInt32Value.ProtoReflect.Descriptor instead. +func (*UInt32Value) Descriptor() ([]byte, []int) { + return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{5} +} + +func (x *UInt32Value) GetValue() uint32 { + if x != nil { + return x.Value + } + return 0 +} + +// Wrapper message for `bool`. +// +// The JSON representation for `BoolValue` is JSON `true` and `false`. +type BoolValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The bool value. + Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +// Bool stores v in a new BoolValue and returns a pointer to it. +func Bool(v bool) *BoolValue { + return &BoolValue{Value: v} +} + +func (x *BoolValue) Reset() { + *x = BoolValue{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_wrappers_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BoolValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BoolValue) ProtoMessage() {} + +func (x *BoolValue) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_wrappers_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BoolValue.ProtoReflect.Descriptor instead. +func (*BoolValue) Descriptor() ([]byte, []int) { + return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{6} +} + +func (x *BoolValue) GetValue() bool { + if x != nil { + return x.Value + } + return false +} + +// Wrapper message for `string`. +// +// The JSON representation for `StringValue` is JSON string. +type StringValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The string value. + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` +} + +// String stores v in a new StringValue and returns a pointer to it. 
+func String(v string) *StringValue { + return &StringValue{Value: v} +} + +func (x *StringValue) Reset() { + *x = StringValue{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_wrappers_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StringValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StringValue) ProtoMessage() {} + +func (x *StringValue) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_wrappers_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StringValue.ProtoReflect.Descriptor instead. +func (*StringValue) Descriptor() ([]byte, []int) { + return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{7} +} + +func (x *StringValue) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +// Wrapper message for `bytes`. +// +// The JSON representation for `BytesValue` is JSON string. +type BytesValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The bytes value. + Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` +} + +// Bytes stores v in a new BytesValue and returns a pointer to it. +func Bytes(v []byte) *BytesValue { + return &BytesValue{Value: v} +} + +func (x *BytesValue) Reset() { + *x = BytesValue{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_wrappers_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BytesValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BytesValue) ProtoMessage() {} + +func (x *BytesValue) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_wrappers_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BytesValue.ProtoReflect.Descriptor instead. 
+func (*BytesValue) Descriptor() ([]byte, []int) { + return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{8} +} + +func (x *BytesValue) GetValue() []byte { + if x != nil { + return x.Value + } + return nil +} + +var File_google_protobuf_wrappers_proto protoreflect.FileDescriptor + +var file_google_protobuf_wrappers_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x22, 0x23, 0x0a, 0x0b, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x02, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x49, 0x6e, + 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x23, + 0x0a, 0x0b, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x23, 0x0a, 0x0b, 0x55, 0x49, 0x6e, 0x74, 0x33, + 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x21, 0x0a, 0x09, + 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, + 0x23, 0x0a, 0x0b, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x42, 0x79, 0x74, 0x65, 0x73, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x83, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x42, 0x0d, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x31, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, + 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, + 0x72, 0x73, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + 
file_google_protobuf_wrappers_proto_rawDescOnce sync.Once + file_google_protobuf_wrappers_proto_rawDescData = file_google_protobuf_wrappers_proto_rawDesc +) + +func file_google_protobuf_wrappers_proto_rawDescGZIP() []byte { + file_google_protobuf_wrappers_proto_rawDescOnce.Do(func() { + file_google_protobuf_wrappers_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_wrappers_proto_rawDescData) + }) + return file_google_protobuf_wrappers_proto_rawDescData +} + +var file_google_protobuf_wrappers_proto_msgTypes = make([]protoimpl.MessageInfo, 9) +var file_google_protobuf_wrappers_proto_goTypes = []any{ + (*DoubleValue)(nil), // 0: google.protobuf.DoubleValue + (*FloatValue)(nil), // 1: google.protobuf.FloatValue + (*Int64Value)(nil), // 2: google.protobuf.Int64Value + (*UInt64Value)(nil), // 3: google.protobuf.UInt64Value + (*Int32Value)(nil), // 4: google.protobuf.Int32Value + (*UInt32Value)(nil), // 5: google.protobuf.UInt32Value + (*BoolValue)(nil), // 6: google.protobuf.BoolValue + (*StringValue)(nil), // 7: google.protobuf.StringValue + (*BytesValue)(nil), // 8: google.protobuf.BytesValue +} +var file_google_protobuf_wrappers_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_protobuf_wrappers_proto_init() } +func file_google_protobuf_wrappers_proto_init() { + if File_google_protobuf_wrappers_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_protobuf_wrappers_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*DoubleValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_wrappers_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*FloatValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_wrappers_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*Int64Value); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_wrappers_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*UInt64Value); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_wrappers_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*Int32Value); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_wrappers_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*UInt32Value); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_wrappers_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*BoolValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_wrappers_proto_msgTypes[7].Exporter = func(v any, i int) any { + switch v := v.(*StringValue); i { + case 0: + return &v.state 
+ case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_wrappers_proto_msgTypes[8].Exporter = func(v any, i int) any { + switch v := v.(*BytesValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_wrappers_proto_rawDesc, + NumEnums: 0, + NumMessages: 9, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_protobuf_wrappers_proto_goTypes, + DependencyIndexes: file_google_protobuf_wrappers_proto_depIdxs, + MessageInfos: file_google_protobuf_wrappers_proto_msgTypes, + }.Build() + File_google_protobuf_wrappers_proto = out.File + file_google_protobuf_wrappers_proto_rawDesc = nil + file_google_protobuf_wrappers_proto_goTypes = nil + file_google_protobuf_wrappers_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/protobuf/types/pluginpb/plugin.pb.go b/vendor/google.golang.org/protobuf/types/pluginpb/plugin.pb.go new file mode 100644 index 00000000..9066bcc7 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/pluginpb/plugin.pb.go @@ -0,0 +1,690 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd + +// Author: kenton@google.com (Kenton Varda) +// +// protoc (aka the Protocol Compiler) can be extended via plugins. A plugin is +// just a program that reads a CodeGeneratorRequest from stdin and writes a +// CodeGeneratorResponse to stdout. +// +// Plugins written using C++ can use google/protobuf/compiler/plugin.h instead +// of dealing with the raw protocol defined here. +// +// A plugin executable needs only to be placed somewhere in the path. The +// plugin should be named "protoc-gen-$NAME", and will then be used when the +// flag "--${NAME}_out" is passed to protoc. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/compiler/plugin.proto + +package pluginpb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" + sync "sync" +) + +// Sync with code_generator.h. +type CodeGeneratorResponse_Feature int32 + +const ( + CodeGeneratorResponse_FEATURE_NONE CodeGeneratorResponse_Feature = 0 + CodeGeneratorResponse_FEATURE_PROTO3_OPTIONAL CodeGeneratorResponse_Feature = 1 + CodeGeneratorResponse_FEATURE_SUPPORTS_EDITIONS CodeGeneratorResponse_Feature = 2 +) + +// Enum value maps for CodeGeneratorResponse_Feature. 
+var ( + CodeGeneratorResponse_Feature_name = map[int32]string{ + 0: "FEATURE_NONE", + 1: "FEATURE_PROTO3_OPTIONAL", + 2: "FEATURE_SUPPORTS_EDITIONS", + } + CodeGeneratorResponse_Feature_value = map[string]int32{ + "FEATURE_NONE": 0, + "FEATURE_PROTO3_OPTIONAL": 1, + "FEATURE_SUPPORTS_EDITIONS": 2, + } +) + +func (x CodeGeneratorResponse_Feature) Enum() *CodeGeneratorResponse_Feature { + p := new(CodeGeneratorResponse_Feature) + *p = x + return p +} + +func (x CodeGeneratorResponse_Feature) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (CodeGeneratorResponse_Feature) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_compiler_plugin_proto_enumTypes[0].Descriptor() +} + +func (CodeGeneratorResponse_Feature) Type() protoreflect.EnumType { + return &file_google_protobuf_compiler_plugin_proto_enumTypes[0] +} + +func (x CodeGeneratorResponse_Feature) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *CodeGeneratorResponse_Feature) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = CodeGeneratorResponse_Feature(num) + return nil +} + +// Deprecated: Use CodeGeneratorResponse_Feature.Descriptor instead. +func (CodeGeneratorResponse_Feature) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_compiler_plugin_proto_rawDescGZIP(), []int{2, 0} +} + +// The version number of protocol compiler. +type Version struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Major *int32 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"` + Minor *int32 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"` + Patch *int32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"` + // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should + // be empty for mainline stable releases. + Suffix *string `protobuf:"bytes,4,opt,name=suffix" json:"suffix,omitempty"` +} + +func (x *Version) Reset() { + *x = Version{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Version) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Version) ProtoMessage() {} + +func (x *Version) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Version.ProtoReflect.Descriptor instead. +func (*Version) Descriptor() ([]byte, []int) { + return file_google_protobuf_compiler_plugin_proto_rawDescGZIP(), []int{0} +} + +func (x *Version) GetMajor() int32 { + if x != nil && x.Major != nil { + return *x.Major + } + return 0 +} + +func (x *Version) GetMinor() int32 { + if x != nil && x.Minor != nil { + return *x.Minor + } + return 0 +} + +func (x *Version) GetPatch() int32 { + if x != nil && x.Patch != nil { + return *x.Patch + } + return 0 +} + +func (x *Version) GetSuffix() string { + if x != nil && x.Suffix != nil { + return *x.Suffix + } + return "" +} + +// An encoded CodeGeneratorRequest is written to the plugin's stdin. 
+type CodeGeneratorRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The .proto files that were explicitly listed on the command-line. The + // code generator should generate code only for these files. Each file's + // descriptor will be included in proto_file, below. + FileToGenerate []string `protobuf:"bytes,1,rep,name=file_to_generate,json=fileToGenerate" json:"file_to_generate,omitempty"` + // The generator parameter passed on the command-line. + Parameter *string `protobuf:"bytes,2,opt,name=parameter" json:"parameter,omitempty"` + // FileDescriptorProtos for all files in files_to_generate and everything + // they import. The files will appear in topological order, so each file + // appears before any file that imports it. + // + // Note: the files listed in files_to_generate will include runtime-retention + // options only, but all other files will include source-retention options. + // The source_file_descriptors field below is available in case you need + // source-retention options for files_to_generate. + // + // protoc guarantees that all proto_files will be written after + // the fields above, even though this is not technically guaranteed by the + // protobuf wire format. This theoretically could allow a plugin to stream + // in the FileDescriptorProtos and handle them one by one rather than read + // the entire set into memory at once. However, as of this writing, this + // is not similarly optimized on protoc's end -- it will store all fields in + // memory at once before sending them to the plugin. + // + // Type names of fields and extensions in the FileDescriptorProto are always + // fully qualified. + ProtoFile []*descriptorpb.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file,json=protoFile" json:"proto_file,omitempty"` + // File descriptors with all options, including source-retention options. + // These descriptors are only provided for the files listed in + // files_to_generate. + SourceFileDescriptors []*descriptorpb.FileDescriptorProto `protobuf:"bytes,17,rep,name=source_file_descriptors,json=sourceFileDescriptors" json:"source_file_descriptors,omitempty"` + // The version number of protocol compiler. + CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion" json:"compiler_version,omitempty"` +} + +func (x *CodeGeneratorRequest) Reset() { + *x = CodeGeneratorRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CodeGeneratorRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CodeGeneratorRequest) ProtoMessage() {} + +func (x *CodeGeneratorRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CodeGeneratorRequest.ProtoReflect.Descriptor instead. 
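A minimal sketch (not part of the vendored file) of the stdin/stdout protocol described in the comments above: protoc hands the plugin one encoded CodeGeneratorRequest and expects one encoded CodeGeneratorResponse back. The plugin name implied here (protoc-gen-demo) and the emitted file names are illustrative.

package main

import (
	"io"
	"os"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/pluginpb"
)

func main() {
	// protoc writes an encoded CodeGeneratorRequest to the plugin's stdin.
	in, err := io.ReadAll(os.Stdin)
	if err != nil {
		os.Exit(1)
	}
	req := &pluginpb.CodeGeneratorRequest{}
	if err := proto.Unmarshal(in, req); err != nil {
		os.Exit(1)
	}

	resp := &pluginpb.CodeGeneratorResponse{
		SupportedFeatures: proto.Uint64(uint64(pluginpb.CodeGeneratorResponse_FEATURE_PROTO3_OPTIONAL)),
	}
	// Emit one placeholder output file per file listed on the command line.
	for _, name := range req.GetFileToGenerate() {
		resp.File = append(resp.File, &pluginpb.CodeGeneratorResponse_File{
			Name:    proto.String(name + ".demo.txt"),
			Content: proto.String("// generated from " + name + "\n"),
		})
	}

	// Problems in the .proto input are reported via resp.Error with a zero exit
	// status; a non-zero exit is reserved for failures of protoc itself.
	out, err := proto.Marshal(resp)
	if err != nil {
		os.Exit(1)
	}
	if _, err := os.Stdout.Write(out); err != nil {
		os.Exit(1)
	}
}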
+func (*CodeGeneratorRequest) Descriptor() ([]byte, []int) { + return file_google_protobuf_compiler_plugin_proto_rawDescGZIP(), []int{1} +} + +func (x *CodeGeneratorRequest) GetFileToGenerate() []string { + if x != nil { + return x.FileToGenerate + } + return nil +} + +func (x *CodeGeneratorRequest) GetParameter() string { + if x != nil && x.Parameter != nil { + return *x.Parameter + } + return "" +} + +func (x *CodeGeneratorRequest) GetProtoFile() []*descriptorpb.FileDescriptorProto { + if x != nil { + return x.ProtoFile + } + return nil +} + +func (x *CodeGeneratorRequest) GetSourceFileDescriptors() []*descriptorpb.FileDescriptorProto { + if x != nil { + return x.SourceFileDescriptors + } + return nil +} + +func (x *CodeGeneratorRequest) GetCompilerVersion() *Version { + if x != nil { + return x.CompilerVersion + } + return nil +} + +// The plugin writes an encoded CodeGeneratorResponse to stdout. +type CodeGeneratorResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Error message. If non-empty, code generation failed. The plugin process + // should exit with status code zero even if it reports an error in this way. + // + // This should be used to indicate errors in .proto files which prevent the + // code generator from generating correct code. Errors which indicate a + // problem in protoc itself -- such as the input CodeGeneratorRequest being + // unparseable -- should be reported by writing a message to stderr and + // exiting with a non-zero status code. + Error *string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` + // A bitmask of supported features that the code generator supports. + // This is a bitwise "or" of values from the Feature enum. + SupportedFeatures *uint64 `protobuf:"varint,2,opt,name=supported_features,json=supportedFeatures" json:"supported_features,omitempty"` + // The minimum edition this plugin supports. This will be treated as an + // Edition enum, but we want to allow unknown values. It should be specified + // according the edition enum value, *not* the edition number. Only takes + // effect for plugins that have FEATURE_SUPPORTS_EDITIONS set. + MinimumEdition *int32 `protobuf:"varint,3,opt,name=minimum_edition,json=minimumEdition" json:"minimum_edition,omitempty"` + // The maximum edition this plugin supports. This will be treated as an + // Edition enum, but we want to allow unknown values. It should be specified + // according the edition enum value, *not* the edition number. Only takes + // effect for plugins that have FEATURE_SUPPORTS_EDITIONS set. 
+ MaximumEdition *int32 `protobuf:"varint,4,opt,name=maximum_edition,json=maximumEdition" json:"maximum_edition,omitempty"` + File []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"` +} + +func (x *CodeGeneratorResponse) Reset() { + *x = CodeGeneratorResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CodeGeneratorResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CodeGeneratorResponse) ProtoMessage() {} + +func (x *CodeGeneratorResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CodeGeneratorResponse.ProtoReflect.Descriptor instead. +func (*CodeGeneratorResponse) Descriptor() ([]byte, []int) { + return file_google_protobuf_compiler_plugin_proto_rawDescGZIP(), []int{2} +} + +func (x *CodeGeneratorResponse) GetError() string { + if x != nil && x.Error != nil { + return *x.Error + } + return "" +} + +func (x *CodeGeneratorResponse) GetSupportedFeatures() uint64 { + if x != nil && x.SupportedFeatures != nil { + return *x.SupportedFeatures + } + return 0 +} + +func (x *CodeGeneratorResponse) GetMinimumEdition() int32 { + if x != nil && x.MinimumEdition != nil { + return *x.MinimumEdition + } + return 0 +} + +func (x *CodeGeneratorResponse) GetMaximumEdition() int32 { + if x != nil && x.MaximumEdition != nil { + return *x.MaximumEdition + } + return 0 +} + +func (x *CodeGeneratorResponse) GetFile() []*CodeGeneratorResponse_File { + if x != nil { + return x.File + } + return nil +} + +// Represents a single generated file. +type CodeGeneratorResponse_File struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The file name, relative to the output directory. The name must not + // contain "." or ".." components and must be relative, not be absolute (so, + // the file cannot lie outside the output directory). "/" must be used as + // the path separator, not "\". + // + // If the name is omitted, the content will be appended to the previous + // file. This allows the generator to break large files into small chunks, + // and allows the generated text to be streamed back to protoc so that large + // files need not reside completely in memory at one time. Note that as of + // this writing protoc does not optimize for this -- it will read the entire + // CodeGeneratorResponse before writing files to disk. + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // If non-empty, indicates that the named file should already exist, and the + // content here is to be inserted into that file at a defined insertion + // point. This feature allows a code generator to extend the output + // produced by another code generator. The original generator may provide + // insertion points by placing special annotations in the file that look + // like: + // + // @@protoc_insertion_point(NAME) + // + // The annotation can have arbitrary text before and after it on the line, + // which allows it to be placed in a comment. 
NAME should be replaced with + // an identifier naming the point -- this is what other generators will use + // as the insertion_point. Code inserted at this point will be placed + // immediately above the line containing the insertion point (thus multiple + // insertions to the same point will come out in the order they were added). + // The double-@ is intended to make it unlikely that the generated code + // could contain things that look like insertion points by accident. + // + // For example, the C++ code generator places the following line in the + // .pb.h files that it generates: + // + // // @@protoc_insertion_point(namespace_scope) + // + // This line appears within the scope of the file's package namespace, but + // outside of any particular class. Another plugin can then specify the + // insertion_point "namespace_scope" to generate additional classes or + // other declarations that should be placed in this scope. + // + // Note that if the line containing the insertion point begins with + // whitespace, the same whitespace will be added to every line of the + // inserted text. This is useful for languages like Python, where + // indentation matters. In these languages, the insertion point comment + // should be indented the same amount as any inserted code will need to be + // in order to work correctly in that context. + // + // The code generator that generates the initial file and the one which + // inserts into it must both run as part of a single invocation of protoc. + // Code generators are executed in the order in which they appear on the + // command line. + // + // If |insertion_point| is present, |name| must also be present. + InsertionPoint *string `protobuf:"bytes,2,opt,name=insertion_point,json=insertionPoint" json:"insertion_point,omitempty"` + // The file contents. + Content *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"` + // Information describing the file content being inserted. If an insertion + // point is used, this information will be appropriately offset and inserted + // into the code generation metadata for the generated files. + GeneratedCodeInfo *descriptorpb.GeneratedCodeInfo `protobuf:"bytes,16,opt,name=generated_code_info,json=generatedCodeInfo" json:"generated_code_info,omitempty"` +} + +func (x *CodeGeneratorResponse_File) Reset() { + *x = CodeGeneratorResponse_File{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CodeGeneratorResponse_File) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CodeGeneratorResponse_File) ProtoMessage() {} + +func (x *CodeGeneratorResponse_File) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CodeGeneratorResponse_File.ProtoReflect.Descriptor instead. 
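A minimal sketch of the insertion-point mechanism described above, reusing the pluginpb/proto identifiers from the previous sketch; the target file name, insertion point, and inserted body are illustrative and assume another generator in the same protoc run already produced example.pb.h containing a namespace_scope insertion point.

extra := &pluginpb.CodeGeneratorResponse_File{
	// Must name a file already generated in this protoc invocation.
	Name: proto.String("example.pb.h"),
	// Matches NAME in that file's "@@protoc_insertion_point(namespace_scope)" line.
	InsertionPoint: proto.String("namespace_scope"),
	// Inserted immediately above the line containing the insertion point.
	Content: proto.String("class ExtraGeneratedClass {};\n"),
}
resp.File = append(resp.File, extra) // resp as in the previous sketch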
+func (*CodeGeneratorResponse_File) Descriptor() ([]byte, []int) { + return file_google_protobuf_compiler_plugin_proto_rawDescGZIP(), []int{2, 0} +} + +func (x *CodeGeneratorResponse_File) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *CodeGeneratorResponse_File) GetInsertionPoint() string { + if x != nil && x.InsertionPoint != nil { + return *x.InsertionPoint + } + return "" +} + +func (x *CodeGeneratorResponse_File) GetContent() string { + if x != nil && x.Content != nil { + return *x.Content + } + return "" +} + +func (x *CodeGeneratorResponse_File) GetGeneratedCodeInfo() *descriptorpb.GeneratedCodeInfo { + if x != nil { + return x.GeneratedCodeInfo + } + return nil +} + +var File_google_protobuf_compiler_plugin_proto protoreflect.FileDescriptor + +var file_google_protobuf_compiler_plugin_proto_rawDesc = []byte{ + 0x0a, 0x25, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x72, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x18, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, + 0x72, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0x63, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14, + 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6d, + 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, + 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, + 0x12, 0x16, 0x0a, 0x06, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x22, 0xcf, 0x02, 0x0a, 0x14, 0x43, 0x6f, 0x64, + 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x28, 0x0a, 0x10, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x66, 0x69, 0x6c, + 0x65, 0x54, 0x6f, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x70, + 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x43, 0x0a, 0x0a, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x5c, + 0x0a, 0x17, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x11, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x50, 0x72, 0x6f, 0x74, 
0x6f, 0x52, 0x15, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, + 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x4c, 0x0a, 0x10, + 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, + 0x72, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x69, + 0x6c, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x85, 0x04, 0x0a, 0x15, 0x43, + 0x6f, 0x64, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x2d, 0x0a, 0x12, 0x73, 0x75, + 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, + 0x64, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, + 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x0e, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0e, 0x6d, 0x61, 0x78, + 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x48, 0x0a, 0x04, 0x66, + 0x69, 0x6c, 0x65, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x63, 0x6f, 0x6d, 0x70, + 0x69, 0x6c, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x64, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x52, + 0x04, 0x66, 0x69, 0x6c, 0x65, 0x1a, 0xb1, 0x01, 0x0a, 0x04, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x69, 0x6e, 0x73, + 0x65, 0x72, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x63, + 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x52, 0x0a, 0x13, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x10, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, + 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x11, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, + 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x57, 0x0a, 0x07, 0x46, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x12, 0x10, 0x0a, 0x0c, 0x46, 0x45, 0x41, 0x54, 0x55, 0x52, 0x45, 0x5f, + 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x1b, 0x0a, 0x17, 0x46, 0x45, 0x41, 0x54, 0x55, 0x52, + 0x45, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 
0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, + 0x4c, 0x10, 0x01, 0x12, 0x1d, 0x0a, 0x19, 0x46, 0x45, 0x41, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x53, + 0x55, 0x50, 0x50, 0x4f, 0x52, 0x54, 0x53, 0x5f, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x53, + 0x10, 0x02, 0x42, 0x72, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, + 0x65, 0x72, 0x42, 0x0c, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, + 0x5a, 0x29, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, + 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, + 0x65, 0x73, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x70, 0x62, 0xaa, 0x02, 0x18, 0x47, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x43, 0x6f, + 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x72, +} + +var ( + file_google_protobuf_compiler_plugin_proto_rawDescOnce sync.Once + file_google_protobuf_compiler_plugin_proto_rawDescData = file_google_protobuf_compiler_plugin_proto_rawDesc +) + +func file_google_protobuf_compiler_plugin_proto_rawDescGZIP() []byte { + file_google_protobuf_compiler_plugin_proto_rawDescOnce.Do(func() { + file_google_protobuf_compiler_plugin_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_compiler_plugin_proto_rawDescData) + }) + return file_google_protobuf_compiler_plugin_proto_rawDescData +} + +var file_google_protobuf_compiler_plugin_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_protobuf_compiler_plugin_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_google_protobuf_compiler_plugin_proto_goTypes = []any{ + (CodeGeneratorResponse_Feature)(0), // 0: google.protobuf.compiler.CodeGeneratorResponse.Feature + (*Version)(nil), // 1: google.protobuf.compiler.Version + (*CodeGeneratorRequest)(nil), // 2: google.protobuf.compiler.CodeGeneratorRequest + (*CodeGeneratorResponse)(nil), // 3: google.protobuf.compiler.CodeGeneratorResponse + (*CodeGeneratorResponse_File)(nil), // 4: google.protobuf.compiler.CodeGeneratorResponse.File + (*descriptorpb.FileDescriptorProto)(nil), // 5: google.protobuf.FileDescriptorProto + (*descriptorpb.GeneratedCodeInfo)(nil), // 6: google.protobuf.GeneratedCodeInfo +} +var file_google_protobuf_compiler_plugin_proto_depIdxs = []int32{ + 5, // 0: google.protobuf.compiler.CodeGeneratorRequest.proto_file:type_name -> google.protobuf.FileDescriptorProto + 5, // 1: google.protobuf.compiler.CodeGeneratorRequest.source_file_descriptors:type_name -> google.protobuf.FileDescriptorProto + 1, // 2: google.protobuf.compiler.CodeGeneratorRequest.compiler_version:type_name -> google.protobuf.compiler.Version + 4, // 3: google.protobuf.compiler.CodeGeneratorResponse.file:type_name -> google.protobuf.compiler.CodeGeneratorResponse.File + 6, // 4: google.protobuf.compiler.CodeGeneratorResponse.File.generated_code_info:type_name -> google.protobuf.GeneratedCodeInfo + 5, // [5:5] is the sub-list for method output_type + 5, // [5:5] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name +} + +func init() { file_google_protobuf_compiler_plugin_proto_init() } +func file_google_protobuf_compiler_plugin_proto_init() { + if File_google_protobuf_compiler_plugin_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + 
file_google_protobuf_compiler_plugin_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*Version); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_compiler_plugin_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*CodeGeneratorRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_compiler_plugin_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*CodeGeneratorResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_compiler_plugin_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*CodeGeneratorResponse_File); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_compiler_plugin_proto_rawDesc, + NumEnums: 1, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_protobuf_compiler_plugin_proto_goTypes, + DependencyIndexes: file_google_protobuf_compiler_plugin_proto_depIdxs, + EnumInfos: file_google_protobuf_compiler_plugin_proto_enumTypes, + MessageInfos: file_google_protobuf_compiler_plugin_proto_msgTypes, + }.Build() + File_google_protobuf_compiler_plugin_proto = out.File + file_google_protobuf_compiler_plugin_proto_rawDesc = nil + file_google_protobuf_compiler_plugin_proto_goTypes = nil + file_google_protobuf_compiler_plugin_proto_depIdxs = nil +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 0158dfa8..6797b689 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -64,8 +64,6 @@ github.com/antchfx/xpath # github.com/araddon/dateparse v0.0.0-20201001162425-8aadafed4dc4 ## explicit; go 1.12 github.com/araddon/dateparse -# github.com/avast/retry-go v3.0.0+incompatible -## explicit # github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f ## explicit # github.com/beorn7/perks v1.0.1 @@ -75,6 +73,21 @@ github.com/beorn7/perks/quantile ## explicit; go 1.21 github.com/brianvoe/gofakeit/v6 github.com/brianvoe/gofakeit/v6/data +# github.com/bufbuild/protocompile v0.14.1 +## explicit; go 1.21 +github.com/bufbuild/protocompile +github.com/bufbuild/protocompile/ast +github.com/bufbuild/protocompile/internal +github.com/bufbuild/protocompile/internal/editions +github.com/bufbuild/protocompile/internal/featuresext +github.com/bufbuild/protocompile/internal/messageset +github.com/bufbuild/protocompile/linker +github.com/bufbuild/protocompile/options +github.com/bufbuild/protocompile/parser +github.com/bufbuild/protocompile/protoutil +github.com/bufbuild/protocompile/reporter +github.com/bufbuild/protocompile/sourceinfo +github.com/bufbuild/protocompile/walk # github.com/bytedance/sonic v1.8.0 ## explicit; go 1.15 github.com/bytedance/sonic @@ -173,13 +186,18 @@ github.com/gogo/protobuf/types # github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da ## explicit github.com/golang/groupcache/lru -# github.com/golang/protobuf v1.5.3 -## explicit; go 1.9 +# github.com/golang/protobuf v1.5.4 +## explicit; go 1.17 +github.com/golang/protobuf/jsonpb github.com/golang/protobuf/proto 
+github.com/golang/protobuf/ptypes +github.com/golang/protobuf/ptypes/any +github.com/golang/protobuf/ptypes/duration +github.com/golang/protobuf/ptypes/timestamp # github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8 ## explicit; go 1.19 github.com/google/pprof/profile -# github.com/google/uuid v1.3.0 +# github.com/google/uuid v1.4.0 ## explicit github.com/google/uuid # github.com/gorilla/websocket v1.5.0 @@ -202,11 +220,21 @@ github.com/influxdata/influxdb1-client/v2 # github.com/influxdata/line-protocol/v2 v2.2.1 ## explicit; go 1.15 github.com/influxdata/line-protocol/v2/lineprotocol -# github.com/ip2location/ip2location-go v8.3.0+incompatible -## explicit # github.com/itchyny/timefmt-go v0.1.5 ## explicit; go 1.17 github.com/itchyny/timefmt-go +# github.com/jhump/protoreflect v1.16.0 +## explicit; go 1.19 +github.com/jhump/protoreflect/codec +github.com/jhump/protoreflect/desc +github.com/jhump/protoreflect/desc/internal +github.com/jhump/protoreflect/desc/protoparse +github.com/jhump/protoreflect/desc/protoparse/ast +github.com/jhump/protoreflect/desc/sourceinfo +github.com/jhump/protoreflect/dynamic +github.com/jhump/protoreflect/dynamic/grpcdynamic +github.com/jhump/protoreflect/internal +github.com/jhump/protoreflect/internal/codec # github.com/json-iterator/go v1.1.12 ## explicit; go 1.12 github.com/json-iterator/go @@ -238,10 +266,6 @@ github.com/modern-go/reflect2 # github.com/mssola/user_agent v0.6.0 ## explicit; go 1.13 github.com/mssola/user_agent -# github.com/oschwald/geoip2-golang v1.9.0 -## explicit; go 1.19 -# github.com/oschwald/maxminddb-golang v1.11.0 -## explicit; go 1.19 # github.com/outcaste-io/ristretto v0.2.1 ## explicit; go 1.12 github.com/outcaste-io/ristretto @@ -366,8 +390,8 @@ go.uber.org/zap/zapcore # golang.org/x/arch v0.0.0-20210923205945-b76863e36670 ## explicit; go 1.17 golang.org/x/arch/x86/x86asm -# golang.org/x/crypto v0.14.0 -## explicit; go 1.17 +# golang.org/x/crypto v0.23.0 +## explicit; go 1.18 golang.org/x/crypto/sha3 # golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 ## explicit; go 1.20 @@ -376,8 +400,8 @@ golang.org/x/exp/slices # golang.org/x/mod v0.13.0 ## explicit; go 1.18 golang.org/x/mod/semver -# golang.org/x/net v0.16.0 -## explicit; go 1.17 +# golang.org/x/net v0.25.0 +## explicit; go 1.18 golang.org/x/net/bpf golang.org/x/net/html golang.org/x/net/html/atom @@ -390,20 +414,23 @@ golang.org/x/net/icmp golang.org/x/net/idna golang.org/x/net/internal/iana golang.org/x/net/internal/socket +golang.org/x/net/internal/timeseries golang.org/x/net/ipv4 golang.org/x/net/ipv6 -# golang.org/x/sync v0.4.0 -## explicit; go 1.17 +golang.org/x/net/trace +# golang.org/x/sync v0.8.0 +## explicit; go 1.18 golang.org/x/sync/errgroup -# golang.org/x/sys v0.13.0 -## explicit; go 1.17 +golang.org/x/sync/semaphore +# golang.org/x/sys v0.20.0 +## explicit; go 1.18 golang.org/x/sys/cpu golang.org/x/sys/execabs golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry -# golang.org/x/text v0.13.0 -## explicit; go 1.17 +# golang.org/x/text v0.15.0 +## explicit; go 1.18 golang.org/x/text/encoding golang.org/x/text/encoding/charmap golang.org/x/text/encoding/htmlindex @@ -450,14 +477,74 @@ golang.org/x/tools/internal/typesinternal ## explicit; go 1.11 golang.org/x/xerrors golang.org/x/xerrors/internal -# google.golang.org/protobuf v1.31.0 -## explicit; go 1.11 +# google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17 +## explicit; go 1.19 +google.golang.org/genproto/googleapis/rpc/status +# 
google.golang.org/grpc v1.61.0 +## explicit; go 1.19 +google.golang.org/grpc +google.golang.org/grpc/attributes +google.golang.org/grpc/backoff +google.golang.org/grpc/balancer +google.golang.org/grpc/balancer/base +google.golang.org/grpc/balancer/grpclb/state +google.golang.org/grpc/balancer/roundrobin +google.golang.org/grpc/binarylog/grpc_binarylog_v1 +google.golang.org/grpc/channelz +google.golang.org/grpc/codes +google.golang.org/grpc/connectivity +google.golang.org/grpc/credentials +google.golang.org/grpc/credentials/insecure +google.golang.org/grpc/encoding +google.golang.org/grpc/encoding/proto +google.golang.org/grpc/grpclog +google.golang.org/grpc/internal +google.golang.org/grpc/internal/backoff +google.golang.org/grpc/internal/balancer/gracefulswitch +google.golang.org/grpc/internal/balancerload +google.golang.org/grpc/internal/binarylog +google.golang.org/grpc/internal/buffer +google.golang.org/grpc/internal/channelz +google.golang.org/grpc/internal/credentials +google.golang.org/grpc/internal/envconfig +google.golang.org/grpc/internal/grpclog +google.golang.org/grpc/internal/grpcrand +google.golang.org/grpc/internal/grpcsync +google.golang.org/grpc/internal/grpcutil +google.golang.org/grpc/internal/idle +google.golang.org/grpc/internal/metadata +google.golang.org/grpc/internal/pretty +google.golang.org/grpc/internal/resolver +google.golang.org/grpc/internal/resolver/dns +google.golang.org/grpc/internal/resolver/dns/internal +google.golang.org/grpc/internal/resolver/passthrough +google.golang.org/grpc/internal/resolver/unix +google.golang.org/grpc/internal/serviceconfig +google.golang.org/grpc/internal/status +google.golang.org/grpc/internal/syscall +google.golang.org/grpc/internal/transport +google.golang.org/grpc/internal/transport/networktype +google.golang.org/grpc/keepalive +google.golang.org/grpc/metadata +google.golang.org/grpc/peer +google.golang.org/grpc/resolver +google.golang.org/grpc/resolver/dns +google.golang.org/grpc/serviceconfig +google.golang.org/grpc/stats +google.golang.org/grpc/status +google.golang.org/grpc/tap +# google.golang.org/protobuf v1.34.2 +## explicit; go 1.20 +google.golang.org/protobuf/encoding/protojson google.golang.org/protobuf/encoding/prototext google.golang.org/protobuf/encoding/protowire google.golang.org/protobuf/internal/descfmt google.golang.org/protobuf/internal/descopts google.golang.org/protobuf/internal/detrand +google.golang.org/protobuf/internal/editiondefaults +google.golang.org/protobuf/internal/editionssupport google.golang.org/protobuf/internal/encoding/defval +google.golang.org/protobuf/internal/encoding/json google.golang.org/protobuf/internal/encoding/messageset google.golang.org/protobuf/internal/encoding/tag google.golang.org/protobuf/internal/encoding/text @@ -479,8 +566,19 @@ google.golang.org/protobuf/reflect/protoregistry google.golang.org/protobuf/runtime/protoiface google.golang.org/protobuf/runtime/protoimpl google.golang.org/protobuf/types/descriptorpb +google.golang.org/protobuf/types/dynamicpb +google.golang.org/protobuf/types/gofeaturespb google.golang.org/protobuf/types/known/anypb +google.golang.org/protobuf/types/known/apipb +google.golang.org/protobuf/types/known/durationpb +google.golang.org/protobuf/types/known/emptypb +google.golang.org/protobuf/types/known/fieldmaskpb +google.golang.org/protobuf/types/known/sourcecontextpb +google.golang.org/protobuf/types/known/structpb google.golang.org/protobuf/types/known/timestamppb +google.golang.org/protobuf/types/known/typepb 
+google.golang.org/protobuf/types/known/wrapperspb +google.golang.org/protobuf/types/pluginpb # gopkg.in/CodapeWild/dd-trace-go.v1 v1.35.17 ## explicit; go 1.12 gopkg.in/CodapeWild/dd-trace-go.v1/ddtrace From d6b3c0f9c68e9eebe0380892f9c3ba7e60a54fe6 Mon Sep 17 00:00:00 2001 From: coanor Date: Thu, 18 Sep 2025 16:15:09 +0800 Subject: [PATCH 02/10] save --- dialtesting/greeter/b.sh | 3 + dialtesting/greeter/greeter.pb.go | 224 +++++++++++++++++++++++++ dialtesting/greeter/greeter.proto | 35 ++++ dialtesting/greeter/greeter_grpc.pb.go | 111 ++++++++++++ dialtesting/grpc.go | 22 ++- dialtesting/grpc_test.go | 70 ++++++++ 6 files changed, 456 insertions(+), 9 deletions(-) create mode 100644 dialtesting/greeter/b.sh create mode 100644 dialtesting/greeter/greeter.pb.go create mode 100644 dialtesting/greeter/greeter.proto create mode 100644 dialtesting/greeter/greeter_grpc.pb.go create mode 100644 dialtesting/grpc_test.go diff --git a/dialtesting/greeter/b.sh b/dialtesting/greeter/b.sh new file mode 100644 index 00000000..a2e329d6 --- /dev/null +++ b/dialtesting/greeter/b.sh @@ -0,0 +1,3 @@ +protoc --go_out=. --go_opt=paths=source_relative \ + --go-grpc_out=. --go-grpc_opt=paths=source_relative \ + greeter.proto diff --git a/dialtesting/greeter/greeter.pb.go b/dialtesting/greeter/greeter.pb.go new file mode 100644 index 00000000..b186b856 --- /dev/null +++ b/dialtesting/greeter/greeter.pb.go @@ -0,0 +1,224 @@ +// 指定使用 proto3 语法。如果你不写,默认是 proto2。 + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v5.27.0 +// source: greeter.proto + +// 定义包名,这有助于防止不同 .proto 文件之间的命名冲突。 +// 在生成的 Go 代码中,这会成为包名的一部分。 + +package greeter + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// 定义请求消息 (Request Message)。 +// 它定义了调用 SayHello 方法时需要传递的数据结构。 +type HelloRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // 字段类型: string + // 字段名: name + // 字段编号: 1 (在一个 message 中,每个字段的编号必须是唯一的) + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *HelloRequest) Reset() { + *x = HelloRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_greeter_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HelloRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HelloRequest) ProtoMessage() {} + +func (x *HelloRequest) ProtoReflect() protoreflect.Message { + mi := &file_greeter_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HelloRequest.ProtoReflect.Descriptor instead. 
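The b.sh script earlier in this patch assumes protoc and both Go generators are on PATH. The headers of the generated files record protoc v5.27.0, protoc-gen-go v1.28.1 and protoc-gen-go-grpc v1.2.0, so installing matching plugin versions (for example with go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.28.1 and go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@v1.2.0) should reproduce greeter.pb.go and greeter_grpc.pb.go as committed.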
+func (*HelloRequest) Descriptor() ([]byte, []int) { + return file_greeter_proto_rawDescGZIP(), []int{0} +} + +func (x *HelloRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// 定义响应消息 (Response Message)。 +// 它定义了 SayHello 方法成功返回时的数据结构。 +type HelloReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Message string `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"` +} + +func (x *HelloReply) Reset() { + *x = HelloReply{} + if protoimpl.UnsafeEnabled { + mi := &file_greeter_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HelloReply) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HelloReply) ProtoMessage() {} + +func (x *HelloReply) ProtoReflect() protoreflect.Message { + mi := &file_greeter_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HelloReply.ProtoReflect.Descriptor instead. +func (*HelloReply) Descriptor() ([]byte, []int) { + return file_greeter_proto_rawDescGZIP(), []int{1} +} + +func (x *HelloReply) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +var File_greeter_proto protoreflect.FileDescriptor + +var file_greeter_proto_rawDesc = []byte{ + 0x0a, 0x0d, 0x67, 0x72, 0x65, 0x65, 0x74, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x07, 0x67, 0x72, 0x65, 0x65, 0x74, 0x65, 0x72, 0x22, 0x22, 0x0a, 0x0c, 0x48, 0x65, 0x6c, 0x6c, + 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x26, 0x0a, 0x0a, + 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x32, 0x43, 0x0a, 0x07, 0x47, 0x72, 0x65, 0x65, 0x74, 0x65, 0x72, 0x12, + 0x38, 0x0a, 0x08, 0x53, 0x61, 0x79, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x12, 0x15, 0x2e, 0x67, 0x72, + 0x65, 0x65, 0x74, 0x65, 0x72, 0x2e, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x13, 0x2e, 0x67, 0x72, 0x65, 0x65, 0x74, 0x65, 0x72, 0x2e, 0x48, 0x65, 0x6c, + 0x6c, 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x42, 0x21, 0x5a, 0x1f, 0x65, 0x78, 0x61, + 0x6d, 0x70, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6d, 0x79, 0x2d, 0x67, 0x72, 0x70, 0x63, + 0x2d, 0x61, 0x70, 0x70, 0x2f, 0x67, 0x72, 0x65, 0x65, 0x74, 0x65, 0x72, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_greeter_proto_rawDescOnce sync.Once + file_greeter_proto_rawDescData = file_greeter_proto_rawDesc +) + +func file_greeter_proto_rawDescGZIP() []byte { + file_greeter_proto_rawDescOnce.Do(func() { + file_greeter_proto_rawDescData = protoimpl.X.CompressGZIP(file_greeter_proto_rawDescData) + }) + return file_greeter_proto_rawDescData +} + +var file_greeter_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_greeter_proto_goTypes = []interface{}{ + (*HelloRequest)(nil), // 0: greeter.HelloRequest + (*HelloReply)(nil), // 1: greeter.HelloReply +} +var file_greeter_proto_depIdxs = []int32{ + 0, // 0: greeter.Greeter.SayHello:input_type -> greeter.HelloRequest + 1, // 1: greeter.Greeter.SayHello:output_type -> 
greeter.HelloReply + 1, // [1:2] is the sub-list for method output_type + 0, // [0:1] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_greeter_proto_init() } +func file_greeter_proto_init() { + if File_greeter_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_greeter_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HelloRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_greeter_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HelloReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_greeter_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_greeter_proto_goTypes, + DependencyIndexes: file_greeter_proto_depIdxs, + MessageInfos: file_greeter_proto_msgTypes, + }.Build() + File_greeter_proto = out.File + file_greeter_proto_rawDesc = nil + file_greeter_proto_goTypes = nil + file_greeter_proto_depIdxs = nil +} diff --git a/dialtesting/greeter/greeter.proto b/dialtesting/greeter/greeter.proto new file mode 100644 index 00000000..d1e6b71b --- /dev/null +++ b/dialtesting/greeter/greeter.proto @@ -0,0 +1,35 @@ +// 指定使用 proto3 语法。如果你不写,默认是 proto2。 +syntax = "proto3"; + +// 定义包名,这有助于防止不同 .proto 文件之间的命名冲突。 +// 在生成的 Go 代码中,这会成为包名的一部分。 +package greeter; + +// [重要] 为 Go 语言指定生成的包路径。 +// 你应该把它改成你自己项目的 Go module 路径。 +// 例如: "github.com/your_user/your_project/protos/greeter" +option go_package = "example.com/my-grpc-app/greeter"; + +// 定义服务 (Service)。服务可以看作是 RPC 方法的集合。 +// 客户端可以调用这些方法。 +service Greeter { + // 定义一个 RPC 方法,名为 SayHello。 + // 它接收一个 HelloRequest 消息作为参数, + // 并返回一个 HelloReply 消息。 + rpc SayHello (HelloRequest) returns (HelloReply) {} +} + +// 定义请求消息 (Request Message)。 +// 它定义了调用 SayHello 方法时需要传递的数据结构。 +message HelloRequest { + // 字段类型: string + // 字段名: name + // 字段编号: 1 (在一个 message 中,每个字段的编号必须是唯一的) + string name = 1; +} + +// 定义响应消息 (Response Message)。 +// 它定义了 SayHello 方法成功返回时的数据结构。 +message HelloReply { + string message = 1; +} diff --git a/dialtesting/greeter/greeter_grpc.pb.go b/dialtesting/greeter/greeter_grpc.pb.go new file mode 100644 index 00000000..b609f868 --- /dev/null +++ b/dialtesting/greeter/greeter_grpc.pb.go @@ -0,0 +1,111 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v5.27.0 +// source: greeter.proto + +package greeter + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// GreeterClient is the client API for Greeter service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
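A minimal sketch of calling the GreeterClient API generated below against a running server; the address is illustrative, and the import path follows the go_package placeholder declared in greeter.proto (adjust it to the real module path of this repository).

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	"example.com/my-grpc-app/greeter" // go_package placeholder from greeter.proto
)

func main() {
	conn, err := grpc.Dial("127.0.0.1:50051", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	reply, err := greeter.NewGreeterClient(conn).SayHello(ctx, &greeter.HelloRequest{Name: "dialtesting"})
	if err != nil {
		log.Fatalf("SayHello: %v", err)
	}
	log.Println(reply.GetMessage())
}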
+type GreeterClient interface {
+	// Define an RPC method named SayHello.
+	// It takes a HelloRequest message as its argument
+	// and returns a HelloReply message.
+	SayHello(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloReply, error)
+}
+
+type greeterClient struct {
+	cc grpc.ClientConnInterface
+}
+
+func NewGreeterClient(cc grpc.ClientConnInterface) GreeterClient {
+	return &greeterClient{cc}
+}
+
+func (c *greeterClient) SayHello(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloReply, error) {
+	out := new(HelloReply)
+	err := c.cc.Invoke(ctx, "/greeter.Greeter/SayHello", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// GreeterServer is the server API for Greeter service.
+// All implementations must embed UnimplementedGreeterServer
+// for forward compatibility
+type GreeterServer interface {
+	// Define an RPC method named SayHello.
+	// It takes a HelloRequest message as its argument
+	// and returns a HelloReply message.
+	SayHello(context.Context, *HelloRequest) (*HelloReply, error)
+	mustEmbedUnimplementedGreeterServer()
+}
+
+// UnimplementedGreeterServer must be embedded to have forward compatible implementations.
+type UnimplementedGreeterServer struct {
+}
+
+func (UnimplementedGreeterServer) SayHello(context.Context, *HelloRequest) (*HelloReply, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method SayHello not implemented")
+}
+func (UnimplementedGreeterServer) mustEmbedUnimplementedGreeterServer() {}
+
+// UnsafeGreeterServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to GreeterServer will
+// result in compilation errors.
+type UnsafeGreeterServer interface {
+	mustEmbedUnimplementedGreeterServer()
+}
+
+func RegisterGreeterServer(s grpc.ServiceRegistrar, srv GreeterServer) {
+	s.RegisterService(&Greeter_ServiceDesc, srv)
+}
+
+func _Greeter_SayHello_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(HelloRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(GreeterServer).SayHello(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/greeter.Greeter/SayHello",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(GreeterServer).SayHello(ctx, req.(*HelloRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+// Greeter_ServiceDesc is the grpc.ServiceDesc for Greeter service.
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Greeter_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "greeter.Greeter", + HandlerType: (*GreeterServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "SayHello", + Handler: _Greeter_SayHello_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "greeter.proto", +} diff --git a/dialtesting/grpc.go b/dialtesting/grpc.go index 16ffeccb..3d8e1db3 100644 --- a/dialtesting/grpc.go +++ b/dialtesting/grpc.go @@ -9,6 +9,7 @@ import ( "context" "encoding/json" "fmt" + "log" "strings" pdesc "github.com/jhump/protoreflect/desc" @@ -23,7 +24,7 @@ type GRPCTask struct { *Task Server string `json:"server"` FullMethod string `json:"full_method"` - ProtoFiles map[string][]byte `json:"protofiles"` // user's multiple .proto files + ProtoFiles map[string]string `json:"protofiles"` // user's multiple .proto files JSONRequest []byte `json:"request"` // user's gRPC request are JSON bytes conn *grpc.ClientConn @@ -33,10 +34,7 @@ type GRPCTask struct { } func (t *GRPCTask) stop() { - if err := t.conn.Close(); err != nil { - return fmt.Errorf("gRPC connection close: %w", err) - } - return nil + t.conn.Close() } func (t *GRPCTask) init() error { @@ -55,10 +53,12 @@ func (t *GRPCTask) init() error { func (t *GRPCTask) findMethod() error { if len(t.ProtoFiles) == 0 { - return findMethodByReflection() + return t.findMethodByReflection() } if err := t.findMethodAmongProtofiles(); err != nil { + log.Printf("findMethodAmongProtofiles: %s", err.Error()) + if err := t.findMethodByReflection(); err != nil { return err } @@ -82,7 +82,7 @@ func (t *GRPCTask) findMethodAmongProtofiles() error { return err } - sepIdx := strings.LastIndex(t.FullMethod, ".") + sepIdx := strings.LastIndex(t.FullMethod, "/") if sepIdx == -1 { return fmt.Errorf("invalid FullMethod: %q", t.FullMethod) } @@ -90,6 +90,8 @@ func (t *GRPCTask) findMethodAmongProtofiles() error { service := t.FullMethod[:sepIdx] method := t.FullMethod[sepIdx+1:] + log.Printf("service: %s, method: %s", service, method) + //reg := &protoregistry.Files{} for _, fd := range desc { if sd := fd.FindService(service); sd != nil { @@ -102,9 +104,11 @@ func (t *GRPCTask) findMethodAmongProtofiles() error { if t.method == nil { return fmt.Errorf("method %s not found among proto files", method) } + + return nil } -func getFileNames(files map[string][]byte) []string { +func getFileNames(files map[string]string) []string { arr := make([]string, 0, len(files)) for k := range files { arr = append(arr, k) @@ -120,7 +124,7 @@ func (t *GRPCTask) run() error { } stub := grpcdynamic.NewStub(t.conn) - resp, err := stub.InvokeRpc(context.Background(), t.method, req) + resp, err := stub.InvokeRpc(context.Background(), t.method, msg) if err != nil { // dialtest failed return err diff --git a/dialtesting/grpc_test.go b/dialtesting/grpc_test.go new file mode 100644 index 00000000..7e425fb2 --- /dev/null +++ b/dialtesting/grpc_test.go @@ -0,0 +1,70 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the MIT License. +// This product includes software developed at Guance Cloud (https://www.guance.com/). +// Copyright 2021-present Guance, Inc. 
+ +package dialtesting + +import ( + "context" + "encoding/json" + "log" + "net" + "os" + T "testing" + "time" + + pb "github.com/GuanceCloud/cliutils/dialtesting/greeter" + "github.com/stretchr/testify/assert" + "google.golang.org/grpc" +) + +type server struct { + pb.UnimplementedGreeterServer +} + +func (s *server) SayHello(ctx context.Context, in *pb.HelloRequest) (*pb.HelloReply, error) { + log.Printf("recv req from %v", in.GetName()) + return &pb.HelloReply{Message: "hello " + in.GetName()}, nil +} + +func TestGRPCDial(t *T.T) { + t.Run(`basic`, func(t *T.T) { + lsn, err := net.Listen("tcp", ":0") + assert.NoError(t, err) + + t.Logf("listen on %s", lsn.Addr().String()) + s := grpc.NewServer() + pb.RegisterGreeterServer(s, &server{}) + + go func() { + assert.NoError(t, s.Serve(lsn)) // start server + }() + + time.Sleep(time.Second) // wait + + proto, err := os.ReadFile("greeter/greeter.proto") + assert.NoError(t, err) + + hr := &pb.HelloRequest{ + Name: "world", + } + + j, err := json.Marshal(hr) + assert.NoError(t, err) + + task := &GRPCTask{ + Server: lsn.Addr().String(), + FullMethod: "greeter.Greeter/SayHello", + ProtoFiles: map[string]string{ + "greeter.proto": string(proto), + }, + JSONRequest: j, + } + + assert.NoError(t, task.init()) + assert.NoError(t, task.run()) + + t.Logf("result: %s", string(task.result)) + }) +} From 562a45670576ec98f48214d2ed30448a37f00ade Mon Sep 17 00:00:00 2001 From: coanor Date: Thu, 18 Sep 2025 17:52:14 +0800 Subject: [PATCH 03/10] add test on grpc check-health --- dialtesting/grpc.go | 2 +- dialtesting/grpc_test.go | 73 ++++- .../google.golang.org/grpc/health/client.go | 117 +++++++ .../grpc/health/grpc_health_v1/health.pb.go | 308 ++++++++++++++++++ .../health/grpc_health_v1/health_grpc.pb.go | 237 ++++++++++++++ .../google.golang.org/grpc/health/logging.go | 23 ++ .../google.golang.org/grpc/health/server.go | 163 +++++++++ vendor/modules.txt | 2 + 8 files changed, 915 insertions(+), 10 deletions(-) create mode 100644 vendor/google.golang.org/grpc/health/client.go create mode 100644 vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go create mode 100644 vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go create mode 100644 vendor/google.golang.org/grpc/health/logging.go create mode 100644 vendor/google.golang.org/grpc/health/server.go diff --git a/dialtesting/grpc.go b/dialtesting/grpc.go index 3d8e1db3..915d78e3 100644 --- a/dialtesting/grpc.go +++ b/dialtesting/grpc.go @@ -69,7 +69,7 @@ func (t *GRPCTask) findMethod() error { func (t *GRPCTask) findMethodByReflection() error { // TODO - return fmt.Errorf("TODO") + return nil } func (t *GRPCTask) findMethodAmongProtofiles() error { diff --git a/dialtesting/grpc_test.go b/dialtesting/grpc_test.go index 7e425fb2..ed53745f 100644 --- a/dialtesting/grpc_test.go +++ b/dialtesting/grpc_test.go @@ -17,6 +17,8 @@ import ( pb "github.com/GuanceCloud/cliutils/dialtesting/greeter" "github.com/stretchr/testify/assert" "google.golang.org/grpc" + "google.golang.org/grpc/health" + "google.golang.org/grpc/health/grpc_health_v1" ) type server struct { @@ -29,20 +31,71 @@ func (s *server) SayHello(ctx context.Context, in *pb.HelloRequest) (*pb.HelloRe } func TestGRPCDial(t *T.T) { - t.Run(`basic`, func(t *T.T) { - lsn, err := net.Listen("tcp", ":0") + lsn, err := net.Listen("tcp", ":0") + assert.NoError(t, err) + + t.Logf("listen on %s", lsn.Addr().String()) + s := grpc.NewServer() + pb.RegisterGreeterServer(s, &server{}) + + healthSrv := health.NewServer() + + 
healthSrv.SetServingStatus("", grpc_health_v1.HealthCheckResponse_SERVING)
+
+	// we can also set a specific service's status
+	healthSrv.SetServingStatus("greeter.Greeter", grpc_health_v1.HealthCheckResponse_SERVING)
+
+	grpc_health_v1.RegisterHealthServer(s, healthSrv)
+
+	go func() {
+		assert.NoError(t, s.Serve(lsn)) // start server
+	}()
+
+	time.Sleep(time.Second) // wait
+
+	t.Run(`dial-on-health-check`, func(t *T.T) {
+		task := &GRPCTask{
+			Server: lsn.Addr().String(),
+		}
+
+		assert.NoError(t, task.init())
+
+		ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+		defer cancel()
+
+		cli := grpc_health_v1.NewHealthClient(task.conn)
+		req := &grpc_health_v1.HealthCheckRequest{
+			// set the service name to check a specific service
+			Service: "greeter.Greeter",
+		}
+
+		resp, err := cli.Check(ctx, req)
 		assert.NoError(t, err)
+		assert.Equal(t, grpc_health_v1.HealthCheckResponse_SERVING, resp.GetStatus())
+	})
+
+	t.Run(`dial-on-health-check(service-not-exist)`, func(t *T.T) {
+		task := &GRPCTask{
+			Server: lsn.Addr().String(),
+		}
+
+		assert.NoError(t, task.init())
 
-		t.Logf("listen on %s", lsn.Addr().String())
-		s := grpc.NewServer()
-		pb.RegisterGreeterServer(s, &server{})
+		ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+		defer cancel()
 
-		go func() {
-			assert.NoError(t, s.Serve(lsn)) // start server
-		}()
+		cli := grpc_health_v1.NewHealthClient(task.conn)
+		req := &grpc_health_v1.HealthCheckRequest{
+			// this service does not exist
+			Service: "greeter.SomeServiceNotExist",
+		}
 
-		time.Sleep(time.Second) // wait
+		resp, err := cli.Check(ctx, req)
+		assert.Error(t, err)
+		assert.Equal(t, grpc_health_v1.HealthCheckResponse_UNKNOWN, resp.GetStatus())
+	})
 
+	t.Run(`dial-on-proto-file(with-behavior)`, func(t *T.T) {
 		proto, err := os.ReadFile("greeter/greeter.proto")
 		assert.NoError(t, err)
 
@@ -65,6 +118,8 @@ func TestGRPCDial(t *T.T) {
 		assert.NoError(t, task.init())
 		assert.NoError(t, task.run())
 
+		defer task.stop()
+
 		t.Logf("result: %s", string(task.result))
 	})
 }
diff --git a/vendor/google.golang.org/grpc/health/client.go b/vendor/google.golang.org/grpc/health/client.go
new file mode 100644
index 00000000..740745c4
--- /dev/null
+++ b/vendor/google.golang.org/grpc/health/client.go
@@ -0,0 +1,117 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * + */ + +package health + +import ( + "context" + "fmt" + "io" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + healthpb "google.golang.org/grpc/health/grpc_health_v1" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/status" +) + +var ( + backoffStrategy = backoff.DefaultExponential + backoffFunc = func(ctx context.Context, retries int) bool { + d := backoffStrategy.Backoff(retries) + timer := time.NewTimer(d) + select { + case <-timer.C: + return true + case <-ctx.Done(): + timer.Stop() + return false + } + } +) + +func init() { + internal.HealthCheckFunc = clientHealthCheck +} + +const healthCheckMethod = "/grpc.health.v1.Health/Watch" + +// This function implements the protocol defined at: +// https://github.com/grpc/grpc/blob/master/doc/health-checking.md +func clientHealthCheck(ctx context.Context, newStream func(string) (any, error), setConnectivityState func(connectivity.State, error), service string) error { + tryCnt := 0 + +retryConnection: + for { + // Backs off if the connection has failed in some way without receiving a message in the previous retry. + if tryCnt > 0 && !backoffFunc(ctx, tryCnt-1) { + return nil + } + tryCnt++ + + if ctx.Err() != nil { + return nil + } + setConnectivityState(connectivity.Connecting, nil) + rawS, err := newStream(healthCheckMethod) + if err != nil { + continue retryConnection + } + + s, ok := rawS.(grpc.ClientStream) + // Ideally, this should never happen. But if it happens, the server is marked as healthy for LBing purposes. + if !ok { + setConnectivityState(connectivity.Ready, nil) + return fmt.Errorf("newStream returned %v (type %T); want grpc.ClientStream", rawS, rawS) + } + + if err = s.SendMsg(&healthpb.HealthCheckRequest{Service: service}); err != nil && err != io.EOF { + // Stream should have been closed, so we can safely continue to create a new stream. + continue retryConnection + } + s.CloseSend() + + resp := new(healthpb.HealthCheckResponse) + for { + err = s.RecvMsg(resp) + + // Reports healthy for the LBing purposes if health check is not implemented in the server. + if status.Code(err) == codes.Unimplemented { + setConnectivityState(connectivity.Ready, nil) + return err + } + + // Reports unhealthy if server's Watch method gives an error other than UNIMPLEMENTED. + if err != nil { + setConnectivityState(connectivity.TransientFailure, fmt.Errorf("connection active but received health check RPC error: %v", err)) + continue retryConnection + } + + // As a message has been received, removes the need for backoff for the next retry by resetting the try count. + tryCnt = 0 + if resp.Status == healthpb.HealthCheckResponse_SERVING { + setConnectivityState(connectivity.Ready, nil) + } else { + setConnectivityState(connectivity.TransientFailure, fmt.Errorf("connection active but health check failed. status=%s", resp.Status)) + } + } + } +} diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go new file mode 100644 index 00000000..24299efd --- /dev/null +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -0,0 +1,308 @@ +// Copyright 2015 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/health/v1/health.proto + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.31.0 +// protoc v4.22.0 +// source: grpc/health/v1/health.proto + +package grpc_health_v1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type HealthCheckResponse_ServingStatus int32 + +const ( + HealthCheckResponse_UNKNOWN HealthCheckResponse_ServingStatus = 0 + HealthCheckResponse_SERVING HealthCheckResponse_ServingStatus = 1 + HealthCheckResponse_NOT_SERVING HealthCheckResponse_ServingStatus = 2 + HealthCheckResponse_SERVICE_UNKNOWN HealthCheckResponse_ServingStatus = 3 // Used only by the Watch method. +) + +// Enum value maps for HealthCheckResponse_ServingStatus. +var ( + HealthCheckResponse_ServingStatus_name = map[int32]string{ + 0: "UNKNOWN", + 1: "SERVING", + 2: "NOT_SERVING", + 3: "SERVICE_UNKNOWN", + } + HealthCheckResponse_ServingStatus_value = map[string]int32{ + "UNKNOWN": 0, + "SERVING": 1, + "NOT_SERVING": 2, + "SERVICE_UNKNOWN": 3, + } +) + +func (x HealthCheckResponse_ServingStatus) Enum() *HealthCheckResponse_ServingStatus { + p := new(HealthCheckResponse_ServingStatus) + *p = x + return p +} + +func (x HealthCheckResponse_ServingStatus) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (HealthCheckResponse_ServingStatus) Descriptor() protoreflect.EnumDescriptor { + return file_grpc_health_v1_health_proto_enumTypes[0].Descriptor() +} + +func (HealthCheckResponse_ServingStatus) Type() protoreflect.EnumType { + return &file_grpc_health_v1_health_proto_enumTypes[0] +} + +func (x HealthCheckResponse_ServingStatus) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use HealthCheckResponse_ServingStatus.Descriptor instead. 
+func (HealthCheckResponse_ServingStatus) EnumDescriptor() ([]byte, []int) { + return file_grpc_health_v1_health_proto_rawDescGZIP(), []int{1, 0} +} + +type HealthCheckRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` +} + +func (x *HealthCheckRequest) Reset() { + *x = HealthCheckRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_health_v1_health_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HealthCheckRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HealthCheckRequest) ProtoMessage() {} + +func (x *HealthCheckRequest) ProtoReflect() protoreflect.Message { + mi := &file_grpc_health_v1_health_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HealthCheckRequest.ProtoReflect.Descriptor instead. +func (*HealthCheckRequest) Descriptor() ([]byte, []int) { + return file_grpc_health_v1_health_proto_rawDescGZIP(), []int{0} +} + +func (x *HealthCheckRequest) GetService() string { + if x != nil { + return x.Service + } + return "" +} + +type HealthCheckResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,proto3,enum=grpc.health.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"` +} + +func (x *HealthCheckResponse) Reset() { + *x = HealthCheckResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_health_v1_health_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HealthCheckResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HealthCheckResponse) ProtoMessage() {} + +func (x *HealthCheckResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_health_v1_health_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HealthCheckResponse.ProtoReflect.Descriptor instead. 
+func (*HealthCheckResponse) Descriptor() ([]byte, []int) { + return file_grpc_health_v1_health_proto_rawDescGZIP(), []int{1} +} + +func (x *HealthCheckResponse) GetStatus() HealthCheckResponse_ServingStatus { + if x != nil { + return x.Status + } + return HealthCheckResponse_UNKNOWN +} + +var File_grpc_health_v1_health_proto protoreflect.FileDescriptor + +var file_grpc_health_v1_health_proto_rawDesc = []byte{ + 0x0a, 0x1b, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2f, 0x76, 0x31, + 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x22, 0x2e, 0x0a, + 0x12, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0xb1, 0x01, + 0x0a, 0x13, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x49, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, + 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x6e, 0x67, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x22, 0x4f, 0x0a, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0b, + 0x0a, 0x07, 0x53, 0x45, 0x52, 0x56, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x4e, + 0x4f, 0x54, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, + 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, + 0x03, 0x32, 0xae, 0x01, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x12, 0x50, 0x0a, 0x05, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, + 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x52, + 0x0a, 0x05, 0x57, 0x61, 0x74, 0x63, 0x68, 0x12, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, + 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, + 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x30, 0x01, 0x42, 0x61, 0x0a, 0x11, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, + 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, + 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x68, + 
0x65, 0x61, 0x6c, 0x74, 0x68, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x5f, 0x76, 0x31, 0xaa, 0x02, 0x0e, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x48, 0x65, 0x61, 0x6c, + 0x74, 0x68, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_grpc_health_v1_health_proto_rawDescOnce sync.Once + file_grpc_health_v1_health_proto_rawDescData = file_grpc_health_v1_health_proto_rawDesc +) + +func file_grpc_health_v1_health_proto_rawDescGZIP() []byte { + file_grpc_health_v1_health_proto_rawDescOnce.Do(func() { + file_grpc_health_v1_health_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_health_v1_health_proto_rawDescData) + }) + return file_grpc_health_v1_health_proto_rawDescData +} + +var file_grpc_health_v1_health_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_grpc_health_v1_health_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_grpc_health_v1_health_proto_goTypes = []interface{}{ + (HealthCheckResponse_ServingStatus)(0), // 0: grpc.health.v1.HealthCheckResponse.ServingStatus + (*HealthCheckRequest)(nil), // 1: grpc.health.v1.HealthCheckRequest + (*HealthCheckResponse)(nil), // 2: grpc.health.v1.HealthCheckResponse +} +var file_grpc_health_v1_health_proto_depIdxs = []int32{ + 0, // 0: grpc.health.v1.HealthCheckResponse.status:type_name -> grpc.health.v1.HealthCheckResponse.ServingStatus + 1, // 1: grpc.health.v1.Health.Check:input_type -> grpc.health.v1.HealthCheckRequest + 1, // 2: grpc.health.v1.Health.Watch:input_type -> grpc.health.v1.HealthCheckRequest + 2, // 3: grpc.health.v1.Health.Check:output_type -> grpc.health.v1.HealthCheckResponse + 2, // 4: grpc.health.v1.Health.Watch:output_type -> grpc.health.v1.HealthCheckResponse + 3, // [3:5] is the sub-list for method output_type + 1, // [1:3] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_grpc_health_v1_health_proto_init() } +func file_grpc_health_v1_health_proto_init() { + if File_grpc_health_v1_health_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_grpc_health_v1_health_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HealthCheckRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_health_v1_health_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HealthCheckResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_grpc_health_v1_health_proto_rawDesc, + NumEnums: 1, + NumMessages: 2, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_grpc_health_v1_health_proto_goTypes, + DependencyIndexes: file_grpc_health_v1_health_proto_depIdxs, + EnumInfos: file_grpc_health_v1_health_proto_enumTypes, + MessageInfos: file_grpc_health_v1_health_proto_msgTypes, + }.Build() + File_grpc_health_v1_health_proto = out.File + file_grpc_health_v1_health_proto_rawDesc = nil + file_grpc_health_v1_health_proto_goTypes = nil + file_grpc_health_v1_health_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go 
b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go new file mode 100644 index 00000000..4439cda0 --- /dev/null +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go @@ -0,0 +1,237 @@ +// Copyright 2015 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/health/v1/health.proto + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.22.0 +// source: grpc/health/v1/health.proto + +package grpc_health_v1 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + Health_Check_FullMethodName = "/grpc.health.v1.Health/Check" + Health_Watch_FullMethodName = "/grpc.health.v1.Health/Watch" +) + +// HealthClient is the client API for Health service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type HealthClient interface { + // Check gets the health of the specified service. If the requested service + // is unknown, the call will fail with status NOT_FOUND. If the caller does + // not specify a service name, the server should respond with its overall + // health status. + // + // Clients should set a deadline when calling Check, and can declare the + // server unhealthy if they do not receive a timely response. + // + // Check implementations should be idempotent and side effect free. + Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) + // Performs a watch for the serving status of the requested service. + // The server will immediately send back a message indicating the current + // serving status. It will then subsequently send a new message whenever + // the service's serving status changes. + // + // If the requested service is unknown when the call is received, the + // server will send a message setting the serving status to + // SERVICE_UNKNOWN but will *not* terminate the call. If at some + // future point, the serving status of the service becomes known, the + // server will send a new message with the service's serving status. + // + // If the call terminates with status UNIMPLEMENTED, then clients + // should assume this method is not supported and should not retry the + // call. If the call terminates with any other status (including OK), + // clients should retry the call with appropriate exponential backoff. 
+ Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) +} + +type healthClient struct { + cc grpc.ClientConnInterface +} + +func NewHealthClient(cc grpc.ClientConnInterface) HealthClient { + return &healthClient{cc} +} + +func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) { + out := new(HealthCheckResponse) + err := c.cc.Invoke(ctx, Health_Check_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) { + stream, err := c.cc.NewStream(ctx, &Health_ServiceDesc.Streams[0], Health_Watch_FullMethodName, opts...) + if err != nil { + return nil, err + } + x := &healthWatchClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Health_WatchClient interface { + Recv() (*HealthCheckResponse, error) + grpc.ClientStream +} + +type healthWatchClient struct { + grpc.ClientStream +} + +func (x *healthWatchClient) Recv() (*HealthCheckResponse, error) { + m := new(HealthCheckResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// HealthServer is the server API for Health service. +// All implementations should embed UnimplementedHealthServer +// for forward compatibility +type HealthServer interface { + // Check gets the health of the specified service. If the requested service + // is unknown, the call will fail with status NOT_FOUND. If the caller does + // not specify a service name, the server should respond with its overall + // health status. + // + // Clients should set a deadline when calling Check, and can declare the + // server unhealthy if they do not receive a timely response. + // + // Check implementations should be idempotent and side effect free. + Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) + // Performs a watch for the serving status of the requested service. + // The server will immediately send back a message indicating the current + // serving status. It will then subsequently send a new message whenever + // the service's serving status changes. + // + // If the requested service is unknown when the call is received, the + // server will send a message setting the serving status to + // SERVICE_UNKNOWN but will *not* terminate the call. If at some + // future point, the serving status of the service becomes known, the + // server will send a new message with the service's serving status. + // + // If the call terminates with status UNIMPLEMENTED, then clients + // should assume this method is not supported and should not retry the + // call. If the call terminates with any other status (including OK), + // clients should retry the call with appropriate exponential backoff. + Watch(*HealthCheckRequest, Health_WatchServer) error +} + +// UnimplementedHealthServer should be embedded to have forward compatible implementations. 
+type UnimplementedHealthServer struct { +} + +func (UnimplementedHealthServer) Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Check not implemented") +} +func (UnimplementedHealthServer) Watch(*HealthCheckRequest, Health_WatchServer) error { + return status.Errorf(codes.Unimplemented, "method Watch not implemented") +} + +// UnsafeHealthServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to HealthServer will +// result in compilation errors. +type UnsafeHealthServer interface { + mustEmbedUnimplementedHealthServer() +} + +func RegisterHealthServer(s grpc.ServiceRegistrar, srv HealthServer) { + s.RegisterService(&Health_ServiceDesc, srv) +} + +func _Health_Check_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HealthCheckRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HealthServer).Check(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Health_Check_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HealthServer).Check(ctx, req.(*HealthCheckRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Health_Watch_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(HealthCheckRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(HealthServer).Watch(m, &healthWatchServer{stream}) +} + +type Health_WatchServer interface { + Send(*HealthCheckResponse) error + grpc.ServerStream +} + +type healthWatchServer struct { + grpc.ServerStream +} + +func (x *healthWatchServer) Send(m *HealthCheckResponse) error { + return x.ServerStream.SendMsg(m) +} + +// Health_ServiceDesc is the grpc.ServiceDesc for Health service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Health_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.health.v1.Health", + HandlerType: (*HealthServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Check", + Handler: _Health_Check_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Watch", + Handler: _Health_Watch_Handler, + ServerStreams: true, + }, + }, + Metadata: "grpc/health/v1/health.proto", +} diff --git a/vendor/google.golang.org/grpc/health/logging.go b/vendor/google.golang.org/grpc/health/logging.go new file mode 100644 index 00000000..83c6acf5 --- /dev/null +++ b/vendor/google.golang.org/grpc/health/logging.go @@ -0,0 +1,23 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package health + +import "google.golang.org/grpc/grpclog" + +var logger = grpclog.Component("health_service") diff --git a/vendor/google.golang.org/grpc/health/server.go b/vendor/google.golang.org/grpc/health/server.go new file mode 100644 index 00000000..cce6312d --- /dev/null +++ b/vendor/google.golang.org/grpc/health/server.go @@ -0,0 +1,163 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package health provides a service that exposes server's health and it must be +// imported to enable support for client-side health checks. +package health + +import ( + "context" + "sync" + + "google.golang.org/grpc/codes" + healthgrpc "google.golang.org/grpc/health/grpc_health_v1" + healthpb "google.golang.org/grpc/health/grpc_health_v1" + "google.golang.org/grpc/status" +) + +// Server implements `service Health`. +type Server struct { + healthgrpc.UnimplementedHealthServer + mu sync.RWMutex + // If shutdown is true, it's expected all serving status is NOT_SERVING, and + // will stay in NOT_SERVING. + shutdown bool + // statusMap stores the serving status of the services this Server monitors. + statusMap map[string]healthpb.HealthCheckResponse_ServingStatus + updates map[string]map[healthgrpc.Health_WatchServer]chan healthpb.HealthCheckResponse_ServingStatus +} + +// NewServer returns a new Server. +func NewServer() *Server { + return &Server{ + statusMap: map[string]healthpb.HealthCheckResponse_ServingStatus{"": healthpb.HealthCheckResponse_SERVING}, + updates: make(map[string]map[healthgrpc.Health_WatchServer]chan healthpb.HealthCheckResponse_ServingStatus), + } +} + +// Check implements `service Health`. +func (s *Server) Check(ctx context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) { + s.mu.RLock() + defer s.mu.RUnlock() + if servingStatus, ok := s.statusMap[in.Service]; ok { + return &healthpb.HealthCheckResponse{ + Status: servingStatus, + }, nil + } + return nil, status.Error(codes.NotFound, "unknown service") +} + +// Watch implements `service Health`. +func (s *Server) Watch(in *healthpb.HealthCheckRequest, stream healthgrpc.Health_WatchServer) error { + service := in.Service + // update channel is used for getting service status updates. + update := make(chan healthpb.HealthCheckResponse_ServingStatus, 1) + s.mu.Lock() + // Puts the initial status to the channel. + if servingStatus, ok := s.statusMap[service]; ok { + update <- servingStatus + } else { + update <- healthpb.HealthCheckResponse_SERVICE_UNKNOWN + } + + // Registers the update channel to the correct place in the updates map. 
+ if _, ok := s.updates[service]; !ok { + s.updates[service] = make(map[healthgrpc.Health_WatchServer]chan healthpb.HealthCheckResponse_ServingStatus) + } + s.updates[service][stream] = update + defer func() { + s.mu.Lock() + delete(s.updates[service], stream) + s.mu.Unlock() + }() + s.mu.Unlock() + + var lastSentStatus healthpb.HealthCheckResponse_ServingStatus = -1 + for { + select { + // Status updated. Sends the up-to-date status to the client. + case servingStatus := <-update: + if lastSentStatus == servingStatus { + continue + } + lastSentStatus = servingStatus + err := stream.Send(&healthpb.HealthCheckResponse{Status: servingStatus}) + if err != nil { + return status.Error(codes.Canceled, "Stream has ended.") + } + // Context done. Removes the update channel from the updates map. + case <-stream.Context().Done(): + return status.Error(codes.Canceled, "Stream has ended.") + } + } +} + +// SetServingStatus is called when need to reset the serving status of a service +// or insert a new service entry into the statusMap. +func (s *Server) SetServingStatus(service string, servingStatus healthpb.HealthCheckResponse_ServingStatus) { + s.mu.Lock() + defer s.mu.Unlock() + if s.shutdown { + logger.Infof("health: status changing for %s to %v is ignored because health service is shutdown", service, servingStatus) + return + } + + s.setServingStatusLocked(service, servingStatus) +} + +func (s *Server) setServingStatusLocked(service string, servingStatus healthpb.HealthCheckResponse_ServingStatus) { + s.statusMap[service] = servingStatus + for _, update := range s.updates[service] { + // Clears previous updates, that are not sent to the client, from the channel. + // This can happen if the client is not reading and the server gets flow control limited. + select { + case <-update: + default: + } + // Puts the most recent update to the channel. + update <- servingStatus + } +} + +// Shutdown sets all serving status to NOT_SERVING, and configures the server to +// ignore all future status changes. +// +// This changes serving status for all services. To set status for a particular +// services, call SetServingStatus(). +func (s *Server) Shutdown() { + s.mu.Lock() + defer s.mu.Unlock() + s.shutdown = true + for service := range s.statusMap { + s.setServingStatusLocked(service, healthpb.HealthCheckResponse_NOT_SERVING) + } +} + +// Resume sets all serving status to SERVING, and configures the server to +// accept all future status changes. +// +// This changes serving status for all services. To set status for a particular +// services, call SetServingStatus(). 
+func (s *Server) Resume() { + s.mu.Lock() + defer s.mu.Unlock() + s.shutdown = false + for service := range s.statusMap { + s.setServingStatusLocked(service, healthpb.HealthCheckResponse_SERVING) + } +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 6797b689..36cb83ea 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -498,6 +498,8 @@ google.golang.org/grpc/credentials/insecure google.golang.org/grpc/encoding google.golang.org/grpc/encoding/proto google.golang.org/grpc/grpclog +google.golang.org/grpc/health +google.golang.org/grpc/health/grpc_health_v1 google.golang.org/grpc/internal google.golang.org/grpc/internal/backoff google.golang.org/grpc/internal/balancer/gracefulswitch From d8ea6cbf56b976996c792e37142ad525b20bc37d Mon Sep 17 00:00:00 2001 From: coanor Date: Thu, 18 Sep 2025 18:12:48 +0800 Subject: [PATCH 04/10] save: add gRPC reflection --- dialtesting/grpc.go | 37 +- dialtesting/grpc_test.go | 24 + .../jhump/protoreflect/grpcreflect/adapt.go | 137 +++ .../jhump/protoreflect/grpcreflect/client.go | 865 ++++++++++++++ .../jhump/protoreflect/grpcreflect/doc.go | 10 + .../jhump/protoreflect/grpcreflect/server.go | 67 ++ .../grpc/reflection/README.md | 18 + .../grpc/reflection/adapt.go | 187 +++ .../grpc_reflection_v1/reflection.pb.go | 953 +++++++++++++++ .../grpc_reflection_v1/reflection_grpc.pb.go | 164 +++ .../grpc_reflection_v1alpha/reflection.pb.go | 1028 +++++++++++++++++ .../reflection_grpc.pb.go | 161 +++ .../grpc/reflection/serverreflection.go | 360 ++++++ vendor/modules.txt | 4 + 14 files changed, 4012 insertions(+), 3 deletions(-) create mode 100644 vendor/github.com/jhump/protoreflect/grpcreflect/adapt.go create mode 100644 vendor/github.com/jhump/protoreflect/grpcreflect/client.go create mode 100644 vendor/github.com/jhump/protoreflect/grpcreflect/doc.go create mode 100644 vendor/github.com/jhump/protoreflect/grpcreflect/server.go create mode 100644 vendor/google.golang.org/grpc/reflection/README.md create mode 100644 vendor/google.golang.org/grpc/reflection/adapt.go create mode 100644 vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go create mode 100644 vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go create mode 100644 vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go create mode 100644 vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go create mode 100644 vendor/google.golang.org/grpc/reflection/serverreflection.go diff --git a/dialtesting/grpc.go b/dialtesting/grpc.go index 915d78e3..7854198d 100644 --- a/dialtesting/grpc.go +++ b/dialtesting/grpc.go @@ -16,8 +16,10 @@ import ( "github.com/jhump/protoreflect/desc/protoparse" "github.com/jhump/protoreflect/dynamic" "github.com/jhump/protoreflect/dynamic/grpcdynamic" + "github.com/jhump/protoreflect/grpcreflect" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" + rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" ) type GRPCTask struct { @@ -44,8 +46,10 @@ func (t *GRPCTask) init() error { } t.conn = conn - if err := t.findMethod(); err != nil { - return err + if t.FullMethod != "" { + if err := t.findMethod(); err != nil { + return err + } } return nil @@ -68,7 +72,34 @@ func (t *GRPCTask) findMethod() error { } func (t *GRPCTask) findMethodByReflection() error { - // TODO + rc := grpcreflect.NewClient(context.Background(), rpb.NewServerReflectionClient(t.conn)) + + slash := strings.LastIndex(t.FullMethod, "/") + if slash == -1 { + 
fmt.Errorf("invalid full method name: %s", t.FullMethod) + } + serviceName := t.FullMethod[:slash] + + // 使用 reflection client 解析服务 + fd, err := rc.FileContainingSymbol(serviceName) + if err != nil { + return err + } + + sd := fd.FindService(serviceName) + if sd == nil { + return fmt.Errorf("service %s not found", serviceName) + } + + methodName := t.FullMethod[slash+1:] + md := sd.FindMethodByName(methodName) + if md == nil { + return fmt.Errorf("method %s not found in service %s", methodName, serviceName) + } + + log.Printf("find method %q ok", t.FullMethod) + + t.method = md return nil } diff --git a/dialtesting/grpc_test.go b/dialtesting/grpc_test.go index ed53745f..52bd403b 100644 --- a/dialtesting/grpc_test.go +++ b/dialtesting/grpc_test.go @@ -19,6 +19,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/health" "google.golang.org/grpc/health/grpc_health_v1" + "google.golang.org/grpc/reflection" ) type server struct { @@ -46,6 +47,7 @@ func TestGRPCDial(t *T.T) { healthSrv.SetServingStatus("greeter.Greeter", grpc_health_v1.HealthCheckResponse_SERVING) grpc_health_v1.RegisterHealthServer(s, healthSrv) + reflection.Register(s) go func() { assert.NoError(t, s.Serve(lsn)) // start server @@ -53,6 +55,28 @@ func TestGRPCDial(t *T.T) { time.Sleep(time.Second) // wait + t.Run(`dial-on-health-check(with-reflection)`, func(t *T.T) { + task := &GRPCTask{ + Server: lsn.Addr().String(), + FullMethod: "greeter.Greeter/SayHello", + } + + assert.NoError(t, task.init()) + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + cli := grpc_health_v1.NewHealthClient(task.conn) + req := &grpc_health_v1.HealthCheckRequest{ + // set service name for specifi service + Service: "greeter.Greeter", + } + + resp, err := cli.Check(ctx, req) + assert.NoError(t, err) + assert.Equal(t, grpc_health_v1.HealthCheckResponse_SERVING, resp.GetStatus()) + }) + t.Run(`dial-on-health-check`, func(t *T.T) { task := &GRPCTask{ Server: lsn.Addr().String(), diff --git a/vendor/github.com/jhump/protoreflect/grpcreflect/adapt.go b/vendor/github.com/jhump/protoreflect/grpcreflect/adapt.go new file mode 100644 index 00000000..0d5615fe --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/grpcreflect/adapt.go @@ -0,0 +1,137 @@ +package grpcreflect + +import ( + refv1 "google.golang.org/grpc/reflection/grpc_reflection_v1" + refv1alpha "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" +) + +func toV1Request(v1alpha *refv1alpha.ServerReflectionRequest) *refv1.ServerReflectionRequest { + var v1 refv1.ServerReflectionRequest + v1.Host = v1alpha.Host + switch mr := v1alpha.MessageRequest.(type) { + case *refv1alpha.ServerReflectionRequest_FileByFilename: + v1.MessageRequest = &refv1.ServerReflectionRequest_FileByFilename{ + FileByFilename: mr.FileByFilename, + } + case *refv1alpha.ServerReflectionRequest_FileContainingSymbol: + v1.MessageRequest = &refv1.ServerReflectionRequest_FileContainingSymbol{ + FileContainingSymbol: mr.FileContainingSymbol, + } + case *refv1alpha.ServerReflectionRequest_FileContainingExtension: + if mr.FileContainingExtension != nil { + v1.MessageRequest = &refv1.ServerReflectionRequest_FileContainingExtension{ + FileContainingExtension: &refv1.ExtensionRequest{ + ContainingType: mr.FileContainingExtension.GetContainingType(), + ExtensionNumber: mr.FileContainingExtension.GetExtensionNumber(), + }, + } + } + case *refv1alpha.ServerReflectionRequest_AllExtensionNumbersOfType: + v1.MessageRequest = &refv1.ServerReflectionRequest_AllExtensionNumbersOfType{ 
+ AllExtensionNumbersOfType: mr.AllExtensionNumbersOfType, + } + case *refv1alpha.ServerReflectionRequest_ListServices: + v1.MessageRequest = &refv1.ServerReflectionRequest_ListServices{ + ListServices: mr.ListServices, + } + default: + // no value set + } + return &v1 +} + +func toV1AlphaRequest(v1 *refv1.ServerReflectionRequest) *refv1alpha.ServerReflectionRequest { + var v1alpha refv1alpha.ServerReflectionRequest + v1alpha.Host = v1.Host + switch mr := v1.MessageRequest.(type) { + case *refv1.ServerReflectionRequest_FileByFilename: + if mr != nil { + v1alpha.MessageRequest = &refv1alpha.ServerReflectionRequest_FileByFilename{ + FileByFilename: mr.FileByFilename, + } + } + case *refv1.ServerReflectionRequest_FileContainingSymbol: + if mr != nil { + v1alpha.MessageRequest = &refv1alpha.ServerReflectionRequest_FileContainingSymbol{ + FileContainingSymbol: mr.FileContainingSymbol, + } + } + case *refv1.ServerReflectionRequest_FileContainingExtension: + if mr != nil { + v1alpha.MessageRequest = &refv1alpha.ServerReflectionRequest_FileContainingExtension{ + FileContainingExtension: &refv1alpha.ExtensionRequest{ + ContainingType: mr.FileContainingExtension.GetContainingType(), + ExtensionNumber: mr.FileContainingExtension.GetExtensionNumber(), + }, + } + } + case *refv1.ServerReflectionRequest_AllExtensionNumbersOfType: + if mr != nil { + v1alpha.MessageRequest = &refv1alpha.ServerReflectionRequest_AllExtensionNumbersOfType{ + AllExtensionNumbersOfType: mr.AllExtensionNumbersOfType, + } + } + case *refv1.ServerReflectionRequest_ListServices: + if mr != nil { + v1alpha.MessageRequest = &refv1alpha.ServerReflectionRequest_ListServices{ + ListServices: mr.ListServices, + } + } + default: + // no value set + } + return &v1alpha +} + +func toV1AlphaResponse(v1 *refv1.ServerReflectionResponse) *refv1alpha.ServerReflectionResponse { + var v1alpha refv1alpha.ServerReflectionResponse + v1alpha.ValidHost = v1.ValidHost + if v1.OriginalRequest != nil { + v1alpha.OriginalRequest = toV1AlphaRequest(v1.OriginalRequest) + } + switch mr := v1.MessageResponse.(type) { + case *refv1.ServerReflectionResponse_FileDescriptorResponse: + if mr != nil { + v1alpha.MessageResponse = &refv1alpha.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &refv1alpha.FileDescriptorResponse{ + FileDescriptorProto: mr.FileDescriptorResponse.GetFileDescriptorProto(), + }, + } + } + case *refv1.ServerReflectionResponse_AllExtensionNumbersResponse: + if mr != nil { + v1alpha.MessageResponse = &refv1alpha.ServerReflectionResponse_AllExtensionNumbersResponse{ + AllExtensionNumbersResponse: &refv1alpha.ExtensionNumberResponse{ + BaseTypeName: mr.AllExtensionNumbersResponse.GetBaseTypeName(), + ExtensionNumber: mr.AllExtensionNumbersResponse.GetExtensionNumber(), + }, + } + } + case *refv1.ServerReflectionResponse_ListServicesResponse: + if mr != nil { + svcs := make([]*refv1alpha.ServiceResponse, len(mr.ListServicesResponse.GetService())) + for i, svc := range mr.ListServicesResponse.GetService() { + svcs[i] = &refv1alpha.ServiceResponse{ + Name: svc.GetName(), + } + } + v1alpha.MessageResponse = &refv1alpha.ServerReflectionResponse_ListServicesResponse{ + ListServicesResponse: &refv1alpha.ListServiceResponse{ + Service: svcs, + }, + } + } + case *refv1.ServerReflectionResponse_ErrorResponse: + if mr != nil { + v1alpha.MessageResponse = &refv1alpha.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &refv1alpha.ErrorResponse{ + ErrorCode: mr.ErrorResponse.GetErrorCode(), + ErrorMessage: 
mr.ErrorResponse.GetErrorMessage(), + }, + } + } + default: + // no value set + } + return &v1alpha +} diff --git a/vendor/github.com/jhump/protoreflect/grpcreflect/client.go b/vendor/github.com/jhump/protoreflect/grpcreflect/client.go new file mode 100644 index 00000000..cb6bf568 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/grpcreflect/client.go @@ -0,0 +1,865 @@ +package grpcreflect + +import ( + "bytes" + "context" + "fmt" + "io" + "reflect" + "runtime" + "sync" + "sync/atomic" + "time" + + "github.com/golang/protobuf/proto" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + refv1 "google.golang.org/grpc/reflection/grpc_reflection_v1" + refv1alpha "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/descriptorpb" + + "github.com/jhump/protoreflect/desc" + "github.com/jhump/protoreflect/internal" +) + +// If we try the v1 reflection API and get back "not implemented", we'll wait +// this long before trying v1 again. This allows a long-lived client to +// dynamically switch from v1alpha to v1 if the underlying server is updated +// to support it. But it also prevents every stream request from always trying +// v1 first: if we try it and see it fail, we shouldn't continually retry it +// if we expect it will fail again. +const durationBetweenV1Attempts = time.Hour + +// elementNotFoundError is the error returned by reflective operations where the +// server does not recognize a given file name, symbol name, or extension. +type elementNotFoundError struct { + name string + kind elementKind + symType symbolType // only used when kind == elementKindSymbol + tag int32 // only used when kind == elementKindExtension + + // only errors with a kind of elementKindFile will have a cause, which means + // the named file count not be resolved because of a dependency that could + // not be found where cause describes the missing dependency + cause *elementNotFoundError +} + +type elementKind int + +const ( + elementKindSymbol elementKind = iota + elementKindFile + elementKindExtension +) + +type symbolType string + +const ( + symbolTypeService = "Service" + symbolTypeMessage = "Message" + symbolTypeEnum = "Enum" + symbolTypeUnknown = "Symbol" +) + +func symbolNotFound(symbol string, symType symbolType, cause *elementNotFoundError) error { + if cause != nil && cause.kind == elementKindSymbol && cause.name == symbol { + // no need to wrap + if symType != symbolTypeUnknown && cause.symType == symbolTypeUnknown { + // We previously didn't know symbol type but now do? + // Create a new error that has the right symbol type. 
+			return &elementNotFoundError{name: symbol, symType: symType, kind: elementKindSymbol}
+		}
+		return cause
+	}
+	return &elementNotFoundError{name: symbol, symType: symType, kind: elementKindSymbol, cause: cause}
+}
+
+func extensionNotFound(extendee string, tag int32, cause *elementNotFoundError) error {
+	if cause != nil && cause.kind == elementKindExtension && cause.name == extendee && cause.tag == tag {
+		// no need to wrap
+		return cause
+	}
+	return &elementNotFoundError{name: extendee, tag: tag, kind: elementKindExtension, cause: cause}
+}
+
+func fileNotFound(file string, cause *elementNotFoundError) error {
+	if cause != nil && cause.kind == elementKindFile && cause.name == file {
+		// no need to wrap
+		return cause
+	}
+	return &elementNotFoundError{name: file, kind: elementKindFile, cause: cause}
+}
+
+func (e *elementNotFoundError) Error() string {
+	first := true
+	var b bytes.Buffer
+	for ; e != nil; e = e.cause {
+		if first {
+			first = false
+		} else {
+			_, _ = fmt.Fprint(&b, "\ncaused by: ")
+		}
+		switch e.kind {
+		case elementKindSymbol:
+			_, _ = fmt.Fprintf(&b, "%s not found: %s", e.symType, e.name)
+		case elementKindExtension:
+			_, _ = fmt.Fprintf(&b, "Extension not found: tag %d for %s", e.tag, e.name)
+		default:
+			_, _ = fmt.Fprintf(&b, "File not found: %s", e.name)
+		}
+	}
+	return b.String()
+}
+
+// IsElementNotFoundError determines if the given error indicates that a file
+// name, symbol name, or extension field could not be found by the server.
+func IsElementNotFoundError(err error) bool {
+	_, ok := err.(*elementNotFoundError)
+	return ok
+}
+
+// ProtocolError is an error returned when the server sends a response of the
+// wrong type.
+type ProtocolError struct {
+	missingType reflect.Type
+}
+
+func (p ProtocolError) Error() string {
+	return fmt.Sprintf("Protocol error: response was missing %v", p.missingType)
+}
+
+type extDesc struct {
+	extendedMessageName string
+	extensionNumber     int32
+}
+
+// Client is a client connection to a server for performing reflection calls
+// and resolving remote symbols.
+type Client struct {
+	ctx          context.Context
+	now          func() time.Time
+	stubV1       refv1.ServerReflectionClient
+	stubV1Alpha  refv1alpha.ServerReflectionClient
+	allowMissing atomic.Bool
+
+	connMu      sync.Mutex
+	cancel      context.CancelFunc
+	stream      refv1alpha.ServerReflection_ServerReflectionInfoClient
+	useV1Alpha  bool
+	lastTriedV1 time.Time
+
+	cacheMu          sync.RWMutex
+	protosByName     map[string]*descriptorpb.FileDescriptorProto
+	filesByName      map[string]*desc.FileDescriptor
+	filesBySymbol    map[string]*desc.FileDescriptor
+	filesByExtension map[extDesc]*desc.FileDescriptor
+}
+
+// NewClient creates a new Client with the given root context and using the
+// given RPC stub for talking to the server.
+//
+// Deprecated: Use NewClientV1Alpha if you are intentionally pinning the
+// v1alpha version of the reflection service. Otherwise, use NewClientAuto
+// instead.
+func NewClient(ctx context.Context, stub refv1alpha.ServerReflectionClient) *Client {
+	return NewClientV1Alpha(ctx, stub)
+}
+
+// NewClientV1Alpha creates a new Client using the v1alpha version of reflection
+// with the given root context and using the given RPC stub for talking to the
+// server.
+func NewClientV1Alpha(ctx context.Context, stub refv1alpha.ServerReflectionClient) *Client {
+	return newClient(ctx, nil, stub)
+}
+
+func newClient(ctx context.Context, stubv1 refv1.ServerReflectionClient, stubv1alpha refv1alpha.ServerReflectionClient) *Client {
+	cr := &Client{
+		ctx:              ctx,
+		now:              time.Now,
+		stubV1:           stubv1,
+		stubV1Alpha:      stubv1alpha,
+		protosByName:     map[string]*descriptorpb.FileDescriptorProto{},
+		filesByName:      map[string]*desc.FileDescriptor{},
+		filesBySymbol:    map[string]*desc.FileDescriptor{},
+		filesByExtension: map[extDesc]*desc.FileDescriptor{},
+	}
+	// don't leak a grpc stream
+	runtime.SetFinalizer(cr, (*Client).Reset)
+	return cr
+}
+
+// NewClientAuto creates a new Client that will use either v1 or v1alpha version
+// of reflection (based on what the server supports) with the given root context
+// and using the given client connection.
+//
+// It will first try the v1 version of the reflection service. If it gets back an
+// "Unimplemented" error, it will fall back to using the v1alpha version. It
+// will remember which version the server supports for any subsequent operations
+// that need to re-invoke the streaming RPC. But, if it's a very long-lived
+// client, it will periodically retry the v1 version (in case the server is
+// updated to support it also). The period for these retries is every hour.
+func NewClientAuto(ctx context.Context, cc grpc.ClientConnInterface) *Client {
+	stubv1 := refv1.NewServerReflectionClient(cc)
+	stubv1alpha := refv1alpha.NewServerReflectionClient(cc)
+	return newClient(ctx, stubv1, stubv1alpha)
+}
+
+// AllowMissingFileDescriptors configures the client to allow missing files
+// when building descriptors when possible. Missing files are often fatal
+// errors, but with this option they can sometimes be worked around. Building
+// a schema can only succeed with some files missing if the files in question
+// only provide custom options and/or other unused types.
+func (cr *Client) AllowMissingFileDescriptors() {
+	cr.allowMissing.Store(true)
+}
+
+// TODO: We should also have a NewClientV1. However that should not refer to internal
+// generated code. So it will have to wait until the grpc-go team fixes this issue:
+// https://github.com/grpc/grpc-go/issues/5684
+
+// FileByFilename asks the server for a file descriptor for the proto file with
+// the given name.
+func (cr *Client) FileByFilename(filename string) (*desc.FileDescriptor, error) {
+	// hit the cache first
+	cr.cacheMu.RLock()
+	if fd, ok := cr.filesByName[filename]; ok {
+		cr.cacheMu.RUnlock()
+		return fd, nil
+	}
+	fdp, ok := cr.protosByName[filename]
+	cr.cacheMu.RUnlock()
+	// not there? see if we've downloaded the proto
+	if ok {
+		return cr.descriptorFromProto(fdp)
+	}
+
+	req := &refv1alpha.ServerReflectionRequest{
+		MessageRequest: &refv1alpha.ServerReflectionRequest_FileByFilename{
+			FileByFilename: filename,
+		},
+	}
+	accept := func(fd *desc.FileDescriptor) bool {
+		return fd.GetName() == filename
+	}
+
+	fd, err := cr.getAndCacheFileDescriptors(req, filename, "", accept)
+	if isNotFound(err) {
+		// file not found?
see if we can look up via alternate name + if alternate, ok := internal.StdFileAliases[filename]; ok { + req := &refv1alpha.ServerReflectionRequest{ + MessageRequest: &refv1alpha.ServerReflectionRequest_FileByFilename{ + FileByFilename: alternate, + }, + } + fd, err = cr.getAndCacheFileDescriptors(req, alternate, filename, accept) + if isNotFound(err) { + err = fileNotFound(filename, nil) + } + } else { + err = fileNotFound(filename, nil) + } + } else if e, ok := err.(*elementNotFoundError); ok { + err = fileNotFound(filename, e) + } + return fd, err +} + +// FileContainingSymbol asks the server for a file descriptor for the proto file +// that declares the given fully-qualified symbol. +func (cr *Client) FileContainingSymbol(symbol string) (*desc.FileDescriptor, error) { + // hit the cache first + cr.cacheMu.RLock() + fd, ok := cr.filesBySymbol[symbol] + cr.cacheMu.RUnlock() + if ok { + return fd, nil + } + + req := &refv1alpha.ServerReflectionRequest{ + MessageRequest: &refv1alpha.ServerReflectionRequest_FileContainingSymbol{ + FileContainingSymbol: symbol, + }, + } + accept := func(fd *desc.FileDescriptor) bool { + return fd.FindSymbol(symbol) != nil + } + fd, err := cr.getAndCacheFileDescriptors(req, "", "", accept) + if isNotFound(err) { + err = symbolNotFound(symbol, symbolTypeUnknown, nil) + } else if e, ok := err.(*elementNotFoundError); ok { + err = symbolNotFound(symbol, symbolTypeUnknown, e) + } + return fd, err +} + +// FileContainingExtension asks the server for a file descriptor for the proto +// file that declares an extension with the given number for the given +// fully-qualified message name. +func (cr *Client) FileContainingExtension(extendedMessageName string, extensionNumber int32) (*desc.FileDescriptor, error) { + // hit the cache first + cr.cacheMu.RLock() + fd, ok := cr.filesByExtension[extDesc{extendedMessageName, extensionNumber}] + cr.cacheMu.RUnlock() + if ok { + return fd, nil + } + + req := &refv1alpha.ServerReflectionRequest{ + MessageRequest: &refv1alpha.ServerReflectionRequest_FileContainingExtension{ + FileContainingExtension: &refv1alpha.ExtensionRequest{ + ContainingType: extendedMessageName, + ExtensionNumber: extensionNumber, + }, + }, + } + accept := func(fd *desc.FileDescriptor) bool { + return fd.FindExtension(extendedMessageName, extensionNumber) != nil + } + fd, err := cr.getAndCacheFileDescriptors(req, "", "", accept) + if isNotFound(err) { + err = extensionNotFound(extendedMessageName, extensionNumber, nil) + } else if e, ok := err.(*elementNotFoundError); ok { + err = extensionNotFound(extendedMessageName, extensionNumber, e) + } + return fd, err +} + +func (cr *Client) getAndCacheFileDescriptors(req *refv1alpha.ServerReflectionRequest, expectedName, alias string, accept func(*desc.FileDescriptor) bool) (*desc.FileDescriptor, error) { + resp, err := cr.send(req) + if err != nil { + return nil, err + } + + fdResp := resp.GetFileDescriptorResponse() + if fdResp == nil { + return nil, &ProtocolError{reflect.TypeOf(fdResp).Elem()} + } + + // Response can contain the result file descriptor, but also its transitive + // deps. Furthermore, protocol states that subsequent requests do not need + // to send transitive deps that have been sent in prior responses. So we + // need to cache all file descriptors and then return the first one (which + // should be the answer). If we're looking for a file by name, we can be + // smarter and make sure to grab one by name instead of just grabbing the + // first one. 
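The comment above summarizes how the reflection protocol delivers results: a FileDescriptorResponse carries the requested file plus its transitive dependencies as opaque serialized FileDescriptorProto messages, and later responses on the same stream may omit files that were already sent. A minimal, separate sketch of that decoding-and-caching step (the helper name and the cache map are illustrative, not part of the vendored client):

```go
package reflectexample

import (
	"fmt"

	refv1 "google.golang.org/grpc/reflection/grpc_reflection_v1"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

// decodeAndCache unmarshals every serialized descriptor in the response and
// keeps the first copy seen for each file name, since the server may skip
// files it already sent earlier on the stream.
func decodeAndCache(resp *refv1.FileDescriptorResponse, cache map[string]*descriptorpb.FileDescriptorProto) error {
	for _, raw := range resp.GetFileDescriptorProto() {
		fd := &descriptorpb.FileDescriptorProto{}
		if err := proto.Unmarshal(raw, fd); err != nil {
			return fmt.Errorf("bad descriptor in response: %w", err)
		}
		if _, ok := cache[fd.GetName()]; !ok {
			cache[fd.GetName()] = fd
		}
	}
	return nil
}
```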
+ var fds []*descriptorpb.FileDescriptorProto + for _, fdBytes := range fdResp.FileDescriptorProto { + fd := &descriptorpb.FileDescriptorProto{} + if err = proto.Unmarshal(fdBytes, fd); err != nil { + return nil, err + } + + if expectedName != "" && alias != "" && expectedName != alias && fd.GetName() == expectedName { + // we found a file was aliased, so we need to update the proto to reflect that + fd.Name = proto.String(alias) + } + + cr.cacheMu.Lock() + // store in cache of raw descriptor protos, but don't overwrite existing protos + if existingFd, ok := cr.protosByName[fd.GetName()]; ok { + fd = existingFd + } else { + cr.protosByName[fd.GetName()] = fd + } + cr.cacheMu.Unlock() + + fds = append(fds, fd) + } + + // find the right result from the files returned + for _, fd := range fds { + result, err := cr.descriptorFromProto(fd) + if err != nil { + return nil, err + } + if accept(result) { + return result, nil + } + } + + return nil, status.Errorf(codes.NotFound, "response does not include expected file") +} + +func (cr *Client) descriptorFromProto(fd *descriptorpb.FileDescriptorProto) (*desc.FileDescriptor, error) { + allowMissing := cr.allowMissing.Load() + deps := make([]*desc.FileDescriptor, 0, len(fd.GetDependency())) + var deferredErr error + var missingDeps []int + for i, depName := range fd.GetDependency() { + if dep, err := cr.FileByFilename(depName); err != nil { + if _, ok := err.(*elementNotFoundError); !ok || !allowMissing { + return nil, err + } + // We'll ignore for now to see if the file is really necessary. + // (If it only supplies custom options, we can get by without it.) + if deferredErr == nil { + deferredErr = err + } + missingDeps = append(missingDeps, i) + } else { + deps = append(deps, dep) + } + } + if len(missingDeps) > 0 { + fd = fileWithoutDeps(fd, missingDeps) + } + d, err := desc.CreateFileDescriptor(fd, deps...) 
+ if err != nil { + if deferredErr != nil { + // assume the issue is the missing dep + return nil, deferredErr + } + return nil, err + } + d = cr.cacheFile(d) + return d, nil +} + +func (cr *Client) cacheFile(fd *desc.FileDescriptor) *desc.FileDescriptor { + cr.cacheMu.Lock() + defer cr.cacheMu.Unlock() + + // cache file descriptor by name, but don't overwrite existing entry + // (existing entry could come from concurrent caller) + if existingFd, ok := cr.filesByName[fd.GetName()]; ok { + return existingFd + } + cr.filesByName[fd.GetName()] = fd + + // also cache by symbols and extensions + for _, m := range fd.GetMessageTypes() { + cr.cacheMessageLocked(fd, m) + } + for _, e := range fd.GetEnumTypes() { + cr.filesBySymbol[e.GetFullyQualifiedName()] = fd + for _, v := range e.GetValues() { + cr.filesBySymbol[v.GetFullyQualifiedName()] = fd + } + } + for _, e := range fd.GetExtensions() { + cr.filesBySymbol[e.GetFullyQualifiedName()] = fd + cr.filesByExtension[extDesc{e.GetOwner().GetFullyQualifiedName(), e.GetNumber()}] = fd + } + for _, s := range fd.GetServices() { + cr.filesBySymbol[s.GetFullyQualifiedName()] = fd + for _, m := range s.GetMethods() { + cr.filesBySymbol[m.GetFullyQualifiedName()] = fd + } + } + + return fd +} + +func (cr *Client) cacheMessageLocked(fd *desc.FileDescriptor, md *desc.MessageDescriptor) { + cr.filesBySymbol[md.GetFullyQualifiedName()] = fd + for _, f := range md.GetFields() { + cr.filesBySymbol[f.GetFullyQualifiedName()] = fd + } + for _, o := range md.GetOneOfs() { + cr.filesBySymbol[o.GetFullyQualifiedName()] = fd + } + for _, e := range md.GetNestedEnumTypes() { + cr.filesBySymbol[e.GetFullyQualifiedName()] = fd + for _, v := range e.GetValues() { + cr.filesBySymbol[v.GetFullyQualifiedName()] = fd + } + } + for _, e := range md.GetNestedExtensions() { + cr.filesBySymbol[e.GetFullyQualifiedName()] = fd + cr.filesByExtension[extDesc{e.GetOwner().GetFullyQualifiedName(), e.GetNumber()}] = fd + } + for _, m := range md.GetNestedMessageTypes() { + cr.cacheMessageLocked(fd, m) // recurse + } +} + +// AllExtensionNumbersForType asks the server for all known extension numbers +// for the given fully-qualified message name. +func (cr *Client) AllExtensionNumbersForType(extendedMessageName string) ([]int32, error) { + req := &refv1alpha.ServerReflectionRequest{ + MessageRequest: &refv1alpha.ServerReflectionRequest_AllExtensionNumbersOfType{ + AllExtensionNumbersOfType: extendedMessageName, + }, + } + resp, err := cr.send(req) + if err != nil { + if isNotFound(err) { + return nil, symbolNotFound(extendedMessageName, symbolTypeMessage, nil) + } + return nil, err + } + + extResp := resp.GetAllExtensionNumbersResponse() + if extResp == nil { + return nil, &ProtocolError{reflect.TypeOf(extResp).Elem()} + } + return extResp.ExtensionNumber, nil +} + +// ListServices asks the server for the fully-qualified names of all exposed +// services. +func (cr *Client) ListServices() ([]string, error) { + req := &refv1alpha.ServerReflectionRequest{ + MessageRequest: &refv1alpha.ServerReflectionRequest_ListServices{ + // proto doesn't indicate any purpose for this value and server impl + // doesn't actually use it... 
+			ListServices: "*",
+		},
+	}
+	resp, err := cr.send(req)
+	if err != nil {
+		return nil, err
+	}
+
+	listResp := resp.GetListServicesResponse()
+	if listResp == nil {
+		return nil, &ProtocolError{reflect.TypeOf(listResp).Elem()}
+	}
+	serviceNames := make([]string, len(listResp.Service))
+	for i, s := range listResp.Service {
+		serviceNames[i] = s.Name
+	}
+	return serviceNames, nil
+}
+
+func (cr *Client) send(req *refv1alpha.ServerReflectionRequest) (*refv1alpha.ServerReflectionResponse, error) {
+	// we allow one immediate retry, in case we have a stale stream
+	// (e.g. closed by server)
+	resp, err := cr.doSend(req)
+	if err != nil {
+		return nil, err
+	}
+
+	// convert error response messages into errors
+	errResp := resp.GetErrorResponse()
+	if errResp != nil {
+		return nil, status.Errorf(codes.Code(errResp.ErrorCode), "%s", errResp.ErrorMessage)
+	}
+
+	return resp, nil
+}
+
+func isNotFound(err error) bool {
+	if err == nil {
+		return false
+	}
+	s, ok := status.FromError(err)
+	return ok && s.Code() == codes.NotFound
+}
+
+func (cr *Client) doSend(req *refv1alpha.ServerReflectionRequest) (*refv1alpha.ServerReflectionResponse, error) {
+	// TODO: Streams are thread-safe, so we shouldn't need to lock. But without locking, we'll need more machinery
+	// (goroutines and channels) to ensure that responses are correctly correlated with their requests and thus
+	// delivered in correct order.
+	cr.connMu.Lock()
+	defer cr.connMu.Unlock()
+	return cr.doSendLocked(0, nil, req)
+}
+
+func (cr *Client) doSendLocked(attemptCount int, prevErr error, req *refv1alpha.ServerReflectionRequest) (*refv1alpha.ServerReflectionResponse, error) {
+	if attemptCount >= 3 && prevErr != nil {
+		return nil, prevErr
+	}
+	if (status.Code(prevErr) == codes.Unimplemented ||
+		status.Code(prevErr) == codes.Unavailable) &&
+		cr.useV1() {
+		// If v1 is unimplemented, fall back to v1alpha.
+		// We also fall back on unavailable because some servers have been
+		// observed to close the connection/cancel the stream, w/out sending
+		// back status or headers, when the service name is not known. When
+		// this happens, the RPC status code is unavailable.
+ // See https://github.com/fullstorydev/grpcurl/issues/434 + cr.useV1Alpha = true + cr.lastTriedV1 = cr.now() + } + attemptCount++ + + if err := cr.initStreamLocked(); err != nil { + return nil, err + } + + if err := cr.stream.Send(req); err != nil { + if err == io.EOF { + // if send returns EOF, must call Recv to get real underlying error + _, err = cr.stream.Recv() + } + cr.resetLocked() + return cr.doSendLocked(attemptCount, err, req) + } + + resp, err := cr.stream.Recv() + if err != nil { + cr.resetLocked() + return cr.doSendLocked(attemptCount, err, req) + } + return resp, nil +} + +func (cr *Client) initStreamLocked() error { + if cr.stream != nil { + return nil + } + var newCtx context.Context + newCtx, cr.cancel = context.WithCancel(cr.ctx) + if cr.useV1Alpha == true && cr.now().Sub(cr.lastTriedV1) > durationBetweenV1Attempts { + // we're due for periodic retry of v1 + cr.useV1Alpha = false + } + if cr.useV1() { + // try the v1 API + streamv1, err := cr.stubV1.ServerReflectionInfo(newCtx) + if err == nil { + cr.stream = adaptStreamFromV1{streamv1} + return nil + } + if status.Code(err) != codes.Unimplemented { + return err + } + // oh well, fall through below to try v1alpha and update state + // so we skip straight to v1alpha next time + cr.useV1Alpha = true + cr.lastTriedV1 = cr.now() + } + var err error + cr.stream, err = cr.stubV1Alpha.ServerReflectionInfo(newCtx) + return err +} + +func (cr *Client) useV1() bool { + return !cr.useV1Alpha && cr.stubV1 != nil +} + +// Reset ensures that any active stream with the server is closed, releasing any +// resources. +func (cr *Client) Reset() { + cr.connMu.Lock() + defer cr.connMu.Unlock() + cr.resetLocked() +} + +func (cr *Client) resetLocked() { + if cr.stream != nil { + cr.stream.CloseSend() + for { + // drain the stream, this covers io.EOF too + if _, err := cr.stream.Recv(); err != nil { + break + } + } + cr.stream = nil + } + if cr.cancel != nil { + cr.cancel() + cr.cancel = nil + } +} + +// ResolveService asks the server to resolve the given fully-qualified service +// name into a service descriptor. +func (cr *Client) ResolveService(serviceName string) (*desc.ServiceDescriptor, error) { + file, err := cr.FileContainingSymbol(serviceName) + if err != nil { + return nil, setSymbolType(err, serviceName, symbolTypeService) + } + d := file.FindSymbol(serviceName) + if d == nil { + return nil, symbolNotFound(serviceName, symbolTypeService, nil) + } + if s, ok := d.(*desc.ServiceDescriptor); ok { + return s, nil + } else { + return nil, symbolNotFound(serviceName, symbolTypeService, nil) + } +} + +// ResolveMessage asks the server to resolve the given fully-qualified message +// name into a message descriptor. +func (cr *Client) ResolveMessage(messageName string) (*desc.MessageDescriptor, error) { + file, err := cr.FileContainingSymbol(messageName) + if err != nil { + return nil, setSymbolType(err, messageName, symbolTypeMessage) + } + d := file.FindSymbol(messageName) + if d == nil { + return nil, symbolNotFound(messageName, symbolTypeMessage, nil) + } + if s, ok := d.(*desc.MessageDescriptor); ok { + return s, nil + } else { + return nil, symbolNotFound(messageName, symbolTypeMessage, nil) + } +} + +// ResolveEnum asks the server to resolve the given fully-qualified enum name +// into an enum descriptor. 
+func (cr *Client) ResolveEnum(enumName string) (*desc.EnumDescriptor, error) { + file, err := cr.FileContainingSymbol(enumName) + if err != nil { + return nil, setSymbolType(err, enumName, symbolTypeEnum) + } + d := file.FindSymbol(enumName) + if d == nil { + return nil, symbolNotFound(enumName, symbolTypeEnum, nil) + } + if s, ok := d.(*desc.EnumDescriptor); ok { + return s, nil + } else { + return nil, symbolNotFound(enumName, symbolTypeEnum, nil) + } +} + +func setSymbolType(err error, name string, symType symbolType) error { + if e, ok := err.(*elementNotFoundError); ok { + if e.kind == elementKindSymbol && e.name == name && e.symType == symbolTypeUnknown { + e.symType = symType + } + } + return err +} + +// ResolveEnumValues asks the server to resolve the given fully-qualified enum +// name into a map of names to numbers that represents the enum's values. +func (cr *Client) ResolveEnumValues(enumName string) (map[string]int32, error) { + enumDesc, err := cr.ResolveEnum(enumName) + if err != nil { + return nil, err + } + vals := map[string]int32{} + for _, valDesc := range enumDesc.GetValues() { + vals[valDesc.GetName()] = valDesc.GetNumber() + } + return vals, nil +} + +// ResolveExtension asks the server to resolve the given extension number and +// fully-qualified message name into a field descriptor. +func (cr *Client) ResolveExtension(extendedType string, extensionNumber int32) (*desc.FieldDescriptor, error) { + file, err := cr.FileContainingExtension(extendedType, extensionNumber) + if err != nil { + return nil, err + } + d := findExtension(extendedType, extensionNumber, fileDescriptorExtensions{file}) + if d == nil { + return nil, extensionNotFound(extendedType, extensionNumber, nil) + } else { + return d, nil + } +} + +func fileWithoutDeps(fd *descriptorpb.FileDescriptorProto, missingDeps []int) *descriptorpb.FileDescriptorProto { + // We need to rebuild the file without the missing deps. + fd = proto.Clone(fd).(*descriptorpb.FileDescriptorProto) + newNumDeps := len(fd.GetDependency()) - len(missingDeps) + newDeps := make([]string, 0, newNumDeps) + remapped := make(map[int]int, newNumDeps) + missingIdx := 0 + for i, dep := range fd.GetDependency() { + if missingIdx < len(missingDeps) { + if i == missingDeps[missingIdx] { + // This dep was missing. Skip it. + missingIdx++ + continue + } + } + remapped[i] = len(newDeps) + newDeps = append(newDeps, dep) + } + // Also rebuild public and weak import slices. 
+ newPublic := make([]int32, 0, len(fd.GetPublicDependency())) + for _, idx := range fd.GetPublicDependency() { + newIdx, ok := remapped[int(idx)] + if ok { + newPublic = append(newPublic, int32(newIdx)) + } + } + newWeak := make([]int32, 0, len(fd.GetWeakDependency())) + for _, idx := range fd.GetWeakDependency() { + newIdx, ok := remapped[int(idx)] + if ok { + newWeak = append(newWeak, int32(newIdx)) + } + } + + fd.Dependency = newDeps + fd.PublicDependency = newPublic + fd.WeakDependency = newWeak + return fd +} + +func findExtension(extendedType string, extensionNumber int32, scope extensionScope) *desc.FieldDescriptor { + // search extensions in this scope + for _, ext := range scope.extensions() { + if ext.GetNumber() == extensionNumber && ext.GetOwner().GetFullyQualifiedName() == extendedType { + return ext + } + } + + // if not found, search nested scopes + for _, nested := range scope.nestedScopes() { + ext := findExtension(extendedType, extensionNumber, nested) + if ext != nil { + return ext + } + } + + return nil +} + +type extensionScope interface { + extensions() []*desc.FieldDescriptor + nestedScopes() []extensionScope +} + +// fileDescriptorExtensions implements extensionHolder interface on top of +// FileDescriptorProto +type fileDescriptorExtensions struct { + proto *desc.FileDescriptor +} + +func (fde fileDescriptorExtensions) extensions() []*desc.FieldDescriptor { + return fde.proto.GetExtensions() +} + +func (fde fileDescriptorExtensions) nestedScopes() []extensionScope { + scopes := make([]extensionScope, len(fde.proto.GetMessageTypes())) + for i, m := range fde.proto.GetMessageTypes() { + scopes[i] = msgDescriptorExtensions{m} + } + return scopes +} + +// msgDescriptorExtensions implements extensionHolder interface on top of +// DescriptorProto +type msgDescriptorExtensions struct { + proto *desc.MessageDescriptor +} + +func (mde msgDescriptorExtensions) extensions() []*desc.FieldDescriptor { + return mde.proto.GetNestedExtensions() +} + +func (mde msgDescriptorExtensions) nestedScopes() []extensionScope { + scopes := make([]extensionScope, len(mde.proto.GetNestedMessageTypes())) + for i, m := range mde.proto.GetNestedMessageTypes() { + scopes[i] = msgDescriptorExtensions{m} + } + return scopes +} + +type adaptStreamFromV1 struct { + refv1.ServerReflection_ServerReflectionInfoClient +} + +func (a adaptStreamFromV1) Send(request *refv1alpha.ServerReflectionRequest) error { + v1req := toV1Request(request) + return a.ServerReflection_ServerReflectionInfoClient.Send(v1req) +} + +func (a adaptStreamFromV1) Recv() (*refv1alpha.ServerReflectionResponse, error) { + v1resp, err := a.ServerReflection_ServerReflectionInfoClient.Recv() + if err != nil { + return nil, err + } + return toV1AlphaResponse(v1resp), nil +} diff --git a/vendor/github.com/jhump/protoreflect/grpcreflect/doc.go b/vendor/github.com/jhump/protoreflect/grpcreflect/doc.go new file mode 100644 index 00000000..ec7bd029 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/grpcreflect/doc.go @@ -0,0 +1,10 @@ +// Package grpcreflect provides GRPC-specific extensions to protobuf reflection. +// This includes a way to access rich service descriptors for all services that +// a GRPC server exports. +// +// Also included is an easy-to-use client for the GRPC reflection service +// (https://goo.gl/2ILAHf). This client makes it easy to ask a server (that +// supports the reflection service) for metadata on its exported services, which +// could be used to construct a dynamic client. 
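The package comment above describes the intended workflow: ask a reflection-enabled server for metadata about its exported services and use that to drive a dynamic client. A hedged usage sketch of the client defined in this vendored package, kept separate from the vendored source; the target address, plaintext credentials, and function name are assumptions for illustration only:

```go
package reflectexample

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	"github.com/jhump/protoreflect/grpcreflect"
)

// probeServices lists the services a server exposes via reflection and
// resolves each one into a rich descriptor.
func probeServices(ctx context.Context, target string) error {
	cc, err := grpc.Dial(target, grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		return err
	}
	defer cc.Close()

	// NewClientAuto tries the v1 reflection service and falls back to v1alpha.
	client := grpcreflect.NewClientAuto(ctx, cc)
	defer client.Reset() // close the underlying reflection stream

	names, err := client.ListServices()
	if err != nil {
		return err
	}
	for _, name := range names {
		sd, err := client.ResolveService(name)
		if err != nil {
			return err
		}
		log.Printf("service %s has %d methods", name, len(sd.GetMethods()))
	}
	return nil
}
```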
(See the grpcdynamic package in +// this same repo for more on that.) +package grpcreflect diff --git a/vendor/github.com/jhump/protoreflect/grpcreflect/server.go b/vendor/github.com/jhump/protoreflect/grpcreflect/server.go new file mode 100644 index 00000000..7ff19127 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/grpcreflect/server.go @@ -0,0 +1,67 @@ +package grpcreflect + +import ( + "fmt" + + "google.golang.org/grpc" + "google.golang.org/grpc/reflection" + + "github.com/jhump/protoreflect/desc" +) + +// GRPCServer is the interface provided by a gRPC server. In addition to being a +// service registrar (for registering services and handlers), it also has an +// accessor for retrieving metadata about all registered services. +type GRPCServer = reflection.GRPCServer + +// LoadServiceDescriptors loads the service descriptors for all services exposed by the +// given GRPC server. +func LoadServiceDescriptors(s GRPCServer) (map[string]*desc.ServiceDescriptor, error) { + descs := map[string]*desc.ServiceDescriptor{} + for name, info := range s.GetServiceInfo() { + file, ok := info.Metadata.(string) + if !ok { + return nil, fmt.Errorf("service %q has unexpected metadata: expecting a string; got %v", name, info.Metadata) + } + fd, err := desc.LoadFileDescriptor(file) + if err != nil { + return nil, err + } + d := fd.FindSymbol(name) + if d == nil { + return nil, fmt.Errorf("file descriptor for %q has no element named %q", file, name) + } + sd, ok := d.(*desc.ServiceDescriptor) + if !ok { + return nil, fmt.Errorf("file descriptor for %q has incorrect element named %q: expecting a service descriptor; got %v", file, name, d) + } + descs[name] = sd + } + return descs, nil +} + +// LoadServiceDescriptor loads a rich descriptor for a given service description +// generated by protoc-gen-go. Generated code contains an unexported symbol with +// a name like "__serviceDesc" which is the service's description. It +// is used internally to register a service implementation with a GRPC server. +// But it can also be used by this package to retrieve the rich descriptor for +// the service. +func LoadServiceDescriptor(svc *grpc.ServiceDesc) (*desc.ServiceDescriptor, error) { + file, ok := svc.Metadata.(string) + if !ok { + return nil, fmt.Errorf("service %q has unexpected metadata: expecting a string; got %v", svc.ServiceName, svc.Metadata) + } + fd, err := desc.LoadFileDescriptor(file) + if err != nil { + return nil, err + } + d := fd.FindSymbol(svc.ServiceName) + if d == nil { + return nil, fmt.Errorf("file descriptor for %q has no element named %q", file, svc.ServiceName) + } + sd, ok := d.(*desc.ServiceDescriptor) + if !ok { + return nil, fmt.Errorf("file descriptor for %q has incorrect element named %q: expecting a service descriptor; got %v", file, svc.ServiceName, d) + } + return sd, nil +} diff --git a/vendor/google.golang.org/grpc/reflection/README.md b/vendor/google.golang.org/grpc/reflection/README.md new file mode 100644 index 00000000..9ace83cc --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/README.md @@ -0,0 +1,18 @@ +# Reflection + +Package reflection implements server reflection service. + +The service implemented is defined in: https://github.com/grpc/grpc/blob/master/src/proto/grpc/reflection/v1/reflection.proto. + +To register server reflection on a gRPC server: +```go +import "google.golang.org/grpc/reflection" + +s := grpc.NewServer() +pb.RegisterYourOwnServer(s, &server{}) + +// Register reflection service on gRPC server. 
+reflection.Register(s) + +s.Serve(lis) +``` diff --git a/vendor/google.golang.org/grpc/reflection/adapt.go b/vendor/google.golang.org/grpc/reflection/adapt.go new file mode 100644 index 00000000..33b907a3 --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/adapt.go @@ -0,0 +1,187 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package reflection + +import ( + v1reflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1" + v1reflectionpb "google.golang.org/grpc/reflection/grpc_reflection_v1" + v1alphareflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" + v1alphareflectionpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" +) + +// asV1Alpha returns an implementation of the v1alpha version of the reflection +// interface that delegates all calls to the given v1 version. +func asV1Alpha(svr v1reflectiongrpc.ServerReflectionServer) v1alphareflectiongrpc.ServerReflectionServer { + return v1AlphaServerImpl{svr: svr} +} + +type v1AlphaServerImpl struct { + svr v1reflectiongrpc.ServerReflectionServer +} + +func (s v1AlphaServerImpl) ServerReflectionInfo(stream v1alphareflectiongrpc.ServerReflection_ServerReflectionInfoServer) error { + return s.svr.ServerReflectionInfo(v1AlphaServerStreamAdapter{stream}) +} + +type v1AlphaServerStreamAdapter struct { + v1alphareflectiongrpc.ServerReflection_ServerReflectionInfoServer +} + +func (s v1AlphaServerStreamAdapter) Send(response *v1reflectionpb.ServerReflectionResponse) error { + return s.ServerReflection_ServerReflectionInfoServer.Send(v1ToV1AlphaResponse(response)) +} + +func (s v1AlphaServerStreamAdapter) Recv() (*v1reflectionpb.ServerReflectionRequest, error) { + resp, err := s.ServerReflection_ServerReflectionInfoServer.Recv() + if err != nil { + return nil, err + } + return v1AlphaToV1Request(resp), nil +} + +func v1ToV1AlphaResponse(v1 *v1reflectionpb.ServerReflectionResponse) *v1alphareflectionpb.ServerReflectionResponse { + var v1alpha v1alphareflectionpb.ServerReflectionResponse + v1alpha.ValidHost = v1.ValidHost + if v1.OriginalRequest != nil { + v1alpha.OriginalRequest = v1ToV1AlphaRequest(v1.OriginalRequest) + } + switch mr := v1.MessageResponse.(type) { + case *v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse: + if mr != nil { + v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &v1alphareflectionpb.FileDescriptorResponse{ + FileDescriptorProto: mr.FileDescriptorResponse.GetFileDescriptorProto(), + }, + } + } + case *v1reflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse: + if mr != nil { + v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse{ + AllExtensionNumbersResponse: &v1alphareflectionpb.ExtensionNumberResponse{ + BaseTypeName: mr.AllExtensionNumbersResponse.GetBaseTypeName(), + ExtensionNumber: mr.AllExtensionNumbersResponse.GetExtensionNumber(), + }, + } + } + case 
*v1reflectionpb.ServerReflectionResponse_ListServicesResponse: + if mr != nil { + svcs := make([]*v1alphareflectionpb.ServiceResponse, len(mr.ListServicesResponse.GetService())) + for i, svc := range mr.ListServicesResponse.GetService() { + svcs[i] = &v1alphareflectionpb.ServiceResponse{ + Name: svc.GetName(), + } + } + v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_ListServicesResponse{ + ListServicesResponse: &v1alphareflectionpb.ListServiceResponse{ + Service: svcs, + }, + } + } + case *v1reflectionpb.ServerReflectionResponse_ErrorResponse: + if mr != nil { + v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1alphareflectionpb.ErrorResponse{ + ErrorCode: mr.ErrorResponse.GetErrorCode(), + ErrorMessage: mr.ErrorResponse.GetErrorMessage(), + }, + } + } + default: + // no value set + } + return &v1alpha +} + +func v1AlphaToV1Request(v1alpha *v1alphareflectionpb.ServerReflectionRequest) *v1reflectionpb.ServerReflectionRequest { + var v1 v1reflectionpb.ServerReflectionRequest + v1.Host = v1alpha.Host + switch mr := v1alpha.MessageRequest.(type) { + case *v1alphareflectionpb.ServerReflectionRequest_FileByFilename: + v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_FileByFilename{ + FileByFilename: mr.FileByFilename, + } + case *v1alphareflectionpb.ServerReflectionRequest_FileContainingSymbol: + v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_FileContainingSymbol{ + FileContainingSymbol: mr.FileContainingSymbol, + } + case *v1alphareflectionpb.ServerReflectionRequest_FileContainingExtension: + if mr.FileContainingExtension != nil { + v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_FileContainingExtension{ + FileContainingExtension: &v1reflectionpb.ExtensionRequest{ + ContainingType: mr.FileContainingExtension.GetContainingType(), + ExtensionNumber: mr.FileContainingExtension.GetExtensionNumber(), + }, + } + } + case *v1alphareflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType: + v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType{ + AllExtensionNumbersOfType: mr.AllExtensionNumbersOfType, + } + case *v1alphareflectionpb.ServerReflectionRequest_ListServices: + v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_ListServices{ + ListServices: mr.ListServices, + } + default: + // no value set + } + return &v1 +} + +func v1ToV1AlphaRequest(v1 *v1reflectionpb.ServerReflectionRequest) *v1alphareflectionpb.ServerReflectionRequest { + var v1alpha v1alphareflectionpb.ServerReflectionRequest + v1alpha.Host = v1.Host + switch mr := v1.MessageRequest.(type) { + case *v1reflectionpb.ServerReflectionRequest_FileByFilename: + if mr != nil { + v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_FileByFilename{ + FileByFilename: mr.FileByFilename, + } + } + case *v1reflectionpb.ServerReflectionRequest_FileContainingSymbol: + if mr != nil { + v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_FileContainingSymbol{ + FileContainingSymbol: mr.FileContainingSymbol, + } + } + case *v1reflectionpb.ServerReflectionRequest_FileContainingExtension: + if mr != nil { + v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_FileContainingExtension{ + FileContainingExtension: &v1alphareflectionpb.ExtensionRequest{ + ContainingType: mr.FileContainingExtension.GetContainingType(), + ExtensionNumber: mr.FileContainingExtension.GetExtensionNumber(), + }, + } + } + case 
*v1reflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType: + if mr != nil { + v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType{ + AllExtensionNumbersOfType: mr.AllExtensionNumbersOfType, + } + } + case *v1reflectionpb.ServerReflectionRequest_ListServices: + if mr != nil { + v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_ListServices{ + ListServices: mr.ListServices, + } + } + default: + // no value set + } + return &v1alpha +} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go new file mode 100644 index 00000000..6f5c786b --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go @@ -0,0 +1,953 @@ +// Copyright 2016 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Service exported by server reflection. A more complete description of how +// server reflection works can be found at +// https://github.com/grpc/grpc/blob/master/doc/server-reflection.md +// +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/reflection/v1/reflection.proto + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.31.0 +// protoc v4.22.0 +// source: grpc/reflection/v1/reflection.proto + +package grpc_reflection_v1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The message sent by the client when calling ServerReflectionInfo method. +type ServerReflectionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` + // To use reflection service, the client should set one of the following + // fields in message_request. The server distinguishes requests by their + // defined field and then handles them using corresponding methods. 
+ // + // Types that are assignable to MessageRequest: + // + // *ServerReflectionRequest_FileByFilename + // *ServerReflectionRequest_FileContainingSymbol + // *ServerReflectionRequest_FileContainingExtension + // *ServerReflectionRequest_AllExtensionNumbersOfType + // *ServerReflectionRequest_ListServices + MessageRequest isServerReflectionRequest_MessageRequest `protobuf_oneof:"message_request"` +} + +func (x *ServerReflectionRequest) Reset() { + *x = ServerReflectionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerReflectionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerReflectionRequest) ProtoMessage() {} + +func (x *ServerReflectionRequest) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerReflectionRequest.ProtoReflect.Descriptor instead. +func (*ServerReflectionRequest) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{0} +} + +func (x *ServerReflectionRequest) GetHost() string { + if x != nil { + return x.Host + } + return "" +} + +func (m *ServerReflectionRequest) GetMessageRequest() isServerReflectionRequest_MessageRequest { + if m != nil { + return m.MessageRequest + } + return nil +} + +func (x *ServerReflectionRequest) GetFileByFilename() string { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileByFilename); ok { + return x.FileByFilename + } + return "" +} + +func (x *ServerReflectionRequest) GetFileContainingSymbol() string { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileContainingSymbol); ok { + return x.FileContainingSymbol + } + return "" +} + +func (x *ServerReflectionRequest) GetFileContainingExtension() *ExtensionRequest { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileContainingExtension); ok { + return x.FileContainingExtension + } + return nil +} + +func (x *ServerReflectionRequest) GetAllExtensionNumbersOfType() string { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_AllExtensionNumbersOfType); ok { + return x.AllExtensionNumbersOfType + } + return "" +} + +func (x *ServerReflectionRequest) GetListServices() string { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_ListServices); ok { + return x.ListServices + } + return "" +} + +type isServerReflectionRequest_MessageRequest interface { + isServerReflectionRequest_MessageRequest() +} + +type ServerReflectionRequest_FileByFilename struct { + // Find a proto file by the file name. + FileByFilename string `protobuf:"bytes,3,opt,name=file_by_filename,json=fileByFilename,proto3,oneof"` +} + +type ServerReflectionRequest_FileContainingSymbol struct { + // Find the proto file that declares the given fully-qualified symbol name. + // This field should be a fully-qualified symbol name + // (e.g. .[.] or .). + FileContainingSymbol string `protobuf:"bytes,4,opt,name=file_containing_symbol,json=fileContainingSymbol,proto3,oneof"` +} + +type ServerReflectionRequest_FileContainingExtension struct { + // Find the proto file which defines an extension extending the given + // message type with the given field number. 
+ FileContainingExtension *ExtensionRequest `protobuf:"bytes,5,opt,name=file_containing_extension,json=fileContainingExtension,proto3,oneof"` +} + +type ServerReflectionRequest_AllExtensionNumbersOfType struct { + // Finds the tag numbers used by all known extensions of the given message + // type, and appends them to ExtensionNumberResponse in an undefined order. + // Its corresponding method is best-effort: it's not guaranteed that the + // reflection service will implement this method, and it's not guaranteed + // that this method will provide all extensions. Returns + // StatusCode::UNIMPLEMENTED if it's not implemented. + // This field should be a fully-qualified type name. The format is + // . + AllExtensionNumbersOfType string `protobuf:"bytes,6,opt,name=all_extension_numbers_of_type,json=allExtensionNumbersOfType,proto3,oneof"` +} + +type ServerReflectionRequest_ListServices struct { + // List the full names of registered services. The content will not be + // checked. + ListServices string `protobuf:"bytes,7,opt,name=list_services,json=listServices,proto3,oneof"` +} + +func (*ServerReflectionRequest_FileByFilename) isServerReflectionRequest_MessageRequest() {} + +func (*ServerReflectionRequest_FileContainingSymbol) isServerReflectionRequest_MessageRequest() {} + +func (*ServerReflectionRequest_FileContainingExtension) isServerReflectionRequest_MessageRequest() {} + +func (*ServerReflectionRequest_AllExtensionNumbersOfType) isServerReflectionRequest_MessageRequest() { +} + +func (*ServerReflectionRequest_ListServices) isServerReflectionRequest_MessageRequest() {} + +// The type name and extension number sent by the client when requesting +// file_containing_extension. +type ExtensionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Fully-qualified type name. The format should be . + ContainingType string `protobuf:"bytes,1,opt,name=containing_type,json=containingType,proto3" json:"containing_type,omitempty"` + ExtensionNumber int32 `protobuf:"varint,2,opt,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"` +} + +func (x *ExtensionRequest) Reset() { + *x = ExtensionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExtensionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExtensionRequest) ProtoMessage() {} + +func (x *ExtensionRequest) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExtensionRequest.ProtoReflect.Descriptor instead. +func (*ExtensionRequest) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{1} +} + +func (x *ExtensionRequest) GetContainingType() string { + if x != nil { + return x.ContainingType + } + return "" +} + +func (x *ExtensionRequest) GetExtensionNumber() int32 { + if x != nil { + return x.ExtensionNumber + } + return 0 +} + +// The message sent by the server to answer ServerReflectionInfo method. 
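The wrapper types above implement the message_request oneof: a request sets exactly one variant by assigning its generated struct, and the getters for the other variants return zero values. A short sketch, separate from the generated file (the helper names are illustrative only):

```go
package reflectexample

import (
	"fmt"

	refv1 "google.golang.org/grpc/reflection/grpc_reflection_v1"
)

// buildSymbolRequest populates the file_containing_symbol variant of the oneof.
func buildSymbolRequest(symbol string) *refv1.ServerReflectionRequest {
	return &refv1.ServerReflectionRequest{
		MessageRequest: &refv1.ServerReflectionRequest_FileContainingSymbol{
			FileContainingSymbol: symbol,
		},
	}
}

func describe(req *refv1.ServerReflectionRequest) {
	// The getter for the populated variant returns its value...
	fmt.Println("symbol:", req.GetFileContainingSymbol())
	// ...while getters for unset variants return their zero value.
	fmt.Println("filename:", req.GetFileByFilename())
}
```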
+type ServerReflectionResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ValidHost string `protobuf:"bytes,1,opt,name=valid_host,json=validHost,proto3" json:"valid_host,omitempty"` + OriginalRequest *ServerReflectionRequest `protobuf:"bytes,2,opt,name=original_request,json=originalRequest,proto3" json:"original_request,omitempty"` + // The server sets one of the following fields according to the message_request + // in the request. + // + // Types that are assignable to MessageResponse: + // + // *ServerReflectionResponse_FileDescriptorResponse + // *ServerReflectionResponse_AllExtensionNumbersResponse + // *ServerReflectionResponse_ListServicesResponse + // *ServerReflectionResponse_ErrorResponse + MessageResponse isServerReflectionResponse_MessageResponse `protobuf_oneof:"message_response"` +} + +func (x *ServerReflectionResponse) Reset() { + *x = ServerReflectionResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerReflectionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerReflectionResponse) ProtoMessage() {} + +func (x *ServerReflectionResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerReflectionResponse.ProtoReflect.Descriptor instead. +func (*ServerReflectionResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{2} +} + +func (x *ServerReflectionResponse) GetValidHost() string { + if x != nil { + return x.ValidHost + } + return "" +} + +func (x *ServerReflectionResponse) GetOriginalRequest() *ServerReflectionRequest { + if x != nil { + return x.OriginalRequest + } + return nil +} + +func (m *ServerReflectionResponse) GetMessageResponse() isServerReflectionResponse_MessageResponse { + if m != nil { + return m.MessageResponse + } + return nil +} + +func (x *ServerReflectionResponse) GetFileDescriptorResponse() *FileDescriptorResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_FileDescriptorResponse); ok { + return x.FileDescriptorResponse + } + return nil +} + +func (x *ServerReflectionResponse) GetAllExtensionNumbersResponse() *ExtensionNumberResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_AllExtensionNumbersResponse); ok { + return x.AllExtensionNumbersResponse + } + return nil +} + +func (x *ServerReflectionResponse) GetListServicesResponse() *ListServiceResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_ListServicesResponse); ok { + return x.ListServicesResponse + } + return nil +} + +func (x *ServerReflectionResponse) GetErrorResponse() *ErrorResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_ErrorResponse); ok { + return x.ErrorResponse + } + return nil +} + +type isServerReflectionResponse_MessageResponse interface { + isServerReflectionResponse_MessageResponse() +} + +type ServerReflectionResponse_FileDescriptorResponse struct { + // This message is used to answer file_by_filename, file_containing_symbol, + // file_containing_extension requests with transitive 
dependencies. + // As the repeated label is not allowed in oneof fields, we use a + // FileDescriptorResponse message to encapsulate the repeated fields. + // The reflection service is allowed to avoid sending FileDescriptorProtos + // that were previously sent in response to earlier requests in the stream. + FileDescriptorResponse *FileDescriptorResponse `protobuf:"bytes,4,opt,name=file_descriptor_response,json=fileDescriptorResponse,proto3,oneof"` +} + +type ServerReflectionResponse_AllExtensionNumbersResponse struct { + // This message is used to answer all_extension_numbers_of_type requests. + AllExtensionNumbersResponse *ExtensionNumberResponse `protobuf:"bytes,5,opt,name=all_extension_numbers_response,json=allExtensionNumbersResponse,proto3,oneof"` +} + +type ServerReflectionResponse_ListServicesResponse struct { + // This message is used to answer list_services requests. + ListServicesResponse *ListServiceResponse `protobuf:"bytes,6,opt,name=list_services_response,json=listServicesResponse,proto3,oneof"` +} + +type ServerReflectionResponse_ErrorResponse struct { + // This message is used when an error occurs. + ErrorResponse *ErrorResponse `protobuf:"bytes,7,opt,name=error_response,json=errorResponse,proto3,oneof"` +} + +func (*ServerReflectionResponse_FileDescriptorResponse) isServerReflectionResponse_MessageResponse() { +} + +func (*ServerReflectionResponse_AllExtensionNumbersResponse) isServerReflectionResponse_MessageResponse() { +} + +func (*ServerReflectionResponse_ListServicesResponse) isServerReflectionResponse_MessageResponse() {} + +func (*ServerReflectionResponse_ErrorResponse) isServerReflectionResponse_MessageResponse() {} + +// Serialized FileDescriptorProto messages sent by the server answering +// a file_by_filename, file_containing_symbol, or file_containing_extension +// request. +type FileDescriptorResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Serialized FileDescriptorProto messages. We avoid taking a dependency on + // descriptor.proto, which uses proto2 only features, by making them opaque + // bytes instead. + FileDescriptorProto [][]byte `protobuf:"bytes,1,rep,name=file_descriptor_proto,json=fileDescriptorProto,proto3" json:"file_descriptor_proto,omitempty"` +} + +func (x *FileDescriptorResponse) Reset() { + *x = FileDescriptorResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FileDescriptorResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FileDescriptorResponse) ProtoMessage() {} + +func (x *FileDescriptorResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FileDescriptorResponse.ProtoReflect.Descriptor instead. +func (*FileDescriptorResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{3} +} + +func (x *FileDescriptorResponse) GetFileDescriptorProto() [][]byte { + if x != nil { + return x.FileDescriptorProto + } + return nil +} + +// A list of extension numbers sent by the server answering +// all_extension_numbers_of_type request. 
+type ExtensionNumberResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Full name of the base type, including the package name. The format + // is <package>.<type> + BaseTypeName string `protobuf:"bytes,1,opt,name=base_type_name,json=baseTypeName,proto3" json:"base_type_name,omitempty"` + ExtensionNumber []int32 `protobuf:"varint,2,rep,packed,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"` +} + +func (x *ExtensionNumberResponse) Reset() { + *x = ExtensionNumberResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExtensionNumberResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExtensionNumberResponse) ProtoMessage() {} + +func (x *ExtensionNumberResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExtensionNumberResponse.ProtoReflect.Descriptor instead. +func (*ExtensionNumberResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{4} +} + +func (x *ExtensionNumberResponse) GetBaseTypeName() string { + if x != nil { + return x.BaseTypeName + } + return "" +} + +func (x *ExtensionNumberResponse) GetExtensionNumber() []int32 { + if x != nil { + return x.ExtensionNumber + } + return nil +} + +// A list of ServiceResponse sent by the server answering list_services request. +type ListServiceResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The information of each service may be expanded in the future, so we use + // ServiceResponse message to encapsulate it. + Service []*ServiceResponse `protobuf:"bytes,1,rep,name=service,proto3" json:"service,omitempty"` +} + +func (x *ListServiceResponse) Reset() { + *x = ListServiceResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListServiceResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListServiceResponse) ProtoMessage() {} + +func (x *ListServiceResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListServiceResponse.ProtoReflect.Descriptor instead. +func (*ListServiceResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{5} +} + +func (x *ListServiceResponse) GetService() []*ServiceResponse { + if x != nil { + return x.Service + } + return nil +} + +// The information of a single service used by ListServiceResponse to answer +// list_services request.
+type ServiceResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Full name of a registered service, including its package name. The format + // is <package>.<service> + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *ServiceResponse) Reset() { + *x = ServiceResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServiceResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServiceResponse) ProtoMessage() {} + +func (x *ServiceResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServiceResponse.ProtoReflect.Descriptor instead. +func (*ServiceResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{6} +} + +func (x *ServiceResponse) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// The error code and error message sent by the server when an error occurs. +type ErrorResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This field uses the error codes defined in grpc::StatusCode. + ErrorCode int32 `protobuf:"varint,1,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"` + ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` +} + +func (x *ErrorResponse) Reset() { + *x = ErrorResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ErrorResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ErrorResponse) ProtoMessage() {} + +func (x *ErrorResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ErrorResponse.ProtoReflect.Descriptor instead.
+func (*ErrorResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{7} +} + +func (x *ErrorResponse) GetErrorCode() int32 { + if x != nil { + return x.ErrorCode + } + return 0 +} + +func (x *ErrorResponse) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + +var File_grpc_reflection_v1_reflection_proto protoreflect.FileDescriptor + +var file_grpc_reflection_v1_reflection_proto_rawDesc = []byte{ + 0x0a, 0x23, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x22, 0xf3, 0x02, 0x0a, 0x17, 0x53, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x2a, 0x0a, 0x10, 0x66, 0x69, 0x6c, + 0x65, 0x5f, 0x62, 0x79, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0e, 0x66, 0x69, 0x6c, 0x65, 0x42, 0x79, 0x46, 0x69, 0x6c, + 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x36, 0x0a, 0x16, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x14, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x53, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x12, 0x62, 0x0a, + 0x19, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, + 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x24, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x17, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x42, 0x0a, 0x1d, 0x61, 0x6c, 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x19, 0x61, 0x6c, 0x6c, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x4f, + 0x66, 0x54, 0x79, 0x70, 0x65, 0x12, 0x25, 0x0a, 0x0d, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, + 0x6c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x42, 0x11, 0x0a, 0x0f, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, + 0x66, 0x0a, 0x10, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, + 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, + 0x65, 0x78, 0x74, 0x65, 
0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0xae, 0x04, 0x0a, 0x18, 0x53, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x6f, + 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, + 0x6f, 0x73, 0x74, 0x12, 0x56, 0x0a, 0x10, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x5f, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0f, 0x6f, 0x72, 0x69, 0x67, + 0x69, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x66, 0x0a, 0x18, 0x66, + 0x69, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x72, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x76, 0x31, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x16, 0x66, 0x69, 0x6c, + 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x72, 0x0a, 0x1e, 0x61, 0x6c, 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x5f, 0x72, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, + 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x1b, 0x61, 0x6c, 0x6c, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5f, 0x0a, 0x16, 0x6c, 0x69, 0x73, 0x74, 0x5f, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, + 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, + 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x48, 0x00, 0x52, 0x14, 0x6c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x0e, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x12, 0x0a, 0x10, 
0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, + 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x4c, 0x0a, 0x16, 0x46, 0x69, 0x6c, 0x65, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0c, 0x52, 0x13, 0x66, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6a, 0x0a, 0x17, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x54, + 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x05, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, + 0x65, 0x72, 0x22, 0x54, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3d, 0x0a, 0x07, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, + 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0x25, 0x0a, 0x0f, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, + 0x53, 0x0a, 0x0d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x12, + 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x32, 0x89, 0x01, 0x0a, 0x10, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, + 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x75, 0x0a, 0x14, 0x53, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, + 0x6f, 0x12, 0x2b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, + 0x42, 0x66, 0x0a, 0x15, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x42, 0x15, 0x53, 
0x65, 0x72, 0x76, 0x65, + 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x50, 0x01, 0x5a, 0x34, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, + 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x72, 0x65, 0x66, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_grpc_reflection_v1_reflection_proto_rawDescOnce sync.Once + file_grpc_reflection_v1_reflection_proto_rawDescData = file_grpc_reflection_v1_reflection_proto_rawDesc +) + +func file_grpc_reflection_v1_reflection_proto_rawDescGZIP() []byte { + file_grpc_reflection_v1_reflection_proto_rawDescOnce.Do(func() { + file_grpc_reflection_v1_reflection_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_reflection_v1_reflection_proto_rawDescData) + }) + return file_grpc_reflection_v1_reflection_proto_rawDescData +} + +var file_grpc_reflection_v1_reflection_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_grpc_reflection_v1_reflection_proto_goTypes = []interface{}{ + (*ServerReflectionRequest)(nil), // 0: grpc.reflection.v1.ServerReflectionRequest + (*ExtensionRequest)(nil), // 1: grpc.reflection.v1.ExtensionRequest + (*ServerReflectionResponse)(nil), // 2: grpc.reflection.v1.ServerReflectionResponse + (*FileDescriptorResponse)(nil), // 3: grpc.reflection.v1.FileDescriptorResponse + (*ExtensionNumberResponse)(nil), // 4: grpc.reflection.v1.ExtensionNumberResponse + (*ListServiceResponse)(nil), // 5: grpc.reflection.v1.ListServiceResponse + (*ServiceResponse)(nil), // 6: grpc.reflection.v1.ServiceResponse + (*ErrorResponse)(nil), // 7: grpc.reflection.v1.ErrorResponse +} +var file_grpc_reflection_v1_reflection_proto_depIdxs = []int32{ + 1, // 0: grpc.reflection.v1.ServerReflectionRequest.file_containing_extension:type_name -> grpc.reflection.v1.ExtensionRequest + 0, // 1: grpc.reflection.v1.ServerReflectionResponse.original_request:type_name -> grpc.reflection.v1.ServerReflectionRequest + 3, // 2: grpc.reflection.v1.ServerReflectionResponse.file_descriptor_response:type_name -> grpc.reflection.v1.FileDescriptorResponse + 4, // 3: grpc.reflection.v1.ServerReflectionResponse.all_extension_numbers_response:type_name -> grpc.reflection.v1.ExtensionNumberResponse + 5, // 4: grpc.reflection.v1.ServerReflectionResponse.list_services_response:type_name -> grpc.reflection.v1.ListServiceResponse + 7, // 5: grpc.reflection.v1.ServerReflectionResponse.error_response:type_name -> grpc.reflection.v1.ErrorResponse + 6, // 6: grpc.reflection.v1.ListServiceResponse.service:type_name -> grpc.reflection.v1.ServiceResponse + 0, // 7: grpc.reflection.v1.ServerReflection.ServerReflectionInfo:input_type -> grpc.reflection.v1.ServerReflectionRequest + 2, // 8: grpc.reflection.v1.ServerReflection.ServerReflectionInfo:output_type -> grpc.reflection.v1.ServerReflectionResponse + 8, // [8:9] is the sub-list for method output_type + 7, // [7:8] is the sub-list for method input_type + 7, // [7:7] is the sub-list for extension type_name + 7, // [7:7] is the sub-list for extension extendee + 0, // [0:7] is the sub-list for field type_name +} + +func init() { file_grpc_reflection_v1_reflection_proto_init() } +func file_grpc_reflection_v1_reflection_proto_init() { + if File_grpc_reflection_v1_reflection_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + 
file_grpc_reflection_v1_reflection_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerReflectionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExtensionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerReflectionResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileDescriptorResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExtensionNumberResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListServiceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServiceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ErrorResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*ServerReflectionRequest_FileByFilename)(nil), + (*ServerReflectionRequest_FileContainingSymbol)(nil), + (*ServerReflectionRequest_FileContainingExtension)(nil), + (*ServerReflectionRequest_AllExtensionNumbersOfType)(nil), + (*ServerReflectionRequest_ListServices)(nil), + } + file_grpc_reflection_v1_reflection_proto_msgTypes[2].OneofWrappers = []interface{}{ + (*ServerReflectionResponse_FileDescriptorResponse)(nil), + (*ServerReflectionResponse_AllExtensionNumbersResponse)(nil), + (*ServerReflectionResponse_ListServicesResponse)(nil), + (*ServerReflectionResponse_ErrorResponse)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_grpc_reflection_v1_reflection_proto_rawDesc, + NumEnums: 0, + NumMessages: 8, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_grpc_reflection_v1_reflection_proto_goTypes, + DependencyIndexes: file_grpc_reflection_v1_reflection_proto_depIdxs, + MessageInfos: file_grpc_reflection_v1_reflection_proto_msgTypes, + }.Build() + File_grpc_reflection_v1_reflection_proto = out.File + file_grpc_reflection_v1_reflection_proto_rawDesc = nil + 
file_grpc_reflection_v1_reflection_proto_goTypes = nil + file_grpc_reflection_v1_reflection_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go new file mode 100644 index 00000000..62b56a8b --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go @@ -0,0 +1,164 @@ +// Copyright 2016 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Service exported by server reflection. A more complete description of how +// server reflection works can be found at +// https://github.com/grpc/grpc/blob/master/doc/server-reflection.md +// +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/reflection/v1/reflection.proto + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.22.0 +// source: grpc/reflection/v1/reflection.proto + +package grpc_reflection_v1 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + ServerReflection_ServerReflectionInfo_FullMethodName = "/grpc.reflection.v1.ServerReflection/ServerReflectionInfo" +) + +// ServerReflectionClient is the client API for ServerReflection service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type ServerReflectionClient interface { + // The reflection service is structured as a bidirectional stream, ensuring + // all related requests go to a single server. + ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) +} + +type serverReflectionClient struct { + cc grpc.ClientConnInterface +} + +func NewServerReflectionClient(cc grpc.ClientConnInterface) ServerReflectionClient { + return &serverReflectionClient{cc} +} + +func (c *serverReflectionClient) ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) { + stream, err := c.cc.NewStream(ctx, &ServerReflection_ServiceDesc.Streams[0], ServerReflection_ServerReflectionInfo_FullMethodName, opts...) 
+ if err != nil { + return nil, err + } + x := &serverReflectionServerReflectionInfoClient{stream} + return x, nil +} + +type ServerReflection_ServerReflectionInfoClient interface { + Send(*ServerReflectionRequest) error + Recv() (*ServerReflectionResponse, error) + grpc.ClientStream +} + +type serverReflectionServerReflectionInfoClient struct { + grpc.ClientStream +} + +func (x *serverReflectionServerReflectionInfoClient) Send(m *ServerReflectionRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *serverReflectionServerReflectionInfoClient) Recv() (*ServerReflectionResponse, error) { + m := new(ServerReflectionResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// ServerReflectionServer is the server API for ServerReflection service. +// All implementations should embed UnimplementedServerReflectionServer +// for forward compatibility +type ServerReflectionServer interface { + // The reflection service is structured as a bidirectional stream, ensuring + // all related requests go to a single server. + ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error +} + +// UnimplementedServerReflectionServer should be embedded to have forward compatible implementations. +type UnimplementedServerReflectionServer struct { +} + +func (UnimplementedServerReflectionServer) ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error { + return status.Errorf(codes.Unimplemented, "method ServerReflectionInfo not implemented") +} + +// UnsafeServerReflectionServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to ServerReflectionServer will +// result in compilation errors. +type UnsafeServerReflectionServer interface { + mustEmbedUnimplementedServerReflectionServer() +} + +func RegisterServerReflectionServer(s grpc.ServiceRegistrar, srv ServerReflectionServer) { + s.RegisterService(&ServerReflection_ServiceDesc, srv) +} + +func _ServerReflection_ServerReflectionInfo_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(ServerReflectionServer).ServerReflectionInfo(&serverReflectionServerReflectionInfoServer{stream}) +} + +type ServerReflection_ServerReflectionInfoServer interface { + Send(*ServerReflectionResponse) error + Recv() (*ServerReflectionRequest, error) + grpc.ServerStream +} + +type serverReflectionServerReflectionInfoServer struct { + grpc.ServerStream +} + +func (x *serverReflectionServerReflectionInfoServer) Send(m *ServerReflectionResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *serverReflectionServerReflectionInfoServer) Recv() (*ServerReflectionRequest, error) { + m := new(ServerReflectionRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// ServerReflection_ServiceDesc is the grpc.ServiceDesc for ServerReflection service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var ServerReflection_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.reflection.v1.ServerReflection", + HandlerType: (*ServerReflectionServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "ServerReflectionInfo", + Handler: _ServerReflection_ServerReflectionInfo_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "grpc/reflection/v1/reflection.proto", +} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go new file mode 100644 index 00000000..69fbfb62 --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go @@ -0,0 +1,1028 @@ +// Copyright 2016 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// Service exported by server reflection + +// Warning: this entire file is deprecated. Use this instead: +// https://github.com/grpc/grpc-proto/blob/master/grpc/reflection/v1/reflection.proto + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.31.0 +// protoc v4.22.0 +// grpc/reflection/v1alpha/reflection.proto is a deprecated file. + +package grpc_reflection_v1alpha + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The message sent by the client when calling ServerReflectionInfo method. +// +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +type ServerReflectionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` + // To use reflection service, the client should set one of the following + // fields in message_request. The server distinguishes requests by their + // defined field and then handles them using corresponding methods. 
+ // + // Types that are assignable to MessageRequest: + // + // *ServerReflectionRequest_FileByFilename + // *ServerReflectionRequest_FileContainingSymbol + // *ServerReflectionRequest_FileContainingExtension + // *ServerReflectionRequest_AllExtensionNumbersOfType + // *ServerReflectionRequest_ListServices + MessageRequest isServerReflectionRequest_MessageRequest `protobuf_oneof:"message_request"` +} + +func (x *ServerReflectionRequest) Reset() { + *x = ServerReflectionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerReflectionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerReflectionRequest) ProtoMessage() {} + +func (x *ServerReflectionRequest) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerReflectionRequest.ProtoReflect.Descriptor instead. +func (*ServerReflectionRequest) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{0} +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *ServerReflectionRequest) GetHost() string { + if x != nil { + return x.Host + } + return "" +} + +func (m *ServerReflectionRequest) GetMessageRequest() isServerReflectionRequest_MessageRequest { + if m != nil { + return m.MessageRequest + } + return nil +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *ServerReflectionRequest) GetFileByFilename() string { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileByFilename); ok { + return x.FileByFilename + } + return "" +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *ServerReflectionRequest) GetFileContainingSymbol() string { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileContainingSymbol); ok { + return x.FileContainingSymbol + } + return "" +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *ServerReflectionRequest) GetFileContainingExtension() *ExtensionRequest { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileContainingExtension); ok { + return x.FileContainingExtension + } + return nil +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *ServerReflectionRequest) GetAllExtensionNumbersOfType() string { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_AllExtensionNumbersOfType); ok { + return x.AllExtensionNumbersOfType + } + return "" +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
+func (x *ServerReflectionRequest) GetListServices() string { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_ListServices); ok { + return x.ListServices + } + return "" +} + +type isServerReflectionRequest_MessageRequest interface { + isServerReflectionRequest_MessageRequest() +} + +type ServerReflectionRequest_FileByFilename struct { + // Find a proto file by the file name. + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + FileByFilename string `protobuf:"bytes,3,opt,name=file_by_filename,json=fileByFilename,proto3,oneof"` +} + +type ServerReflectionRequest_FileContainingSymbol struct { + // Find the proto file that declares the given fully-qualified symbol name. + // This field should be a fully-qualified symbol name + // (e.g. <package>.<service>[.<method>] or <package>.<type>). + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + FileContainingSymbol string `protobuf:"bytes,4,opt,name=file_containing_symbol,json=fileContainingSymbol,proto3,oneof"` +} + +type ServerReflectionRequest_FileContainingExtension struct { + // Find the proto file which defines an extension extending the given + // message type with the given field number. + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + FileContainingExtension *ExtensionRequest `protobuf:"bytes,5,opt,name=file_containing_extension,json=fileContainingExtension,proto3,oneof"` +} + +type ServerReflectionRequest_AllExtensionNumbersOfType struct { + // Finds the tag numbers used by all known extensions of extendee_type, and + // appends them to ExtensionNumberResponse in an undefined order. + // Its corresponding method is best-effort: it's not guaranteed that the + // reflection service will implement this method, and it's not guaranteed + // that this method will provide all extensions. Returns + // StatusCode::UNIMPLEMENTED if it's not implemented. + // This field should be a fully-qualified type name. The format is + // <package>.<type> + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + AllExtensionNumbersOfType string `protobuf:"bytes,6,opt,name=all_extension_numbers_of_type,json=allExtensionNumbersOfType,proto3,oneof"` +} + +type ServerReflectionRequest_ListServices struct { + // List the full names of registered services. The content will not be + // checked. + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + ListServices string `protobuf:"bytes,7,opt,name=list_services,json=listServices,proto3,oneof"` +} + +func (*ServerReflectionRequest_FileByFilename) isServerReflectionRequest_MessageRequest() {} + +func (*ServerReflectionRequest_FileContainingSymbol) isServerReflectionRequest_MessageRequest() {} + +func (*ServerReflectionRequest_FileContainingExtension) isServerReflectionRequest_MessageRequest() {} + +func (*ServerReflectionRequest_AllExtensionNumbersOfType) isServerReflectionRequest_MessageRequest() { +} + +func (*ServerReflectionRequest_ListServices) isServerReflectionRequest_MessageRequest() {} + +// The type name and extension number sent by the client when requesting +// file_containing_extension. +// +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +type ExtensionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Fully-qualified type name.
The format should be <package>.<type> + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + ContainingType string `protobuf:"bytes,1,opt,name=containing_type,json=containingType,proto3" json:"containing_type,omitempty"` + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + ExtensionNumber int32 `protobuf:"varint,2,opt,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"` +} + +func (x *ExtensionRequest) Reset() { + *x = ExtensionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExtensionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExtensionRequest) ProtoMessage() {} + +func (x *ExtensionRequest) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExtensionRequest.ProtoReflect.Descriptor instead. +func (*ExtensionRequest) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{1} +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *ExtensionRequest) GetContainingType() string { + if x != nil { + return x.ContainingType + } + return "" +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *ExtensionRequest) GetExtensionNumber() int32 { + if x != nil { + return x.ExtensionNumber + } + return 0 +} + +// The message sent by the server to answer ServerReflectionInfo method. +// +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +type ServerReflectionResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + ValidHost string `protobuf:"bytes,1,opt,name=valid_host,json=validHost,proto3" json:"valid_host,omitempty"` + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + OriginalRequest *ServerReflectionRequest `protobuf:"bytes,2,opt,name=original_request,json=originalRequest,proto3" json:"original_request,omitempty"` + // The server set one of the following fields according to the message_request + // in the request.
+ // + // Types that are assignable to MessageResponse: + // + // *ServerReflectionResponse_FileDescriptorResponse + // *ServerReflectionResponse_AllExtensionNumbersResponse + // *ServerReflectionResponse_ListServicesResponse + // *ServerReflectionResponse_ErrorResponse + MessageResponse isServerReflectionResponse_MessageResponse `protobuf_oneof:"message_response"` +} + +func (x *ServerReflectionResponse) Reset() { + *x = ServerReflectionResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerReflectionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerReflectionResponse) ProtoMessage() {} + +func (x *ServerReflectionResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerReflectionResponse.ProtoReflect.Descriptor instead. +func (*ServerReflectionResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{2} +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *ServerReflectionResponse) GetValidHost() string { + if x != nil { + return x.ValidHost + } + return "" +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *ServerReflectionResponse) GetOriginalRequest() *ServerReflectionRequest { + if x != nil { + return x.OriginalRequest + } + return nil +} + +func (m *ServerReflectionResponse) GetMessageResponse() isServerReflectionResponse_MessageResponse { + if m != nil { + return m.MessageResponse + } + return nil +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *ServerReflectionResponse) GetFileDescriptorResponse() *FileDescriptorResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_FileDescriptorResponse); ok { + return x.FileDescriptorResponse + } + return nil +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *ServerReflectionResponse) GetAllExtensionNumbersResponse() *ExtensionNumberResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_AllExtensionNumbersResponse); ok { + return x.AllExtensionNumbersResponse + } + return nil +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *ServerReflectionResponse) GetListServicesResponse() *ListServiceResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_ListServicesResponse); ok { + return x.ListServicesResponse + } + return nil +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
+func (x *ServerReflectionResponse) GetErrorResponse() *ErrorResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_ErrorResponse); ok { + return x.ErrorResponse + } + return nil +} + +type isServerReflectionResponse_MessageResponse interface { + isServerReflectionResponse_MessageResponse() +} + +type ServerReflectionResponse_FileDescriptorResponse struct { + // This message is used to answer file_by_filename, file_containing_symbol, + // file_containing_extension requests with transitive dependencies. As + // the repeated label is not allowed in oneof fields, we use a + // FileDescriptorResponse message to encapsulate the repeated fields. + // The reflection service is allowed to avoid sending FileDescriptorProtos + // that were previously sent in response to earlier requests in the stream. + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + FileDescriptorResponse *FileDescriptorResponse `protobuf:"bytes,4,opt,name=file_descriptor_response,json=fileDescriptorResponse,proto3,oneof"` +} + +type ServerReflectionResponse_AllExtensionNumbersResponse struct { + // This message is used to answer all_extension_numbers_of_type requst. + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + AllExtensionNumbersResponse *ExtensionNumberResponse `protobuf:"bytes,5,opt,name=all_extension_numbers_response,json=allExtensionNumbersResponse,proto3,oneof"` +} + +type ServerReflectionResponse_ListServicesResponse struct { + // This message is used to answer list_services request. + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + ListServicesResponse *ListServiceResponse `protobuf:"bytes,6,opt,name=list_services_response,json=listServicesResponse,proto3,oneof"` +} + +type ServerReflectionResponse_ErrorResponse struct { + // This message is used when an error occurs. + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + ErrorResponse *ErrorResponse `protobuf:"bytes,7,opt,name=error_response,json=errorResponse,proto3,oneof"` +} + +func (*ServerReflectionResponse_FileDescriptorResponse) isServerReflectionResponse_MessageResponse() { +} + +func (*ServerReflectionResponse_AllExtensionNumbersResponse) isServerReflectionResponse_MessageResponse() { +} + +func (*ServerReflectionResponse_ListServicesResponse) isServerReflectionResponse_MessageResponse() {} + +func (*ServerReflectionResponse_ErrorResponse) isServerReflectionResponse_MessageResponse() {} + +// Serialized FileDescriptorProto messages sent by the server answering +// a file_by_filename, file_containing_symbol, or file_containing_extension +// request. +// +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +type FileDescriptorResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Serialized FileDescriptorProto messages. We avoid taking a dependency on + // descriptor.proto, which uses proto2 only features, by making them opaque + // bytes instead. + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
+ FileDescriptorProto [][]byte `protobuf:"bytes,1,rep,name=file_descriptor_proto,json=fileDescriptorProto,proto3" json:"file_descriptor_proto,omitempty"` +} + +func (x *FileDescriptorResponse) Reset() { + *x = FileDescriptorResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FileDescriptorResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FileDescriptorResponse) ProtoMessage() {} + +func (x *FileDescriptorResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FileDescriptorResponse.ProtoReflect.Descriptor instead. +func (*FileDescriptorResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{3} +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *FileDescriptorResponse) GetFileDescriptorProto() [][]byte { + if x != nil { + return x.FileDescriptorProto + } + return nil +} + +// A list of extension numbers sent by the server answering +// all_extension_numbers_of_type request. +// +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +type ExtensionNumberResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Full name of the base type, including the package name. The format + // is <package>.<type> + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + BaseTypeName string `protobuf:"bytes,1,opt,name=base_type_name,json=baseTypeName,proto3" json:"base_type_name,omitempty"` + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + ExtensionNumber []int32 `protobuf:"varint,2,rep,packed,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"` +} + +func (x *ExtensionNumberResponse) Reset() { + *x = ExtensionNumberResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExtensionNumberResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExtensionNumberResponse) ProtoMessage() {} + +func (x *ExtensionNumberResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExtensionNumberResponse.ProtoReflect.Descriptor instead. +func (*ExtensionNumberResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{4} +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+func (x *ExtensionNumberResponse) GetBaseTypeName() string { + if x != nil { + return x.BaseTypeName + } + return "" +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *ExtensionNumberResponse) GetExtensionNumber() []int32 { + if x != nil { + return x.ExtensionNumber + } + return nil +} + +// A list of ServiceResponse sent by the server answering list_services request. +// +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +type ListServiceResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The information of each service may be expanded in the future, so we use + // ServiceResponse message to encapsulate it. + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + Service []*ServiceResponse `protobuf:"bytes,1,rep,name=service,proto3" json:"service,omitempty"` +} + +func (x *ListServiceResponse) Reset() { + *x = ListServiceResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListServiceResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListServiceResponse) ProtoMessage() {} + +func (x *ListServiceResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListServiceResponse.ProtoReflect.Descriptor instead. +func (*ListServiceResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{5} +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *ListServiceResponse) GetService() []*ServiceResponse { + if x != nil { + return x.Service + } + return nil +} + +// The information of a single service used by ListServiceResponse to answer +// list_services request. +// +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +type ServiceResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Full name of a registered service, including its package name. The format + // is <package>.<service> + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *ServiceResponse) Reset() { + *x = ServiceResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServiceResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServiceResponse) ProtoMessage() {} + +func (x *ServiceResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServiceResponse.ProtoReflect.Descriptor instead. +func (*ServiceResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{6} +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *ServiceResponse) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// The error code and error message sent by the server when an error occurs. +// +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +type ErrorResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This field uses the error codes defined in grpc::StatusCode. + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + ErrorCode int32 `protobuf:"varint,1,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"` + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` +} + +func (x *ErrorResponse) Reset() { + *x = ErrorResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ErrorResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ErrorResponse) ProtoMessage() {} + +func (x *ErrorResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ErrorResponse.ProtoReflect.Descriptor instead. +func (*ErrorResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{7} +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *ErrorResponse) GetErrorCode() int32 { + if x != nil { + return x.ErrorCode + } + return 0 +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
+func (x *ErrorResponse) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + +var File_grpc_reflection_v1alpha_reflection_proto protoreflect.FileDescriptor + +var file_grpc_reflection_v1alpha_reflection_proto_rawDesc = []byte{ + 0x0a, 0x28, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x17, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, + 0x70, 0x68, 0x61, 0x22, 0xf8, 0x02, 0x0a, 0x17, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, + 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, + 0x6f, 0x73, 0x74, 0x12, 0x2a, 0x0a, 0x10, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x62, 0x79, 0x5f, 0x66, + 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, + 0x0e, 0x66, 0x69, 0x6c, 0x65, 0x42, 0x79, 0x46, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x36, 0x0a, 0x16, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, + 0x6e, 0x67, 0x5f, 0x73, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, + 0x00, 0x52, 0x14, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, + 0x67, 0x53, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x12, 0x67, 0x0a, 0x19, 0x66, 0x69, 0x6c, 0x65, 0x5f, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x17, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x42, 0x0a, 0x1d, 0x61, 0x6c, 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x19, 0x61, 0x6c, 0x6c, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x4f, 0x66, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x25, 0x0a, 0x0d, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, 0x6c, + 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x42, 0x11, 0x0a, 0x0f, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x66, + 0x0a, 0x10, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, 0x65, 0x78, 
0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0xc7, 0x04, 0x0a, 0x18, 0x53, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x6f, 0x73, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, 0x6f, + 0x73, 0x74, 0x12, 0x5b, 0x0a, 0x10, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, + 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0f, + 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x6b, 0x0a, 0x18, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x46, 0x69, 0x6c, 0x65, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x48, 0x00, 0x52, 0x16, 0x66, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x77, 0x0a, 0x1e, + 0x61, 0x6c, 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x1b, 0x61, 0x6c, 0x6c, 0x45, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x64, 0x0a, 0x16, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, + 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x14, 0x6c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4f, 0x0a, 0x0e, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x45, 0x72, + 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x0d, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 
0x12, 0x0a, 0x10, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x4c, 0x0a, 0x16, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x66, 0x69, + 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x13, 0x66, 0x69, 0x6c, 0x65, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6a, + 0x0a, 0x17, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, + 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x62, 0x61, 0x73, + 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x54, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x29, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, + 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0x59, 0x0a, 0x13, 0x4c, 0x69, + 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x42, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x07, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0x25, 0x0a, 0x0f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x53, 0x0a, 0x0d, + 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, + 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x09, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x23, 0x0a, 0x0d, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x32, 0x93, 0x01, 0x0a, 0x10, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7f, 0x0a, 0x14, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x30, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, + 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, 0x42, 0x73, 0x0a, 0x1a, 0x69, 0x6f, 0x2e, 0x67, 0x72, + 0x70, 
0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, + 0x61, 0x6c, 0x70, 0x68, 0x61, 0x42, 0x15, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x39, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, + 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0xb8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_grpc_reflection_v1alpha_reflection_proto_rawDescOnce sync.Once + file_grpc_reflection_v1alpha_reflection_proto_rawDescData = file_grpc_reflection_v1alpha_reflection_proto_rawDesc +) + +func file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP() []byte { + file_grpc_reflection_v1alpha_reflection_proto_rawDescOnce.Do(func() { + file_grpc_reflection_v1alpha_reflection_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_reflection_v1alpha_reflection_proto_rawDescData) + }) + return file_grpc_reflection_v1alpha_reflection_proto_rawDescData +} + +var file_grpc_reflection_v1alpha_reflection_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_grpc_reflection_v1alpha_reflection_proto_goTypes = []interface{}{ + (*ServerReflectionRequest)(nil), // 0: grpc.reflection.v1alpha.ServerReflectionRequest + (*ExtensionRequest)(nil), // 1: grpc.reflection.v1alpha.ExtensionRequest + (*ServerReflectionResponse)(nil), // 2: grpc.reflection.v1alpha.ServerReflectionResponse + (*FileDescriptorResponse)(nil), // 3: grpc.reflection.v1alpha.FileDescriptorResponse + (*ExtensionNumberResponse)(nil), // 4: grpc.reflection.v1alpha.ExtensionNumberResponse + (*ListServiceResponse)(nil), // 5: grpc.reflection.v1alpha.ListServiceResponse + (*ServiceResponse)(nil), // 6: grpc.reflection.v1alpha.ServiceResponse + (*ErrorResponse)(nil), // 7: grpc.reflection.v1alpha.ErrorResponse +} +var file_grpc_reflection_v1alpha_reflection_proto_depIdxs = []int32{ + 1, // 0: grpc.reflection.v1alpha.ServerReflectionRequest.file_containing_extension:type_name -> grpc.reflection.v1alpha.ExtensionRequest + 0, // 1: grpc.reflection.v1alpha.ServerReflectionResponse.original_request:type_name -> grpc.reflection.v1alpha.ServerReflectionRequest + 3, // 2: grpc.reflection.v1alpha.ServerReflectionResponse.file_descriptor_response:type_name -> grpc.reflection.v1alpha.FileDescriptorResponse + 4, // 3: grpc.reflection.v1alpha.ServerReflectionResponse.all_extension_numbers_response:type_name -> grpc.reflection.v1alpha.ExtensionNumberResponse + 5, // 4: grpc.reflection.v1alpha.ServerReflectionResponse.list_services_response:type_name -> grpc.reflection.v1alpha.ListServiceResponse + 7, // 5: grpc.reflection.v1alpha.ServerReflectionResponse.error_response:type_name -> grpc.reflection.v1alpha.ErrorResponse + 6, // 6: grpc.reflection.v1alpha.ListServiceResponse.service:type_name -> grpc.reflection.v1alpha.ServiceResponse + 0, // 7: grpc.reflection.v1alpha.ServerReflection.ServerReflectionInfo:input_type -> grpc.reflection.v1alpha.ServerReflectionRequest + 2, // 8: grpc.reflection.v1alpha.ServerReflection.ServerReflectionInfo:output_type -> grpc.reflection.v1alpha.ServerReflectionResponse + 8, // [8:9] is the sub-list for method output_type + 7, // [7:8] is the sub-list for method input_type + 7, // [7:7] is the sub-list for extension 
type_name + 7, // [7:7] is the sub-list for extension extendee + 0, // [0:7] is the sub-list for field type_name +} + +func init() { file_grpc_reflection_v1alpha_reflection_proto_init() } +func file_grpc_reflection_v1alpha_reflection_proto_init() { + if File_grpc_reflection_v1alpha_reflection_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerReflectionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExtensionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerReflectionResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileDescriptorResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExtensionNumberResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListServiceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServiceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ErrorResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*ServerReflectionRequest_FileByFilename)(nil), + (*ServerReflectionRequest_FileContainingSymbol)(nil), + (*ServerReflectionRequest_FileContainingExtension)(nil), + (*ServerReflectionRequest_AllExtensionNumbersOfType)(nil), + (*ServerReflectionRequest_ListServices)(nil), + } + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2].OneofWrappers = []interface{}{ + (*ServerReflectionResponse_FileDescriptorResponse)(nil), + (*ServerReflectionResponse_AllExtensionNumbersResponse)(nil), + (*ServerReflectionResponse_ListServicesResponse)(nil), + (*ServerReflectionResponse_ErrorResponse)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_grpc_reflection_v1alpha_reflection_proto_rawDesc, + NumEnums: 0, + 
NumMessages: 8, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_grpc_reflection_v1alpha_reflection_proto_goTypes, + DependencyIndexes: file_grpc_reflection_v1alpha_reflection_proto_depIdxs, + MessageInfos: file_grpc_reflection_v1alpha_reflection_proto_msgTypes, + }.Build() + File_grpc_reflection_v1alpha_reflection_proto = out.File + file_grpc_reflection_v1alpha_reflection_proto_rawDesc = nil + file_grpc_reflection_v1alpha_reflection_proto_goTypes = nil + file_grpc_reflection_v1alpha_reflection_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go new file mode 100644 index 00000000..367a029b --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go @@ -0,0 +1,161 @@ +// Copyright 2016 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// Service exported by server reflection + +// Warning: this entire file is deprecated. Use this instead: +// https://github.com/grpc/grpc-proto/blob/master/grpc/reflection/v1/reflection.proto + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.22.0 +// grpc/reflection/v1alpha/reflection.proto is a deprecated file. + +package grpc_reflection_v1alpha + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + ServerReflection_ServerReflectionInfo_FullMethodName = "/grpc.reflection.v1alpha.ServerReflection/ServerReflectionInfo" +) + +// ServerReflectionClient is the client API for ServerReflection service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type ServerReflectionClient interface { + // The reflection service is structured as a bidirectional stream, ensuring + // all related requests go to a single server. + ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) +} + +type serverReflectionClient struct { + cc grpc.ClientConnInterface +} + +func NewServerReflectionClient(cc grpc.ClientConnInterface) ServerReflectionClient { + return &serverReflectionClient{cc} +} + +func (c *serverReflectionClient) ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) { + stream, err := c.cc.NewStream(ctx, &ServerReflection_ServiceDesc.Streams[0], ServerReflection_ServerReflectionInfo_FullMethodName, opts...) 
+ if err != nil { + return nil, err + } + x := &serverReflectionServerReflectionInfoClient{stream} + return x, nil +} + +type ServerReflection_ServerReflectionInfoClient interface { + Send(*ServerReflectionRequest) error + Recv() (*ServerReflectionResponse, error) + grpc.ClientStream +} + +type serverReflectionServerReflectionInfoClient struct { + grpc.ClientStream +} + +func (x *serverReflectionServerReflectionInfoClient) Send(m *ServerReflectionRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *serverReflectionServerReflectionInfoClient) Recv() (*ServerReflectionResponse, error) { + m := new(ServerReflectionResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// ServerReflectionServer is the server API for ServerReflection service. +// All implementations should embed UnimplementedServerReflectionServer +// for forward compatibility +type ServerReflectionServer interface { + // The reflection service is structured as a bidirectional stream, ensuring + // all related requests go to a single server. + ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error +} + +// UnimplementedServerReflectionServer should be embedded to have forward compatible implementations. +type UnimplementedServerReflectionServer struct { +} + +func (UnimplementedServerReflectionServer) ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error { + return status.Errorf(codes.Unimplemented, "method ServerReflectionInfo not implemented") +} + +// UnsafeServerReflectionServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to ServerReflectionServer will +// result in compilation errors. +type UnsafeServerReflectionServer interface { + mustEmbedUnimplementedServerReflectionServer() +} + +func RegisterServerReflectionServer(s grpc.ServiceRegistrar, srv ServerReflectionServer) { + s.RegisterService(&ServerReflection_ServiceDesc, srv) +} + +func _ServerReflection_ServerReflectionInfo_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(ServerReflectionServer).ServerReflectionInfo(&serverReflectionServerReflectionInfoServer{stream}) +} + +type ServerReflection_ServerReflectionInfoServer interface { + Send(*ServerReflectionResponse) error + Recv() (*ServerReflectionRequest, error) + grpc.ServerStream +} + +type serverReflectionServerReflectionInfoServer struct { + grpc.ServerStream +} + +func (x *serverReflectionServerReflectionInfoServer) Send(m *ServerReflectionResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *serverReflectionServerReflectionInfoServer) Recv() (*ServerReflectionRequest, error) { + m := new(ServerReflectionRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// ServerReflection_ServiceDesc is the grpc.ServiceDesc for ServerReflection service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var ServerReflection_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.reflection.v1alpha.ServerReflection", + HandlerType: (*ServerReflectionServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "ServerReflectionInfo", + Handler: _ServerReflection_ServerReflectionInfo_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "grpc/reflection/v1alpha/reflection.proto", +} diff --git a/vendor/google.golang.org/grpc/reflection/serverreflection.go b/vendor/google.golang.org/grpc/reflection/serverreflection.go new file mode 100644 index 00000000..c3b40839 --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/serverreflection.go @@ -0,0 +1,360 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +/* +Package reflection implements server reflection service. + +The service implemented is defined in: +https://github.com/grpc/grpc/blob/master/src/proto/grpc/reflection/v1alpha/reflection.proto. + +To register server reflection on a gRPC server: + + import "google.golang.org/grpc/reflection" + + s := grpc.NewServer() + pb.RegisterYourOwnServer(s, &server{}) + + // Register reflection service on gRPC server. + reflection.Register(s) + + s.Serve(lis) +*/ +package reflection // import "google.golang.org/grpc/reflection" + +import ( + "io" + "sort" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protodesc" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + + v1reflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1" + v1reflectionpb "google.golang.org/grpc/reflection/grpc_reflection_v1" + v1alphareflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" +) + +// GRPCServer is the interface provided by a gRPC server. It is implemented by +// *grpc.Server, but could also be implemented by other concrete types. It acts +// as a registry, for accumulating the services exposed by the server. +type GRPCServer interface { + grpc.ServiceRegistrar + ServiceInfoProvider +} + +var _ GRPCServer = (*grpc.Server)(nil) + +// Register registers the server reflection service on the given gRPC server. +// Both the v1 and v1alpha versions are registered. +func Register(s GRPCServer) { + svr := NewServerV1(ServerOptions{Services: s}) + v1alphareflectiongrpc.RegisterServerReflectionServer(s, asV1Alpha(svr)) + v1reflectiongrpc.RegisterServerReflectionServer(s, svr) +} + +// RegisterV1 registers only the v1 version of the server reflection service +// on the given gRPC server. Many clients may only support v1alpha so most +// users should use Register instead, at least until clients have upgraded. 
+func RegisterV1(s GRPCServer) { + svr := NewServerV1(ServerOptions{Services: s}) + v1reflectiongrpc.RegisterServerReflectionServer(s, svr) +} + +// ServiceInfoProvider is an interface used to retrieve metadata about the +// services to expose. +// +// The reflection service is only interested in the service names, but the +// signature is this way so that *grpc.Server implements it. So it is okay +// for a custom implementation to return zero values for the +// grpc.ServiceInfo values in the map. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ServiceInfoProvider interface { + GetServiceInfo() map[string]grpc.ServiceInfo +} + +// ExtensionResolver is the interface used to query details about extensions. +// This interface is satisfied by protoregistry.GlobalTypes. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ExtensionResolver interface { + protoregistry.ExtensionTypeResolver + RangeExtensionsByMessage(message protoreflect.FullName, f func(protoreflect.ExtensionType) bool) +} + +// ServerOptions represents the options used to construct a reflection server. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ServerOptions struct { + // The source of advertised RPC services. If not specified, the reflection + // server will report an empty list when asked to list services. + // + // This value will typically be a *grpc.Server. But the set of advertised + // services can be customized by wrapping a *grpc.Server or using an + // alternate implementation that returns a custom set of service names. + Services ServiceInfoProvider + // Optional resolver used to load descriptors. If not specified, + // protoregistry.GlobalFiles will be used. + DescriptorResolver protodesc.Resolver + // Optional resolver used to query for known extensions. If not specified, + // protoregistry.GlobalTypes will be used. + ExtensionResolver ExtensionResolver +} + +// NewServer returns a reflection server implementation using the given options. +// This can be used to customize behavior of the reflection service. Most usages +// should prefer to use Register instead. For backwards compatibility reasons, +// this returns the v1alpha version of the reflection server. For a v1 version +// of the reflection server, see NewServerV1. +// +// # Experimental +// +// Notice: This function is EXPERIMENTAL and may be changed or removed in a +// later release. +func NewServer(opts ServerOptions) v1alphareflectiongrpc.ServerReflectionServer { + return asV1Alpha(NewServerV1(opts)) +} + +// NewServerV1 returns a reflection server implementation using the given options. +// This can be used to customize behavior of the reflection service. Most usages +// should prefer to use Register instead. +// +// # Experimental +// +// Notice: This function is EXPERIMENTAL and may be changed or removed in a +// later release. 
+func NewServerV1(opts ServerOptions) v1reflectiongrpc.ServerReflectionServer { + if opts.DescriptorResolver == nil { + opts.DescriptorResolver = protoregistry.GlobalFiles + } + if opts.ExtensionResolver == nil { + opts.ExtensionResolver = protoregistry.GlobalTypes + } + return &serverReflectionServer{ + s: opts.Services, + descResolver: opts.DescriptorResolver, + extResolver: opts.ExtensionResolver, + } +} + +type serverReflectionServer struct { + v1alphareflectiongrpc.UnimplementedServerReflectionServer + s ServiceInfoProvider + descResolver protodesc.Resolver + extResolver ExtensionResolver +} + +// fileDescWithDependencies returns a slice of serialized fileDescriptors in +// wire format ([]byte). The fileDescriptors will include fd and all the +// transitive dependencies of fd with names not in sentFileDescriptors. +func (s *serverReflectionServer) fileDescWithDependencies(fd protoreflect.FileDescriptor, sentFileDescriptors map[string]bool) ([][]byte, error) { + if fd.IsPlaceholder() { + // If the given root file is a placeholder, treat it + // as missing instead of serializing it. + return nil, protoregistry.NotFound + } + var r [][]byte + queue := []protoreflect.FileDescriptor{fd} + for len(queue) > 0 { + currentfd := queue[0] + queue = queue[1:] + if currentfd.IsPlaceholder() { + // Skip any missing files in the dependency graph. + continue + } + if sent := sentFileDescriptors[currentfd.Path()]; len(r) == 0 || !sent { + sentFileDescriptors[currentfd.Path()] = true + fdProto := protodesc.ToFileDescriptorProto(currentfd) + currentfdEncoded, err := proto.Marshal(fdProto) + if err != nil { + return nil, err + } + r = append(r, currentfdEncoded) + } + for i := 0; i < currentfd.Imports().Len(); i++ { + queue = append(queue, currentfd.Imports().Get(i)) + } + } + return r, nil +} + +// fileDescEncodingContainingSymbol finds the file descriptor containing the +// given symbol, finds all of its previously unsent transitive dependencies, +// does marshalling on them, and returns the marshalled result. The given symbol +// can be a type, a service or a method. +func (s *serverReflectionServer) fileDescEncodingContainingSymbol(name string, sentFileDescriptors map[string]bool) ([][]byte, error) { + d, err := s.descResolver.FindDescriptorByName(protoreflect.FullName(name)) + if err != nil { + return nil, err + } + return s.fileDescWithDependencies(d.ParentFile(), sentFileDescriptors) +} + +// fileDescEncodingContainingExtension finds the file descriptor containing +// given extension, finds all of its previously unsent transitive dependencies, +// does marshalling on them, and returns the marshalled result. +func (s *serverReflectionServer) fileDescEncodingContainingExtension(typeName string, extNum int32, sentFileDescriptors map[string]bool) ([][]byte, error) { + xt, err := s.extResolver.FindExtensionByNumber(protoreflect.FullName(typeName), protoreflect.FieldNumber(extNum)) + if err != nil { + return nil, err + } + return s.fileDescWithDependencies(xt.TypeDescriptor().ParentFile(), sentFileDescriptors) +} + +// allExtensionNumbersForTypeName returns all extension numbers for the given type. 
+func (s *serverReflectionServer) allExtensionNumbersForTypeName(name string) ([]int32, error) { + var numbers []int32 + s.extResolver.RangeExtensionsByMessage(protoreflect.FullName(name), func(xt protoreflect.ExtensionType) bool { + numbers = append(numbers, int32(xt.TypeDescriptor().Number())) + return true + }) + sort.Slice(numbers, func(i, j int) bool { + return numbers[i] < numbers[j] + }) + if len(numbers) == 0 { + // maybe return an error if given type name is not known + if _, err := s.descResolver.FindDescriptorByName(protoreflect.FullName(name)); err != nil { + return nil, err + } + } + return numbers, nil +} + +// listServices returns the names of services this server exposes. +func (s *serverReflectionServer) listServices() []*v1reflectionpb.ServiceResponse { + serviceInfo := s.s.GetServiceInfo() + resp := make([]*v1reflectionpb.ServiceResponse, 0, len(serviceInfo)) + for svc := range serviceInfo { + resp = append(resp, &v1reflectionpb.ServiceResponse{Name: svc}) + } + sort.Slice(resp, func(i, j int) bool { + return resp[i].Name < resp[j].Name + }) + return resp +} + +// ServerReflectionInfo is the reflection service handler. +func (s *serverReflectionServer) ServerReflectionInfo(stream v1reflectiongrpc.ServerReflection_ServerReflectionInfoServer) error { + sentFileDescriptors := make(map[string]bool) + for { + in, err := stream.Recv() + if err == io.EOF { + return nil + } + if err != nil { + return err + } + + out := &v1reflectionpb.ServerReflectionResponse{ + ValidHost: in.Host, + OriginalRequest: in, + } + switch req := in.MessageRequest.(type) { + case *v1reflectionpb.ServerReflectionRequest_FileByFilename: + var b [][]byte + fd, err := s.descResolver.FindFileByPath(req.FileByFilename) + if err == nil { + b, err = s.fileDescWithDependencies(fd, sentFileDescriptors) + } + if err != nil { + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1reflectionpb.ErrorResponse{ + ErrorCode: int32(codes.NotFound), + ErrorMessage: err.Error(), + }, + } + } else { + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &v1reflectionpb.FileDescriptorResponse{FileDescriptorProto: b}, + } + } + case *v1reflectionpb.ServerReflectionRequest_FileContainingSymbol: + b, err := s.fileDescEncodingContainingSymbol(req.FileContainingSymbol, sentFileDescriptors) + if err != nil { + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1reflectionpb.ErrorResponse{ + ErrorCode: int32(codes.NotFound), + ErrorMessage: err.Error(), + }, + } + } else { + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &v1reflectionpb.FileDescriptorResponse{FileDescriptorProto: b}, + } + } + case *v1reflectionpb.ServerReflectionRequest_FileContainingExtension: + typeName := req.FileContainingExtension.ContainingType + extNum := req.FileContainingExtension.ExtensionNumber + b, err := s.fileDescEncodingContainingExtension(typeName, extNum, sentFileDescriptors) + if err != nil { + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1reflectionpb.ErrorResponse{ + ErrorCode: int32(codes.NotFound), + ErrorMessage: err.Error(), + }, + } + } else { + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &v1reflectionpb.FileDescriptorResponse{FileDescriptorProto: b}, + } + } + case 
*v1reflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType: + extNums, err := s.allExtensionNumbersForTypeName(req.AllExtensionNumbersOfType) + if err != nil { + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1reflectionpb.ErrorResponse{ + ErrorCode: int32(codes.NotFound), + ErrorMessage: err.Error(), + }, + } + } else { + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse{ + AllExtensionNumbersResponse: &v1reflectionpb.ExtensionNumberResponse{ + BaseTypeName: req.AllExtensionNumbersOfType, + ExtensionNumber: extNums, + }, + } + } + case *v1reflectionpb.ServerReflectionRequest_ListServices: + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ListServicesResponse{ + ListServicesResponse: &v1reflectionpb.ListServiceResponse{ + Service: s.listServices(), + }, + } + default: + return status.Errorf(codes.InvalidArgument, "invalid MessageRequest: %v", in.MessageRequest) + } + + if err := stream.Send(out); err != nil { + return err + } + } +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 36cb83ea..34735391 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -233,6 +233,7 @@ github.com/jhump/protoreflect/desc/protoparse/ast github.com/jhump/protoreflect/desc/sourceinfo github.com/jhump/protoreflect/dynamic github.com/jhump/protoreflect/dynamic/grpcdynamic +github.com/jhump/protoreflect/grpcreflect github.com/jhump/protoreflect/internal github.com/jhump/protoreflect/internal/codec # github.com/json-iterator/go v1.1.12 @@ -529,6 +530,9 @@ google.golang.org/grpc/internal/transport/networktype google.golang.org/grpc/keepalive google.golang.org/grpc/metadata google.golang.org/grpc/peer +google.golang.org/grpc/reflection +google.golang.org/grpc/reflection/grpc_reflection_v1 +google.golang.org/grpc/reflection/grpc_reflection_v1alpha google.golang.org/grpc/resolver google.golang.org/grpc/resolver/dns google.golang.org/grpc/serviceconfig From d7f0aa77b4e544e3541be44cae23fc9f2374f8da Mon Sep 17 00:00:00 2001 From: chenxing Date: Tue, 4 Nov 2025 16:09:48 +0800 Subject: [PATCH 05/10] feat: dialtesting grpc --- dialtesting/grpc.go | 290 +++++++++++++++++++++++++---- dialtesting/grpc_test.go | 391 +++++++++++++++++++++++++++++++++++++++ dialtesting/task.go | 4 + 3 files changed, 647 insertions(+), 38 deletions(-) diff --git a/dialtesting/grpc.go b/dialtesting/grpc.go index 7854198d..31371ebd 100644 --- a/dialtesting/grpc.go +++ b/dialtesting/grpc.go @@ -6,11 +6,14 @@ package dialtesting import ( + "bufio" "context" "encoding/json" "fmt" - "log" + "net" + "path/filepath" "strings" + "time" pdesc "github.com/jhump/protoreflect/desc" "github.com/jhump/protoreflect/desc/protoparse" @@ -19,7 +22,15 @@ import ( "github.com/jhump/protoreflect/grpcreflect" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" - rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" +) + +var ( + _ TaskChild = (*GRPCTask)(nil) + _ ITask = (*GRPCTask)(nil) +) + +const ( + DefaultGRPCTimeout = 30 * time.Second ) type GRPCTask struct { @@ -28,18 +39,34 @@ type GRPCTask struct { FullMethod string `json:"full_method"` ProtoFiles map[string]string `json:"protofiles"` // user's multiple .proto files JSONRequest []byte `json:"request"` // user's gRPC request are JSON bytes + Timeout string `json:"timeout"` // request timeout, e.g., "30s", "1m" conn *grpc.ClientConn method *pdesc.MethodDescriptor - result []byte + result []byte + reqError string + reqCost time.Duration + timeout 
time.Duration } func (t *GRPCTask) stop() { - t.conn.Close() + if t.conn != nil { + t.conn.Close() + } } func (t *GRPCTask) init() error { + // parse timeout + t.timeout = DefaultGRPCTimeout + if t.Timeout != "" { + timeout, err := time.ParseDuration(t.Timeout) + if err != nil { + return fmt.Errorf("invalid timeout %q: %w", t.Timeout, err) + } + t.timeout = timeout + } + conn, err := grpc.Dial(t.Server, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { return err @@ -47,7 +74,10 @@ func (t *GRPCTask) init() error { t.conn = conn if t.FullMethod != "" { - if err := t.findMethod(); err != nil { + ctx, cancel := context.WithTimeout(context.Background(), t.timeout) + defer cancel() + if err := t.findMethod(ctx); err != nil { + t.conn.Close() return err } } @@ -55,32 +85,32 @@ func (t *GRPCTask) init() error { return nil } -func (t *GRPCTask) findMethod() error { - if len(t.ProtoFiles) == 0 { - return t.findMethodByReflection() - } - - if err := t.findMethodAmongProtofiles(); err != nil { - log.Printf("findMethodAmongProtofiles: %s", err.Error()) - - if err := t.findMethodByReflection(); err != nil { - return err +func (t *GRPCTask) findMethod(ctx context.Context) error { + if len(t.ProtoFiles) > 0 { + err := t.findMethodAmongProtofiles() + if err != nil { + return fmt.Errorf("find method via proto files: %w", err) } + return nil } + err := t.findMethodByReflection(ctx) + if err != nil { + return fmt.Errorf("find method via reflection: %w", err) + } return nil } -func (t *GRPCTask) findMethodByReflection() error { - rc := grpcreflect.NewClient(context.Background(), rpb.NewServerReflectionClient(t.conn)) +func (t *GRPCTask) findMethodByReflection(ctx context.Context) error { + rc := grpcreflect.NewClientAuto(ctx, t.conn) + defer rc.Reset() slash := strings.LastIndex(t.FullMethod, "/") if slash == -1 { - fmt.Errorf("invalid full method name: %s", t.FullMethod) + return fmt.Errorf("invalid full method name: %s", t.FullMethod) } serviceName := t.FullMethod[:slash] - // 使用 reflection client 解析服务 fd, err := rc.FileContainingSymbol(serviceName) if err != nil { return err @@ -96,47 +126,80 @@ func (t *GRPCTask) findMethodByReflection() error { if md == nil { return fmt.Errorf("method %s not found in service %s", methodName, serviceName) } - - log.Printf("find method %q ok", t.FullMethod) - t.method = md return nil } func (t *GRPCTask) findMethodAmongProtofiles() error { + // Extend map to match import paths with file names + extendedMap := make(map[string]string) + + for k, v := range t.ProtoFiles { + extendedMap[k] = v + extendedMap[filepath.Base(k)] = v + } + + // Parse imports and build mappings: for each import, find matching file by base name + for _, content := range t.ProtoFiles { + for _, imp := range extractImports(content) { + if extendedMap[imp] == "" { + importBase := filepath.Base(imp) + for filename, fileContent := range t.ProtoFiles { + if filepath.Base(filename) == importBase { + extendedMap[imp] = fileContent + break + } + } + } + } + } + p := protoparse.Parser{ - Accessor: protoparse.FileContentsFromMap(t.ProtoFiles), + Accessor: protoparse.FileContentsFromMap(extendedMap), + InferImportPaths: true, } desc, err := p.ParseFiles(getFileNames(t.ProtoFiles)...) 
if err != nil { - return err + return fmt.Errorf("parse proto files failed: %w", err) } sepIdx := strings.LastIndex(t.FullMethod, "/") if sepIdx == -1 { - return fmt.Errorf("invalid FullMethod: %q", t.FullMethod) + return fmt.Errorf("invalid fullMethod: %q", t.FullMethod) } service := t.FullMethod[:sepIdx] method := t.FullMethod[sepIdx+1:] - - log.Printf("service: %s, method: %s", service, method) - - //reg := &protoregistry.Files{} for _, fd := range desc { if sd := fd.FindService(service); sd != nil { if md := sd.FindMethodByName(method); md != nil { t.method = md + return nil } } } - if t.method == nil { - return fmt.Errorf("method %s not found among proto files", method) - } + return fmt.Errorf("method %s not found in service %s", t.FullMethod, service) +} - return nil +// extractImports extracts all import statements from proto file content +func extractImports(content string) []string { + var imports []string + scanner := bufio.NewScanner(strings.NewReader(content)) + for scanner.Scan() { + line := strings.TrimSpace(scanner.Text()) + if strings.HasPrefix(line, "import ") { + start := strings.Index(line, `"`) + if start != -1 { + end := strings.Index(line[start+1:], `"`) + if end != -1 { + imports = append(imports, line[start+1:start+1+end]) + } + } + } + } + return imports } func getFileNames(files map[string]string) []string { @@ -148,25 +211,176 @@ func getFileNames(files map[string]string) []string { } func (t *GRPCTask) run() error { + if t.method == nil { + t.reqError = "method not initialized" + return fmt.Errorf("method nil") + } + + start := time.Now() + defer func() { + t.reqCost = time.Since(start) + }() + + // create context with timeout + ctx, cancel := context.WithTimeout(context.Background(), t.timeout) + defer cancel() + // create dynamic gRPC request msg := dynamic.NewMessage(t.method.GetInputType()) if err := msg.UnmarshalJSON(t.JSONRequest); err != nil { + t.reqError = fmt.Sprintf("invalid message: %v", err) return fmt.Errorf("invalid message for method %q: %w", t.method.GetName(), err) } stub := grpcdynamic.NewStub(t.conn) - resp, err := stub.InvokeRpc(context.Background(), t.method, msg) + resp, err := stub.InvokeRpc(ctx, t.method, msg) if err != nil { - // dialtest failed + t.reqError = err.Error() return err } // dial test message - if j, err := json.Marshal(resp); err != nil { + j, err := json.Marshal(resp) + if err != nil { + t.reqError = fmt.Sprintf("marshal response failed: %v", err) return err - } else { - t.result = j } + t.result = j return nil } + +func (t *GRPCTask) class() string { + return ClassGRPC +} + +func (t *GRPCTask) metricName() string { + return "grpc_dial_testing" +} + +func (t *GRPCTask) initTask() { + if t.Task == nil { + t.Task = &Task{} + } +} + +func (t *GRPCTask) check() error { + if t.Server == "" { + return fmt.Errorf("server address is required") + } + if t.FullMethod == "" { + return fmt.Errorf("full method is required") + } + return nil +} + +func (t *GRPCTask) clear() { + t.result = nil + t.reqError = "" + t.reqCost = 0 + if t.timeout == 0 { + t.timeout = DefaultGRPCTimeout + } +} + +func (t *GRPCTask) checkResult() ([]string, bool) { + if t.reqError != "" { + return []string{t.reqError}, false + } + if t.result == nil { + return []string{"no response"}, false + } + return nil, true +} + +func (t *GRPCTask) getResults() (tags map[string]string, fields map[string]interface{}) { + tags = map[string]string{ + "name": t.Name, + "server": t.Server, + "method": t.FullMethod, + "status": "FAIL", + "proto": "grpc", + } + + fields = 
map[string]interface{}{ + "response_time": int64(t.reqCost) / 1000, + "success": int64(-1), + } + + if hostnames, err := t.getHostName(); err == nil && len(hostnames) > 0 { + tags["dest_host"] = hostnames[0] + } + + for k, v := range t.Tags { + tags[k] = v + } + + message := map[string]interface{}{} + + reasons, succFlag := t.checkResult() + if t.reqError != "" { + reasons = append(reasons, t.reqError) + } + + if succFlag && t.reqError == "" { + tags["status"] = "OK" + fields["success"] = int64(1) + message["response_time"] = int64(t.reqCost) / 1000 + if t.result != nil { + message["response"] = string(t.result) + } + } else { + message["fail_reason"] = strings.Join(reasons, ";") + fields["fail_reason"] = strings.Join(reasons, ";") + } + + if t.result != nil { + fields["response"] = string(t.result) + } + + data, err := json.Marshal(message) + if err != nil { + fields["message"] = err.Error() + } else { + if len(data) > MaxMsgSize { + fields["message"] = string(data[:MaxMsgSize]) + } else { + fields["message"] = string(data) + } + } + + return tags, fields +} + +func (t *GRPCTask) beforeFirstRender() { +} + +func (t *GRPCTask) getVariableValue(variable Variable) (string, error) { + return "", fmt.Errorf("gRPC dial test does not support variable extraction") +} + +func (t *GRPCTask) getHostName() ([]string, error) { + if t.Server == "" { + return nil, fmt.Errorf("server address is empty") + } + + host, _, err := net.SplitHostPort(t.Server) + if err == nil { + return []string{host}, nil + } + + return []string{t.Server}, nil +} + +func (t *GRPCTask) getRawTask(taskString string) (string, error) { + task := GRPCTask{} + + if err := json.Unmarshal([]byte(taskString), &task); err != nil { + return "", fmt.Errorf("unmarshal grpc task failed: %w", err) + } + + task.Task = nil + + bytes, _ := json.Marshal(task) + return string(bytes), nil +} diff --git a/dialtesting/grpc_test.go b/dialtesting/grpc_test.go index 52bd403b..f909dc9f 100644 --- a/dialtesting/grpc_test.go +++ b/dialtesting/grpc_test.go @@ -147,3 +147,394 @@ func TestGRPCDial(t *T.T) { t.Logf("result: %s", string(task.result)) }) } + +func TestGRPCTask_Check(t *T.T) { + t.Run("valid task", func(t *T.T) { + task := &GRPCTask{ + Server: "localhost:50051", + FullMethod: "greeter.Greeter/SayHello", + } + task.initTask() + assert.NoError(t, task.check()) + }) + + t.Run("missing server", func(t *T.T) { + task := &GRPCTask{ + FullMethod: "greeter.Greeter/SayHello", + } + task.initTask() + err := task.check() + assert.Error(t, err) + assert.Contains(t, err.Error(), "server address is required") + }) + + t.Run("missing full method", func(t *T.T) { + task := &GRPCTask{ + Server: "localhost:50051", + } + task.initTask() + err := task.check() + assert.Error(t, err) + assert.Contains(t, err.Error(), "full method is required") + }) +} + +func TestGRPCTask_Init(t *T.T) { + serverAddr := "localhost:50051" // Change to your server address + + t.Run("init with reflection", func(t *T.T) { + task := &GRPCTask{ + Server: serverAddr, + FullMethod: "greeter.Greeter/SayHello", + } + task.initTask() + + err := task.init() + assert.NoError(t, err) + assert.NotNil(t, task.conn) + assert.NotNil(t, task.method) + assert.Equal(t, DefaultGRPCTimeout, task.timeout) + + defer task.stop() + }) + + t.Run("init with proto files", func(t *T.T) { + proto, err := os.ReadFile("greeter/greeter.proto") + assert.NoError(t, err) + + task := &GRPCTask{ + Server: serverAddr, + FullMethod: "greeter.Greeter/SayHello", + ProtoFiles: map[string]string{ + "greeter.proto": string(proto), 
+ }, + } + task.initTask() + + err = task.init() + assert.NoError(t, err) + assert.NotNil(t, task.conn) + assert.NotNil(t, task.method) + + defer task.stop() + }) + + t.Run("init with custom timeout", func(t *T.T) { + task := &GRPCTask{ + Server: serverAddr, + FullMethod: "greeter.Greeter/SayHello", + Timeout: "10s", + } + task.initTask() + + err := task.init() + assert.NoError(t, err) + assert.Equal(t, 10*time.Second, task.timeout) + + defer task.stop() + }) + + t.Run("init with invalid timeout", func(t *T.T) { + task := &GRPCTask{ + Server: serverAddr, + FullMethod: "greeter.Greeter/SayHello", + Timeout: "invalid", + } + task.initTask() + + err := task.init() + assert.Error(t, err) + assert.Contains(t, err.Error(), "invalid timeout") + }) + + t.Run("init with invalid server", func(t *T.T) { + task := &GRPCTask{ + Server: "invalid:99999", + FullMethod: "greeter.Greeter/SayHello", + } + task.initTask() + + err := task.init() + assert.Error(t, err) + }) + + t.Run("init with method not found", func(t *T.T) { + task := &GRPCTask{ + Server: serverAddr, + FullMethod: "greeter.Greeter/NotFoundMethod", + } + task.initTask() + + err := task.init() + assert.Error(t, err) + assert.Contains(t, err.Error(), "not found") + }) +} + +func TestGRPCTask_Run(t *T.T) { + serverAddr := "localhost:50051" // Change to your server address + + t.Run("run success", func(t *T.T) { + greeterProto, err := os.ReadFile("greeter/greeter.proto") + assert.NoError(t, err) + userProto, err := os.ReadFile("greeter/user.proto") + assert.NoError(t, err) + + task := &GRPCTask{ + Server: serverAddr, + FullMethod: "greeter.Greeter/SayHelloToUser", + ProtoFiles: map[string]string{ + "greeter.proto": string(greeterProto), + "user.proto": string(userProto), + }, + } + task.initTask() + + requestData := map[string]interface{}{ + "user_id": 1, + } + jsonRequest, err := json.Marshal(requestData) + assert.NoError(t, err) + task.JSONRequest = jsonRequest + + err = task.init() + assert.NoError(t, err) + + err = task.run() + assert.NoError(t, err) + assert.NotNil(t, task.result) + assert.NotEmpty(t, task.result) + assert.Empty(t, task.reqError) + assert.Greater(t, task.reqCost, time.Duration(0)) + + defer task.stop() + }) + + t.Run("run without init", func(t *T.T) { + task := &GRPCTask{ + Server: serverAddr, + FullMethod: "greeter.Greeter/SayHello", + } + task.initTask() + + err := task.run() + assert.Error(t, err) + assert.Contains(t, err.Error(), "method nil") + }) +} + +func TestGRPCTask_GetResults(t *T.T) { + serverAddr := "localhost:50051" // Change to your server address + + t.Run("success result", func(t *T.T) { + task := &GRPCTask{ + Task: &Task{ + Name: "test-task", + Tags: map[string]string{ + "env": "test", + }, + }, + Server: serverAddr, + FullMethod: "greeter.Greeter/SayHello", + } + task.initTask() + + requestData := map[string]interface{}{"name": "test"} + jsonRequest, _ := json.Marshal(requestData) + task.JSONRequest = jsonRequest + + err := task.init() + assert.NoError(t, err) + + err = task.run() + assert.NoError(t, err) + + tags, fields := task.getResults() + + // Check tags + assert.Equal(t, "test-task", tags["name"]) + assert.Equal(t, serverAddr, tags["server"]) + assert.Equal(t, "greeter.Greeter/SayHello", tags["method"]) + assert.Equal(t, "OK", tags["status"]) + assert.Equal(t, "grpc", tags["proto"]) + assert.Equal(t, "test", tags["env"]) + + // Check fields + assert.Equal(t, int64(1), fields["success"]) + assert.Greater(t, fields["response_time"], int64(0)) + assert.NotNil(t, fields["response"]) + assert.NotNil(t, 
fields["message"]) + + defer task.stop() + }) + + t.Run("failure result", func(t *T.T) { + task := &GRPCTask{ + Task: &Task{ + Name: "test-task-fail", + }, + Server: serverAddr, + FullMethod: "greeter.Greeter/SayHello", + } + task.initTask() + + // Set invalid request to cause error + task.JSONRequest = []byte(`invalid json`) + + err := task.init() + assert.NoError(t, err) + + err = task.run() + assert.Error(t, err) + + tags, fields := task.getResults() + + // Check tags + assert.Equal(t, "FAIL", tags["status"]) + + // Check fields + assert.Equal(t, int64(-1), fields["success"]) + assert.NotNil(t, fields["fail_reason"]) + + defer task.stop() + }) +} + +func TestGRPCTask_OtherMethods(t *T.T) { + t.Run("class", func(t *T.T) { + task := &GRPCTask{} + assert.Equal(t, ClassGRPC, task.class()) + }) + + t.Run("metricName", func(t *T.T) { + task := &GRPCTask{} + assert.Equal(t, "grpc_dial_testing", task.metricName()) + }) + + t.Run("initTask", func(t *T.T) { + task := &GRPCTask{} + task.initTask() + assert.NotNil(t, task.Task) + }) + + t.Run("clear", func(t *T.T) { + task := &GRPCTask{ + result: []byte("test"), + reqError: "error", + reqCost: 100 * time.Millisecond, + } + task.clear() + + assert.Nil(t, task.result) + assert.Empty(t, task.reqError) + assert.Equal(t, time.Duration(0), task.reqCost) + }) + + t.Run("checkResult", func(t *T.T) { + t.Run("success", func(t *T.T) { + task := &GRPCTask{ + result: []byte("test"), + } + reasons, flag := task.checkResult() + assert.Nil(t, reasons) + assert.True(t, flag) + }) + + t.Run("with error", func(t *T.T) { + task := &GRPCTask{ + reqError: "test error", + } + reasons, flag := task.checkResult() + assert.NotEmpty(t, reasons) + assert.False(t, flag) + }) + + t.Run("no response", func(t *T.T) { + task := &GRPCTask{} + reasons, flag := task.checkResult() + assert.NotEmpty(t, reasons) + assert.False(t, flag) + }) + }) + + t.Run("getHostName", func(t *T.T) { + t.Run("with port", func(t *T.T) { + task := &GRPCTask{ + Server: "localhost:50051", + } + hostnames, err := task.getHostName() + assert.NoError(t, err) + assert.Equal(t, []string{"localhost"}, hostnames) + }) + + t.Run("without port", func(t *T.T) { + task := &GRPCTask{ + Server: "localhost", + } + hostnames, err := task.getHostName() + assert.NoError(t, err) + assert.Equal(t, []string{"localhost"}, hostnames) + }) + + t.Run("empty server", func(t *T.T) { + task := &GRPCTask{} + _, err := task.getHostName() + assert.Error(t, err) + }) + }) + + t.Run("getVariableValue", func(t *T.T) { + task := &GRPCTask{} + _, err := task.getVariableValue(Variable{}) + assert.Error(t, err) + assert.Contains(t, err.Error(), "not support") + }) + + t.Run("getRawTask", func(t *T.T) { + task := &GRPCTask{ + Server: "localhost:50051", + FullMethod: "greeter.Greeter/SayHello", + Timeout: "30s", + } + task.initTask() + + taskJSON, _ := json.Marshal(task) + rawTask, err := task.getRawTask(string(taskJSON)) + assert.NoError(t, err) + + var parsed GRPCTask + err = json.Unmarshal([]byte(rawTask), &parsed) + assert.NoError(t, err) + assert.Equal(t, task.Server, parsed.Server) + assert.Equal(t, task.FullMethod, parsed.FullMethod) + assert.Equal(t, task.Timeout, parsed.Timeout) + }) +} + +func TestGRPCTask_Timeout(t *T.T) { + serverAddr := "localhost:50051" // Change to your server address + + t.Run("timeout works", func(t *T.T) { + task := &GRPCTask{ + Server: serverAddr, + FullMethod: "greeter.Greeter/SayHello", + Timeout: "100ms", + } + task.initTask() + + requestData := map[string]interface{}{"name": "test"} + jsonRequest, _ := 
json.Marshal(requestData) + task.JSONRequest = jsonRequest + + err := task.init() + assert.NoError(t, err) + + // Test that timeout is set correctly + assert.Equal(t, 100*time.Millisecond, task.timeout) + + err = task.run() + assert.NoError(t, err) // Should succeed within timeout + + defer task.stop() + }) +} diff --git a/dialtesting/task.go b/dialtesting/task.go index 64c1a359..6057d4a3 100644 --- a/dialtesting/task.go +++ b/dialtesting/task.go @@ -25,6 +25,7 @@ const ( ClassTCP = "TCP" ClassWebsocket = "WEBSOCKET" ClassICMP = "ICMP" + ClassGRPC = "GRPC" ClassDNS = "DNS" ClassHeadless = "BROWSER" ClassOther = "OTHER" @@ -191,6 +192,9 @@ func CreateTaskChild(taskType string) (TaskChild, error) { case "icmp", ClassICMP: ct = &ICMPTask{} + case "grpc", ClassGRPC: + ct = &GRPCTask{} + default: return nil, fmt.Errorf("unknown task type %s", taskType) } From 4a2b59b856d00409b9c73d43f026e7e316f96f1e Mon Sep 17 00:00:00 2001 From: chenxing Date: Tue, 4 Nov 2025 16:22:33 +0800 Subject: [PATCH 06/10] feat: dialtesting grpc --- dialtesting/grpc.go | 50 +++++++++------- dialtesting/grpc_test.go | 123 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 151 insertions(+), 22 deletions(-) diff --git a/dialtesting/grpc.go b/dialtesting/grpc.go index 31371ebd..aa4ffca0 100644 --- a/dialtesting/grpc.go +++ b/dialtesting/grpc.go @@ -131,28 +131,7 @@ func (t *GRPCTask) findMethodByReflection(ctx context.Context) error { } func (t *GRPCTask) findMethodAmongProtofiles() error { - // Extend map to match import paths with file names - extendedMap := make(map[string]string) - - for k, v := range t.ProtoFiles { - extendedMap[k] = v - extendedMap[filepath.Base(k)] = v - } - - // Parse imports and build mappings: for each import, find matching file by base name - for _, content := range t.ProtoFiles { - for _, imp := range extractImports(content) { - if extendedMap[imp] == "" { - importBase := filepath.Base(imp) - for filename, fileContent := range t.ProtoFiles { - if filepath.Base(filename) == importBase { - extendedMap[imp] = fileContent - break - } - } - } - } - } + extendedMap := buildExtendedProtoMap(t.ProtoFiles) p := protoparse.Parser{ Accessor: protoparse.FileContentsFromMap(extendedMap), @@ -183,6 +162,33 @@ func (t *GRPCTask) findMethodAmongProtofiles() error { return fmt.Errorf("method %s not found in service %s", t.FullMethod, service) } +func buildExtendedProtoMap(protoFiles map[string]string) map[string]string { + extendedMap := make(map[string]string) + + // Add original files and their base names + for k, v := range protoFiles { + extendedMap[k] = v + extendedMap[filepath.Base(k)] = v + } + + // Parse imports and build mappings: for each import, find matching file by base name + for _, content := range protoFiles { + for _, imp := range extractImports(content) { + if extendedMap[imp] == "" { + importBase := filepath.Base(imp) + for filename, fileContent := range protoFiles { + if filepath.Base(filename) == importBase { + extendedMap[imp] = fileContent + break + } + } + } + } + } + + return extendedMap +} + // extractImports extracts all import statements from proto file content func extractImports(content string) []string { var imports []string diff --git a/dialtesting/grpc_test.go b/dialtesting/grpc_test.go index f909dc9f..5f7243d4 100644 --- a/dialtesting/grpc_test.go +++ b/dialtesting/grpc_test.go @@ -538,3 +538,126 @@ func TestGRPCTask_Timeout(t *T.T) { defer task.stop() }) } + +func TestBuildExtendedProtoMap(t *T.T) { + t.Run("with import path matching", func(t *T.T) { + greeterProto := 
`syntax = "proto3"; +package greeter; +import "greeter/user.proto"; +service Greeter { + rpc SayHello (HelloRequest) returns (HelloReply) {} +}` + + userProto := `syntax = "proto3"; +package user; +message GetUserRequest { + int32 user_id = 1; +}` + + protoFiles := map[string]string{ + "greeter.proto": greeterProto, + "user.proto": userProto, + } + + extendedMap := buildExtendedProtoMap(protoFiles) + + // Check original files are preserved + assert.Equal(t, greeterProto, extendedMap["greeter.proto"]) + assert.Equal(t, userProto, extendedMap["user.proto"]) + + // Check base names are added + assert.Equal(t, greeterProto, extendedMap["greeter.proto"]) + assert.Equal(t, userProto, extendedMap["user.proto"]) + + // Check import path is mapped + assert.Equal(t, userProto, extendedMap["greeter/user.proto"], "import path should be mapped to user.proto content") + }) + + t.Run("with full path already in map", func(t *T.T) { + greeterProto := `syntax = "proto3"; +package greeter; +import "greeter/user.proto"; +service Greeter {} +` + + userProto := `syntax = "proto3"; +package user; +message GetUserRequest {} +` + + protoFiles := map[string]string{ + "greeter/greeter.proto": greeterProto, + "greeter/user.proto": userProto, + } + + extendedMap := buildExtendedProtoMap(protoFiles) + + // Check original paths are preserved + assert.Equal(t, greeterProto, extendedMap["greeter/greeter.proto"]) + assert.Equal(t, userProto, extendedMap["greeter/user.proto"]) + + // Check base names are added + assert.Equal(t, greeterProto, extendedMap["greeter.proto"]) + assert.Equal(t, userProto, extendedMap["user.proto"]) + + // Import path should already exist (no need to match) + assert.Equal(t, userProto, extendedMap["greeter/user.proto"]) + }) + + t.Run("with multiple imports", func(t *T.T) { + mainProto := `syntax = "proto3"; +package main; +import "greeter/user.proto"; +import "greeter/common.proto"; +service Main {} +` + + userProto := `syntax = "proto3"; +package user; +message User {} +` + + commonProto := `syntax = "proto3"; +package common; +message Common {} +` + + protoFiles := map[string]string{ + "main.proto": mainProto, + "user.proto": userProto, + "common.proto": commonProto, + } + + extendedMap := buildExtendedProtoMap(protoFiles) + + // Check all imports are mapped + assert.Equal(t, userProto, extendedMap["greeter/user.proto"]) + assert.Equal(t, commonProto, extendedMap["greeter/common.proto"]) + }) + + t.Run("with no imports", func(t *T.T) { + protoFiles := map[string]string{ + "simple.proto": `syntax = "proto3"; package simple;`, + } + + extendedMap := buildExtendedProtoMap(protoFiles) + + // If original key is already just filename, filepath.Base returns same value + // So map will have 1 entry (same key set twice) + assert.Equal(t, 1, len(extendedMap)) + assert.NotEmpty(t, extendedMap["simple.proto"]) + }) + + t.Run("with path in filename", func(t *T.T) { + protoFiles := map[string]string{ + "path/to/simple.proto": `syntax = "proto3"; package simple;`, + } + + extendedMap := buildExtendedProtoMap(protoFiles) + + // Should have 2 entries: original path and base name + assert.Equal(t, 2, len(extendedMap)) + assert.NotEmpty(t, extendedMap["path/to/simple.proto"]) + assert.NotEmpty(t, extendedMap["simple.proto"]) + }) +} From de5472f6c50e35a10c80745ffb511cbd2d731366 Mon Sep 17 00:00:00 2001 From: chenxing Date: Wed, 5 Nov 2025 14:23:02 +0800 Subject: [PATCH 07/10] feat: dialtesting grpc --- dialtesting/greeter/b.sh | 3 - dialtesting/greeter/greeter.pb.go | 224 - dialtesting/greeter/greeter.proto | 35 
- dialtesting/greeter/greeter_grpc.pb.go | 111 - dialtesting/grpc.go | 895 ++- dialtesting/grpc_script.go | 140 + dialtesting/grpc_script_test.go | 141 + dialtesting/grpc_test.go | 1340 +++- dialtesting/grpcproto/common.proto | 10 + dialtesting/grpcproto/greeter.proto | 15 + dialtesting/task.go | 7 + go.mod | 29 +- go.sum | 62 +- .../bufbuild/protocompile/.golangci.yml | 40 +- .../bufbuild/protocompile/.protoc_version | 1 - .../github.com/bufbuild/protocompile/LICENSE | 2 +- .../github.com/bufbuild/protocompile/Makefile | 92 +- .../bufbuild/protocompile/ast/doc.go | 2 +- .../bufbuild/protocompile/ast/enum.go | 36 +- .../bufbuild/protocompile/ast/field.go | 259 +- .../bufbuild/protocompile/ast/file.go | 117 +- .../bufbuild/protocompile/ast/file_info.go | 37 +- .../bufbuild/protocompile/ast/identifiers.go | 11 +- .../bufbuild/protocompile/ast/message.go | 28 +- .../bufbuild/protocompile/ast/no_source.go | 81 +- .../bufbuild/protocompile/ast/node.go | 4 +- .../bufbuild/protocompile/ast/options.go | 98 +- .../bufbuild/protocompile/ast/ranges.go | 111 +- .../bufbuild/protocompile/ast/service.go | 32 +- .../bufbuild/protocompile/ast/values.go | 65 +- .../bufbuild/protocompile/ast/walk.go | 423 +- .../bufbuild/protocompile/compiler.go | 106 +- .../github.com/bufbuild/protocompile/doc.go | 2 +- .../github.com/bufbuild/protocompile/go.work | 3 +- .../bufbuild/protocompile/go.work.sum | 289 +- .../internal/editions/editions.go | 420 -- .../featuresext/cpp_features.protoset | Bin 605 -> 0 bytes .../internal/featuresext/featuresext.go | 84 - .../featuresext/java_features.protoset | Bin 856 -> 0 bytes .../protocompile/internal/message_context.go | 2 +- .../internal/messageset/messageset.go | 62 - .../bufbuild/protocompile/internal/norace.go | 2 +- .../bufbuild/protocompile/internal/options.go | 22 +- .../bufbuild/protocompile/internal/race.go | 2 +- .../bufbuild/protocompile/internal/tags.go | 107 +- .../bufbuild/protocompile/internal/types.go | 2 +- .../bufbuild/protocompile/internal/util.go | 127 +- .../protocompile/linker/descriptors.go | 720 ++- .../bufbuild/protocompile/linker/doc.go | 2 +- .../bufbuild/protocompile/linker/files.go | 116 +- .../bufbuild/protocompile/linker/linker.go | 44 +- .../protocompile/linker/pathkey_no_unsafe.go | 2 +- .../protocompile/linker/pathkey_unsafe.go | 6 +- .../bufbuild/protocompile/linker/pool.go | 131 - .../bufbuild/protocompile/linker/resolve.go | 308 +- .../bufbuild/protocompile/linker/symbols.go | 380 +- .../bufbuild/protocompile/linker/validate.go | 970 +-- .../bufbuild/protocompile/options/options.go | 2334 +++---- .../options/source_retention_options.go | 539 -- .../protocompile/options/target_types.go | 152 - .../bufbuild/protocompile/parser/ast.go | 230 +- .../bufbuild/protocompile/parser/clone.go | 3 +- .../bufbuild/protocompile/parser/doc.go | 2 +- .../bufbuild/protocompile/parser/errors.go | 2 +- .../bufbuild/protocompile/parser/lexer.go | 17 +- .../bufbuild/protocompile/parser/parser.go | 20 +- .../bufbuild/protocompile/parser/proto.y | 1397 ++--- .../bufbuild/protocompile/parser/proto.y.go | 2661 ++++---- .../bufbuild/protocompile/parser/result.go | 320 +- .../bufbuild/protocompile/parser/validate.go | 213 +- .../protocompile/protoutil/editions.go | 140 - .../bufbuild/protocompile/protoutil/protos.go | 9 +- .../bufbuild/protocompile/reporter/errors.go | 32 +- .../protocompile/reporter/reporter.go | 31 +- .../bufbuild/protocompile/resolver.go | 23 +- .../sourceinfo/source_code_info.go | 318 +- .../bufbuild/protocompile/std_imports.go | 38 +- 
.../protocompile/supported_editions.go | 30 - .../bufbuild/protocompile/walk/walk.go | 119 +- vendor/github.com/go-ping/ping/.editorconfig | 16 - vendor/github.com/go-ping/ping/.gitignore | 2 - vendor/github.com/go-ping/ping/.golangci.yml | 6 - .../github.com/go-ping/ping/.goreleaser.yml | 46 - .../github.com/go-ping/ping/CONTRIBUTING.md | 44 - vendor/github.com/go-ping/ping/LICENSE | 21 - vendor/github.com/go-ping/ping/Makefile | 32 - vendor/github.com/go-ping/ping/README.md | 141 - vendor/github.com/go-ping/ping/logger.go | 53 - vendor/github.com/go-ping/ping/packetconn.go | 103 - vendor/github.com/go-ping/ping/ping.go | 820 --- vendor/github.com/go-ping/ping/utils_linux.go | 20 - vendor/github.com/go-ping/ping/utils_other.go | 17 - .../github.com/go-ping/ping/utils_windows.go | 25 - .../golang/protobuf/jsonpb/decode.go | 1 - .../golang/protobuf/jsonpb/encode.go | 1 - .../github.com/golang/protobuf/ptypes/any.go | 7 +- vendor/github.com/google/uuid/.travis.yml | 9 + vendor/github.com/google/uuid/CHANGELOG.md | 21 - vendor/github.com/google/uuid/CONTRIBUTING.md | 16 - vendor/github.com/google/uuid/README.md | 10 +- vendor/github.com/google/uuid/node_js.go | 2 +- vendor/github.com/google/uuid/uuid.go | 36 +- .../jhump/protoreflect/desc/cache.go | 9 + .../jhump/protoreflect/desc/convert.go | 4 +- .../jhump/protoreflect/desc/descriptor.go | 12 - .../protoreflect/desc/internal/registry.go | 23 +- .../jhump/protoreflect/desc/internal/util.go | 3 - .../jhump/protoreflect/desc/load.go | 7 + .../jhump/protoreflect/desc/protoparse/ast.go | 14 +- .../protoreflect/desc/protoparse/ast/file.go | 2 - .../desc/protoparse/ast/values.go | 6 - .../protoreflect/desc/protoparse/ast/walk.go | 5 - .../protoreflect/desc/protoparse/errors.go | 9 +- .../protoreflect/desc/protoparse/parser.go | 242 +- .../desc/protoparse/test-source-info.txt | 28 +- .../protoreflect/desc/sourceinfo/wrappers.go | 9 +- .../jhump/protoreflect/desc/wrap.go | 21 +- .../jhump/protoreflect/grpcreflect/adapt.go | 137 - .../jhump/protoreflect/grpcreflect/client.go | 124 +- .../grpc_reflection_v1/reflection.pb.go | 389 +- .../grpc_reflection_v1/reflection.proto | 150 + .../grpc_reflection_v1/reflection_grpc.pb.go | 39 +- .../internal/grpc_reflection_v1/svc_impl.go | 240 + .../prometheus/util/strutil/strconv.go | 21 - vendor/github.com/tidwall/tinylru/LICENSE | 19 - vendor/github.com/tidwall/tinylru/README.md | 54 - vendor/github.com/tidwall/tinylru/lru.go | 208 - vendor/github.com/tidwall/wal/.gitignore | 3 - vendor/github.com/tidwall/wal/LICENSE | 20 - vendor/github.com/tidwall/wal/README.md | 79 - vendor/github.com/tidwall/wal/wal.go | 917 --- .../x/crypto/sha3/hashes_generic.go | 1 + vendor/golang.org/x/crypto/sha3/keccakf.go | 1 + .../golang.org/x/crypto/sha3/keccakf_amd64.go | 1 + .../golang.org/x/crypto/sha3/keccakf_amd64.s | 5 +- vendor/golang.org/x/crypto/sha3/register.go | 1 + vendor/golang.org/x/crypto/sha3/sha3_s390x.go | 20 +- vendor/golang.org/x/crypto/sha3/sha3_s390x.s | 1 + .../golang.org/x/crypto/sha3/shake_generic.go | 1 + vendor/golang.org/x/crypto/sha3/xor.go | 1 + .../golang.org/x/crypto/sha3/xor_unaligned.go | 2 + vendor/golang.org/x/net/html/doc.go | 2 +- vendor/golang.org/x/net/html/token.go | 12 +- .../golang.org/x/net/http/httpguts/httplex.go | 13 +- vendor/golang.org/x/net/http2/databuffer.go | 59 +- vendor/golang.org/x/net/http2/frame.go | 51 +- vendor/golang.org/x/net/http2/go111.go | 30 + vendor/golang.org/x/net/http2/go115.go | 27 + vendor/golang.org/x/net/http2/go118.go | 17 + 
vendor/golang.org/x/net/http2/not_go111.go | 21 + vendor/golang.org/x/net/http2/not_go115.go | 31 + vendor/golang.org/x/net/http2/not_go118.go | 17 + vendor/golang.org/x/net/http2/pipe.go | 11 +- vendor/golang.org/x/net/http2/server.go | 114 +- vendor/golang.org/x/net/http2/testsync.go | 331 - vendor/golang.org/x/net/http2/transport.go | 355 +- vendor/golang.org/x/net/icmp/helper_posix.go | 1 + vendor/golang.org/x/net/icmp/listen_posix.go | 1 + vendor/golang.org/x/net/icmp/listen_stub.go | 1 + vendor/golang.org/x/net/idna/go118.go | 1 + vendor/golang.org/x/net/idna/idna10.0.0.go | 1 + vendor/golang.org/x/net/idna/idna9.0.0.go | 1 + vendor/golang.org/x/net/idna/pre_go118.go | 1 + vendor/golang.org/x/net/idna/tables10.0.0.go | 1 + vendor/golang.org/x/net/idna/tables11.0.0.go | 1 + vendor/golang.org/x/net/idna/tables12.0.0.go | 1 + vendor/golang.org/x/net/idna/tables13.0.0.go | 1 + vendor/golang.org/x/net/idna/tables15.0.0.go | 1 + vendor/golang.org/x/net/idna/tables9.0.0.go | 1 + vendor/golang.org/x/net/idna/trie12.0.0.go | 1 + vendor/golang.org/x/net/idna/trie13.0.0.go | 1 + .../x/net/internal/socket/cmsghdr.go | 1 + .../x/net/internal/socket/cmsghdr_bsd.go | 1 + .../internal/socket/cmsghdr_linux_32bit.go | 2 + .../internal/socket/cmsghdr_linux_64bit.go | 2 + .../internal/socket/cmsghdr_solaris_64bit.go | 1 + .../x/net/internal/socket/cmsghdr_stub.go | 1 + .../x/net/internal/socket/cmsghdr_unix.go | 1 + .../net/internal/socket/complete_dontwait.go | 1 + .../internal/socket/complete_nodontwait.go | 1 + .../golang.org/x/net/internal/socket/empty.s | 1 + .../x/net/internal/socket/error_unix.go | 1 + .../x/net/internal/socket/iovec_32bit.go | 2 + .../x/net/internal/socket/iovec_64bit.go | 2 + .../internal/socket/iovec_solaris_64bit.go | 1 + .../x/net/internal/socket/iovec_stub.go | 1 + .../x/net/internal/socket/mmsghdr_stub.go | 1 + .../x/net/internal/socket/mmsghdr_unix.go | 1 + .../x/net/internal/socket/msghdr_bsd.go | 1 + .../x/net/internal/socket/msghdr_bsdvar.go | 1 + .../net/internal/socket/msghdr_linux_32bit.go | 2 + .../net/internal/socket/msghdr_linux_64bit.go | 2 + .../internal/socket/msghdr_solaris_64bit.go | 1 + .../x/net/internal/socket/msghdr_stub.go | 1 + .../x/net/internal/socket/msghdr_zos_s390x.go | 1 + .../x/net/internal/socket/norace.go | 1 + .../golang.org/x/net/internal/socket/race.go | 1 + .../x/net/internal/socket/rawconn_mmsg.go | 1 + .../x/net/internal/socket/rawconn_msg.go | 1 + .../x/net/internal/socket/rawconn_nommsg.go | 1 + .../x/net/internal/socket/rawconn_nomsg.go | 1 + .../x/net/internal/socket/sys_bsd.go | 1 + .../x/net/internal/socket/sys_const_unix.go | 1 + .../x/net/internal/socket/sys_linux.go | 1 + .../net/internal/socket/sys_linux_loong64.go | 1 + .../net/internal/socket/sys_linux_riscv64.go | 1 + .../x/net/internal/socket/sys_posix.go | 1 + .../x/net/internal/socket/sys_stub.go | 1 + .../x/net/internal/socket/sys_unix.go | 1 + .../x/net/internal/socket/zsys_aix_ppc64.go | 1 + .../net/internal/socket/zsys_linux_loong64.go | 1 + .../net/internal/socket/zsys_linux_riscv64.go | 1 + vendor/golang.org/x/net/ipv4/control_bsd.go | 1 + .../golang.org/x/net/ipv4/control_pktinfo.go | 1 + vendor/golang.org/x/net/ipv4/control_stub.go | 1 + vendor/golang.org/x/net/ipv4/control_unix.go | 1 + vendor/golang.org/x/net/ipv4/icmp_stub.go | 1 + vendor/golang.org/x/net/ipv4/payload_cmsg.go | 1 + .../golang.org/x/net/ipv4/payload_nocmsg.go | 1 + vendor/golang.org/x/net/ipv4/sockopt_posix.go | 1 + vendor/golang.org/x/net/ipv4/sockopt_stub.go | 1 + 
vendor/golang.org/x/net/ipv4/sys_aix.go | 1 + vendor/golang.org/x/net/ipv4/sys_asmreq.go | 1 + .../golang.org/x/net/ipv4/sys_asmreq_stub.go | 1 + vendor/golang.org/x/net/ipv4/sys_asmreqn.go | 1 + .../golang.org/x/net/ipv4/sys_asmreqn_stub.go | 1 + vendor/golang.org/x/net/ipv4/sys_bpf.go | 1 + vendor/golang.org/x/net/ipv4/sys_bpf_stub.go | 1 + vendor/golang.org/x/net/ipv4/sys_bsd.go | 1 + vendor/golang.org/x/net/ipv4/sys_ssmreq.go | 1 + .../golang.org/x/net/ipv4/sys_ssmreq_stub.go | 1 + vendor/golang.org/x/net/ipv4/sys_stub.go | 1 + .../golang.org/x/net/ipv4/zsys_aix_ppc64.go | 1 + .../x/net/ipv4/zsys_linux_loong64.go | 1 + .../x/net/ipv4/zsys_linux_riscv64.go | 1 + .../x/net/ipv6/control_rfc2292_unix.go | 1 + .../x/net/ipv6/control_rfc3542_unix.go | 1 + vendor/golang.org/x/net/ipv6/control_stub.go | 1 + vendor/golang.org/x/net/ipv6/control_unix.go | 1 + vendor/golang.org/x/net/ipv6/icmp_bsd.go | 1 + vendor/golang.org/x/net/ipv6/icmp_stub.go | 1 + vendor/golang.org/x/net/ipv6/payload_cmsg.go | 1 + .../golang.org/x/net/ipv6/payload_nocmsg.go | 1 + vendor/golang.org/x/net/ipv6/sockopt_posix.go | 1 + vendor/golang.org/x/net/ipv6/sockopt_stub.go | 1 + vendor/golang.org/x/net/ipv6/sys_aix.go | 1 + vendor/golang.org/x/net/ipv6/sys_asmreq.go | 1 + .../golang.org/x/net/ipv6/sys_asmreq_stub.go | 1 + vendor/golang.org/x/net/ipv6/sys_bpf.go | 1 + vendor/golang.org/x/net/ipv6/sys_bpf_stub.go | 1 + vendor/golang.org/x/net/ipv6/sys_bsd.go | 1 + vendor/golang.org/x/net/ipv6/sys_ssmreq.go | 1 + .../golang.org/x/net/ipv6/sys_ssmreq_stub.go | 1 + vendor/golang.org/x/net/ipv6/sys_stub.go | 1 + .../golang.org/x/net/ipv6/zsys_aix_ppc64.go | 1 + .../x/net/ipv6/zsys_linux_loong64.go | 1 + .../x/net/ipv6/zsys_linux_riscv64.go | 1 + vendor/golang.org/x/sync/LICENSE | 4 +- vendor/golang.org/x/sync/errgroup/errgroup.go | 135 - vendor/golang.org/x/sync/errgroup/go120.go | 13 - .../golang.org/x/sync/errgroup/pre_go120.go | 14 - .../golang.org/x/sync/semaphore/semaphore.go | 42 +- vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s | 1 + vendor/golang.org/x/sys/cpu/cpu.go | 1 - vendor/golang.org/x/sys/cpu/cpu_aix.go | 1 + vendor/golang.org/x/sys/cpu/cpu_arm64.go | 10 - vendor/golang.org/x/sys/cpu/cpu_arm64.s | 9 +- vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go | 2 +- vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go | 1 + vendor/golang.org/x/sys/cpu/cpu_gc_x86.go | 2 + .../golang.org/x/sys/cpu/cpu_gccgo_arm64.go | 1 + .../golang.org/x/sys/cpu/cpu_gccgo_s390x.go | 1 + vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c | 2 + vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go | 2 + vendor/golang.org/x/sys/cpu/cpu_linux.go | 1 + .../golang.org/x/sys/cpu/cpu_linux_arm64.go | 5 - .../golang.org/x/sys/cpu/cpu_linux_mips64x.go | 2 + .../golang.org/x/sys/cpu/cpu_linux_noinit.go | 1 + .../golang.org/x/sys/cpu/cpu_linux_ppc64x.go | 2 + vendor/golang.org/x/sys/cpu/cpu_loong64.go | 1 + vendor/golang.org/x/sys/cpu/cpu_mips64x.go | 1 + vendor/golang.org/x/sys/cpu/cpu_mipsx.go | 1 + vendor/golang.org/x/sys/cpu/cpu_other_arm.go | 1 + .../golang.org/x/sys/cpu/cpu_other_arm64.go | 1 + .../golang.org/x/sys/cpu/cpu_other_mips64x.go | 2 + .../golang.org/x/sys/cpu/cpu_other_ppc64x.go | 3 + .../golang.org/x/sys/cpu/cpu_other_riscv64.go | 1 + vendor/golang.org/x/sys/cpu/cpu_ppc64x.go | 1 + vendor/golang.org/x/sys/cpu/cpu_riscv64.go | 1 + vendor/golang.org/x/sys/cpu/cpu_s390x.s | 1 + vendor/golang.org/x/sys/cpu/cpu_wasm.go | 1 + vendor/golang.org/x/sys/cpu/cpu_x86.go | 1 + vendor/golang.org/x/sys/cpu/cpu_x86.s | 2 + vendor/golang.org/x/sys/cpu/endian_big.go | 1 + 
vendor/golang.org/x/sys/cpu/endian_little.go | 1 + .../x/sys/cpu/proc_cpuinfo_linux.go | 1 + .../x/sys/cpu/runtime_auxv_go121.go | 1 + .../golang.org/x/sys/cpu/syscall_aix_gccgo.go | 1 + .../x/sys/cpu/syscall_aix_ppc64_gc.go | 1 + .../golang.org/x/sys/execabs/execabs_go118.go | 1 + .../golang.org/x/sys/execabs/execabs_go119.go | 1 + vendor/golang.org/x/sys/unix/aliases.go | 4 +- vendor/golang.org/x/sys/unix/asm_aix_ppc64.s | 1 + vendor/golang.org/x/sys/unix/asm_bsd_386.s | 2 + vendor/golang.org/x/sys/unix/asm_bsd_amd64.s | 2 + vendor/golang.org/x/sys/unix/asm_bsd_arm.s | 2 + vendor/golang.org/x/sys/unix/asm_bsd_arm64.s | 2 + vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s | 2 + .../golang.org/x/sys/unix/asm_bsd_riscv64.s | 2 + vendor/golang.org/x/sys/unix/asm_linux_386.s | 1 + .../golang.org/x/sys/unix/asm_linux_amd64.s | 1 + vendor/golang.org/x/sys/unix/asm_linux_arm.s | 1 + .../golang.org/x/sys/unix/asm_linux_arm64.s | 3 + .../golang.org/x/sys/unix/asm_linux_loong64.s | 3 + .../golang.org/x/sys/unix/asm_linux_mips64x.s | 3 + .../golang.org/x/sys/unix/asm_linux_mipsx.s | 3 + .../golang.org/x/sys/unix/asm_linux_ppc64x.s | 3 + .../golang.org/x/sys/unix/asm_linux_riscv64.s | 2 + .../golang.org/x/sys/unix/asm_linux_s390x.s | 3 + .../x/sys/unix/asm_openbsd_mips64.s | 1 + .../golang.org/x/sys/unix/asm_solaris_amd64.s | 1 + vendor/golang.org/x/sys/unix/asm_zos_s390x.s | 668 +- vendor/golang.org/x/sys/unix/bpxsvc_zos.go | 657 -- vendor/golang.org/x/sys/unix/bpxsvc_zos.s | 192 - vendor/golang.org/x/sys/unix/cap_freebsd.go | 1 + vendor/golang.org/x/sys/unix/constants.go | 1 + vendor/golang.org/x/sys/unix/dev_aix_ppc.go | 1 + vendor/golang.org/x/sys/unix/dev_aix_ppc64.go | 1 + vendor/golang.org/x/sys/unix/dev_zos.go | 1 + vendor/golang.org/x/sys/unix/dirent.go | 1 + vendor/golang.org/x/sys/unix/endian_big.go | 1 + vendor/golang.org/x/sys/unix/endian_little.go | 1 + vendor/golang.org/x/sys/unix/env_unix.go | 1 + vendor/golang.org/x/sys/unix/epoll_zos.go | 221 + vendor/golang.org/x/sys/unix/fcntl.go | 3 +- .../x/sys/unix/fcntl_linux_32bit.go | 1 + vendor/golang.org/x/sys/unix/fdset.go | 1 + vendor/golang.org/x/sys/unix/fstatfs_zos.go | 164 + vendor/golang.org/x/sys/unix/gccgo.go | 1 + vendor/golang.org/x/sys/unix/gccgo_c.c | 1 + .../x/sys/unix/gccgo_linux_amd64.go | 1 + vendor/golang.org/x/sys/unix/ifreq_linux.go | 1 + vendor/golang.org/x/sys/unix/ioctl_linux.go | 5 - vendor/golang.org/x/sys/unix/ioctl_signed.go | 1 + .../golang.org/x/sys/unix/ioctl_unsigned.go | 1 + vendor/golang.org/x/sys/unix/ioctl_zos.go | 1 + vendor/golang.org/x/sys/unix/mkerrors.sh | 43 +- vendor/golang.org/x/sys/unix/mmap_nomremap.go | 3 +- vendor/golang.org/x/sys/unix/mremap.go | 1 + vendor/golang.org/x/sys/unix/pagesize_unix.go | 3 +- .../golang.org/x/sys/unix/pledge_openbsd.go | 92 +- vendor/golang.org/x/sys/unix/ptrace_darwin.go | 1 + vendor/golang.org/x/sys/unix/ptrace_ios.go | 1 + vendor/golang.org/x/sys/unix/race.go | 1 + vendor/golang.org/x/sys/unix/race0.go | 1 + .../x/sys/unix/readdirent_getdents.go | 1 + .../x/sys/unix/readdirent_getdirentries.go | 3 +- vendor/golang.org/x/sys/unix/sockcmsg_unix.go | 1 + .../x/sys/unix/sockcmsg_unix_other.go | 1 + vendor/golang.org/x/sys/unix/sockcmsg_zos.go | 58 - .../golang.org/x/sys/unix/symaddr_zos_s390x.s | 75 - vendor/golang.org/x/sys/unix/syscall.go | 1 + vendor/golang.org/x/sys/unix/syscall_aix.go | 4 +- .../golang.org/x/sys/unix/syscall_aix_ppc.go | 1 + .../x/sys/unix/syscall_aix_ppc64.go | 1 + vendor/golang.org/x/sys/unix/syscall_bsd.go | 3 +- 
.../x/sys/unix/syscall_darwin_amd64.go | 1 + .../x/sys/unix/syscall_darwin_arm64.go | 1 + .../x/sys/unix/syscall_darwin_libSystem.go | 3 +- .../x/sys/unix/syscall_dragonfly_amd64.go | 1 + .../golang.org/x/sys/unix/syscall_freebsd.go | 12 +- .../x/sys/unix/syscall_freebsd_386.go | 1 + .../x/sys/unix/syscall_freebsd_amd64.go | 1 + .../x/sys/unix/syscall_freebsd_arm.go | 1 + .../x/sys/unix/syscall_freebsd_arm64.go | 1 + .../x/sys/unix/syscall_freebsd_riscv64.go | 1 + vendor/golang.org/x/sys/unix/syscall_hurd.go | 1 + .../golang.org/x/sys/unix/syscall_hurd_386.go | 1 + .../golang.org/x/sys/unix/syscall_illumos.go | 1 + vendor/golang.org/x/sys/unix/syscall_linux.go | 132 +- .../x/sys/unix/syscall_linux_386.go | 1 + .../x/sys/unix/syscall_linux_alarm.go | 2 + .../x/sys/unix/syscall_linux_amd64.go | 1 + .../x/sys/unix/syscall_linux_amd64_gc.go | 1 + .../x/sys/unix/syscall_linux_arm.go | 1 + .../x/sys/unix/syscall_linux_arm64.go | 1 + .../golang.org/x/sys/unix/syscall_linux_gc.go | 1 + .../x/sys/unix/syscall_linux_gc_386.go | 1 + .../x/sys/unix/syscall_linux_gc_arm.go | 1 + .../x/sys/unix/syscall_linux_gccgo_386.go | 1 + .../x/sys/unix/syscall_linux_gccgo_arm.go | 1 + .../x/sys/unix/syscall_linux_loong64.go | 1 + .../x/sys/unix/syscall_linux_mips64x.go | 2 + .../x/sys/unix/syscall_linux_mipsx.go | 2 + .../x/sys/unix/syscall_linux_ppc.go | 1 + .../x/sys/unix/syscall_linux_ppc64x.go | 2 + .../x/sys/unix/syscall_linux_riscv64.go | 1 + .../x/sys/unix/syscall_linux_s390x.go | 1 + .../x/sys/unix/syscall_linux_sparc64.go | 1 + .../x/sys/unix/syscall_netbsd_386.go | 1 + .../x/sys/unix/syscall_netbsd_amd64.go | 1 + .../x/sys/unix/syscall_netbsd_arm.go | 1 + .../x/sys/unix/syscall_netbsd_arm64.go | 1 + .../golang.org/x/sys/unix/syscall_openbsd.go | 28 +- .../x/sys/unix/syscall_openbsd_386.go | 1 + .../x/sys/unix/syscall_openbsd_amd64.go | 1 + .../x/sys/unix/syscall_openbsd_arm.go | 1 + .../x/sys/unix/syscall_openbsd_arm64.go | 1 + .../x/sys/unix/syscall_openbsd_libc.go | 1 + .../x/sys/unix/syscall_openbsd_ppc64.go | 1 + .../x/sys/unix/syscall_openbsd_riscv64.go | 1 + .../golang.org/x/sys/unix/syscall_solaris.go | 5 +- .../x/sys/unix/syscall_solaris_amd64.go | 1 + vendor/golang.org/x/sys/unix/syscall_unix.go | 1 + .../golang.org/x/sys/unix/syscall_unix_gc.go | 2 + .../x/sys/unix/syscall_unix_gc_ppc64x.go | 3 + .../x/sys/unix/syscall_zos_s390x.go | 1500 +---- vendor/golang.org/x/sys/unix/sysvshm_linux.go | 1 + vendor/golang.org/x/sys/unix/sysvshm_unix.go | 3 +- .../x/sys/unix/sysvshm_unix_other.go | 3 +- vendor/golang.org/x/sys/unix/timestruct.go | 1 + .../golang.org/x/sys/unix/unveil_openbsd.go | 41 +- vendor/golang.org/x/sys/unix/xattr_bsd.go | 1 + .../golang.org/x/sys/unix/zerrors_aix_ppc.go | 1 + .../x/sys/unix/zerrors_aix_ppc64.go | 1 + .../x/sys/unix/zerrors_darwin_amd64.go | 1 + .../x/sys/unix/zerrors_darwin_arm64.go | 1 + .../x/sys/unix/zerrors_dragonfly_amd64.go | 1 + .../x/sys/unix/zerrors_freebsd_386.go | 1 + .../x/sys/unix/zerrors_freebsd_amd64.go | 1 + .../x/sys/unix/zerrors_freebsd_arm.go | 1 + .../x/sys/unix/zerrors_freebsd_arm64.go | 1 + .../x/sys/unix/zerrors_freebsd_riscv64.go | 1 + vendor/golang.org/x/sys/unix/zerrors_linux.go | 113 +- .../x/sys/unix/zerrors_linux_386.go | 4 +- .../x/sys/unix/zerrors_linux_amd64.go | 4 +- .../x/sys/unix/zerrors_linux_arm.go | 4 +- .../x/sys/unix/zerrors_linux_arm64.go | 4 +- .../x/sys/unix/zerrors_linux_loong64.go | 5 +- .../x/sys/unix/zerrors_linux_mips.go | 4 +- .../x/sys/unix/zerrors_linux_mips64.go | 4 +- .../x/sys/unix/zerrors_linux_mips64le.go | 4 +- 
.../x/sys/unix/zerrors_linux_mipsle.go | 4 +- .../x/sys/unix/zerrors_linux_ppc.go | 4 +- .../x/sys/unix/zerrors_linux_ppc64.go | 4 +- .../x/sys/unix/zerrors_linux_ppc64le.go | 4 +- .../x/sys/unix/zerrors_linux_riscv64.go | 7 +- .../x/sys/unix/zerrors_linux_s390x.go | 4 +- .../x/sys/unix/zerrors_linux_sparc64.go | 4 +- .../x/sys/unix/zerrors_netbsd_386.go | 1 + .../x/sys/unix/zerrors_netbsd_amd64.go | 1 + .../x/sys/unix/zerrors_netbsd_arm.go | 1 + .../x/sys/unix/zerrors_netbsd_arm64.go | 1 + .../x/sys/unix/zerrors_openbsd_386.go | 1 + .../x/sys/unix/zerrors_openbsd_amd64.go | 1 + .../x/sys/unix/zerrors_openbsd_arm.go | 1 + .../x/sys/unix/zerrors_openbsd_arm64.go | 1 + .../x/sys/unix/zerrors_openbsd_mips64.go | 1 + .../x/sys/unix/zerrors_openbsd_ppc64.go | 1 + .../x/sys/unix/zerrors_openbsd_riscv64.go | 1 + .../x/sys/unix/zerrors_solaris_amd64.go | 1 + .../x/sys/unix/zerrors_zos_s390x.go | 234 +- .../x/sys/unix/zptrace_armnn_linux.go | 2 + .../x/sys/unix/zptrace_mipsnn_linux.go | 2 + .../x/sys/unix/zptrace_mipsnnle_linux.go | 2 + .../x/sys/unix/zptrace_x86_linux.go | 2 + .../x/sys/unix/zsymaddr_zos_s390x.s | 364 -- .../golang.org/x/sys/unix/zsyscall_aix_ppc.go | 1 + .../x/sys/unix/zsyscall_aix_ppc64.go | 1 + .../x/sys/unix/zsyscall_aix_ppc64_gc.go | 1 + .../x/sys/unix/zsyscall_aix_ppc64_gccgo.go | 1 + .../x/sys/unix/zsyscall_darwin_amd64.go | 1 + .../x/sys/unix/zsyscall_darwin_arm64.go | 1 + .../x/sys/unix/zsyscall_dragonfly_amd64.go | 1 + .../x/sys/unix/zsyscall_freebsd_386.go | 1 + .../x/sys/unix/zsyscall_freebsd_amd64.go | 1 + .../x/sys/unix/zsyscall_freebsd_arm.go | 1 + .../x/sys/unix/zsyscall_freebsd_arm64.go | 1 + .../x/sys/unix/zsyscall_freebsd_riscv64.go | 1 + .../x/sys/unix/zsyscall_illumos_amd64.go | 1 + .../golang.org/x/sys/unix/zsyscall_linux.go | 36 +- .../x/sys/unix/zsyscall_linux_386.go | 1 + .../x/sys/unix/zsyscall_linux_amd64.go | 1 + .../x/sys/unix/zsyscall_linux_arm.go | 1 + .../x/sys/unix/zsyscall_linux_arm64.go | 1 + .../x/sys/unix/zsyscall_linux_loong64.go | 1 + .../x/sys/unix/zsyscall_linux_mips.go | 1 + .../x/sys/unix/zsyscall_linux_mips64.go | 1 + .../x/sys/unix/zsyscall_linux_mips64le.go | 1 + .../x/sys/unix/zsyscall_linux_mipsle.go | 1 + .../x/sys/unix/zsyscall_linux_ppc.go | 1 + .../x/sys/unix/zsyscall_linux_ppc64.go | 1 + .../x/sys/unix/zsyscall_linux_ppc64le.go | 1 + .../x/sys/unix/zsyscall_linux_riscv64.go | 1 + .../x/sys/unix/zsyscall_linux_s390x.go | 1 + .../x/sys/unix/zsyscall_linux_sparc64.go | 1 + .../x/sys/unix/zsyscall_netbsd_386.go | 1 + .../x/sys/unix/zsyscall_netbsd_amd64.go | 1 + .../x/sys/unix/zsyscall_netbsd_arm.go | 1 + .../x/sys/unix/zsyscall_netbsd_arm64.go | 1 + .../x/sys/unix/zsyscall_openbsd_386.go | 70 +- .../x/sys/unix/zsyscall_openbsd_386.s | 20 - .../x/sys/unix/zsyscall_openbsd_amd64.go | 70 +- .../x/sys/unix/zsyscall_openbsd_amd64.s | 20 - .../x/sys/unix/zsyscall_openbsd_arm.go | 70 +- .../x/sys/unix/zsyscall_openbsd_arm.s | 20 - .../x/sys/unix/zsyscall_openbsd_arm64.go | 70 +- .../x/sys/unix/zsyscall_openbsd_arm64.s | 20 - .../x/sys/unix/zsyscall_openbsd_mips64.go | 70 +- .../x/sys/unix/zsyscall_openbsd_mips64.s | 20 - .../x/sys/unix/zsyscall_openbsd_ppc64.go | 70 +- .../x/sys/unix/zsyscall_openbsd_ppc64.s | 24 - .../x/sys/unix/zsyscall_openbsd_riscv64.go | 70 +- .../x/sys/unix/zsyscall_openbsd_riscv64.s | 20 - .../x/sys/unix/zsyscall_solaris_amd64.go | 1 + .../x/sys/unix/zsyscall_zos_s390x.go | 3112 ++-------- .../x/sys/unix/zsysctl_openbsd_386.go | 1 + .../x/sys/unix/zsysctl_openbsd_amd64.go | 1 + .../x/sys/unix/zsysctl_openbsd_arm.go 
| 1 + .../x/sys/unix/zsysctl_openbsd_arm64.go | 1 + .../x/sys/unix/zsysctl_openbsd_mips64.go | 1 + .../x/sys/unix/zsysctl_openbsd_ppc64.go | 1 + .../x/sys/unix/zsysctl_openbsd_riscv64.go | 1 + .../x/sys/unix/zsysnum_darwin_amd64.go | 1 + .../x/sys/unix/zsysnum_darwin_arm64.go | 1 + .../x/sys/unix/zsysnum_dragonfly_amd64.go | 1 + .../x/sys/unix/zsysnum_freebsd_386.go | 1 + .../x/sys/unix/zsysnum_freebsd_amd64.go | 1 + .../x/sys/unix/zsysnum_freebsd_arm.go | 1 + .../x/sys/unix/zsysnum_freebsd_arm64.go | 1 + .../x/sys/unix/zsysnum_freebsd_riscv64.go | 1 + .../x/sys/unix/zsysnum_linux_386.go | 11 +- .../x/sys/unix/zsysnum_linux_amd64.go | 11 +- .../x/sys/unix/zsysnum_linux_arm.go | 11 +- .../x/sys/unix/zsysnum_linux_arm64.go | 11 +- .../x/sys/unix/zsysnum_linux_loong64.go | 11 +- .../x/sys/unix/zsysnum_linux_mips.go | 11 +- .../x/sys/unix/zsysnum_linux_mips64.go | 11 +- .../x/sys/unix/zsysnum_linux_mips64le.go | 11 +- .../x/sys/unix/zsysnum_linux_mipsle.go | 11 +- .../x/sys/unix/zsysnum_linux_ppc.go | 11 +- .../x/sys/unix/zsysnum_linux_ppc64.go | 11 +- .../x/sys/unix/zsysnum_linux_ppc64le.go | 11 +- .../x/sys/unix/zsysnum_linux_riscv64.go | 11 +- .../x/sys/unix/zsysnum_linux_s390x.go | 11 +- .../x/sys/unix/zsysnum_linux_sparc64.go | 11 +- .../x/sys/unix/zsysnum_netbsd_386.go | 1 + .../x/sys/unix/zsysnum_netbsd_amd64.go | 1 + .../x/sys/unix/zsysnum_netbsd_arm.go | 1 + .../x/sys/unix/zsysnum_netbsd_arm64.go | 1 + .../x/sys/unix/zsysnum_openbsd_386.go | 1 + .../x/sys/unix/zsysnum_openbsd_amd64.go | 1 + .../x/sys/unix/zsysnum_openbsd_arm.go | 1 + .../x/sys/unix/zsysnum_openbsd_arm64.go | 1 + .../x/sys/unix/zsysnum_openbsd_mips64.go | 1 + .../x/sys/unix/zsysnum_openbsd_ppc64.go | 1 + .../x/sys/unix/zsysnum_openbsd_riscv64.go | 1 + .../x/sys/unix/zsysnum_zos_s390x.go | 5508 ++++++++--------- .../golang.org/x/sys/unix/ztypes_aix_ppc.go | 1 + .../golang.org/x/sys/unix/ztypes_aix_ppc64.go | 1 + .../x/sys/unix/ztypes_darwin_amd64.go | 1 + .../x/sys/unix/ztypes_darwin_arm64.go | 1 + .../x/sys/unix/ztypes_dragonfly_amd64.go | 1 + .../x/sys/unix/ztypes_freebsd_386.go | 1 + .../x/sys/unix/ztypes_freebsd_amd64.go | 1 + .../x/sys/unix/ztypes_freebsd_arm.go | 1 + .../x/sys/unix/ztypes_freebsd_arm64.go | 1 + .../x/sys/unix/ztypes_freebsd_riscv64.go | 1 + vendor/golang.org/x/sys/unix/ztypes_linux.go | 254 +- .../golang.org/x/sys/unix/ztypes_linux_386.go | 9 + .../x/sys/unix/ztypes_linux_amd64.go | 10 + .../golang.org/x/sys/unix/ztypes_linux_arm.go | 10 + .../x/sys/unix/ztypes_linux_arm64.go | 10 + .../x/sys/unix/ztypes_linux_loong64.go | 10 + .../x/sys/unix/ztypes_linux_mips.go | 10 + .../x/sys/unix/ztypes_linux_mips64.go | 10 + .../x/sys/unix/ztypes_linux_mips64le.go | 10 + .../x/sys/unix/ztypes_linux_mipsle.go | 10 + .../golang.org/x/sys/unix/ztypes_linux_ppc.go | 10 + .../x/sys/unix/ztypes_linux_ppc64.go | 10 + .../x/sys/unix/ztypes_linux_ppc64le.go | 10 + .../x/sys/unix/ztypes_linux_riscv64.go | 10 + .../x/sys/unix/ztypes_linux_s390x.go | 10 + .../x/sys/unix/ztypes_linux_sparc64.go | 10 + .../x/sys/unix/ztypes_netbsd_386.go | 1 + .../x/sys/unix/ztypes_netbsd_amd64.go | 1 + .../x/sys/unix/ztypes_netbsd_arm.go | 1 + .../x/sys/unix/ztypes_netbsd_arm64.go | 1 + .../x/sys/unix/ztypes_openbsd_386.go | 1 + .../x/sys/unix/ztypes_openbsd_amd64.go | 1 + .../x/sys/unix/ztypes_openbsd_arm.go | 1 + .../x/sys/unix/ztypes_openbsd_arm64.go | 1 + .../x/sys/unix/ztypes_openbsd_mips64.go | 1 + .../x/sys/unix/ztypes_openbsd_ppc64.go | 1 + .../x/sys/unix/ztypes_openbsd_riscv64.go | 1 + .../x/sys/unix/ztypes_solaris_amd64.go | 1 
+ .../golang.org/x/sys/unix/ztypes_zos_s390x.go | 147 +- vendor/golang.org/x/sys/windows/aliases.go | 3 +- vendor/golang.org/x/sys/windows/empty.s | 9 + .../golang.org/x/sys/windows/env_windows.go | 17 +- vendor/golang.org/x/sys/windows/eventlog.go | 1 + vendor/golang.org/x/sys/windows/mksyscall.go | 1 + vendor/golang.org/x/sys/windows/race.go | 1 + vendor/golang.org/x/sys/windows/race0.go | 1 + .../golang.org/x/sys/windows/registry/key.go | 1 + .../x/sys/windows/registry/mksyscall.go | 1 + .../x/sys/windows/registry/syscall.go | 1 + .../x/sys/windows/registry/value.go | 1 + vendor/golang.org/x/sys/windows/service.go | 1 + vendor/golang.org/x/sys/windows/str.go | 1 + vendor/golang.org/x/sys/windows/syscall.go | 1 + .../x/sys/windows/syscall_windows.go | 92 +- .../golang.org/x/sys/windows/types_windows.go | 52 +- .../x/sys/windows/zsyscall_windows.go | 145 - .../x/text/secure/bidirule/bidirule10.0.0.go | 1 + .../x/text/secure/bidirule/bidirule9.0.0.go | 1 + .../x/text/unicode/bidi/tables10.0.0.go | 1 + .../x/text/unicode/bidi/tables11.0.0.go | 1 + .../x/text/unicode/bidi/tables12.0.0.go | 1 + .../x/text/unicode/bidi/tables13.0.0.go | 1 + .../x/text/unicode/bidi/tables15.0.0.go | 1 + .../x/text/unicode/bidi/tables9.0.0.go | 1 + .../x/text/unicode/norm/tables10.0.0.go | 1 + .../x/text/unicode/norm/tables11.0.0.go | 1 + .../x/text/unicode/norm/tables12.0.0.go | 1 + .../x/text/unicode/norm/tables13.0.0.go | 1 + .../x/text/unicode/norm/tables15.0.0.go | 1 + .../x/text/unicode/norm/tables9.0.0.go | 1 + .../genproto/{googleapis/rpc => }/LICENSE | 0 .../googleapis/rpc/status/status.pb.go | 10 +- vendor/google.golang.org/grpc/CONTRIBUTING.md | 25 +- vendor/google.golang.org/grpc/README.md | 60 +- .../grpc/attributes/attributes.go | 72 +- .../grpc/balancer/balancer.go | 70 +- .../grpc/balancer/base/balancer.go | 22 +- .../grpc/balancer_conn_wrappers.go | 481 ++ .../grpc/balancer_wrapper.go | 380 -- .../grpc_binarylog_v1/binarylog.pb.go | 11 +- vendor/google.golang.org/grpc/call.go | 6 +- vendor/google.golang.org/grpc/clientconn.go | 979 ++- vendor/google.golang.org/grpc/codec.go | 8 +- .../grpc/codes/code_string.go | 51 +- vendor/google.golang.org/grpc/codes/codes.go | 8 +- .../google.golang.org/grpc/credentials/tls.go | 79 +- vendor/google.golang.org/grpc/dialoptions.go | 146 +- .../grpc/encoding/encoding.go | 21 +- .../grpc/encoding/proto/proto.go | 4 +- .../grpc/grpclog/component.go | 40 +- .../google.golang.org/grpc/grpclog/grpclog.go | 30 +- .../google.golang.org/grpc/grpclog/logger.go | 30 +- .../grpc/grpclog/loggerv2.go | 63 +- .../google.golang.org/grpc/health/client.go | 117 - .../grpc/health/grpc_health_v1/health.pb.go | 9 +- .../health/grpc_health_v1/health_grpc.pb.go | 37 +- .../google.golang.org/grpc/health/logging.go | 23 - .../google.golang.org/grpc/health/server.go | 163 - vendor/google.golang.org/grpc/interceptor.go | 12 +- .../grpc/internal/backoff/backoff.go | 36 - .../balancer/gracefulswitch/gracefulswitch.go | 59 +- .../grpc/internal/balancerload/load.go | 4 +- .../grpc/internal/binarylog/binarylog.go | 11 +- .../grpc/internal/binarylog/method_logger.go | 146 +- .../grpc/internal/binarylog/sink.go | 12 +- .../grpc/internal/buffer/unbounded.go | 63 +- .../grpc/internal/channelz/funcs.go | 76 +- .../grpc/internal/channelz/logging.go | 12 +- .../grpc/internal/channelz/types.go | 5 - .../grpc/internal/channelz/util_linux.go | 2 +- .../grpc/internal/channelz/util_nonlinux.go | 2 +- .../grpc/internal/credentials/credentials.go | 8 +- .../grpc/internal/envconfig/envconfig.go | 46 +- 
.../grpc/internal/envconfig/observability.go | 6 - .../grpc/internal/envconfig/xds.go | 47 +- .../grpc/internal/experimental.go | 28 - .../grpc/internal/grpclog/grpclog.go | 40 +- .../grpc/internal/grpclog/prefixLogger.go | 20 +- .../grpc/internal/grpcrand/grpcrand.go | 21 - .../internal/grpcsync/callback_serializer.go | 100 - .../grpc/internal/grpcsync/pubsub.go | 121 - .../grpc/internal/idle/idle.go | 278 - .../grpc/internal/internal.go | 103 +- .../grpc/internal/metadata/metadata.go | 64 +- .../grpc/internal/pretty/pretty.go | 2 +- .../grpc/internal/resolver/config_selector.go | 4 +- .../internal/resolver/dns/dns_resolver.go | 139 +- .../resolver/dns/internal/internal.go | 70 - .../resolver/passthrough/passthrough.go | 11 +- .../grpc/internal/resolver/unix/unix.go | 8 +- .../grpc/internal/serviceconfig/duration.go | 130 - .../grpc/internal/status/status.go | 36 +- .../grpc/internal/tcp_keepalive_others.go | 29 - .../grpc/internal/tcp_keepalive_unix.go | 54 - .../grpc/internal/tcp_keepalive_windows.go | 54 - .../grpc/internal/transport/controlbuf.go | 143 +- .../grpc/internal/transport/defaults.go | 6 - .../grpc/internal/transport/handler_server.go | 128 +- .../grpc/internal/transport/http2_client.go | 202 +- .../grpc/internal/transport/http2_server.go | 353 +- .../grpc/internal/transport/http_util.go | 103 +- .../grpc/internal/transport/logging.go | 40 - .../grpc/internal/transport/proxy.go | 14 +- .../grpc/internal/transport/transport.go | 70 +- .../grpc/metadata/metadata.go | 44 +- vendor/google.golang.org/grpc/peer/peer.go | 2 - .../google.golang.org/grpc/picker_wrapper.go | 79 +- vendor/google.golang.org/grpc/pickfirst.go | 106 +- vendor/google.golang.org/grpc/preloader.go | 2 +- .../grpc/reflection/README.md | 2 +- .../grpc/reflection/adapt.go | 187 - .../grpc_reflection_v1alpha/reflection.pb.go | 445 +- .../grpc_reflection_v1alpha/reflection.proto | 138 + .../reflection_grpc.pb.go | 20 +- .../grpc/reflection/serverreflection.go | 102 +- vendor/google.golang.org/grpc/regenerate.sh | 7 +- .../grpc/resolver/dns/dns_resolver.go | 36 - vendor/google.golang.org/grpc/resolver/map.go | 123 +- .../grpc/resolver/resolver.go | 158 +- .../grpc/resolver_conn_wrapper.go | 176 + .../grpc/resolver_wrapper.go | 197 - vendor/google.golang.org/grpc/rpc_util.go | 120 +- vendor/google.golang.org/grpc/server.go | 764 +-- .../google.golang.org/grpc/service_config.go | 83 +- .../grpc/shared_buffer_pool.go | 154 - vendor/google.golang.org/grpc/stats/stats.go | 36 +- .../google.golang.org/grpc/status/status.go | 59 +- vendor/google.golang.org/grpc/stream.go | 244 +- vendor/google.golang.org/grpc/tap/tap.go | 6 - vendor/google.golang.org/grpc/trace.go | 6 +- vendor/google.golang.org/grpc/version.go | 2 +- vendor/google.golang.org/grpc/vet.sh | 200 +- .../protobuf/encoding/protojson/decode.go | 42 +- .../protobuf/encoding/protojson/doc.go | 2 +- .../protobuf/encoding/protojson/encode.go | 59 +- .../encoding/protojson/well_known_types.go | 59 +- .../protobuf/encoding/prototext/decode.go | 12 +- .../protobuf/encoding/prototext/encode.go | 24 +- .../protobuf/encoding/protowire/wire.go | 28 +- .../protobuf/internal/descfmt/stringer.go | 184 +- .../internal/editiondefaults/defaults.go | 12 - .../editiondefaults/editions_defaults.binpb | Bin 93 -> 0 bytes .../internal/editionssupport/editions.go | 13 - .../protobuf/internal/encoding/json/decode.go | 4 +- .../protobuf/internal/encoding/tag/tag.go | 4 +- .../protobuf/internal/encoding/text/decode.go | 2 +- .../protobuf/internal/errors/errors.go | 21 +- 
.../protobuf/internal/filedesc/desc.go | 166 +- .../protobuf/internal/filedesc/desc_init.go | 89 +- .../protobuf/internal/filedesc/desc_lazy.go | 45 +- .../internal/filedesc/desc_list_gen.go | 11 - .../protobuf/internal/filedesc/editions.go | 156 - .../protobuf/internal/filedesc/placeholder.go | 1 - .../protobuf/internal/filetype/build.go | 4 +- .../protobuf/internal/genid/descriptor_gen.go | 401 +- .../internal/genid/go_features_gen.go | 31 - .../protobuf/internal/genid/struct_gen.go | 5 - .../protobuf/internal/genid/type_gen.go | 38 - .../protobuf/internal/impl/api_export.go | 6 +- .../protobuf/internal/impl/checkinit.go | 2 +- .../protobuf/internal/impl/codec_extension.go | 44 +- .../protobuf/internal/impl/codec_field.go | 64 +- .../protobuf/internal/impl/codec_gen.go | 113 +- .../protobuf/internal/impl/codec_map.go | 15 +- .../internal/impl/codec_messageset.go | 22 - .../protobuf/internal/impl/codec_tables.go | 2 +- .../protobuf/internal/impl/convert.go | 2 +- .../protobuf/internal/impl/convert_list.go | 2 +- .../protobuf/internal/impl/convert_map.go | 2 +- .../protobuf/internal/impl/encode.go | 48 +- .../protobuf/internal/impl/extension.go | 8 +- .../protobuf/internal/impl/legacy_enum.go | 3 +- .../internal/impl/legacy_extension.go | 2 +- .../protobuf/internal/impl/legacy_file.go | 4 +- .../protobuf/internal/impl/legacy_message.go | 31 +- .../protobuf/internal/impl/message.go | 23 +- .../protobuf/internal/impl/message_reflect.go | 45 +- .../internal/impl/message_reflect_field.go | 2 +- .../internal/impl/message_reflect_gen.go | 146 +- .../protobuf/internal/impl/pointer_reflect.go | 42 +- .../protobuf/internal/impl/pointer_unsafe.go | 44 +- .../protobuf/internal/order/range.go | 4 +- .../protobuf/internal/strs/strings.go | 2 +- ...ings_unsafe_go120.go => strings_unsafe.go} | 4 +- .../internal/strs/strings_unsafe_go121.go | 74 - .../protobuf/internal/version/version.go | 4 +- .../protobuf/proto/decode.go | 4 +- .../google.golang.org/protobuf/proto/doc.go | 58 +- .../protobuf/proto/encode.go | 46 +- .../protobuf/proto/extension.go | 19 +- .../google.golang.org/protobuf/proto/merge.go | 2 +- .../protobuf/proto/messageset.go | 7 +- .../google.golang.org/protobuf/proto/proto.go | 18 +- .../google.golang.org/protobuf/proto/size.go | 2 - .../protobuf/reflect/protodesc/desc.go | 34 +- .../protobuf/reflect/protodesc/desc_init.go | 43 +- .../reflect/protodesc/desc_resolve.go | 9 +- .../reflect/protodesc/desc_validate.go | 75 +- .../protobuf/reflect/protodesc/editions.go | 145 - .../protobuf/reflect/protodesc/proto.go | 40 +- .../protobuf/reflect/protoreflect/proto.go | 87 +- .../reflect/protoreflect/source_gen.go | 85 +- .../protobuf/reflect/protoreflect/type.go | 56 +- .../protobuf/reflect/protoreflect/value.go | 24 +- .../reflect/protoreflect/value_equal.go | 8 +- .../reflect/protoreflect/value_pure.go | 14 +- .../reflect/protoreflect/value_union.go | 58 +- ...{value_unsafe_go120.go => value_unsafe.go} | 10 +- .../protoreflect/value_unsafe_go121.go | 87 - .../reflect/protoregistry/registry.go | 38 +- .../types/descriptorpb/descriptor.pb.go | 2594 ++------ .../protobuf/types/dynamicpb/dynamic.go | 59 +- .../protobuf/types/dynamicpb/types.go | 25 +- .../types/gofeaturespb/go_features.pb.go | 181 - .../protobuf/types/known/anypb/any.pb.go | 7 +- .../protobuf/types/known/apipb/api.pb.go | 8 +- .../types/known/durationpb/duration.pb.go | 4 +- .../protobuf/types/known/emptypb/empty.pb.go | 4 +- .../types/known/fieldmaskpb/field_mask.pb.go | 4 +- .../sourcecontextpb/source_context.pb.go | 4 +- 
.../types/known/structpb/struct.pb.go | 50 +- .../types/known/timestamppb/timestamp.pb.go | 4 +- .../protobuf/types/known/typepb/type.pb.go | 12 +- .../types/known/wrapperspb/wrappers.pb.go | 20 +- .../protobuf/types/pluginpb/plugin.pb.go | 168 +- vendor/modules.txt | 70 +- 824 files changed, 17675 insertions(+), 36066 deletions(-) delete mode 100644 dialtesting/greeter/b.sh delete mode 100644 dialtesting/greeter/greeter.pb.go delete mode 100644 dialtesting/greeter/greeter.proto delete mode 100644 dialtesting/greeter/greeter_grpc.pb.go create mode 100644 dialtesting/grpc_script.go create mode 100644 dialtesting/grpc_script_test.go create mode 100644 dialtesting/grpcproto/common.proto create mode 100644 dialtesting/grpcproto/greeter.proto delete mode 100644 vendor/github.com/bufbuild/protocompile/.protoc_version delete mode 100644 vendor/github.com/bufbuild/protocompile/internal/editions/editions.go delete mode 100644 vendor/github.com/bufbuild/protocompile/internal/featuresext/cpp_features.protoset delete mode 100644 vendor/github.com/bufbuild/protocompile/internal/featuresext/featuresext.go delete mode 100644 vendor/github.com/bufbuild/protocompile/internal/featuresext/java_features.protoset delete mode 100644 vendor/github.com/bufbuild/protocompile/internal/messageset/messageset.go delete mode 100644 vendor/github.com/bufbuild/protocompile/linker/pool.go delete mode 100644 vendor/github.com/bufbuild/protocompile/options/source_retention_options.go delete mode 100644 vendor/github.com/bufbuild/protocompile/options/target_types.go delete mode 100644 vendor/github.com/bufbuild/protocompile/protoutil/editions.go delete mode 100644 vendor/github.com/bufbuild/protocompile/supported_editions.go delete mode 100644 vendor/github.com/go-ping/ping/.editorconfig delete mode 100644 vendor/github.com/go-ping/ping/.gitignore delete mode 100644 vendor/github.com/go-ping/ping/.golangci.yml delete mode 100644 vendor/github.com/go-ping/ping/.goreleaser.yml delete mode 100644 vendor/github.com/go-ping/ping/CONTRIBUTING.md delete mode 100644 vendor/github.com/go-ping/ping/LICENSE delete mode 100644 vendor/github.com/go-ping/ping/Makefile delete mode 100644 vendor/github.com/go-ping/ping/README.md delete mode 100644 vendor/github.com/go-ping/ping/logger.go delete mode 100644 vendor/github.com/go-ping/ping/packetconn.go delete mode 100644 vendor/github.com/go-ping/ping/ping.go delete mode 100644 vendor/github.com/go-ping/ping/utils_linux.go delete mode 100644 vendor/github.com/go-ping/ping/utils_other.go delete mode 100644 vendor/github.com/go-ping/ping/utils_windows.go create mode 100644 vendor/github.com/google/uuid/.travis.yml delete mode 100644 vendor/github.com/google/uuid/CHANGELOG.md delete mode 100644 vendor/github.com/jhump/protoreflect/grpcreflect/adapt.go rename vendor/{google.golang.org/grpc/reflection => github.com/jhump/protoreflect/grpcreflect/internal}/grpc_reflection_v1/reflection.pb.go (56%) create mode 100644 vendor/github.com/jhump/protoreflect/grpcreflect/internal/grpc_reflection_v1/reflection.proto rename vendor/{google.golang.org/grpc/reflection => github.com/jhump/protoreflect/grpcreflect/internal}/grpc_reflection_v1/reflection_grpc.pb.go (76%) create mode 100644 vendor/github.com/jhump/protoreflect/grpcreflect/internal/grpc_reflection_v1/svc_impl.go delete mode 100644 vendor/github.com/tidwall/tinylru/LICENSE delete mode 100644 vendor/github.com/tidwall/tinylru/README.md delete mode 100644 vendor/github.com/tidwall/tinylru/lru.go delete mode 100644 
vendor/github.com/tidwall/wal/.gitignore delete mode 100644 vendor/github.com/tidwall/wal/LICENSE delete mode 100644 vendor/github.com/tidwall/wal/README.md delete mode 100644 vendor/github.com/tidwall/wal/wal.go create mode 100644 vendor/golang.org/x/net/http2/go111.go create mode 100644 vendor/golang.org/x/net/http2/go115.go create mode 100644 vendor/golang.org/x/net/http2/go118.go create mode 100644 vendor/golang.org/x/net/http2/not_go111.go create mode 100644 vendor/golang.org/x/net/http2/not_go115.go create mode 100644 vendor/golang.org/x/net/http2/not_go118.go delete mode 100644 vendor/golang.org/x/net/http2/testsync.go delete mode 100644 vendor/golang.org/x/sync/errgroup/errgroup.go delete mode 100644 vendor/golang.org/x/sync/errgroup/go120.go delete mode 100644 vendor/golang.org/x/sync/errgroup/pre_go120.go delete mode 100644 vendor/golang.org/x/sys/unix/bpxsvc_zos.go delete mode 100644 vendor/golang.org/x/sys/unix/bpxsvc_zos.s create mode 100644 vendor/golang.org/x/sys/unix/epoll_zos.go create mode 100644 vendor/golang.org/x/sys/unix/fstatfs_zos.go delete mode 100644 vendor/golang.org/x/sys/unix/sockcmsg_zos.go delete mode 100644 vendor/golang.org/x/sys/unix/symaddr_zos_s390x.s delete mode 100644 vendor/golang.org/x/sys/unix/zsymaddr_zos_s390x.s create mode 100644 vendor/golang.org/x/sys/windows/empty.s rename vendor/google.golang.org/genproto/{googleapis/rpc => }/LICENSE (100%) create mode 100644 vendor/google.golang.org/grpc/balancer_conn_wrappers.go delete mode 100644 vendor/google.golang.org/grpc/balancer_wrapper.go delete mode 100644 vendor/google.golang.org/grpc/health/client.go delete mode 100644 vendor/google.golang.org/grpc/health/logging.go delete mode 100644 vendor/google.golang.org/grpc/health/server.go delete mode 100644 vendor/google.golang.org/grpc/internal/experimental.go delete mode 100644 vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go delete mode 100644 vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go delete mode 100644 vendor/google.golang.org/grpc/internal/idle/idle.go delete mode 100644 vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go delete mode 100644 vendor/google.golang.org/grpc/internal/serviceconfig/duration.go delete mode 100644 vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go delete mode 100644 vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go delete mode 100644 vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go delete mode 100644 vendor/google.golang.org/grpc/internal/transport/logging.go delete mode 100644 vendor/google.golang.org/grpc/reflection/adapt.go create mode 100644 vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto delete mode 100644 vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go create mode 100644 vendor/google.golang.org/grpc/resolver_conn_wrapper.go delete mode 100644 vendor/google.golang.org/grpc/resolver_wrapper.go delete mode 100644 vendor/google.golang.org/grpc/shared_buffer_pool.go delete mode 100644 vendor/google.golang.org/protobuf/internal/editiondefaults/defaults.go delete mode 100644 vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb delete mode 100644 vendor/google.golang.org/protobuf/internal/editionssupport/editions.go delete mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/editions.go delete mode 100644 vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go rename 
vendor/google.golang.org/protobuf/internal/strs/{strings_unsafe_go120.go => strings_unsafe.go} (96%) delete mode 100644 vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go delete mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/editions.go rename vendor/google.golang.org/protobuf/reflect/protoreflect/{value_unsafe_go120.go => value_unsafe.go} (93%) delete mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go delete mode 100644 vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go diff --git a/dialtesting/greeter/b.sh b/dialtesting/greeter/b.sh deleted file mode 100644 index a2e329d6..00000000 --- a/dialtesting/greeter/b.sh +++ /dev/null @@ -1,3 +0,0 @@ -protoc --go_out=. --go_opt=paths=source_relative \ - --go-grpc_out=. --go-grpc_opt=paths=source_relative \ - greeter.proto diff --git a/dialtesting/greeter/greeter.pb.go b/dialtesting/greeter/greeter.pb.go deleted file mode 100644 index b186b856..00000000 --- a/dialtesting/greeter/greeter.pb.go +++ /dev/null @@ -1,224 +0,0 @@ -// Specify the proto3 syntax. If you omit this, proto2 is the default. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.1 -// protoc v5.27.0 -// source: greeter.proto - -// Define the package name; this helps prevent naming conflicts between different .proto files. -// In the generated Go code, it becomes part of the package name. - -package greeter - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// Define the request message (Request Message). -// It describes the data structure that must be passed when calling the SayHello method. -type HelloRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Field type: string - // Field name: name - // Field number: 1 (every field number must be unique within a message) - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` -} - -func (x *HelloRequest) Reset() { - *x = HelloRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_greeter_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *HelloRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*HelloRequest) ProtoMessage() {} - -func (x *HelloRequest) ProtoReflect() protoreflect.Message { - mi := &file_greeter_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use HelloRequest.ProtoReflect.Descriptor instead. 
-func (*HelloRequest) Descriptor() ([]byte, []int) { - return file_greeter_proto_rawDescGZIP(), []int{0} -} - -func (x *HelloRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -// Define the response message (Response Message). -// It describes the data structure returned when the SayHello method succeeds. -type HelloReply struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Message string `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"` -} - -func (x *HelloReply) Reset() { - *x = HelloReply{} - if protoimpl.UnsafeEnabled { - mi := &file_greeter_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *HelloReply) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*HelloReply) ProtoMessage() {} - -func (x *HelloReply) ProtoReflect() protoreflect.Message { - mi := &file_greeter_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use HelloReply.ProtoReflect.Descriptor instead. -func (*HelloReply) Descriptor() ([]byte, []int) { - return file_greeter_proto_rawDescGZIP(), []int{1} -} - -func (x *HelloReply) GetMessage() string { - if x != nil { - return x.Message - } - return "" -} - -var File_greeter_proto protoreflect.FileDescriptor - -var file_greeter_proto_rawDesc = []byte{ - 0x0a, 0x0d, 0x67, 0x72, 0x65, 0x65, 0x74, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x07, 0x67, 0x72, 0x65, 0x65, 0x74, 0x65, 0x72, 0x22, 0x22, 0x0a, 0x0c, 0x48, 0x65, 0x6c, 0x6c, - 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x26, 0x0a, 0x0a, - 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x32, 0x43, 0x0a, 0x07, 0x47, 0x72, 0x65, 0x65, 0x74, 0x65, 0x72, 0x12, - 0x38, 0x0a, 0x08, 0x53, 0x61, 0x79, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x12, 0x15, 0x2e, 0x67, 0x72, - 0x65, 0x65, 0x74, 0x65, 0x72, 0x2e, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x13, 0x2e, 0x67, 0x72, 0x65, 0x65, 0x74, 0x65, 0x72, 0x2e, 0x48, 0x65, 0x6c, - 0x6c, 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x42, 0x21, 0x5a, 0x1f, 0x65, 0x78, 0x61, - 0x6d, 0x70, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6d, 0x79, 0x2d, 0x67, 0x72, 0x70, 0x63, - 0x2d, 0x61, 0x70, 0x70, 0x2f, 0x67, 0x72, 0x65, 0x65, 0x74, 0x65, 0x72, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_greeter_proto_rawDescOnce sync.Once - file_greeter_proto_rawDescData = file_greeter_proto_rawDesc -) - -func file_greeter_proto_rawDescGZIP() []byte { - file_greeter_proto_rawDescOnce.Do(func() { - file_greeter_proto_rawDescData = protoimpl.X.CompressGZIP(file_greeter_proto_rawDescData) - }) - return file_greeter_proto_rawDescData -} - -var file_greeter_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_greeter_proto_goTypes = []interface{}{ - (*HelloRequest)(nil), // 0: greeter.HelloRequest - (*HelloReply)(nil), // 1: greeter.HelloReply -} -var file_greeter_proto_depIdxs = []int32{ - 0, // 0: greeter.Greeter.SayHello:input_type -> greeter.HelloRequest - 1, // 1: greeter.Greeter.SayHello:output_type -> 
greeter.HelloReply - 1, // [1:2] is the sub-list for method output_type - 0, // [0:1] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_greeter_proto_init() } -func file_greeter_proto_init() { - if File_greeter_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_greeter_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HelloRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_greeter_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HelloReply); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_greeter_proto_rawDesc, - NumEnums: 0, - NumMessages: 2, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_greeter_proto_goTypes, - DependencyIndexes: file_greeter_proto_depIdxs, - MessageInfos: file_greeter_proto_msgTypes, - }.Build() - File_greeter_proto = out.File - file_greeter_proto_rawDesc = nil - file_greeter_proto_goTypes = nil - file_greeter_proto_depIdxs = nil -} diff --git a/dialtesting/greeter/greeter.proto b/dialtesting/greeter/greeter.proto deleted file mode 100644 index d1e6b71b..00000000 --- a/dialtesting/greeter/greeter.proto +++ /dev/null @@ -1,35 +0,0 @@ -// Specify the proto3 syntax. If you omit this, proto2 is the default. -syntax = "proto3"; - -// Define the package name; this helps prevent naming conflicts between different .proto files. -// In the generated Go code, it becomes part of the package name. -package greeter; - -// [Important] Specify the generated package path for Go. -// You should change this to your own project's Go module path. -// For example: "github.com/your_user/your_project/protos/greeter" -option go_package = "example.com/my-grpc-app/greeter"; - -// Define the service (Service). A service can be seen as a collection of RPC methods. -// Clients can call these methods. -service Greeter { - // Define an RPC method named SayHello. - // It takes a HelloRequest message as its parameter - // and returns a HelloReply message. - rpc SayHello (HelloRequest) returns (HelloReply) {} -} - -// Define the request message (Request Message). -// It describes the data structure that must be passed when calling the SayHello method. -message HelloRequest { - // Field type: string - // Field name: name - // Field number: 1 (every field number must be unique within a message) - string name = 1; -} - -// Define the response message (Response Message). -// It describes the data structure returned when the SayHello method succeeds. -message HelloReply { - string message = 1; -} diff --git a/dialtesting/greeter/greeter_grpc.pb.go b/dialtesting/greeter/greeter_grpc.pb.go deleted file mode 100644 index b609f868..00000000 --- a/dialtesting/greeter/greeter_grpc.pb.go +++ /dev/null @@ -1,111 +0,0 @@ -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc v5.27.0 -// source: greeter.proto - -package greeter - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 - -// GreeterClient is the client API for Greeter service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
-type GreeterClient interface { - // 定义一个 RPC 方法,名为 SayHello。 - // 它接收一个 HelloRequest 消息作为参数, - // 并返回一个 HelloReply 消息。 - SayHello(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloReply, error) -} - -type greeterClient struct { - cc grpc.ClientConnInterface -} - -func NewGreeterClient(cc grpc.ClientConnInterface) GreeterClient { - return &greeterClient{cc} -} - -func (c *greeterClient) SayHello(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloReply, error) { - out := new(HelloReply) - err := c.cc.Invoke(ctx, "/greeter.Greeter/SayHello", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// GreeterServer is the server API for Greeter service. -// All implementations must embed UnimplementedGreeterServer -// for forward compatibility -type GreeterServer interface { - // 定义一个 RPC 方法,名为 SayHello。 - // 它接收一个 HelloRequest 消息作为参数, - // 并返回一个 HelloReply 消息。 - SayHello(context.Context, *HelloRequest) (*HelloReply, error) - mustEmbedUnimplementedGreeterServer() -} - -// UnimplementedGreeterServer must be embedded to have forward compatible implementations. -type UnimplementedGreeterServer struct { -} - -func (UnimplementedGreeterServer) SayHello(context.Context, *HelloRequest) (*HelloReply, error) { - return nil, status.Errorf(codes.Unimplemented, "method SayHello not implemented") -} -func (UnimplementedGreeterServer) mustEmbedUnimplementedGreeterServer() {} - -// UnsafeGreeterServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to GreeterServer will -// result in compilation errors. -type UnsafeGreeterServer interface { - mustEmbedUnimplementedGreeterServer() -} - -func RegisterGreeterServer(s grpc.ServiceRegistrar, srv GreeterServer) { - s.RegisterService(&Greeter_ServiceDesc, srv) -} - -func _Greeter_SayHello_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(HelloRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(GreeterServer).SayHello(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/greeter.Greeter/SayHello", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(GreeterServer).SayHello(ctx, req.(*HelloRequest)) - } - return interceptor(ctx, in, info, handler) -} - -// Greeter_ServiceDesc is the grpc.ServiceDesc for Greeter service. 
-// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var Greeter_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "greeter.Greeter", - HandlerType: (*GreeterServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "SayHello", - Handler: _Greeter_SayHello_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "greeter.proto", -} diff --git a/dialtesting/grpc.go b/dialtesting/grpc.go index aa4ffca0..8274e18d 100644 --- a/dialtesting/grpc.go +++ b/dialtesting/grpc.go @@ -8,11 +8,14 @@ package dialtesting import ( "bufio" "context" + "crypto/tls" + "crypto/x509" "encoding/json" + "errors" "fmt" "net" - "path/filepath" "strings" + "text/template" "time" pdesc "github.com/jhump/protoreflect/desc" @@ -21,7 +24,10 @@ import ( "github.com/jhump/protoreflect/dynamic/grpcdynamic" "github.com/jhump/protoreflect/grpcreflect" "google.golang.org/grpc" + "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/health/grpc_health_v1" + "google.golang.org/grpc/metadata" ) var ( @@ -30,181 +36,420 @@ var ( ) const ( - DefaultGRPCTimeout = 30 * time.Second + DefaultGRPCTimeout = 30 * time.Second + HealthCheckServiceName = "grpc.health.v1.Health" + HealthCheckMethodName = "Check" ) -type GRPCTask struct { - *Task - Server string `json:"server"` +type GRPCOptCertificate struct { + IgnoreServerCertificateError bool `json:"ignore_server_certificate_error,omitempty"` + PrivateKey string `json:"private_key,omitempty"` + Certificate string `json:"certificate,omitempty"` + CaCert string `json:"ca,omitempty"` +} + +type GRPCSecret struct { + NoSaveResponseBody bool `json:"not_save,omitempty"` +} + +type GRPCSuccess struct { + Body []*SuccessOption `json:"body,omitempty"` + ResponseTime string `json:"response_time,omitempty"` + respTime time.Duration +} + +type GRPCProtoFilesDiscovery struct { + ProtoFiles map[string]string `json:"protofiles"` FullMethod string `json:"full_method"` - ProtoFiles map[string]string `json:"protofiles"` // user's multiple .proto files - JSONRequest []byte `json:"request"` // user's gRPC request are JSON bytes - Timeout string `json:"timeout"` // request timeout, e.g., "30s", "1m" + JSONRequest string `json:"request,omitempty"` +} - conn *grpc.ClientConn - method *pdesc.MethodDescriptor +type GRPCReflectionDiscovery struct { + FullMethod string `json:"full_method"` + JSONRequest string `json:"request,omitempty"` +} - result []byte - reqError string - reqCost time.Duration - timeout time.Duration +type GRPCHealthCheckDiscovery struct { + Service string `json:"service,omitempty"` } -func (t *GRPCTask) stop() { - if t.conn != nil { - t.conn.Close() +type GRPCOptRequest struct { + Metadata map[string]string `json:"metadata,omitempty"` + RequestTimeout string `json:"request_timeout,omitempty"` + ProtoFiles *GRPCProtoFilesDiscovery `json:"proto_files,omitempty"` + Reflection *GRPCReflectionDiscovery `json:"reflection,omitempty"` + HealthCheck *GRPCHealthCheckDiscovery `json:"health_check,omitempty"` +} + +type GRPCAdvanceOption struct { + RequestOptions *GRPCOptRequest `json:"request_options,omitempty"` + Certificate *GRPCOptCertificate `json:"certificate,omitempty"` + Secret *GRPCSecret `json:"secret,omitempty"` +} + +type GRPCTask struct { + *Task + Server string `json:"server"` + PostScript string `json:"post_script,omitempty"` + SuccessWhenLogic string `json:"success_when_logic"` + SuccessWhen []*GRPCSuccess `json:"success_when"` + AdvanceOptions 
*GRPCAdvanceOption `json:"advance_options,omitempty"` + + creds credentials.TransportCredentials + + result []byte + reqError string + reqCost time.Duration + timeout time.Duration + postScriptResult *ScriptResult + + rawTask *GRPCTask + methodDescriptor *pdesc.MethodDescriptor // cached method descriptor for ProtoFiles discovery +} + +func (t *GRPCTask) initTask() { + if t.Task == nil { + t.Task = &Task{} } } +func (t *GRPCTask) check() error { + if t.Server == "" { + return fmt.Errorf("server address is required") + } + if t.AdvanceOptions != nil && + t.AdvanceOptions.RequestOptions != nil && + t.AdvanceOptions.RequestOptions.ProtoFiles != nil && + len(t.AdvanceOptions.RequestOptions.ProtoFiles.ProtoFiles) == 0 { + return fmt.Errorf("proto files not provided") + } + if t.getFullMethod() == "" { + return fmt.Errorf("full method is required") + } + if len(t.SuccessWhen) == 0 && t.PostScript == "" { + return fmt.Errorf(`no any check rule`) + } + + return nil +} + func (t *GRPCTask) init() error { - // parse timeout + if t.AdvanceOptions == nil || t.AdvanceOptions.RequestOptions == nil { + return fmt.Errorf("advance options required") + } + opt := t.AdvanceOptions + reqOpt := opt.RequestOptions + t.timeout = DefaultGRPCTimeout - if t.Timeout != "" { - timeout, err := time.ParseDuration(t.Timeout) + if reqOpt.RequestTimeout != "" { + timeout, err := time.ParseDuration(reqOpt.RequestTimeout) if err != nil { - return fmt.Errorf("invalid timeout %q: %w", t.Timeout, err) + return fmt.Errorf("invalid timeout %q: %w", reqOpt.RequestTimeout, err) } t.timeout = timeout } - conn, err := grpc.Dial(t.Server, grpc.WithTransportCredentials(insecure.NewCredentials())) + // init success checker + for _, checker := range t.SuccessWhen { + if checker == nil { + continue + } + if checker.ResponseTime != "" { + du, err := time.ParseDuration(checker.ResponseTime) + if err != nil { + return fmt.Errorf("invalid response time %q: %w", checker.ResponseTime, err) + } + checker.respTime = du + } + + // body + for _, v := range checker.Body { + if v == nil { + continue + } + if err := genReg(v); err != nil { + return fmt.Errorf("compile regex failed: %w", err) + } + } + } + + // setup transport credentials + var err error + t.creds, err = t.buildTLSCredentials() if err != nil { - return err + return fmt.Errorf("build TLS credentials failed: %w", err) } - t.conn = conn - if t.FullMethod != "" { - ctx, cancel := context.WithTimeout(context.Background(), t.timeout) - defer cancel() - if err := t.findMethod(ctx); err != nil { - t.conn.Close() - return err + // Cache method descriptor if using ProtoFiles discovery + if reqOpt.ProtoFiles != nil && len(reqOpt.ProtoFiles.ProtoFiles) > 0 { + methodDesc, err := t.findMethodAmongProtofiles() + if err != nil { + return fmt.Errorf("find method descriptor failed: %w", err) } + t.methodDescriptor = methodDesc } return nil } -func (t *GRPCTask) findMethod(ctx context.Context) error { - if len(t.ProtoFiles) > 0 { - err := t.findMethodAmongProtofiles() - if err != nil { - return fmt.Errorf("find method via proto files: %w", err) +func (t *GRPCTask) buildTLSCredentials() (credentials.TransportCredentials, error) { + opt := t.AdvanceOptions + if opt == nil || opt.Certificate == nil { + return insecure.NewCredentials(), nil + } + + cert := opt.Certificate + + // if ignore server certificate error, use insecure TLS config + if cert.IgnoreServerCertificateError { + config := &tls.Config{ + InsecureSkipVerify: true, //nolint:gosec } - return nil + return credentials.NewTLS(config), nil } - 
err := t.findMethodByReflection(ctx) + // if CA cert is provided, setup mTLS + if cert.CaCert != "" { + caCertPool := x509.NewCertPool() + if !caCertPool.AppendCertsFromPEM([]byte(cert.CaCert)) { + return nil, fmt.Errorf("failed to append CA certificate") + } + + config := &tls.Config{ + RootCAs: caCertPool, + MinVersion: tls.VersionTLS12, + } + + // if client certificate and private key are provided, add them for mTLS + if cert.Certificate != "" && cert.PrivateKey != "" { + clientCert, err := tls.X509KeyPair([]byte(cert.Certificate), []byte(cert.PrivateKey)) + if err != nil { + return nil, fmt.Errorf("failed to load client certificate: %w", err) + } + config.Certificates = []tls.Certificate{clientCert} + } + + return credentials.NewTLS(config), nil + } + + return insecure.NewCredentials(), nil +} + +func (t *GRPCTask) findMethod(ctx context.Context, conn *grpc.ClientConn) (*pdesc.MethodDescriptor, error) { + opt := t.AdvanceOptions + if opt == nil || opt.RequestOptions == nil { + return nil, fmt.Errorf("request options required") + } + + reqOpt := opt.RequestOptions + + if reqOpt.ProtoFiles != nil { + if len(reqOpt.ProtoFiles.ProtoFiles) == 0 { + return nil, fmt.Errorf("proto files not provided") + } + return t.findMethodAmongProtofiles() + } + + if reqOpt.Reflection != nil { + return t.findMethodByReflection(ctx, conn) + } + + if reqOpt.HealthCheck != nil { + return t.findHealthCheckMethod() + } + + return nil, fmt.Errorf("no discovery method configured (proto_files, reflection, or health_check)") +} + +func (t *GRPCTask) findHealthCheckMethod() (*pdesc.MethodDescriptor, error) { + healthFD := grpc_health_v1.File_grpc_health_v1_health_proto + if healthFD == nil { + return nil, fmt.Errorf("health check file descriptor not available") + } + + fd, err := pdesc.WrapFile(healthFD) if err != nil { - return fmt.Errorf("find method via reflection: %w", err) + return nil, fmt.Errorf("wrap health check file descriptor failed: %w", err) } - return nil + + sd := fd.FindService(HealthCheckServiceName) + if sd == nil { + return nil, fmt.Errorf("health check service %s not found", HealthCheckServiceName) + } + + md := sd.FindMethodByName(HealthCheckMethodName) + if md == nil { + return nil, fmt.Errorf("health check method %s not found", HealthCheckMethodName) + } + + return md, nil } -func (t *GRPCTask) findMethodByReflection(ctx context.Context) error { - rc := grpcreflect.NewClientAuto(ctx, t.conn) +func (t *GRPCTask) findMethodByReflection(ctx context.Context, conn *grpc.ClientConn) (*pdesc.MethodDescriptor, error) { + opt := t.AdvanceOptions + if opt == nil || opt.RequestOptions == nil || opt.RequestOptions.Reflection == nil { + return nil, fmt.Errorf("reflection discovery not configured") + } + + fullMethod := opt.RequestOptions.Reflection.FullMethod + if fullMethod == "" { + return nil, fmt.Errorf("full method is required for reflection discovery") + } + fullMethod = strings.TrimPrefix(fullMethod, "/") + + rc := grpcreflect.NewClientAuto(ctx, conn) defer rc.Reset() - slash := strings.LastIndex(t.FullMethod, "/") + slash := strings.LastIndex(fullMethod, "/") if slash == -1 { - return fmt.Errorf("invalid full method name: %s", t.FullMethod) + return nil, fmt.Errorf("invalid full method name: %s", fullMethod) } - serviceName := t.FullMethod[:slash] + serviceName := fullMethod[:slash] fd, err := rc.FileContainingSymbol(serviceName) if err != nil { - return err + return nil, err } sd := fd.FindService(serviceName) if sd == nil { - return fmt.Errorf("service %s not found", serviceName) + return nil, 
fmt.Errorf("service %s not found", serviceName) } - methodName := t.FullMethod[slash+1:] + methodName := fullMethod[slash+1:] md := sd.FindMethodByName(methodName) if md == nil { - return fmt.Errorf("method %s not found in service %s", methodName, serviceName) + return nil, fmt.Errorf("method %s not found in service %s", methodName, serviceName) } - t.method = md - return nil + return md, nil } -func (t *GRPCTask) findMethodAmongProtofiles() error { - extendedMap := buildExtendedProtoMap(t.ProtoFiles) +func (t *GRPCTask) findMethodAmongProtofiles() (*pdesc.MethodDescriptor, error) { + // Return cached method descriptor if available + if t.methodDescriptor != nil { + return t.methodDescriptor, nil + } + + opt := t.AdvanceOptions + if opt == nil || opt.RequestOptions == nil || opt.RequestOptions.ProtoFiles == nil { + return nil, fmt.Errorf("proto files discovery not configured") + } + + protoFiles := opt.RequestOptions.ProtoFiles.ProtoFiles + fullMethod := opt.RequestOptions.ProtoFiles.FullMethod + + if len(protoFiles) == 0 { + return nil, fmt.Errorf("proto files not provided") + } + if fullMethod == "" { + return nil, fmt.Errorf("full method is required for proto files discovery") + } + fullMethod = strings.TrimPrefix(fullMethod, "/") + + extendedMap, err := buildExtendedProtoMap(protoFiles) + if err != nil { + return nil, err + } p := protoparse.Parser{ Accessor: protoparse.FileContentsFromMap(extendedMap), InferImportPaths: true, } - desc, err := p.ParseFiles(getFileNames(t.ProtoFiles)...) + desc, err := p.ParseFiles(getFileNames(protoFiles)...) if err != nil { - return fmt.Errorf("parse proto files failed: %w", err) + return nil, fmt.Errorf("parse proto files failed: %w", err) } - sepIdx := strings.LastIndex(t.FullMethod, "/") + sepIdx := strings.LastIndex(fullMethod, "/") if sepIdx == -1 { - return fmt.Errorf("invalid fullMethod: %q", t.FullMethod) + return nil, fmt.Errorf("invalid fullMethod: %q", fullMethod) } - service := t.FullMethod[:sepIdx] - method := t.FullMethod[sepIdx+1:] + service := fullMethod[:sepIdx] + method := fullMethod[sepIdx+1:] for _, fd := range desc { if sd := fd.FindService(service); sd != nil { if md := sd.FindMethodByName(method); md != nil { - t.method = md - return nil + return md, nil } } } - return fmt.Errorf("method %s not found in service %s", t.FullMethod, service) + return nil, fmt.Errorf("method %s not found in service %s", fullMethod, service) } -func buildExtendedProtoMap(protoFiles map[string]string) map[string]string { - extendedMap := make(map[string]string) - - // Add original files and their base names +func buildExtendedProtoMap(protoFiles map[string]string) (map[string]string, error) { + extendedMap := make(map[string]string, len(protoFiles)) for k, v := range protoFiles { extendedMap[k] = v - extendedMap[filepath.Base(k)] = v } - - // Parse imports and build mappings: for each import, find matching file by base name + var missingImports []string for _, content := range protoFiles { for _, imp := range extractImports(content) { - if extendedMap[imp] == "" { - importBase := filepath.Base(imp) - for filename, fileContent := range protoFiles { - if filepath.Base(filename) == importBase { - extendedMap[imp] = fileContent - break - } - } + if _, ok := extendedMap[imp]; !ok { + missingImports = append(missingImports, imp) } } } - - return extendedMap + if len(missingImports) > 0 { + return nil, fmt.Errorf("missing imports: %s", strings.Join(missingImports, ", ")) + } + return extendedMap, nil } -// extractImports extracts all import statements from 
proto file content +// extractImports extracts all import statements from proto file content. func extractImports(content string) []string { var imports []string scanner := bufio.NewScanner(strings.NewReader(content)) + for scanner.Scan() { - line := strings.TrimSpace(scanner.Text()) - if strings.HasPrefix(line, "import ") { - start := strings.Index(line, `"`) - if start != -1 { - end := strings.Index(line[start+1:], `"`) - if end != -1 { - imports = append(imports, line[start+1:start+1+end]) + line := scanner.Text() + + // Remove inline comments (content after //) + if commentIdx := strings.Index(line, "//"); commentIdx != -1 { + line = line[:commentIdx] + } + + line = strings.TrimSpace(line) + + // Skip empty lines and comment lines + if line == "" || strings.HasPrefix(line, "//") { + continue + } + + // Check if it's an import statement + if !strings.HasPrefix(line, "import ") { + continue + } + + // Extract content in quotes + // Support both "import" and import formats + line = strings.TrimPrefix(line, "import") + line = strings.TrimSpace(line) + + // Remove semicolon if present + line = strings.TrimSuffix(line, ";") + line = strings.TrimSpace(line) + + // Extract quoted content + if strings.HasPrefix(line, `"`) && strings.HasSuffix(line, `"`) { + importPath := line[1 : len(line)-1] + if importPath != "" { + imports = append(imports, importPath) + } + } else if start := strings.Index(line, `"`); start != -1 { + // Handle case where quotes are not at the beginning + end := strings.LastIndex(line, `"`) + if end > start { + importPath := line[start+1 : end] + if importPath != "" { + imports = append(imports, importPath) } } } } + return imports } @@ -216,94 +461,215 @@ func getFileNames(files map[string]string) []string { return arr } -func (t *GRPCTask) run() error { - if t.method == nil { - t.reqError = "method not initialized" - return fmt.Errorf("method nil") +func (t *GRPCTask) getFullMethod() string { + if t.AdvanceOptions == nil || t.AdvanceOptions.RequestOptions == nil { + return "" + } + reqOpt := t.AdvanceOptions.RequestOptions + if reqOpt.ProtoFiles != nil { + return reqOpt.ProtoFiles.FullMethod + } + if reqOpt.Reflection != nil { + return reqOpt.Reflection.FullMethod + } + if reqOpt.HealthCheck != nil { + return fmt.Sprintf("%s/%s", HealthCheckServiceName, HealthCheckMethodName) + } + return "" +} + +func (t *GRPCTask) getJSONRequest() string { + if t.AdvanceOptions == nil || t.AdvanceOptions.RequestOptions == nil { + return "" + } + reqOpt := t.AdvanceOptions.RequestOptions + if reqOpt.ProtoFiles != nil { + return reqOpt.ProtoFiles.JSONRequest + } + if reqOpt.Reflection != nil { + return reqOpt.Reflection.JSONRequest } + if reqOpt.HealthCheck != nil && reqOpt.HealthCheck.Service != "" { + healthReq := map[string]string{"service": reqOpt.HealthCheck.Service} + jsonReq, _ := json.Marshal(healthReq) + return string(jsonReq) + } + return "" +} +func (t *GRPCTask) run() error { start := time.Now() defer func() { t.reqCost = time.Since(start) }() - // create context with timeout + // Check configuration + opt := t.AdvanceOptions + if opt == nil || opt.RequestOptions == nil { + t.reqError = "request options required" + return nil + } + + // Create connection (new connection for each run()) ctx, cancel := context.WithTimeout(context.Background(), t.timeout) defer cancel() - // create dynamic gRPC request - msg := dynamic.NewMessage(t.method.GetInputType()) - if err := msg.UnmarshalJSON(t.JSONRequest); err != nil { - t.reqError = fmt.Sprintf("invalid message: %v", err) - return 
fmt.Errorf("invalid message for method %q: %w", t.method.GetName(), err) + dialOpts := []grpc.DialOption{ + grpc.WithTransportCredentials(t.creds), + grpc.WithBlock(), } - stub := grpcdynamic.NewStub(t.conn) - resp, err := stub.InvokeRpc(ctx, t.method, msg) + conn, err := grpc.DialContext(ctx, t.Server, dialOpts...) if err != nil { - t.reqError = err.Error() - return err + t.reqError = fmt.Sprintf("dial grpc server failed: %v", err) + return nil } + defer func() { + _ = conn.Close() + }() - // dial test message - j, err := json.Marshal(resp) + // Find method + method, err := t.findMethod(ctx, conn) if err != nil { - t.reqError = fmt.Sprintf("marshal response failed: %v", err) - return err + t.reqError = err.Error() + return nil } - t.result = j - return nil -} + reqOpt := opt.RequestOptions -func (t *GRPCTask) class() string { - return ClassGRPC -} + // Add metadata + if len(reqOpt.Metadata) > 0 { + md := metadata.New(reqOpt.Metadata) + ctx = metadata.NewOutgoingContext(ctx, md) + } -func (t *GRPCTask) metricName() string { - return "grpc_dial_testing" -} + // Build request message + msg := dynamic.NewMessage(method.GetInputType()) -func (t *GRPCTask) initTask() { - if t.Task == nil { - t.Task = &Task{} + jsonRequest := t.getJSONRequest() + if jsonRequest != "" { + if err := msg.UnmarshalJSON([]byte(jsonRequest)); err != nil { + t.reqError = fmt.Sprintf("invalid message: %v", err) + return nil + } } -} -func (t *GRPCTask) check() error { - if t.Server == "" { - return fmt.Errorf("server address is required") + // Execute RPC call + stub := grpcdynamic.NewStub(conn) + resp, err := stub.InvokeRpc(ctx, method, msg) + if err != nil { + t.reqError = err.Error() + return nil } - if t.FullMethod == "" { - return fmt.Errorf("full method is required") + + // dial test message + dynMsg, ok := resp.(*dynamic.Message) + if !ok { + t.reqError = fmt.Sprintf("unexpected response type: expected *dynamic.Message, got %T", resp) + return nil + } + + j, err := dynMsg.MarshalJSON() + if err != nil { + t.reqError = fmt.Sprintf("marshal response failed: %v", err) + return nil + } + t.result = j + + // run post script if provided + if t.PostScript != "" { + result, err := postScriptDoGRPC(t.PostScript, t.result) + if err != nil { + t.reqError = err.Error() + return nil + } + t.postScriptResult = result } + return nil } +func (t *GRPCTask) stop() { + // close connection in run() +} + func (t *GRPCTask) clear() { t.result = nil t.reqError = "" t.reqCost = 0 + t.postScriptResult = nil if t.timeout == 0 { t.timeout = DefaultGRPCTimeout } } +func (t *GRPCTask) class() string { + return ClassGRPC +} + +func (t *GRPCTask) metricName() string { + return "grpc_dial_testing" +} + func (t *GRPCTask) checkResult() ([]string, bool) { + var reasons []string + var succFlag bool + if t.reqError != "" { return []string{t.reqError}, false } if t.result == nil { return []string{"no response"}, false } - return nil, true + + // if no success conditions defined, default to success if no error + if len(t.SuccessWhen) == 0 && t.PostScript == "" { + return nil, true + } + + // check SuccessWhen conditions + for _, chk := range t.SuccessWhen { + if chk == nil { + continue + } + // check body + for _, v := range chk.Body { + if v == nil { + continue + } + if err := v.check(string(t.result), "response body"); err != nil { + reasons = append(reasons, err.Error()) + } else { + succFlag = true + } + } + + // check response time + if chk.respTime > 0 && t.reqCost > chk.respTime { + reasons = append(reasons, + fmt.Sprintf("gRPC response 
time(%v) larger than %v", t.reqCost, chk.respTime)) + } else if chk.respTime > 0 { + succFlag = true + } + } + + // check post script result + if t.postScriptResult != nil { + if t.postScriptResult.Result.IsFailed { + reasons = append(reasons, t.postScriptResult.Result.ErrorMessage) + } else { + succFlag = true + } + } + + return reasons, succFlag } func (t *GRPCTask) getResults() (tags map[string]string, fields map[string]interface{}) { tags = map[string]string{ "name": t.Name, "server": t.Server, - "method": t.FullMethod, + "method": t.getFullMethod(), "status": "FAIL", "proto": "grpc", } @@ -324,24 +690,36 @@ func (t *GRPCTask) getResults() (tags map[string]string, fields map[string]inter message := map[string]interface{}{} reasons, succFlag := t.checkResult() - if t.reqError != "" { - reasons = append(reasons, t.reqError) + + // check if we should save response body + notSave := false + if t.AdvanceOptions != nil && t.AdvanceOptions.Secret != nil && t.AdvanceOptions.Secret.NoSaveResponseBody { + notSave = true } - if succFlag && t.reqError == "" { - tags["status"] = "OK" - fields["success"] = int64(1) - message["response_time"] = int64(t.reqCost) / 1000 - if t.result != nil { - message["response"] = string(t.result) + // apply SuccessWhenLogic + switch t.SuccessWhenLogic { + case "or": + if succFlag && t.reqError == "" { + tags["status"] = "OK" + fields["success"] = int64(1) + } else { + message["fail_reason"] = strings.Join(reasons, ";") + fields["fail_reason"] = strings.Join(reasons, ";") + } + default: // "and" or empty (default to "and") + if succFlag && len(reasons) == 0 && t.reqError == "" { + tags["status"] = "OK" + fields["success"] = int64(1) + } else { + message["fail_reason"] = strings.Join(reasons, ";") + fields["fail_reason"] = strings.Join(reasons, ";") } - } else { - message["fail_reason"] = strings.Join(reasons, ";") - fields["fail_reason"] = strings.Join(reasons, ";") } - if t.result != nil { - fields["response"] = string(t.result) + message["response_time"] = int64(t.reqCost) / 1000 + if t.result != nil && !notSave { + message["response"] = string(t.result) } data, err := json.Marshal(message) @@ -358,11 +736,38 @@ func (t *GRPCTask) getResults() (tags map[string]string, fields map[string]inter return tags, fields } -func (t *GRPCTask) beforeFirstRender() { -} - func (t *GRPCTask) getVariableValue(variable Variable) (string, error) { - return "", fmt.Errorf("gRPC dial test does not support variable extraction") + if variable.PostScript == "" && t.PostScript == "" { + return "", fmt.Errorf("post_script is empty") + } + + if variable.TaskVarName == "" { + return "", fmt.Errorf("task variable name is empty") + } + + if t.result == nil { + return "", fmt.Errorf("response body is empty") + } + + var result *ScriptResult + var err error + if variable.PostScript == "" { // use task post script + result = t.postScriptResult + } else { // use task variable post script + if result, err = postScriptDoGRPC(variable.PostScript, t.result); err != nil { + return "", fmt.Errorf("run pipeline failed: %w", err) + } + } + + if result == nil { + return "", fmt.Errorf("pipeline result is empty") + } + + value, ok := result.Vars[variable.TaskVarName] + if !ok { + return "", fmt.Errorf("task variable name not found") + } + return fmt.Sprintf("%v", value), nil } func (t *GRPCTask) getHostName() ([]string, error) { @@ -387,6 +792,212 @@ func (t *GRPCTask) getRawTask(taskString string) (string, error) { task.Task = nil - bytes, _ := json.Marshal(task) + bytes, err := json.Marshal(task) + if 
err != nil { + return "", fmt.Errorf("marshal grpc task failed: %w", err) + } return string(bytes), nil } + +func (t *GRPCTask) renderSuccessWhen(task *GRPCTask, fm template.FuncMap) error { + if task == nil { + return nil + } + + if task.SuccessWhen != nil { + for index, checker := range task.SuccessWhen { + if checker == nil { + continue + } + + // body + if checker.Body != nil { + for bodyIndex, v := range checker.Body { + if v != nil { + if err := t.renderSuccessOption(v, t.SuccessWhen[index].Body[bodyIndex], fm); err != nil { + return fmt.Errorf("render body failed: %w", err) + } + } + } + } + + // response time + if checker.ResponseTime != "" { + responseTime, err := t.GetParsedString(checker.ResponseTime, fm) + if err != nil { + return fmt.Errorf("render response time failed: %w", err) + } + t.SuccessWhen[index].ResponseTime = responseTime + } + } + } + + return nil +} + +func (t *GRPCTask) setReqError(err string) { + t.reqError = err +} + +func (t *GRPCTask) renderTemplate(fm template.FuncMap) error { + if t.rawTask == nil { + task := &GRPCTask{} + if err := t.NewRawTask(task); err != nil { + return fmt.Errorf("new raw task failed: %w", err) + } + t.rawTask = task + } + + task := t.rawTask + if task == nil { + return errors.New("raw task is nil") + } + + // server + server, err := t.GetParsedString(task.Server, fm) + if err != nil { + return fmt.Errorf("render server failed: %w", err) + } + t.Server = server + + // success when + if err := t.renderSuccessWhen(task, fm); err != nil { + return fmt.Errorf("render success when failed: %w", err) + } + + // advance options + if err := t.renderAdvanceOptions(task, fm); err != nil { + return fmt.Errorf("render advance options failed: %w", err) + } + + return nil +} + +func (t *GRPCTask) renderAdvanceOptions(task *GRPCTask, fm template.FuncMap) error { + if task == nil || task.AdvanceOptions == nil { + return nil + } + + // request options + if err := t.renderRequestOptions(task.AdvanceOptions.RequestOptions, fm); err != nil { + return fmt.Errorf("render request options failed: %w", err) + } + + return nil +} + +func (t *GRPCTask) renderRequestOptions(requestOpt *GRPCOptRequest, fm template.FuncMap) error { + if requestOpt == nil { + return nil + } + + // request timeout + if requestOpt.RequestTimeout != "" { + timeout, err := t.GetParsedString(requestOpt.RequestTimeout, fm) + if err != nil { + return fmt.Errorf("render timeout failed: %w", err) + } + t.AdvanceOptions.RequestOptions.RequestTimeout = timeout + } + + // metadata + if len(requestOpt.Metadata) > 0 { + for k, v := range requestOpt.Metadata { + key, err := t.GetParsedString(k, fm) + if err != nil { + return fmt.Errorf("render metadata key %q failed: %w", k, err) + } + value, err := t.GetParsedString(v, fm) + if err != nil { + return fmt.Errorf("render metadata value for key %q failed: %w", k, err) + } + delete(t.AdvanceOptions.RequestOptions.Metadata, k) + t.AdvanceOptions.RequestOptions.Metadata[key] = value + } + } + + // proto files discovery + if err := t.renderProtoFiles(requestOpt.ProtoFiles, fm); err != nil { + return fmt.Errorf("render proto files failed: %w", err) + } + + // reflection discovery + if err := t.renderReflection(requestOpt.Reflection, fm); err != nil { + return fmt.Errorf("render reflection failed: %w", err) + } + + // health check discovery + if err := t.renderHealthCheck(requestOpt.HealthCheck, fm); err != nil { + return fmt.Errorf("render health check failed: %w", err) + } + + return nil +} + +func (t *GRPCTask) renderProtoFiles(protoFiles 
*GRPCProtoFilesDiscovery, fm template.FuncMap) error { + if protoFiles == nil { + return nil + } + + if protoFiles.FullMethod != "" { + fullMethod, err := t.GetParsedString(protoFiles.FullMethod, fm) + if err != nil { + return fmt.Errorf("render proto files full method failed: %w", err) + } + // if full method is changed, clear the cached method descriptor + if t.AdvanceOptions.RequestOptions.ProtoFiles.FullMethod != fullMethod { + t.methodDescriptor = nil + } + t.AdvanceOptions.RequestOptions.ProtoFiles.FullMethod = fullMethod + } + + if protoFiles.JSONRequest != "" { + jsonRequest, err := t.GetParsedString(protoFiles.JSONRequest, fm) + if err != nil { + return fmt.Errorf("render proto files JSON request failed: %w", err) + } + t.AdvanceOptions.RequestOptions.ProtoFiles.JSONRequest = jsonRequest + } + + return nil +} + +func (t *GRPCTask) renderReflection(reflection *GRPCReflectionDiscovery, fm template.FuncMap) error { + if reflection == nil { + return nil + } + + if reflection.FullMethod != "" { + fullMethod, err := t.GetParsedString(reflection.FullMethod, fm) + if err != nil { + return fmt.Errorf("render reflection full method failed: %w", err) + } + t.AdvanceOptions.RequestOptions.Reflection.FullMethod = fullMethod + } + + if reflection.JSONRequest != "" { + jsonRequest, err := t.GetParsedString(reflection.JSONRequest, fm) + if err != nil { + return fmt.Errorf("render reflection JSON request failed: %w", err) + } + t.AdvanceOptions.RequestOptions.Reflection.JSONRequest = jsonRequest + } + + return nil +} + +func (t *GRPCTask) renderHealthCheck(healthCheck *GRPCHealthCheckDiscovery, fm template.FuncMap) error { + if healthCheck == nil { + return nil + } + + if healthCheck.Service != "" { + service, err := t.GetParsedString(healthCheck.Service, fm) + if err != nil { + return fmt.Errorf("render health check service failed: %w", err) + } + t.AdvanceOptions.RequestOptions.HealthCheck.Service = service + } + + return nil +} diff --git a/dialtesting/grpc_script.go b/dialtesting/grpc_script.go new file mode 100644 index 00000000..92ee2377 --- /dev/null +++ b/dialtesting/grpc_script.go @@ -0,0 +1,140 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the MIT License. +// This product includes software developed at Guance Cloud (https://www.guance.com/). +// Copyright 2021-present Guance, Inc. + +package dialtesting + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/GuanceCloud/cliutils/point" + "github.com/GuanceCloud/pipeline-go/lang" + "github.com/GuanceCloud/pipeline-go/lang/platypus" + "github.com/GuanceCloud/pipeline-go/ptinput" +) + +type ScriptGRPCRequestResponse struct { + Body string `json:"body"` +} + +func (h *ScriptGRPCRequestResponse) String() (string, error) { + bytes, err := json.Marshal(h) + if err != nil { + return "", fmt.Errorf("response marshal failed: %w", err) + } + return string(bytes), nil +} + +type ScriptGRPCMessage struct { + Response *ScriptGRPCRequestResponse `json:"response"` + Vars *Vars `json:"vars"` +} + +// postScriptDoGRPC run pipeline script for gRPC response and return result. +// +// bodyBytes is the JSON body of the gRPC response. 
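+// An empty script or nil bodyBytes short-circuits to an empty ScriptResult without running the pipeline.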
+func postScriptDoGRPC(script string, bodyBytes []byte) (*ScriptResult, error) { + if script == "" || bodyBytes == nil { + return &ScriptResult{}, nil + } + + response := &ScriptGRPCRequestResponse{ + Body: string(bodyBytes), + } + + result, err := runPipelineGRPC(script, response, nil) + if err != nil { + return nil, fmt.Errorf("run pipeline failed: %w", err) + } + return result, nil +} + +func runPipelineGRPC(script string, response *ScriptGRPCRequestResponse, vars *Vars) (*ScriptResult, error) { + scriptName := "script" + + script = fmt.Sprintf(` + content = load_json(_) + response = content["response"] + vars = content["vars"] + result = {} + + %s + + add_key(result, result) + add_key(vars, vars) + `, script) + + pls, errs := platypus.NewScripts( + map[string]string{scriptName: script}, + lang.WithCat(point.Logging), + ) + + defer func() { + for _, pl := range pls { + pl.Cleanup() + } + }() + + for k, v := range errs { + return nil, fmt.Errorf("new scripts failed: %s, %w", k, v) + } + + pl, ok := pls[scriptName] + if !ok { + return nil, fmt.Errorf("script %s not found", scriptName) + } + + if vars == nil { + vars = &Vars{} + } + + message := &ScriptGRPCMessage{ + Response: response, + Vars: vars, + } + + messageBytes, err := json.Marshal(message) + if err != nil { + return nil, fmt.Errorf("message marshal failed: %w", err) + } + messageString := string(messageBytes) + + fileds := map[string]interface{}{ + "message": messageString, + } + + pt := ptinput.NewPlPoint(point.Logging, "test", nil, fileds, time.Now()) + + if err := pl.Run(pt, nil, nil); err != nil { + return nil, fmt.Errorf("run failed: %w", err) + } + + resultFields := pt.Fields() + + result := ScriptHTTPResult{} + + if val, ok := resultFields["result"]; !ok { + return nil, fmt.Errorf("result not found") + } else if err := json.Unmarshal([]byte(getFiledString(val)), &result); err != nil { + return nil, fmt.Errorf("unmarshal result failed: %w", err) + } + + if val, ok := resultFields["vars"]; !ok { + return nil, fmt.Errorf("vars not found") + } else if err := json.Unmarshal([]byte(getFiledString(val)), &vars); err != nil { + return nil, fmt.Errorf("unmarshal vars failed: %w", err) + } + + // limit error message length + if len(result.ErrorMessage) > MaxErrorMessageSize { + result.ErrorMessage = result.ErrorMessage[:MaxErrorMessageSize] + "..." + } + + return &ScriptResult{ + Result: result, + Vars: *vars, + }, nil +} diff --git a/dialtesting/grpc_script_test.go b/dialtesting/grpc_script_test.go new file mode 100644 index 00000000..9e4bf26b --- /dev/null +++ b/dialtesting/grpc_script_test.go @@ -0,0 +1,141 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the MIT License. +// This product includes software developed at Guance Cloud (https://www.guance.com/). +// Copyright 2021-present Guance, Inc. + +package dialtesting + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestPostScriptDoGRPC(t *testing.T) { + t.Run("success - extract message field", func(t *testing.T) { + script := ` +body = load_json(response["body"]) +vars["message"] = body["message"] +result["is_failed"] = false + ` + + body := []byte(`{"message":"你好, test! 这是来自 gRPC 的问候"}`) + + result, err := postScriptDoGRPC(script, body) + assert.NoError(t, err) + assert.NotNil(t, result) + assert.False(t, result.Result.IsFailed) + assert.Equal(t, "你好, test! 
这是来自 gRPC 的问候", result.Vars["message"]) + }) + + t.Run("success - extract multiple fields", func(t *testing.T) { + script := ` +body = load_json(response["body"]) +vars["message"] = body["message"] +vars["status"] = body["status"] +result["is_failed"] = false + ` + + body := []byte(`{"message":"hello","status":"ok"}`) + + result, err := postScriptDoGRPC(script, body) + assert.NoError(t, err) + assert.NotNil(t, result) + assert.False(t, result.Result.IsFailed) + assert.Equal(t, "hello", result.Vars["message"]) + assert.Equal(t, "ok", result.Vars["status"]) + }) + + t.Run("failure - missing required field", func(t *testing.T) { + script := ` +body = load_json(response["body"]) +if body["message"] != nil { + vars["message"] = body["message"] + result["is_failed"] = false +} else { + result["is_failed"] = true + result["error_message"] = "响应中缺少 message 字段" +} + ` + + body := []byte(`{"status":"ok"}`) + + result, err := postScriptDoGRPC(script, body) + assert.NoError(t, err) + assert.NotNil(t, result) + assert.True(t, result.Result.IsFailed) + assert.Equal(t, "响应中缺少 message 字段", result.Result.ErrorMessage) + }) + + t.Run("failure - custom error", func(t *testing.T) { + script := ` +result["is_failed"] = true +result["error_message"] = "custom error message" + ` + + body := []byte(`{"message":"hello"}`) + + result, err := postScriptDoGRPC(script, body) + assert.NoError(t, err) + assert.NotNil(t, result) + assert.True(t, result.Result.IsFailed) + assert.Equal(t, "custom error message", result.Result.ErrorMessage) + }) + + t.Run("empty script", func(t *testing.T) { + script := "" + + body := []byte(`{"message":"hello"}`) + + result, err := postScriptDoGRPC(script, body) + assert.NoError(t, err) + assert.NotNil(t, result) + }) + + t.Run("nil body", func(t *testing.T) { + script := ` +vars["test"] = "value" +result["is_failed"] = false + ` + + result, err := postScriptDoGRPC(script, nil) + assert.NoError(t, err) + assert.NotNil(t, result) + }) + + t.Run("invalid JSON in response body", func(t *testing.T) { + script := ` +body = load_json(response["body"]) +if body != nil { + vars["message"] = body["message"] + result["is_failed"] = false +} else { + result["is_failed"] = true + result["error_message"] = "invalid JSON" +} + ` + + body := []byte(`invalid json`) + + result, _ := postScriptDoGRPC(script, body) + assert.NotNil(t, result) + }) + + t.Run("complex nested JSON", func(t *testing.T) { + script := ` +body = load_json(response["body"]) +vars["user_name"] = body["user"]["name"] +vars["user_age"] = body["user"]["age"] +result["is_failed"] = false + ` + + body := []byte(`{"user":{"name":"test","age":25},"status":"ok"}`) + + result, err := postScriptDoGRPC(script, body) + assert.NoError(t, err) + assert.NotNil(t, result) + assert.False(t, result.Result.IsFailed) + assert.Equal(t, "test", result.Vars["user_name"]) + assert.Equal(t, float64(25), result.Vars["user_age"]) // JSON 数字会被解析为 float64 + }) +} diff --git a/dialtesting/grpc_test.go b/dialtesting/grpc_test.go index 5f7243d4..725339b6 100644 --- a/dialtesting/grpc_test.go +++ b/dialtesting/grpc_test.go @@ -6,239 +6,156 @@ package dialtesting import ( - "context" "encoding/json" - "log" - "net" "os" T "testing" + "text/template" "time" - pb "github.com/GuanceCloud/cliutils/dialtesting/greeter" "github.com/stretchr/testify/assert" - "google.golang.org/grpc" - "google.golang.org/grpc/health" - "google.golang.org/grpc/health/grpc_health_v1" - "google.golang.org/grpc/reflection" ) -type server struct { - pb.UnimplementedGreeterServer -} - -func (s 
*server) SayHello(ctx context.Context, in *pb.HelloRequest) (*pb.HelloReply, error) { - log.Printf("recv req from %v", in.GetName()) - return &pb.HelloReply{Message: "hello " + in.GetName()}, nil -} - -func TestGRPCDial(t *T.T) { - lsn, err := net.Listen("tcp", ":0") - assert.NoError(t, err) - - t.Logf("listen on %s", lsn.Addr().String()) - s := grpc.NewServer() - pb.RegisterGreeterServer(s, &server{}) - - healthSrv := health.NewServer() - - healthSrv.SetServingStatus("", grpc_health_v1.HealthCheckResponse_SERVING) - - // we can set specific service's status - healthSrv.SetServingStatus("greeter.Greeter", grpc_health_v1.HealthCheckResponse_SERVING) - - grpc_health_v1.RegisterHealthServer(s, healthSrv) - reflection.Register(s) - - go func() { - assert.NoError(t, s.Serve(lsn)) // start server - }() - - time.Sleep(time.Second) // wait - - t.Run(`dial-on-health-check(with-reflection)`, func(t *T.T) { - task := &GRPCTask{ - Server: lsn.Addr().String(), - FullMethod: "greeter.Greeter/SayHello", - } - - assert.NoError(t, task.init()) - - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - - cli := grpc_health_v1.NewHealthClient(task.conn) - req := &grpc_health_v1.HealthCheckRequest{ - // set service name for specifi service - Service: "greeter.Greeter", - } - - resp, err := cli.Check(ctx, req) - assert.NoError(t, err) - assert.Equal(t, grpc_health_v1.HealthCheckResponse_SERVING, resp.GetStatus()) - }) - - t.Run(`dial-on-health-check`, func(t *T.T) { - task := &GRPCTask{ - Server: lsn.Addr().String(), - } - - assert.NoError(t, task.init()) - - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - - cli := grpc_health_v1.NewHealthClient(task.conn) - req := &grpc_health_v1.HealthCheckRequest{ - // set service name for specifi service - Service: "greeter.Greeter", - } - - resp, err := cli.Check(ctx, req) - assert.NoError(t, err) - assert.Equal(t, grpc_health_v1.HealthCheckResponse_SERVING, resp.GetStatus()) - }) - - t.Run(`dial-on-health-check(service-not-exist)`, func(t *T.T) { +//nolint:golint // Requires real gRPC server access +func TestGRPCTask_Check(t *T.T) { + t.Run("missing server", func(t *T.T) { task := &GRPCTask{ - Server: lsn.Addr().String(), - } - - assert.NoError(t, task.init()) - - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - - cli := grpc_health_v1.NewHealthClient(task.conn) - req := &grpc_health_v1.HealthCheckRequest{ - // the service not exist - Service: "greeter.SomeServiceNotExist", + AdvanceOptions: &GRPCAdvanceOption{ + RequestOptions: &GRPCOptRequest{ + ProtoFiles: &GRPCProtoFilesDiscovery{ + FullMethod: "greeter.Greeter/SayHello", + ProtoFiles: map[string]string{ + "greeter.proto": "syntax = \"proto3\";", + }, + }, + }, + }, + SuccessWhen: []*GRPCSuccess{ + { + Body: []*SuccessOption{ + {Contains: "hello"}, + }, + }, + }, } - - resp, err := cli.Check(ctx, req) + task.initTask() + err := task.check() assert.Error(t, err) - assert.Equal(t, grpc_health_v1.HealthCheckResponse_UNKNOWN, resp.GetStatus()) + assert.Contains(t, err.Error(), "server address is required") }) - t.Run(`dial-on-proto-file(with-behavior)`, func(t *T.T) { - proto, err := os.ReadFile("greeter/greeter.proto") - assert.NoError(t, err) - - hr := &pb.HelloRequest{ - Name: "world", - } - - j, err := json.Marshal(hr) - assert.NoError(t, err) - + t.Run("missing proto files", func(t *T.T) { task := &GRPCTask{ - Server: lsn.Addr().String(), - FullMethod: "greeter.Greeter/SayHello", - ProtoFiles: 
map[string]string{ - "greeter.proto": string(proto), + Server: "localhost:50051", + AdvanceOptions: &GRPCAdvanceOption{ + RequestOptions: &GRPCOptRequest{ + ProtoFiles: &GRPCProtoFilesDiscovery{ + FullMethod: "greeter.Greeter/SayHello", + ProtoFiles: map[string]string{}, // 空的 proto files + }, + }, + }, + SuccessWhen: []*GRPCSuccess{ + { + Body: []*SuccessOption{ + {Contains: "hello"}, + }, + }, }, - JSONRequest: j, - } - - assert.NoError(t, task.init()) - assert.NoError(t, task.run()) - - defer task.stop() - - t.Logf("result: %s", string(task.result)) - }) -} - -func TestGRPCTask_Check(t *T.T) { - t.Run("valid task", func(t *T.T) { - task := &GRPCTask{ - Server: "localhost:50051", - FullMethod: "greeter.Greeter/SayHello", } task.initTask() - assert.NoError(t, task.check()) + err := task.check() + assert.Error(t, err) + assert.Contains(t, err.Error(), "proto files not provided") }) - t.Run("missing server", func(t *T.T) { + t.Run("missing full method", func(t *T.T) { task := &GRPCTask{ - FullMethod: "greeter.Greeter/SayHello", + Server: "localhost:50051", + SuccessWhen: []*GRPCSuccess{ + { + Body: []*SuccessOption{ + {Contains: "hello"}, + }, + }, + }, } task.initTask() err := task.check() assert.Error(t, err) - assert.Contains(t, err.Error(), "server address is required") + assert.Contains(t, err.Error(), "full method is required") }) - t.Run("missing full method", func(t *T.T) { + t.Run("missing check rule", func(t *T.T) { task := &GRPCTask{ Server: "localhost:50051", + AdvanceOptions: &GRPCAdvanceOption{ + RequestOptions: &GRPCOptRequest{ + ProtoFiles: &GRPCProtoFilesDiscovery{ + FullMethod: "greeter.Greeter/SayHello", + ProtoFiles: map[string]string{ + "greeter.proto": "syntax = \"proto3\";", + }, + }, + }, + }, } task.initTask() err := task.check() assert.Error(t, err) - assert.Contains(t, err.Error(), "full method is required") + assert.Contains(t, err.Error(), "no any check rule") }) } func TestGRPCTask_Init(t *T.T) { - serverAddr := "localhost:50051" // Change to your server address - - t.Run("init with reflection", func(t *T.T) { + serverAddr := "localhost:50052" + t.Run("init with default timeout", func(t *T.T) { task := &GRPCTask{ - Server: serverAddr, - FullMethod: "greeter.Greeter/SayHello", + Server: serverAddr, + AdvanceOptions: &GRPCAdvanceOption{ + RequestOptions: &GRPCOptRequest{ + Reflection: &GRPCReflectionDiscovery{ + FullMethod: "greeter.Greeter/SayHello", + }, + }, + }, } task.initTask() err := task.init() assert.NoError(t, err) - assert.NotNil(t, task.conn) - assert.NotNil(t, task.method) assert.Equal(t, DefaultGRPCTimeout, task.timeout) - - defer task.stop() - }) - - t.Run("init with proto files", func(t *T.T) { - proto, err := os.ReadFile("greeter/greeter.proto") - assert.NoError(t, err) - - task := &GRPCTask{ - Server: serverAddr, - FullMethod: "greeter.Greeter/SayHello", - ProtoFiles: map[string]string{ - "greeter.proto": string(proto), - }, - } - task.initTask() - - err = task.init() - assert.NoError(t, err) - assert.NotNil(t, task.conn) - assert.NotNil(t, task.method) - - defer task.stop() }) t.Run("init with custom timeout", func(t *T.T) { task := &GRPCTask{ - Server: serverAddr, - FullMethod: "greeter.Greeter/SayHello", - Timeout: "10s", + Server: serverAddr, + AdvanceOptions: &GRPCAdvanceOption{ + RequestOptions: &GRPCOptRequest{ + RequestTimeout: "10s", + Reflection: &GRPCReflectionDiscovery{ + FullMethod: "greeter.Greeter/SayHello", + }, + }, + }, } task.initTask() err := task.init() assert.NoError(t, err) assert.Equal(t, 10*time.Second, task.timeout) - - 
defer task.stop() }) t.Run("init with invalid timeout", func(t *T.T) { task := &GRPCTask{ - Server: serverAddr, - FullMethod: "greeter.Greeter/SayHello", - Timeout: "invalid", + Server: serverAddr, + AdvanceOptions: &GRPCAdvanceOption{ + RequestOptions: &GRPCOptRequest{ + RequestTimeout: "invalid", + Reflection: &GRPCReflectionDiscovery{ + FullMethod: "greeter.Greeter/SayHello", + }, + }, + }, } task.initTask() @@ -247,84 +164,135 @@ func TestGRPCTask_Init(t *T.T) { assert.Contains(t, err.Error(), "invalid timeout") }) - t.Run("init with invalid server", func(t *T.T) { + t.Run("init with response time in success checker", func(t *T.T) { task := &GRPCTask{ - Server: "invalid:99999", - FullMethod: "greeter.Greeter/SayHello", + Server: serverAddr, + AdvanceOptions: &GRPCAdvanceOption{ + RequestOptions: &GRPCOptRequest{ + Reflection: &GRPCReflectionDiscovery{ + FullMethod: "greeter.Greeter/SayHello", + }, + }, + }, + SuccessWhen: []*GRPCSuccess{ + { + ResponseTime: "1s", + }, + }, } task.initTask() err := task.init() - assert.Error(t, err) + assert.NoError(t, err) + assert.Equal(t, 1*time.Second, task.SuccessWhen[0].respTime) }) - t.Run("init with method not found", func(t *T.T) { + t.Run("init with invalid response time", func(t *T.T) { task := &GRPCTask{ - Server: serverAddr, - FullMethod: "greeter.Greeter/NotFoundMethod", + Server: serverAddr, + AdvanceOptions: &GRPCAdvanceOption{ + RequestOptions: &GRPCOptRequest{ + Reflection: &GRPCReflectionDiscovery{ + FullMethod: "greeter.Greeter/SayHello", + }, + }, + }, + SuccessWhen: []*GRPCSuccess{ + { + ResponseTime: "invalid", + }, + }, } task.initTask() err := task.init() assert.Error(t, err) - assert.Contains(t, err.Error(), "not found") + assert.Contains(t, err.Error(), "invalid response time") }) -} - -func TestGRPCTask_Run(t *T.T) { - serverAddr := "localhost:50051" // Change to your server address - - t.Run("run success", func(t *T.T) { - greeterProto, err := os.ReadFile("greeter/greeter.proto") - assert.NoError(t, err) - userProto, err := os.ReadFile("greeter/user.proto") - assert.NoError(t, err) + t.Run("init with body regex in success checker", func(t *T.T) { task := &GRPCTask{ - Server: serverAddr, - FullMethod: "greeter.Greeter/SayHelloToUser", - ProtoFiles: map[string]string{ - "greeter.proto": string(greeterProto), - "user.proto": string(userProto), + Server: serverAddr, + AdvanceOptions: &GRPCAdvanceOption{ + RequestOptions: &GRPCOptRequest{ + Reflection: &GRPCReflectionDiscovery{ + FullMethod: "greeter.Greeter/SayHello", + }, + }, + }, + SuccessWhen: []*GRPCSuccess{ + { + Body: []*SuccessOption{ + {Contains: "hello"}, + }, + }, }, } task.initTask() - requestData := map[string]interface{}{ - "user_id": 1, - } - jsonRequest, err := json.Marshal(requestData) - assert.NoError(t, err) - task.JSONRequest = jsonRequest - - err = task.init() + err := task.init() assert.NoError(t, err) + }) - err = task.run() - assert.NoError(t, err) - assert.NotNil(t, task.result) - assert.NotEmpty(t, task.result) - assert.Empty(t, task.reqError) - assert.Greater(t, task.reqCost, time.Duration(0)) + t.Run("init with invalid regex in body", func(t *T.T) { + task := &GRPCTask{ + Server: serverAddr, + AdvanceOptions: &GRPCAdvanceOption{ + RequestOptions: &GRPCOptRequest{ + Reflection: &GRPCReflectionDiscovery{ + FullMethod: "greeter.Greeter/SayHello", + }, + }, + }, + SuccessWhen: []*GRPCSuccess{ + { + Body: []*SuccessOption{ + {MatchRegex: "[invalid regex"}, + }, + }, + }, + } + task.initTask() - defer task.stop() + err := task.init() + assert.Error(t, err) 
+ assert.Contains(t, err.Error(), "compile regex failed") }) - t.Run("run without init", func(t *T.T) { + t.Run("init with TLS certificate - ignore server cert error", func(t *T.T) { task := &GRPCTask{ - Server: serverAddr, - FullMethod: "greeter.Greeter/SayHello", + Server: serverAddr, + AdvanceOptions: &GRPCAdvanceOption{ + RequestOptions: &GRPCOptRequest{ + Reflection: &GRPCReflectionDiscovery{ + FullMethod: "greeter.Greeter/SayHello", + }, + }, + Certificate: &GRPCOptCertificate{ + IgnoreServerCertificateError: true, + }, + }, } task.initTask() - err := task.run() - assert.Error(t, err) - assert.Contains(t, err.Error(), "method nil") + err := task.init() + assert.NoError(t, err) + assert.NotNil(t, task.creds) }) } +//nolint:golint // Requires real gRPC server access func TestGRPCTask_GetResults(t *T.T) { - serverAddr := "localhost:50051" // Change to your server address + t.Skip("Skipping test that requires real gRPC server") + serverAddr := "localhost:50051" + greeterProto, err := os.ReadFile("grpcproto/greeter.proto") + if err != nil { + t.Fatalf("Failed to read greeter.proto: %v", err) + } + commonProto, err := os.ReadFile("grpcproto/common.proto") + if err != nil { + t.Fatalf("Failed to read common.proto: %v", err) + } t.Run("success result", func(t *T.T) { task := &GRPCTask{ @@ -334,22 +302,44 @@ func TestGRPCTask_GetResults(t *T.T) { "env": "test", }, }, - Server: serverAddr, - FullMethod: "greeter.Greeter/SayHello", + Server: serverAddr, + AdvanceOptions: &GRPCAdvanceOption{ + RequestOptions: &GRPCOptRequest{ + Metadata: map[string]string{ + "api-key": "test-key-123", + }, + ProtoFiles: &GRPCProtoFilesDiscovery{ + FullMethod: "greeter.Greeter/SayHello", + JSONRequest: `{"name":"test"}`, + ProtoFiles: map[string]string{ + "greeter.proto": string(greeterProto), + "grpcproto/common.proto": string(commonProto), + }, + }, + }, + }, + PostScript: `body = load_json(response["body"]) +if body != nil && body["msg"] != nil { + result["is_failed"] = false + vars["msg"] = body["msg"] +} else { + result["is_failed"] = true + result["error_message"] = "响应中缺少 msg 字段" +}`, } task.initTask() - requestData := map[string]interface{}{"name": "test"} - jsonRequest, _ := json.Marshal(requestData) - task.JSONRequest = jsonRequest + err := task.check() + assert.NoError(t, err) - err := task.init() + err = task.init() assert.NoError(t, err) err = task.run() assert.NoError(t, err) tags, fields := task.getResults() + t.Logf("tags: %v, fields: %v", tags, fields) // Check tags assert.Equal(t, "test-task", tags["name"]) @@ -362,10 +352,7 @@ func TestGRPCTask_GetResults(t *T.T) { // Check fields assert.Equal(t, int64(1), fields["success"]) assert.Greater(t, fields["response_time"], int64(0)) - assert.NotNil(t, fields["response"]) assert.NotNil(t, fields["message"]) - - defer task.stop() }) t.Run("failure result", func(t *T.T) { @@ -373,19 +360,31 @@ func TestGRPCTask_GetResults(t *T.T) { Task: &Task{ Name: "test-task-fail", }, - Server: serverAddr, - FullMethod: "greeter.Greeter/SayHello", + Server: serverAddr, + AdvanceOptions: &GRPCAdvanceOption{ + RequestOptions: &GRPCOptRequest{ + Metadata: map[string]string{ + "api-key": "test-key-123", + }, + ProtoFiles: &GRPCProtoFilesDiscovery{ + FullMethod: "greeter.Greeter/SayHello", + JSONRequest: `invalid json`, + ProtoFiles: map[string]string{ + "greeter.proto": string(greeterProto), + "grpcproto/common.proto": string(commonProto), + }, + }, + }, + }, + PostScript: "", } task.initTask() - // Set invalid request to cause error - task.JSONRequest = []byte(`invalid 
json`) - err := task.init() assert.NoError(t, err) err = task.run() - assert.Error(t, err) + assert.NoError(t, err) tags, fields := task.getResults() @@ -395,8 +394,6 @@ func TestGRPCTask_GetResults(t *T.T) { // Check fields assert.Equal(t, int64(-1), fields["success"]) assert.NotNil(t, fields["fail_reason"]) - - defer task.stop() }) } @@ -431,9 +428,9 @@ func TestGRPCTask_OtherMethods(t *T.T) { }) t.Run("checkResult", func(t *T.T) { - t.Run("success", func(t *T.T) { + t.Run("success without conditions", func(t *T.T) { task := &GRPCTask{ - result: []byte("test"), + result: []byte(`{"message":"hello test"}`), } reasons, flag := task.checkResult() assert.Nil(t, reasons) @@ -447,29 +444,69 @@ func TestGRPCTask_OtherMethods(t *T.T) { reasons, flag := task.checkResult() assert.NotEmpty(t, reasons) assert.False(t, flag) + assert.Equal(t, "test error", reasons[0]) }) t.Run("no response", func(t *T.T) { - task := &GRPCTask{} + task := &GRPCTask{ + PostScript: "", + } reasons, flag := task.checkResult() assert.NotEmpty(t, reasons) assert.False(t, flag) + assert.Contains(t, reasons[0], "no response") }) - }) - t.Run("getHostName", func(t *T.T) { - t.Run("with port", func(t *T.T) { + t.Run("success with body check", func(t *T.T) { task := &GRPCTask{ - Server: "localhost:50051", + result: []byte(`{"message":"hello test"}`), + SuccessWhen: []*GRPCSuccess{ + { + Body: []*SuccessOption{ + {Contains: "hello"}, + }, + }, + }, } - hostnames, err := task.getHostName() - assert.NoError(t, err) - assert.Equal(t, []string{"localhost"}, hostnames) + // Initialize regex patterns + for _, checker := range task.SuccessWhen { + for _, v := range checker.Body { + err := genReg(v) + assert.NoError(t, err) + } + } + reasons, flag := task.checkResult() + assert.Empty(t, reasons) + assert.True(t, flag) }) - t.Run("without port", func(t *T.T) { + t.Run("failure with body check", func(t *T.T) { task := &GRPCTask{ - Server: "localhost", + result: []byte(`{"message":"hello test"}`), + SuccessWhen: []*GRPCSuccess{ + { + Body: []*SuccessOption{ + {Contains: "notfound"}, + }, + }, + }, + } + // Initialize regex patterns + for _, checker := range task.SuccessWhen { + for _, v := range checker.Body { + err := genReg(v) + assert.NoError(t, err) + } + } + reasons, _ := task.checkResult() + assert.NotEmpty(t, reasons) + }) + }) + + t.Run("getHostName", func(t *T.T) { + t.Run("with port", func(t *T.T) { + task := &GRPCTask{ + Server: "localhost:50051", } hostnames, err := task.getHostName() assert.NoError(t, err) @@ -484,17 +521,36 @@ func TestGRPCTask_OtherMethods(t *T.T) { }) t.Run("getVariableValue", func(t *T.T) { - task := &GRPCTask{} - _, err := task.getVariableValue(Variable{}) - assert.Error(t, err) - assert.Contains(t, err.Error(), "not support") + t.Run("without post script", func(t *T.T) { + task := &GRPCTask{} + _, err := task.getVariableValue(Variable{}) + assert.Error(t, err) + assert.Contains(t, err.Error(), "post_script is empty") + }) + + t.Run("without result", func(t *T.T) { + task := &GRPCTask{ + PostScript: "vars[\"test\"] = \"value\"", + } + _, err := task.getVariableValue(Variable{ + TaskVarName: "test", + }) + assert.Error(t, err) + assert.Contains(t, err.Error(), "response body is empty") + }) }) t.Run("getRawTask", func(t *T.T) { task := &GRPCTask{ - Server: "localhost:50051", - FullMethod: "greeter.Greeter/SayHello", - Timeout: "30s", + Server: "localhost:50051", + AdvanceOptions: &GRPCAdvanceOption{ + RequestOptions: &GRPCOptRequest{ + RequestTimeout: "30s", + ProtoFiles: &GRPCProtoFilesDiscovery{ + 
FullMethod: "greeter.Greeter/SayHello", + }, + }, + }, } task.initTask() @@ -506,41 +562,13 @@ func TestGRPCTask_OtherMethods(t *T.T) { err = json.Unmarshal([]byte(rawTask), &parsed) assert.NoError(t, err) assert.Equal(t, task.Server, parsed.Server) - assert.Equal(t, task.FullMethod, parsed.FullMethod) - assert.Equal(t, task.Timeout, parsed.Timeout) - }) -} - -func TestGRPCTask_Timeout(t *T.T) { - serverAddr := "localhost:50051" // Change to your server address - - t.Run("timeout works", func(t *T.T) { - task := &GRPCTask{ - Server: serverAddr, - FullMethod: "greeter.Greeter/SayHello", - Timeout: "100ms", - } - task.initTask() - - requestData := map[string]interface{}{"name": "test"} - jsonRequest, _ := json.Marshal(requestData) - task.JSONRequest = jsonRequest - - err := task.init() - assert.NoError(t, err) - - // Test that timeout is set correctly - assert.Equal(t, 100*time.Millisecond, task.timeout) - - err = task.run() - assert.NoError(t, err) // Should succeed within timeout - - defer task.stop() + assert.Equal(t, task.getFullMethod(), parsed.getFullMethod()) + assert.Equal(t, task.AdvanceOptions.RequestOptions.RequestTimeout, parsed.AdvanceOptions.RequestOptions.RequestTimeout) }) } func TestBuildExtendedProtoMap(t *T.T) { - t.Run("with import path matching", func(t *T.T) { + t.Run("with all imports present", func(t *T.T) { greeterProto := `syntax = "proto3"; package greeter; import "greeter/user.proto"; @@ -555,56 +583,40 @@ message GetUserRequest { }` protoFiles := map[string]string{ - "greeter.proto": greeterProto, - "user.proto": userProto, + "greeter.proto": greeterProto, + "greeter/user.proto": userProto, } - extendedMap := buildExtendedProtoMap(protoFiles) + extendedMap, err := buildExtendedProtoMap(protoFiles) + assert.NoError(t, err) // Check original files are preserved assert.Equal(t, greeterProto, extendedMap["greeter.proto"]) - assert.Equal(t, userProto, extendedMap["user.proto"]) - - // Check base names are added - assert.Equal(t, greeterProto, extendedMap["greeter.proto"]) - assert.Equal(t, userProto, extendedMap["user.proto"]) - - // Check import path is mapped - assert.Equal(t, userProto, extendedMap["greeter/user.proto"], "import path should be mapped to user.proto content") + assert.Equal(t, userProto, extendedMap["greeter/user.proto"]) + assert.Equal(t, 2, len(extendedMap)) }) - t.Run("with full path already in map", func(t *T.T) { + t.Run("with missing import", func(t *T.T) { greeterProto := `syntax = "proto3"; package greeter; import "greeter/user.proto"; -service Greeter {} -` - - userProto := `syntax = "proto3"; -package user; -message GetUserRequest {} -` +service Greeter { + rpc SayHello (HelloRequest) returns (HelloReply) {} +}` protoFiles := map[string]string{ - "greeter/greeter.proto": greeterProto, - "greeter/user.proto": userProto, + "greeter.proto": greeterProto, + // user.proto is missing } - extendedMap := buildExtendedProtoMap(protoFiles) - - // Check original paths are preserved - assert.Equal(t, greeterProto, extendedMap["greeter/greeter.proto"]) - assert.Equal(t, userProto, extendedMap["greeter/user.proto"]) - - // Check base names are added - assert.Equal(t, greeterProto, extendedMap["greeter.proto"]) - assert.Equal(t, userProto, extendedMap["user.proto"]) - - // Import path should already exist (no need to match) - assert.Equal(t, userProto, extendedMap["greeter/user.proto"]) + extendedMap, err := buildExtendedProtoMap(protoFiles) + assert.Error(t, err) + assert.Contains(t, err.Error(), "missing imports") + assert.Contains(t, err.Error(), 
"greeter/user.proto") + assert.Nil(t, extendedMap) }) - t.Run("with multiple imports", func(t *T.T) { + t.Run("with multiple imports all present", func(t *T.T) { mainProto := `syntax = "proto3"; package main; import "greeter/user.proto"; @@ -623,16 +635,45 @@ message Common {} ` protoFiles := map[string]string{ - "main.proto": mainProto, - "user.proto": userProto, - "common.proto": commonProto, + "main.proto": mainProto, + "greeter/user.proto": userProto, + "greeter/common.proto": commonProto, } - extendedMap := buildExtendedProtoMap(protoFiles) + extendedMap, err := buildExtendedProtoMap(protoFiles) + assert.NoError(t, err) - // Check all imports are mapped + // Check all files are preserved + assert.Equal(t, mainProto, extendedMap["main.proto"]) assert.Equal(t, userProto, extendedMap["greeter/user.proto"]) assert.Equal(t, commonProto, extendedMap["greeter/common.proto"]) + assert.Equal(t, 3, len(extendedMap)) + }) + + t.Run("with multiple imports one missing", func(t *T.T) { + mainProto := `syntax = "proto3"; +package main; +import "greeter/user.proto"; +import "greeter/common.proto"; +service Main {} +` + + userProto := `syntax = "proto3"; +package user; +message User {} +` + + protoFiles := map[string]string{ + "main.proto": mainProto, + "greeter/user.proto": userProto, + // greeter/common.proto is missing + } + + extendedMap, err := buildExtendedProtoMap(protoFiles) + assert.Error(t, err) + assert.Contains(t, err.Error(), "missing imports") + assert.Contains(t, err.Error(), "greeter/common.proto") + assert.Nil(t, extendedMap) }) t.Run("with no imports", func(t *T.T) { @@ -640,24 +681,653 @@ message Common {} "simple.proto": `syntax = "proto3"; package simple;`, } - extendedMap := buildExtendedProtoMap(protoFiles) + extendedMap, err := buildExtendedProtoMap(protoFiles) + assert.NoError(t, err) - // If original key is already just filename, filepath.Base returns same value - // So map will have 1 entry (same key set twice) + // Should have 1 entry (original file) assert.Equal(t, 1, len(extendedMap)) assert.NotEmpty(t, extendedMap["simple.proto"]) }) - t.Run("with path in filename", func(t *T.T) { + t.Run("with path in filename and no imports", func(t *T.T) { protoFiles := map[string]string{ "path/to/simple.proto": `syntax = "proto3"; package simple;`, } - extendedMap := buildExtendedProtoMap(protoFiles) + extendedMap, err := buildExtendedProtoMap(protoFiles) + assert.NoError(t, err) - // Should have 2 entries: original path and base name - assert.Equal(t, 2, len(extendedMap)) + // Should have 1 entry (original path preserved) + assert.Equal(t, 1, len(extendedMap)) assert.NotEmpty(t, extendedMap["path/to/simple.proto"]) - assert.NotEmpty(t, extendedMap["simple.proto"]) + }) + + t.Run("with empty proto files", func(t *T.T) { + protoFiles := map[string]string{} + + extendedMap, err := buildExtendedProtoMap(protoFiles) + assert.NoError(t, err) + assert.Equal(t, 0, len(extendedMap)) + }) +} + +//nolint:golint // Requires real gRPC server access +func TestGRPCTask_PostScript(t *T.T) { + t.Skip("Skipping test that requires real gRPC server") + serverAddr := "localhost:50051" + greeterProto, err := os.ReadFile("grpcproto/greeter.proto") + if err != nil { + t.Fatalf("Failed to read greeter.proto: %v", err) + } + commonProto, err := os.ReadFile("grpcproto/common.proto") + if err != nil { + t.Fatalf("Failed to read common.proto: %v", err) + } + + t.Run("post script success", func(t *T.T) { + task := &GRPCTask{ + Server: serverAddr, + AdvanceOptions: &GRPCAdvanceOption{ + RequestOptions: 
&GRPCOptRequest{ + Metadata: map[string]string{ + "api-key": "test-key-123", + }, + ProtoFiles: &GRPCProtoFilesDiscovery{ + FullMethod: "greeter.Greeter/SayHello", + JSONRequest: `{"name":"test"}`, + ProtoFiles: map[string]string{ + "greeter.proto": string(greeterProto), + "grpcproto/common.proto": string(commonProto), + }, + }, + }, + }, + PostScript: ` +body = load_json(response["body"]) +vars["msg"] = body["msg"] +result["is_failed"] = false + `, + } + task.initTask() + + err := task.init() + assert.NoError(t, err) + + err = task.run() + assert.NoError(t, err) + if task.reqError != "" { + t.Fatalf("RPC call failed: %s", task.reqError) + } + assert.NotNil(t, task.postScriptResult, "postScriptResult should not be nil, reqError: %s", task.reqError) + if task.postScriptResult != nil { + assert.Equal(t, "你好, test! 这是来自 gRPC 的问候", task.postScriptResult.Vars["msg"]) + } + }) + + t.Run("post script failure", func(t *T.T) { + task := &GRPCTask{ + Server: serverAddr, + AdvanceOptions: &GRPCAdvanceOption{ + RequestOptions: &GRPCOptRequest{ + Metadata: map[string]string{ + "api-key": "test-key-123", + }, + ProtoFiles: &GRPCProtoFilesDiscovery{ + FullMethod: "greeter.Greeter/SayHello", + JSONRequest: `{"name":"test"}`, + ProtoFiles: map[string]string{ + "greeter.proto": string(greeterProto), + "grpcproto/common.proto": string(commonProto), + }, + }, + }, + }, + PostScript: ` +result["is_failed"] = true +result["error_message"] = "custom error" + `, + } + task.initTask() + + err := task.init() + assert.NoError(t, err) + + err = task.run() + assert.NoError(t, err) // script runs but marks as failed + assert.NotNil(t, task.postScriptResult) + assert.True(t, task.postScriptResult.Result.IsFailed) + + reasons, flag := task.checkResult() + assert.False(t, flag) + assert.NotEmpty(t, reasons) + }) +} + +//nolint:golint // Requires real gRPC server access +func TestGRPCTask_SuccessWhen(t *T.T) { + t.Skip("Skipping test that requires real gRPC server") + serverAddr := "localhost:50051" + greeterProto, err := os.ReadFile("grpcproto/greeter.proto") + if err != nil { + t.Fatalf("Failed to read greeter.proto: %v", err) + } + commonProto, err := os.ReadFile("grpcproto/common.proto") + if err != nil { + t.Fatalf("Failed to read common.proto: %v", err) + } + + t.Run("success with body contains", func(t *T.T) { + task := &GRPCTask{ + Server: serverAddr, + AdvanceOptions: &GRPCAdvanceOption{ + RequestOptions: &GRPCOptRequest{ + Metadata: map[string]string{ + "api-key": "test-key-123", + }, + ProtoFiles: &GRPCProtoFilesDiscovery{ + FullMethod: "greeter.Greeter/SayHello", + JSONRequest: `{"name":"test"}`, + ProtoFiles: map[string]string{ + "greeter.proto": string(greeterProto), + "grpcproto/common.proto": string(commonProto), + }, + }, + }, + }, + SuccessWhen: []*GRPCSuccess{ + { + Body: []*SuccessOption{ + {Contains: "test"}, + }, + }, + }, + } + task.initTask() + + err := task.init() + assert.NoError(t, err) + + err = task.run() + assert.NoError(t, err) + + reasons, flag := task.checkResult() + assert.True(t, flag) + assert.Empty(t, reasons) + + tags, fields := task.getResults() + assert.Equal(t, "OK", tags["status"]) + assert.Equal(t, int64(1), fields["success"]) + }) + + t.Run("failure with body check", func(t *T.T) { + task := &GRPCTask{ + Server: serverAddr, + AdvanceOptions: &GRPCAdvanceOption{ + RequestOptions: &GRPCOptRequest{ + Metadata: map[string]string{ + "api-key": "test-key-123", + }, + ProtoFiles: &GRPCProtoFilesDiscovery{ + FullMethod: "greeter.Greeter/SayHello", + JSONRequest: `{"name":"test"}`, + 
ProtoFiles: map[string]string{ + "greeter.proto": string(greeterProto), + "grpcproto/common.proto": string(commonProto), + }, + }, + }, + SuccessWhen: []*GRPCSuccess{ + { + Body: []*SuccessOption{ + {Contains: "notfound"}, + }, + }, + }, + SuccessWhenLogic: "and", + } + task.initTask() + + err := task.init() + assert.NoError(t, err) + + err = task.run() + assert.NoError(t, err) + + reasons, _ := task.checkResult() + // With "and" logic, if condition fails, it should fail + assert.NotEmpty(t, reasons) + }) + + t.Run("success with response time", func(t *T.T) { + task := &GRPCTask{ + Server: serverAddr, + AdvanceOptions: &GRPCAdvanceOption{ + RequestOptions: &GRPCOptRequest{ + Metadata: map[string]string{ + "api-key": "test-key-123", + }, + ProtoFiles: &GRPCProtoFilesDiscovery{ + FullMethod: "greeter.Greeter/SayHello", + JSONRequest: `{"name":"test"}`, + ProtoFiles: map[string]string{ + "greeter.proto": string(greeterProto), + "grpcproto/common.proto": string(commonProto), + }, + }, + }, + }, + SuccessWhen: []*GRPCSuccess{ + { + ResponseTime: "10s", + }, + }, + } + task.initTask() + + err := task.init() + assert.NoError(t, err) + + err = task.run() + assert.NoError(t, err) + + _, flag := task.checkResult() + assert.True(t, flag) + }) + + t.Run("failure with response time exceeded", func(t *T.T) { + task := &GRPCTask{ + Server: serverAddr, + AdvanceOptions: &GRPCAdvanceOption{ + RequestOptions: &GRPCOptRequest{ + Metadata: map[string]string{ + "api-key": "test-key-123", + }, + ProtoFiles: &GRPCProtoFilesDiscovery{ + FullMethod: "greeter.Greeter/SayHello", + JSONRequest: `{"name":"test"}`, + ProtoFiles: map[string]string{ + "greeter.proto": string(greeterProto), + "grpcproto/common.proto": string(commonProto), + }, + }, + }, + }, + SuccessWhen: []*GRPCSuccess{ + { + ResponseTime: "1ms", // very short timeout + }, + }, + } + task.initTask() + + err := task.init() + assert.NoError(t, err) + + // Add a small delay to ensure response time exceeds threshold + time.Sleep(10 * time.Millisecond) + + err = task.run() + assert.NoError(t, err) + + reasons, flag := task.checkResult() + // Response time check may pass or fail depending on actual response time + _ = reasons + _ = flag + }) +} + +//nolint:golint // Requires real gRPC server access +func TestGRPCTask_RequestDiscoveryModes(t *T.T) { + t.Skip("Skipping test that requires real gRPC server") + serverAddr := "localhost:50051" + // Read the proto files + greeterProto, err := os.ReadFile("grpcproto/greeter.proto") + if err != nil { + t.Skipf("Failed to read greeter.proto: %v", err) + } + + commonProto, err := os.ReadFile("grpcproto/common.proto") + if err != nil { + t.Skipf("Failed to read common.proto: %v", err) + } + + // Mode 1: ProtoFiles discovery + t.Run("Request mode 1: ProtoFiles discovery", func(t *T.T) { + task := &GRPCTask{ + Server: serverAddr, + AdvanceOptions: &GRPCAdvanceOption{ + RequestOptions: &GRPCOptRequest{ + ProtoFiles: &GRPCProtoFilesDiscovery{ + FullMethod: "greeter.Greeter/SayHello", + JSONRequest: `{"name":"test"}`, + ProtoFiles: map[string]string{ + "greeter.proto": string(greeterProto), + "grpcproto/common.proto": string(commonProto), + }, + }, + Metadata: map[string]string{ + "api-key": "test-key-123", + }, + RequestTimeout: "1s", + }, + Certificate: func() *GRPCOptCertificate { + // Fall back to skipping server certificate verification + return &GRPCOptCertificate{ + IgnoreServerCertificateError: true, + } + }(), + }, + SuccessWhenLogic: "and", + SuccessWhen: []*GRPCSuccess{ + { + Body: []*SuccessOption{ + {Contains: "test"}, + }, + ResponseTime: "1s", + }, + }, + } + task.initTask() +
+ err := task.init() + assert.NoError(t, err) + + err = task.run() + assert.NoError(t, err) + assert.Empty(t, task.reqError) + assert.NotNil(t, task.result) + + tags, fields := task.getResults() + assert.Equal(t, "greeter.Greeter/SayHello", tags["method"]) + assert.Equal(t, "OK", tags["status"]) + t.Logf("ProtoFiles mode - tags: %v, fields: %v", tags, fields) + }) + + // Mode 2: Reflection discovery + t.Run("Request mode 2: Reflection discovery", func(t *T.T) { + task := &GRPCTask{ + Server: serverAddr, + AdvanceOptions: &GRPCAdvanceOption{ + RequestOptions: &GRPCOptRequest{ + Reflection: &GRPCReflectionDiscovery{ + FullMethod: "greeter.Greeter/SayHello", + JSONRequest: `{"name":"reflection-test"}`, + }, + Metadata: map[string]string{ + "api-key": "test-key-123", + }, + RequestTimeout: "1s", + }, + Certificate: func() *GRPCOptCertificate { + // Fall back to skipping server certificate verification + return &GRPCOptCertificate{ + IgnoreServerCertificateError: true, + } + }(), + }, + SuccessWhen: []*GRPCSuccess{ + { + Body: []*SuccessOption{ + {Contains: "reflection-test"}, + }, + ResponseTime: "1s", + }, + }, + } + task.initTask() + + err := task.init() + assert.NoError(t, err) + + err = task.run() + assert.NoError(t, err) + assert.Empty(t, task.reqError) + assert.NotNil(t, task.result) + + tags, fields := task.getResults() + assert.Equal(t, "greeter.Greeter/SayHello", tags["method"]) + assert.Equal(t, "OK", tags["status"]) + t.Logf("Reflection mode - tags: %v, fields: %v", tags, fields) + }) + + // Mode 3: HealthCheck discovery + t.Run("Request mode 3: HealthCheck discovery", func(t *T.T) { + task := &GRPCTask{ + Server: "localhost:50053", + AdvanceOptions: &GRPCAdvanceOption{ + RequestOptions: &GRPCOptRequest{ + HealthCheck: &GRPCHealthCheckDiscovery{ + // Service: "greeter.Greeter", + }, + Metadata: map[string]string{ + "api-key": "test-key-123", + }, + RequestTimeout: "1s", + }, + Certificate: func() *GRPCOptCertificate { + // Fall back to skipping server certificate verification + return &GRPCOptCertificate{ + IgnoreServerCertificateError: true, + } + }(), + }, + SuccessWhen: []*GRPCSuccess{ + { + Body: []*SuccessOption{ + {Contains: "SERVING"}, + }, + }, + }, + } + task.initTask() + + err := task.init() + assert.NoError(t, err) + + err = task.run() + assert.NoError(t, err) + assert.Empty(t, task.reqError) + assert.NotNil(t, task.result) + assert.Contains(t, string(task.result), "SERVING") + + tags, fields := task.getResults() + assert.Equal(t, "grpc.health.v1.Health/Check", tags["method"]) + assert.Equal(t, "OK", tags["status"]) + t.Logf("HealthCheck mode - tags: %v, fields: %v", tags, fields) + }) +} + +func TestGRPCTask_RenderTemplate(t *T.T) { + t.Run("render template with all fields", func(t *T.T) { + ct := &GRPCTask{ + Task: &Task{}, + Server: "{{server_host}}:{{server_port}}", + // PostScript is not rendered by template (static script content) + PostScript: "vars[\"message\"] = \"hello\"", + SuccessWhen: []*GRPCSuccess{ + { + ResponseTime: "{{response_time}}", + Body: []*SuccessOption{ + {Contains: "{{body_contains}}"}, + }, + }, + }, + AdvanceOptions: &GRPCAdvanceOption{ + RequestOptions: &GRPCOptRequest{ + RequestTimeout: "{{timeout}}", + Metadata: map[string]string{ + "{{metadata_key}}": "{{metadata_value}}", + }, + ProtoFiles: &GRPCProtoFilesDiscovery{ + FullMethod: "{{service}}.{{method}}/{{rpc}}", + JSONRequest: "{{json_request}}", + }, + }, + }, + } + + fm := template.FuncMap{ + "server_host": func() string { + return "localhost" + }, + "server_port": func() string { + return "50051" + }, + "response_time": func() string { + return "100ms" + }, + "body_contains": func()
string { + return "success" + }, + "timeout": func() string { + return "5s" + }, + "metadata_key": func() string { + return "api-key" + }, + "metadata_value": func() string { + return "test-key-123" + }, + "service": func() string { + return "greeter" + }, + "method": func() string { + return "Greeter" + }, + "rpc": func() string { + return "SayHello" + }, + "json_request": func() string { + return `{"name":"test"}` + }, + } + + task, err := NewTask("", ct) + assert.NoError(t, err) + + ct, ok := task.(*GRPCTask) + assert.True(t, ok) + assert.NoError(t, ct.renderTemplate(fm)) + + // Verify server + assert.Equal(t, "localhost:50051", ct.Server) + + // Verify post script (not rendered, should remain unchanged) + assert.Equal(t, "vars[\"message\"] = \"hello\"", ct.PostScript) + + // Verify success when + assert.Equal(t, "100ms", ct.SuccessWhen[0].ResponseTime) + assert.Equal(t, "success", ct.SuccessWhen[0].Body[0].Contains) + + // Verify advance options + assert.Equal(t, "5s", ct.AdvanceOptions.RequestOptions.RequestTimeout) + assert.Equal(t, "test-key-123", ct.AdvanceOptions.RequestOptions.Metadata["api-key"]) + assert.Equal(t, "greeter.Greeter/SayHello", ct.AdvanceOptions.RequestOptions.ProtoFiles.FullMethod) + assert.Equal(t, `{"name":"test"}`, ct.AdvanceOptions.RequestOptions.ProtoFiles.JSONRequest) + }) + + t.Run("render template with empty raw task", func(t *T.T) { + ct := &GRPCTask{ + Task: &Task{}, + Server: "localhost:50051", + } + + fm := template.FuncMap{} + + task, err := NewTask("", ct) + assert.NoError(t, err) + + ct, ok := task.(*GRPCTask) + assert.True(t, ok) + assert.NoError(t, ct.renderTemplate(fm)) + assert.Equal(t, "localhost:50051", ct.Server) + }) + + t.Run("render template with invalid template", func(t *T.T) { + ct := &GRPCTask{ + Task: &Task{}, + Server: "{{invalid_func}}", + } + + fm := template.FuncMap{} + + task, err := NewTask("", ct) + assert.NoError(t, err) + + ct, ok := task.(*GRPCTask) + assert.True(t, ok) + err = ct.renderTemplate(fm) + assert.Error(t, err) + assert.Contains(t, err.Error(), "render server failed") + }) + + t.Run("render template with reflection discovery", func(t *T.T) { + ct := &GRPCTask{ + Task: &Task{}, + Server: "localhost:50051", + AdvanceOptions: &GRPCAdvanceOption{ + RequestOptions: &GRPCOptRequest{ + Reflection: &GRPCReflectionDiscovery{ + FullMethod: "{{service}}.{{method}}/{{rpc}}", + JSONRequest: "{{json_request}}", + }, + }, + }, + } + + fm := template.FuncMap{ + "service": func() string { + return "greeter" + }, + "method": func() string { + return "Greeter" + }, + "rpc": func() string { + return "SayHello" + }, + "json_request": func() string { + return `{"name":"test"}` + }, + } + + task, err := NewTask("", ct) + assert.NoError(t, err) + + ct, ok := task.(*GRPCTask) + assert.True(t, ok) + assert.NoError(t, ct.renderTemplate(fm)) + + assert.Equal(t, "greeter.Greeter/SayHello", ct.AdvanceOptions.RequestOptions.Reflection.FullMethod) + assert.Equal(t, `{"name":"test"}`, ct.AdvanceOptions.RequestOptions.Reflection.JSONRequest) + }) + + t.Run("render template with health check discovery", func(t *T.T) { + ct := &GRPCTask{ + Task: &Task{}, + Server: "localhost:50051", + AdvanceOptions: &GRPCAdvanceOption{ + RequestOptions: &GRPCOptRequest{ + HealthCheck: &GRPCHealthCheckDiscovery{ + Service: "{{service_name}}", + }, + }, + }, + } + + fm := template.FuncMap{ + "service_name": func() string { + return "greeter.Greeter" + }, + } + + task, err := NewTask("", ct) + assert.NoError(t, err) + + ct, ok := task.(*GRPCTask) + assert.True(t, ok) 
+ assert.NoError(t, ct.renderTemplate(fm)) + + assert.Equal(t, "greeter.Greeter", ct.AdvanceOptions.RequestOptions.HealthCheck.Service) }) } diff --git a/dialtesting/grpcproto/common.proto b/dialtesting/grpcproto/common.proto new file mode 100644 index 00000000..22e8c87d --- /dev/null +++ b/dialtesting/grpcproto/common.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; + +package common; + +option go_package = "datakittest/grpc/pb"; + +message result { + int32 code = 1; + string msg = 2; +} diff --git a/dialtesting/grpcproto/greeter.proto b/dialtesting/grpcproto/greeter.proto new file mode 100644 index 00000000..aafacab6 --- /dev/null +++ b/dialtesting/grpcproto/greeter.proto @@ -0,0 +1,15 @@ +syntax = "proto3"; + +package greeter; + +option go_package = "datakittest/grpc/pb"; + +import "grpcproto/common.proto"; + +service Greeter { + rpc SayHello (HelloRequest) returns (common.result) {} +} + +message HelloRequest { + string name = 1; +} diff --git a/dialtesting/task.go b/dialtesting/task.go index ccd86c12..1d6a1174 100644 --- a/dialtesting/task.go +++ b/dialtesting/task.go @@ -679,6 +679,13 @@ func (t *Task) GetPostScriptVars() Vars { return nil } + if ct, ok := t.child.(*GRPCTask); ok { + if ct.postScriptResult != nil { + return ct.postScriptResult.Vars + } + return nil + } + return nil } diff --git a/go.mod b/go.mod index 89583c8b..52ba4a1f 100644 --- a/go.mod +++ b/go.mod @@ -9,30 +9,28 @@ require ( github.com/brianvoe/gofakeit/v6 v6.28.0 github.com/didip/tollbooth/v6 v6.1.2 github.com/gin-gonic/gin v1.9.0 - github.com/go-ping/ping v1.1.0 github.com/gobwas/ws v1.1.0 github.com/gogo/protobuf v1.3.2 github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8 github.com/gorilla/websocket v1.5.0 github.com/influxdata/influxdb1-client v0.0.0-20220302092344-a9ab5670611c github.com/influxdata/line-protocol/v2 v2.2.1 - github.com/jhump/protoreflect v1.16.0 + github.com/jhump/protoreflect v1.15.1 github.com/klauspost/compress v1.16.7 github.com/pierrec/lz4/v4 v4.1.18 github.com/prometheus/client_golang v1.16.0 github.com/prometheus/client_model v0.4.0 github.com/prometheus/common v0.44.0 - github.com/prometheus/prometheus v0.46.0 + github.com/prometheus/prometheus v0.39.1 github.com/robfig/cron/v3 v3.0.1 github.com/rs/xid v1.2.1 github.com/stretchr/testify v1.9.0 - github.com/tidwall/wal v1.1.7 go.uber.org/zap v1.23.0 golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 - golang.org/x/net v0.25.0 - golang.org/x/sys v0.20.0 - google.golang.org/grpc v1.61.0 - google.golang.org/protobuf v1.34.2 + golang.org/x/net v0.16.0 + golang.org/x/sys v0.13.0 + google.golang.org/grpc v1.51.0 + google.golang.org/protobuf v1.31.0 gopkg.in/CodapeWild/dd-trace-go.v1 v1.35.17 gopkg.in/natefinch/lumberjack.v2 v2.0.0 gopkg.in/yaml.v2 v2.4.0 @@ -51,7 +49,7 @@ require ( github.com/araddon/dateparse v0.0.0-20201001162425-8aadafed4dc4 // indirect github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/bufbuild/protocompile v0.14.1 // indirect + github.com/bufbuild/protocompile v0.4.0 // indirect github.com/bytedance/sonic v1.8.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect @@ -66,8 +64,8 @@ require ( github.com/gobwas/pool v0.2.1 // indirect github.com/goccy/go-json v0.10.3 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.4 // indirect - github.com/google/uuid v1.4.0 // indirect + 
github.com/golang/protobuf v1.5.3 // indirect + github.com/google/uuid v1.3.0 // indirect github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-retryablehttp v0.7.4 // indirect @@ -92,21 +90,20 @@ require ( github.com/tidwall/gjson v1.17.0 // indirect github.com/tidwall/match v1.1.1 // indirect github.com/tidwall/pretty v1.2.0 // indirect - github.com/tidwall/tinylru v1.1.0 // indirect github.com/tinylib/msgp v1.1.6 // indirect github.com/twitchyliquid64/golang-asm v0.15.1 // indirect github.com/ugorji/go/codec v1.2.9 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/multierr v1.6.0 // indirect golang.org/x/arch v0.0.0-20210923205945-b76863e36670 // indirect - golang.org/x/crypto v0.23.0 // indirect + golang.org/x/crypto v0.14.0 // indirect golang.org/x/mod v0.13.0 // indirect - golang.org/x/sync v0.8.0 // indirect - golang.org/x/text v0.15.0 // indirect + golang.org/x/sync v0.4.0 // indirect + golang.org/x/text v0.13.0 // indirect golang.org/x/time v0.3.0 // indirect golang.org/x/tools v0.14.0 // indirect golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17 // indirect + google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect lukechampine.com/uint128 v1.2.0 // indirect modernc.org/cc/v3 v3.40.0 // indirect diff --git a/go.sum b/go.sum index ee282956..b27a62b8 100644 --- a/go.sum +++ b/go.sum @@ -141,8 +141,8 @@ github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dR github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b/go.mod h1:H0wQNHz2YrLsuXOZozoeDmnHXkNCRmMW0gwFWDfEZDA= github.com/brianvoe/gofakeit/v6 v6.28.0 h1:Xib46XXuQfmlLS2EXRuJpqcw8St6qSZz75OUo0tgAW4= github.com/brianvoe/gofakeit/v6 v6.28.0/go.mod h1:Xj58BMSnFqcn/fAQeSK+/PLtC5kSb7FJIq4JyGa8vEs= -github.com/bufbuild/protocompile v0.14.1 h1:iA73zAf/fyljNjQKwYzUHD6AD4R8KMasmwa/FBatYVw= -github.com/bufbuild/protocompile v0.14.1/go.mod h1:ppVdAIhbr2H8asPk6k4pY7t9zB1OU5DoEw9xY/FUi1c= +github.com/bufbuild/protocompile v0.4.0 h1:LbFKd2XowZvQ/kajzguUp2DC9UEIQhIq77fZZlaQsNA= +github.com/bufbuild/protocompile v0.4.0/go.mod h1:3v93+mbWn/v3xzN+31nwkJfrEpAUwp+BagBSZWx+TP8= github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM= github.com/bytedance/sonic v1.8.0 h1:ea0Xadu+sHlu7x5O3gKhRpQ1IKiMrSiHttPF0ybECuA= github.com/bytedance/sonic v1.8.0/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U= @@ -260,8 +260,6 @@ github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL9 github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-pg/pg/v10 v10.10.6/go.mod h1:GLmFXufrElQHf5uzM3BQlcfwV3nsgnHue5uzjQ6Nqxg= github.com/go-pg/zerochecker v0.2.0/go.mod h1:NJZ4wKL0NmTtz0GKCoJ8kym6Xn/EQzXRl2OnAe7MmDo= -github.com/go-ping/ping v1.1.0 h1:3MCGhVX4fyEUuhsfwPrsEdQw6xspHkv5zHsiSoDFZYw= -github.com/go-ping/ping v1.1.0/go.mod h1:xIFjORFzTxqIV/tDVGO4eDy/bLuSyawEeojSm3GfRGk= github.com/go-pkgz/expirable-cache v0.0.3 h1:rTh6qNPp78z0bQE6HDhXBHUwqnV9i09Vm6dksJLXQDc= github.com/go-pkgz/expirable-cache v0.0.3/go.mod h1:+IauqN00R2FqNRLCLA+X5YljQJrwB179PfiAoMPlTlQ= github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= @@ -359,8 +357,8 @@ github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw 
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= -github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -380,7 +378,7 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= @@ -408,9 +406,8 @@ github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8/go.mod h1:Jh3hGz2jkYa github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= -github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= @@ -533,8 +530,8 @@ github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/U github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= github.com/jcmturner/gokrb5/v8 v8.4.2/go.mod h1:sb+Xq/fTY5yktf/VxLsE3wlfPqQjp0aWNYyvBVK62bc= github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= -github.com/jhump/protoreflect v1.16.0 h1:54fZg+49widqXYQ0b+usAFHbMkBGR4PpXrsHc8+TBDg= -github.com/jhump/protoreflect v1.16.0/go.mod h1:oYPd7nPvcBw/5wlDfm/AVmU9zH9BgqGCI469pGxfj/8= +github.com/jhump/protoreflect v1.15.1 h1:HUMERORf3I3ZdX05WaQ6MIpd/NJ434hTp5YiKgfCL6c= +github.com/jhump/protoreflect v1.15.1/go.mod h1:jD/2GMKKE6OqX8qTjhADU1e6DShO+gavG9e0Q693nKo= github.com/jinzhu/gorm v1.9.16/go.mod 
h1:G3LB3wezTOWM2ITLzPxEXgSkOXAntiLHS7UdBefADcs= github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= github.com/jinzhu/now v1.0.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= @@ -732,8 +729,8 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.11.0 h1:5EAgkfkMl659uZPbe9AS2N68a7Cc1TJbPEuGzFuRbyk= github.com/prometheus/procfs v0.11.0/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= -github.com/prometheus/prometheus v0.46.0 h1:9JSdXnsuT6YsbODEhSQMwxNkGwPExfmzqG73vCMk/Kw= -github.com/prometheus/prometheus v0.46.0/go.mod h1:10L5IJE5CEsjee1FnOcVswYXlPIscDWWt3IJ2UDYrz4= +github.com/prometheus/prometheus v0.39.1 h1:abZM6A+sKAv2eKTbRIaHq4amM/nT07MuxRm0+QTaTj0= +github.com/prometheus/prometheus v0.39.1/go.mod h1:GjQjgLhHMc0oo4Ko7qt/yBSJMY4hUoiAZwsYQgjaePA= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= @@ -799,7 +796,6 @@ github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpP github.com/tidwall/btree v0.6.0/go.mod h1:TzIRzen6yHbibdSfK6t8QimqbUnoxUSrZfeW7Uob0q4= github.com/tidwall/buntdb v1.2.6/go.mod h1:zpXqlA5D2772I4cTqV3ifr2AZihDgi8FV7xAQu6edfc= github.com/tidwall/gjson v1.8.0/go.mod h1:5/xDoumyyDNerp2U36lyolv46b3uF/9Bu6OfyQ9GImk= -github.com/tidwall/gjson v1.10.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/gjson v1.17.0 h1:/Jocvlh98kcTfpN2+JzGQWQcqrPQwDrVEMApx/M5ZwM= github.com/tidwall/gjson v1.17.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/grect v0.1.2/go.mod h1:v+n4ewstPGduVJebcp5Eh2WXBJBumNzyhK8GZt4gHNw= @@ -812,11 +808,7 @@ github.com/tidwall/pretty v1.1.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhV github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/tidwall/rtred v0.1.2/go.mod h1:hd69WNXQ5RP9vHd7dqekAz+RIdtfBogmglkZSRxCHFQ= -github.com/tidwall/tinylru v1.1.0 h1:XY6IUfzVTU9rpwdhKUF6nQdChgCdGjkMfLzbWyiau6I= -github.com/tidwall/tinylru v1.1.0/go.mod h1:3+bX+TJ2baOLMWTnlyNWHh4QMnFyARg2TLTQ6OFbzw8= github.com/tidwall/tinyqueue v0.1.1/go.mod h1:O/QNHwrnjqr6IHItYrzoHAKYhBkLI67Q096fQP5zMYw= -github.com/tidwall/wal v1.1.7 h1:emc1TRjIVsdKKSnpwGBAcsAGg0767SvUk8+ygx7Bb+4= -github.com/tidwall/wal v1.1.7/go.mod h1:r6lR1j27W9EPalgHiB7zLJDYu3mzW5BQP5KrzBpYY/E= github.com/tinylib/msgp v1.1.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tinylib/msgp v1.1.6 h1:i+SbKraHhnrf9M5MYmvQhFnbLhAXSDWF8WWsuyRdocw= github.com/tinylib/msgp v1.1.6/go.mod h1:75BAfg2hauQhs3qedfdDZmWAPcFMAvJE5b9rGOMufyw= @@ -913,8 +905,8 @@ golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.23.0 
h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= -golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1010,8 +1002,8 @@ golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20210913180222-943fd674d43e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= -golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= +golang.org/x/net v0.16.0 h1:7eBu7KsSvFDtSXUIDbh3aqlK4DPsZ1rByC8PFfBThos= +golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1040,8 +1032,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ= +golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1126,8 +1118,8 @@ golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= -golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod 
h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -1143,8 +1135,8 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= -golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1322,8 +1314,8 @@ google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEc google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210921142501-181ce0d877f6/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17 h1:Jyp0Hsi0bmHXG6k9eATXoYtjd6e2UzZ1SCn/wIupY14= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:oQ5rr10WTTMvP4A36n8JpR1OrO1BEiV4f78CneXZxkA= +google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006 h1:mmbq5q8M1t7dhkLw320YK4PsOXm6jdnUAkErImaIqOg= +google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -1352,8 +1344,8 @@ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.61.0 h1:TOvOcuXn30kRao+gfcvsebNEa5iZIiLkisYEkf7R7o0= -google.golang.org/grpc v1.61.0/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs= +google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U= +google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -1368,8 +1360,8 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod 
h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/CodapeWild/dd-trace-go.v1 v1.35.17 h1:+FLF3KjV2Syb0WlJ60dYTww7aPOJmOA5l0/Kg4AtkSo= gopkg.in/CodapeWild/dd-trace-go.v1 v1.35.17/go.mod h1:sxFF5v+R56bO5bE/mN0K39GXaODxNiMrUM9K9xSjs+Q= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= diff --git a/vendor/github.com/bufbuild/protocompile/.golangci.yml b/vendor/github.com/bufbuild/protocompile/.golangci.yml index 0f00b31d..37a369a6 100644 --- a/vendor/github.com/bufbuild/protocompile/.golangci.yml +++ b/vendor/github.com/bufbuild/protocompile/.golangci.yml @@ -1,3 +1,7 @@ +run: + skip-dirs-use-default: false + skip-files: + - ".*\\.y\\.go$" linters-settings: errcheck: check-type-assertions: true @@ -22,9 +26,6 @@ linters-settings: # TODO, OPT, etc. comments are fine to commit. Use FIXME comments for # temporary hacks, and use godox to prevent committing them. keywords: [FIXME] - govet: - enable: - - fieldalignment varnamelen: ignore-decls: - T any @@ -34,8 +35,7 @@ linters: enable-all: true disable: # TODO: TCN-350 - initial exclusions for failing linters. - # Should enable all of these? - - depguard + # Should enable all of these. - dupl - errname - errorlint @@ -47,7 +47,7 @@ linters: - gochecknoinits - goconst - gocyclo - - err113 + - goerr113 - interfacebloat - nestif - nilerr @@ -57,32 +57,35 @@ linters: - varnamelen # Other disabled linters - cyclop # covered by gocyclo - - execinquery # deprecated in golangci v1.58.0 + - deadcode # deprecated by author + - exhaustivestruct # replaced by exhaustruct - funlen # rely on code review to limit function length - gocognit # dubious "cognitive overhead" quantification - gofumpt # prefer standard gofmt + - golint # deprecated by Go team - gomnd # some unnamed constants are okay - - inamedparam # named params in interface signatures are not always necessary + - ifshort # deprecated by author + - interfacer # deprecated by author - ireturn # "accept interfaces, return structs" isn't ironclad - lll # don't want hard limits for line length - maintidx # covered by gocyclo - - mnd # some unnamed constants are okay + - maligned # readability trumps efficient struct packing - nlreturn # generous whitespace violates house style - - protogetter # lots of false positives: can't use getter to check if field is present + - nosnakecase # deprecated in https://github.com/golangci/golangci-lint/pull/3065 - rowserrcheck # no SQL code in protocompile + - scopelint # deprecated by author - sqlclosecheck # no SQL code in protocompile + - structcheck # deprecated by author - testpackage # internal tests are fine + - varcheck # deprecated by author - wastedassign # not supported with generics - wrapcheck # don't _always_ need to wrap errors - wsl # generous whitespace violates house style issues: - exclude-dirs-use-default: false - exclude-files: - - ".*\\.y\\.go$" exclude: # Don't ban use of fmt.Errorf to create new errors, but the remaining # checks from err113 are useful. 
- - "do not define dynamic errors.*" + - "err113: do not define dynamic errors.*" exclude-rules: # Benchmarks can't be run in parallel - path: benchmark_test\.go @@ -94,12 +97,3 @@ issues: linters: - dupword - gosec - # exclude field alignment linter in tests - - path: _test\.go - text: "fieldalignment:" - linters: - - govet - # exclude fieldalignment "pointer bytes" failures - - text: "pointer bytes" - linters: - - govet diff --git a/vendor/github.com/bufbuild/protocompile/.protoc_version b/vendor/github.com/bufbuild/protocompile/.protoc_version deleted file mode 100644 index a0d6856d..00000000 --- a/vendor/github.com/bufbuild/protocompile/.protoc_version +++ /dev/null @@ -1 +0,0 @@ -27.0 diff --git a/vendor/github.com/bufbuild/protocompile/LICENSE b/vendor/github.com/bufbuild/protocompile/LICENSE index 553cbbf1..04cf1e31 100644 --- a/vendor/github.com/bufbuild/protocompile/LICENSE +++ b/vendor/github.com/bufbuild/protocompile/LICENSE @@ -186,7 +186,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright 2020-2024 Buf Technologies, Inc. + Copyright 2020-2022 Buf Technologies, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/bufbuild/protocompile/Makefile b/vendor/github.com/bufbuild/protocompile/Makefile index 6ec2e78e..afca92cc 100644 --- a/vendor/github.com/bufbuild/protocompile/Makefile +++ b/vendor/github.com/bufbuild/protocompile/Makefile @@ -6,37 +6,32 @@ SHELL := bash MAKEFLAGS += --warn-undefined-variables MAKEFLAGS += --no-builtin-rules MAKEFLAGS += --no-print-directory -BIN ?= $(abspath .tmp/bin) -CACHE := $(abspath .tmp/cache) -COPYRIGHT_YEARS := 2020-2024 +BIN := $(abspath .tmp/bin) +COPYRIGHT_YEARS := 2020-2022 LICENSE_IGNORE := -e /testdata/ # Set to use a different compiler. For example, `GO=go1.18rc1 make test`. GO ?= go TOOLS_MOD_DIR := ./internal/tools UNAME_OS := $(shell uname -s) UNAME_ARCH := $(shell uname -m) -PATH_SEP ?= ":" -PROTOC_VERSION := $(shell cat ./.protoc_version) -# For release candidates, the download artifact has a dash between "rc" and the number even -# though the version tag does not :( -PROTOC_ARTIFACT_VERSION := $(shell echo $(PROTOC_VERSION) | sed -E 's/-rc([0-9]+)$$/-rc-\1/g') -PROTOC_DIR := $(abspath $(CACHE)/protoc/$(PROTOC_VERSION)) +# NB: this must be kept in sync with constant in internal/benchmarks. +PROTOC_VERSION ?= 22.0 +PROTOC_DIR := $(abspath ./internal/testdata/protoc/$(PROTOC_VERSION)) PROTOC := $(PROTOC_DIR)/bin/protoc -LOWER_UNAME_OS := $(shell echo $(UNAME_OS) | tr A-Z a-z) -ifeq ($(LOWER_UNAME_OS),darwin) - PROTOC_OS := osx - ifeq ($(UNAME_ARCH),arm64) - PROTOC_ARCH := aarch_64 - else - PROTOC_ARCH := x86_64 - endif +ifeq ($(UNAME_OS),Darwin) +PROTOC_OS := osx +ifeq ($(UNAME_ARCH),arm64) +PROTOC_ARCH := aarch_64 else - PROTOC_OS := $(LOWER_UNAME_OS) - PROTOC_ARCH := $(UNAME_ARCH) +PROTOC_ARCH := x86_64 +endif +endif +ifeq ($(UNAME_OS),Linux) +PROTOC_OS := linux +PROTOC_ARCH := $(UNAME_ARCH) endif -PROTOC_ARTIFACT_SUFFIX ?= $(PROTOC_OS)-$(PROTOC_ARCH) .PHONY: help help: ## Describe useful make targets @@ -54,10 +49,7 @@ clean: ## Delete intermediate build artifacts .PHONY: test test: build ## Run unit tests - $(GO) test -race -cover ./... - $(GO) test -tags protolegacy ./... - $(GO) test -tags purego ./... - cd internal/benchmarks && SKIP_DOWNLOAD_GOOGLEAPIS=true $(GO) test -race -cover ./... + $(GO) test -vet=off -race -cover ./... 
.PHONY: benchmarks benchmarks: build ## Run benchmarks @@ -83,8 +75,8 @@ lintfix: $(BIN)/golangci-lint ## Automatically fix some lint errors cd internal/benchmarks && $(BIN)/golangci-lint run --fix .PHONY: generate -generate: $(BIN)/license-header $(BIN)/goyacc test-descriptors ext-features-descriptors ## Regenerate code and licenses - PATH="$(BIN)$(PATH_SEP)$(PATH)" $(GO) generate ./... +generate: $(BIN)/license-header $(BIN)/goyacc test-descriptors ## Regenerate code and licenses + PATH="$(BIN):$(PATH)" $(GO) generate ./... @# We want to operate on a list of modified and new files, excluding @# deleted and ignored files. git-ls-files can't do this alone. comm -23 takes @# two files and prints the union, dropping lines common to both (-3) and @@ -111,37 +103,27 @@ checkgenerate: $(BIN)/license-header: internal/tools/go.mod internal/tools/go.sum @mkdir -p $(@D) cd $(TOOLS_MOD_DIR) && \ - GOWORK=off $(GO) build -o $@ github.com/bufbuild/buf/private/pkg/licenseheader/cmd/license-header + $(GO) build -o $@ github.com/bufbuild/buf/private/pkg/licenseheader/cmd/license-header $(BIN)/golangci-lint: internal/tools/go.mod internal/tools/go.sum @mkdir -p $(@D) cd $(TOOLS_MOD_DIR) && \ - GOWORK=off $(GO) build -o $@ github.com/golangci/golangci-lint/cmd/golangci-lint + $(GO) build -o $@ github.com/golangci/golangci-lint/cmd/golangci-lint $(BIN)/goyacc: internal/tools/go.mod internal/tools/go.sum @mkdir -p $(@D) cd $(TOOLS_MOD_DIR) && \ - GOWORK=off $(GO) build -o $@ golang.org/x/tools/cmd/goyacc + $(GO) build -o $@ golang.org/x/tools/cmd/goyacc -$(CACHE)/protoc-$(PROTOC_VERSION).zip: +internal/testdata/protoc/cache/protoc-$(PROTOC_VERSION).zip: @mkdir -p $(@D) - curl -o $@ -fsSL https://github.com/protocolbuffers/protobuf/releases/download/v$(PROTOC_VERSION)/protoc-$(PROTOC_ARTIFACT_VERSION)-$(PROTOC_ARTIFACT_SUFFIX).zip + curl -o $@ -fsSL https://github.com/protocolbuffers/protobuf/releases/download/v$(PROTOC_VERSION)/protoc-$(PROTOC_VERSION)-$(PROTOC_OS)-$(PROTOC_ARCH).zip -.PHONY: protoc -protoc: $(PROTOC) - -$(PROTOC): $(CACHE)/protoc-$(PROTOC_VERSION).zip +$(PROTOC): internal/testdata/protoc/cache/protoc-$(PROTOC_VERSION).zip @mkdir -p $(@D) unzip -o -q $< -d $(PROTOC_DIR) && \ touch $@ -.PHONY: wellknownimports -wellknownimports: $(PROTOC) $(sort $(wildcard $(PROTOC_DIR)/include/google/protobuf/*.proto)) $(sort $(wildcard $(PROTOC_DIR)/include/google/protobuf/*/*.proto)) - @rm -rf wellknownimports/google 2>/dev/null && true - @mkdir -p wellknownimports/google/protobuf/compiler - cp -R $(PROTOC_DIR)/include/google/protobuf/*.proto wellknownimports/google/protobuf - cp -R $(PROTOC_DIR)/include/google/protobuf/compiler/*.proto wellknownimports/google/protobuf/compiler - internal/testdata/all.protoset: $(PROTOC) $(sort $(wildcard internal/testdata/*.proto)) cd $(@D) && $(PROTOC) --descriptor_set_out=$(@F) --include_imports -I. $(filter-out protoc,$(^F)) @@ -154,48 +136,24 @@ internal/testdata/desc_test_defaults.protoset: $(PROTOC) internal/testdata/desc_ internal/testdata/desc_test_proto3_optional.protoset: $(PROTOC) internal/testdata/desc_test_proto3_optional.proto cd $(@D) && $(PROTOC) --descriptor_set_out=$(@F) --include_imports -I. 
$(filter-out protoc,$(^F)) -internal/testdata/descriptor_impl_tests.protoset: $(PROTOC) internal/testdata/desc_test2.proto internal/testdata/desc_test_complex.proto internal/testdata/desc_test_defaults.proto internal/testdata/desc_test_proto3.proto internal/testdata/desc_test_proto3_optional.proto - cd $(@D) && $(PROTOC) --descriptor_set_out=$(@F) --include_imports -I. $(filter-out protoc,$(^F)) - -internal/testdata/descriptor_editions_impl_tests.protoset: $(PROTOC) internal/testdata/editions/all_default_features.proto internal/testdata/editions/features_with_overrides.proto internal/testdata/editions/file_default_delimited.proto - cd $(@D)/editions && $(PROTOC) --descriptor_set_out=../$(@F) --include_imports -I. $(filter-out protoc,$(^F)) - -internal/testdata/editions/all.protoset: $(PROTOC) $(sort $(wildcard internal/testdata/editions/*.proto)) +internal/testdata/descriptor_impl_tests.protoset: $(PROTOC) internal/testdata/desc_test2.proto internal/testdata/desc_test_defaults.proto internal/testdata/desc_test_proto3.proto internal/testdata/desc_test_proto3_optional.proto cd $(@D) && $(PROTOC) --descriptor_set_out=$(@F) --include_imports -I. $(filter-out protoc,$(^F)) internal/testdata/source_info.protoset: $(PROTOC) internal/testdata/desc_test_options.proto internal/testdata/desc_test_comments.proto internal/testdata/desc_test_complex.proto cd $(@D) && $(PROTOC) --descriptor_set_out=$(@F) --include_source_info -I. $(filter-out protoc,$(^F)) -internal/testdata/options/options.protoset: $(PROTOC) internal/testdata/options/options.proto - cd $(@D) && $(PROTOC) --descriptor_set_out=$(@F) -I. $(filter-out protoc,$(^F)) - internal/testdata/options/test.protoset: $(PROTOC) internal/testdata/options/test.proto cd $(@D) && $(PROTOC) --descriptor_set_out=$(@F) -I. $(filter-out protoc,$(^F)) internal/testdata/options/test_proto3.protoset: $(PROTOC) internal/testdata/options/test_proto3.proto cd $(@D) && $(PROTOC) --descriptor_set_out=$(@F) -I. $(filter-out protoc,$(^F)) -internal/testdata/options/test_editions.protoset: $(PROTOC) internal/testdata/options/test_editions.proto - cd $(@D) && $(PROTOC) --descriptor_set_out=$(@F) -I. 
$(filter-out protoc,$(^F)) - .PHONY: test-descriptors test-descriptors: internal/testdata/all.protoset test-descriptors: internal/testdata/desc_test_complex.protoset test-descriptors: internal/testdata/desc_test_defaults.protoset test-descriptors: internal/testdata/desc_test_proto3_optional.protoset test-descriptors: internal/testdata/descriptor_impl_tests.protoset -test-descriptors: internal/testdata/descriptor_editions_impl_tests.protoset -test-descriptors: internal/testdata/editions/all.protoset test-descriptors: internal/testdata/source_info.protoset -test-descriptors: internal/testdata/options/options.protoset test-descriptors: internal/testdata/options/test.protoset test-descriptors: internal/testdata/options/test_proto3.protoset -test-descriptors: internal/testdata/options/test_editions.protoset - -internal/featuresext/cpp_features.protoset: $(PROTOC) - cd $(@D) && $(PROTOC) --descriptor_set_out=$(@F) google/protobuf/cpp_features.proto -internal/featuresext/java_features.protoset: $(PROTOC) - cd $(@D) && $(PROTOC) --descriptor_set_out=$(@F) google/protobuf/java_features.proto - -.PHONY: ext-features-descriptors -ext-features-descriptors: internal/featuresext/cpp_features.protoset internal/featuresext/java_features.protoset diff --git a/vendor/github.com/bufbuild/protocompile/ast/doc.go b/vendor/github.com/bufbuild/protocompile/ast/doc.go index cda4068c..fac65c42 100644 --- a/vendor/github.com/bufbuild/protocompile/ast/doc.go +++ b/vendor/github.com/bufbuild/protocompile/ast/doc.go @@ -1,4 +1,4 @@ -// Copyright 2020-2024 Buf Technologies, Inc. +// Copyright 2020-2022 Buf Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/bufbuild/protocompile/ast/enum.go b/vendor/github.com/bufbuild/protocompile/ast/enum.go index 55a62292..93edbcbd 100644 --- a/vendor/github.com/bufbuild/protocompile/ast/enum.go +++ b/vendor/github.com/bufbuild/protocompile/ast/enum.go @@ -1,4 +1,4 @@ -// Copyright 2020-2024 Buf Technologies, Inc. +// Copyright 2020-2022 Buf Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -76,16 +76,6 @@ func NewEnumNode(keyword *KeywordNode, name *IdentNode, openBrace *RuneNode, dec } } -func (n *EnumNode) RangeOptions(fn func(*OptionNode) bool) { - for _, decl := range n.Decls { - if opt, ok := decl.(*OptionNode); ok { - if !fn(opt) { - return - } - } - } -} - // EnumElement is an interface implemented by all AST nodes that can // appear in the body of an enum declaration. type EnumElement interface { @@ -102,15 +92,15 @@ var _ EnumElement = (*EmptyDeclNode)(nil) // enum values. This allows NoSourceNode to be used in place of *EnumValueNode // for some usages. type EnumValueDeclNode interface { - NodeWithOptions + Node GetName() Node GetNumber() Node } var _ EnumValueDeclNode = (*EnumValueNode)(nil) -var _ EnumValueDeclNode = (*NoSourceNode)(nil) +var _ EnumValueDeclNode = NoSourceNode{} -// EnumValueNode represents an enum declaration. Example: +// EnumNode represents an enum declaration. 
Example: // // UNSET = 0 [deprecated = true]; type EnumValueNode struct { @@ -141,10 +131,10 @@ func NewEnumValueNode(name *IdentNode, equals *RuneNode, number IntValueNode, op if number == nil { panic("number is nil") } - numChildren := 3 - if semicolon != nil { - numChildren++ + if semicolon == nil { + panic("semicolon is nil") } + numChildren := 4 if opts != nil { numChildren++ } @@ -153,9 +143,7 @@ func NewEnumValueNode(name *IdentNode, equals *RuneNode, number IntValueNode, op if opts != nil { children = append(children, opts) } - if semicolon != nil { - children = append(children, semicolon) - } + children = append(children, semicolon) return &EnumValueNode{ compositeNode: compositeNode{ children: children, @@ -175,11 +163,3 @@ func (e *EnumValueNode) GetName() Node { func (e *EnumValueNode) GetNumber() Node { return e.Number } - -func (e *EnumValueNode) RangeOptions(fn func(*OptionNode) bool) { - for _, opt := range e.Options.Options { - if !fn(opt) { - return - } - } -} diff --git a/vendor/github.com/bufbuild/protocompile/ast/field.go b/vendor/github.com/bufbuild/protocompile/ast/field.go index 63d65b3a..d4d3e392 100644 --- a/vendor/github.com/bufbuild/protocompile/ast/field.go +++ b/vendor/github.com/bufbuild/protocompile/ast/field.go @@ -1,4 +1,4 @@ -// Copyright 2020-2024 Buf Technologies, Inc. +// Copyright 2020-2022 Buf Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -27,7 +27,7 @@ import "fmt" // This also allows NoSourceNode and SyntheticMapField to be used in place of // one of the above for some usages. type FieldDeclNode interface { - NodeWithOptions + Node FieldLabel() Node FieldName() Node FieldType() Node @@ -41,7 +41,7 @@ var _ FieldDeclNode = (*FieldNode)(nil) var _ FieldDeclNode = (*GroupNode)(nil) var _ FieldDeclNode = (*MapFieldNode)(nil) var _ FieldDeclNode = (*SyntheticMapField)(nil) -var _ FieldDeclNode = (*NoSourceNode)(nil) +var _ FieldDeclNode = NoSourceNode{} // FieldNode represents a normal field declaration (not groups or maps). It // can represent extension fields as well as non-extension fields (both inside @@ -64,7 +64,7 @@ type FieldNode struct { } func (*FieldNode) msgElement() {} -func (*FieldNode) oneofElement() {} +func (*FieldNode) oneOfElement() {} func (*FieldNode) extendElement() {} // NewFieldNode creates a new *FieldNode. 
The label and options arguments may be @@ -84,16 +84,16 @@ func NewFieldNode(label *KeywordNode, fieldType IdentValueNode, name *IdentNode, if name == nil { panic("name is nil") } - numChildren := 2 - if equals != nil { - numChildren++ + if equals == nil { + panic("equals is nil") } - if tag != nil { - numChildren++ + if tag == nil { + panic("tag is nil") } - if semicolon != nil { - numChildren++ + if semicolon == nil { + panic("semicolon is nil") } + numChildren := 5 if label != nil { numChildren++ } @@ -104,19 +104,11 @@ func NewFieldNode(label *KeywordNode, fieldType IdentValueNode, name *IdentNode, if label != nil { children = append(children, label) } - children = append(children, fieldType, name) - if equals != nil { - children = append(children, equals) - } - if tag != nil { - children = append(children, tag) - } + children = append(children, fieldType, name, equals, tag) if opts != nil { children = append(children, opts) } - if semicolon != nil { - children = append(children, semicolon) - } + children = append(children, semicolon) return &FieldNode{ compositeNode: compositeNode{ @@ -151,9 +143,6 @@ func (n *FieldNode) FieldType() Node { } func (n *FieldNode) FieldTag() Node { - if n.Tag == nil { - return n - } return n.Tag } @@ -172,14 +161,6 @@ func (n *FieldNode) GetOptions() *CompactOptionsNode { return n.Options } -func (n *FieldNode) RangeOptions(fn func(*OptionNode) bool) { - for _, opt := range n.Options.Options { - if !fn(opt) { - return - } - } -} - // FieldLabel represents the label of a field, which indicates its cardinality // (i.e. whether it is optional, required, or repeated). type FieldLabel struct { @@ -232,7 +213,7 @@ type GroupNode struct { } func (*GroupNode) msgElement() {} -func (*GroupNode) oneofElement() {} +func (*GroupNode) oneOfElement() {} func (*GroupNode) extendElement() {} // NewGroupNode creates a new *GroupNode. The label and options arguments may be @@ -254,22 +235,22 @@ func NewGroupNode(label *KeywordNode, keyword *KeywordNode, name *IdentNode, equ if name == nil { panic("name is nil") } + if equals == nil { + panic("equals is nil") + } + if tag == nil { + panic("tag is nil") + } if openBrace == nil { panic("openBrace is nil") } if closeBrace == nil { panic("closeBrace is nil") } - numChildren := 4 + len(decls) + numChildren := 6 + len(decls) if label != nil { numChildren++ } - if equals != nil { - numChildren++ - } - if tag != nil { - numChildren++ - } if opts != nil { numChildren++ } @@ -277,13 +258,7 @@ func NewGroupNode(label *KeywordNode, keyword *KeywordNode, name *IdentNode, equ if label != nil { children = append(children, label) } - children = append(children, keyword, name) - if equals != nil { - children = append(children, equals) - } - if tag != nil { - children = append(children, tag) - } + children = append(children, keyword, name, equals, tag) if opts != nil { children = append(children, opts) } @@ -325,9 +300,6 @@ func (n *GroupNode) FieldType() Node { } func (n *GroupNode) FieldTag() Node { - if n.Tag == nil { - return n - } return n.Tag } @@ -346,57 +318,23 @@ func (n *GroupNode) GetOptions() *CompactOptionsNode { return n.Options } -func (n *GroupNode) RangeOptions(fn func(*OptionNode) bool) { - for _, opt := range n.Options.Options { - if !fn(opt) { - return - } - } -} - -func (n *GroupNode) AsMessage() *SyntheticGroupMessageNode { - return (*SyntheticGroupMessageNode)(n) -} - -// SyntheticGroupMessageNode is a view of a GroupNode that implements MessageDeclNode. 
-// Since a group field implicitly defines a message type, this node represents -// that message type while the corresponding GroupNode represents the field. -// -// This type is considered synthetic since it never appears in a file's AST, but -// is only returned from other accessors (e.g. GroupNode.AsMessage). -type SyntheticGroupMessageNode GroupNode - -func (n *SyntheticGroupMessageNode) MessageName() Node { +func (n *GroupNode) MessageName() Node { return n.Name } -func (n *SyntheticGroupMessageNode) RangeOptions(fn func(*OptionNode) bool) { - for _, decl := range n.Decls { - if opt, ok := decl.(*OptionNode); ok { - if !fn(opt) { - return - } - } - } -} - -// OneofDeclNode is a node in the AST that defines a oneof. There are +// OneOfDeclNode is a node in the AST that defines a oneof. There are // multiple types of AST nodes that declare oneofs: -// - *OneofNode -// - *SyntheticOneof +// - *OneOfNode +// - *SyntheticOneOf // // This also allows NoSourceNode to be used in place of one of the above // for some usages. -type OneofDeclNode interface { - NodeWithOptions - OneofName() Node +type OneOfDeclNode interface { + Node + OneOfName() Node } -var _ OneofDeclNode = (*OneofNode)(nil) -var _ OneofDeclNode = (*SyntheticOneof)(nil) -var _ OneofDeclNode = (*NoSourceNode)(nil) - -// OneofNode represents a one-of declaration. Example: +// OneOfNode represents a one-of declaration. Example: // // oneof query { // string by_name = 2; @@ -404,18 +342,18 @@ var _ OneofDeclNode = (*NoSourceNode)(nil) // Address by_address = 4; // Labels by_label = 5; // } -type OneofNode struct { +type OneOfNode struct { compositeNode Keyword *KeywordNode Name *IdentNode OpenBrace *RuneNode - Decls []OneofElement + Decls []OneOfElement CloseBrace *RuneNode } -func (*OneofNode) msgElement() {} +func (*OneOfNode) msgElement() {} -// NewOneofNode creates a new *OneofNode. All arguments must be non-nil. While +// NewOneOfNode creates a new *OneOfNode. All arguments must be non-nil. While // it is technically allowed for decls to be nil or empty, the resulting node // will not be a valid oneof, which must have at least one field. // - keyword: The token corresponding to the "oneof" keyword. @@ -423,7 +361,7 @@ func (*OneofNode) msgElement() {} // - openBrace: The token corresponding to the "{" rune that starts the body. // - decls: All declarations inside the oneof body. // - closeBrace: The token corresponding to the "}" rune that ends the body. 
-func NewOneofNode(keyword *KeywordNode, name *IdentNode, openBrace *RuneNode, decls []OneofElement, closeBrace *RuneNode) *OneofNode { +func NewOneOfNode(keyword *KeywordNode, name *IdentNode, openBrace *RuneNode, decls []OneOfElement, closeBrace *RuneNode) *OneOfNode { if keyword == nil { panic("keyword is nil") } @@ -447,11 +385,11 @@ func NewOneofNode(keyword *KeywordNode, name *IdentNode, openBrace *RuneNode, de switch decl := decl.(type) { case *OptionNode, *FieldNode, *GroupNode, *EmptyDeclNode: default: - panic(fmt.Sprintf("invalid OneofElement type: %T", decl)) + panic(fmt.Sprintf("invalid OneOfElement type: %T", decl)) } } - return &OneofNode{ + return &OneOfNode{ compositeNode: compositeNode{ children: children, }, @@ -463,73 +401,56 @@ func NewOneofNode(keyword *KeywordNode, name *IdentNode, openBrace *RuneNode, de } } -func (n *OneofNode) OneofName() Node { +func (n *OneOfNode) OneOfName() Node { return n.Name } -func (n *OneofNode) RangeOptions(fn func(*OptionNode) bool) { - for _, decl := range n.Decls { - if opt, ok := decl.(*OptionNode); ok { - if !fn(opt) { - return - } - } - } -} - -// OneofElement is an interface implemented by all AST nodes that can +// OneOfElement is an interface implemented by all AST nodes that can // appear in the body of a oneof declaration. -type OneofElement interface { +type OneOfElement interface { Node - oneofElement() + oneOfElement() } -var _ OneofElement = (*OptionNode)(nil) -var _ OneofElement = (*FieldNode)(nil) -var _ OneofElement = (*GroupNode)(nil) -var _ OneofElement = (*EmptyDeclNode)(nil) +var _ OneOfElement = (*OptionNode)(nil) +var _ OneOfElement = (*FieldNode)(nil) +var _ OneOfElement = (*GroupNode)(nil) +var _ OneOfElement = (*EmptyDeclNode)(nil) -// SyntheticOneof is not an actual node in the AST but a synthetic node +// SyntheticOneOf is not an actual node in the AST but a synthetic node // that represents the oneof implied by a proto3 optional field. -// -// This type is considered synthetic since it never appears in a file's AST, -// but is only returned from other functions (e.g. NewSyntheticOneof). -type SyntheticOneof struct { - // The proto3 optional field that implies the presence of this oneof. +type SyntheticOneOf struct { Field *FieldNode } -var _ Node = (*SyntheticOneof)(nil) +var _ Node = (*SyntheticOneOf)(nil) -// NewSyntheticOneof creates a new *SyntheticOneof that corresponds to the +// NewSyntheticOneOf creates a new *SyntheticOneOf that corresponds to the // given proto3 optional field. -func NewSyntheticOneof(field *FieldNode) *SyntheticOneof { - return &SyntheticOneof{Field: field} +func NewSyntheticOneOf(field *FieldNode) *SyntheticOneOf { + return &SyntheticOneOf{Field: field} } -func (n *SyntheticOneof) Start() Token { +func (n *SyntheticOneOf) Start() Token { return n.Field.Start() } -func (n *SyntheticOneof) End() Token { +func (n *SyntheticOneOf) End() Token { return n.Field.End() } -func (n *SyntheticOneof) LeadingComments() []Comment { +func (n *SyntheticOneOf) LeadingComments() []Comment { return nil } -func (n *SyntheticOneof) TrailingComments() []Comment { +func (n *SyntheticOneOf) TrailingComments() []Comment { return nil } -func (n *SyntheticOneof) OneofName() Node { +func (n *SyntheticOneOf) OneOfName() Node { return n.Field.FieldName() } -func (n *SyntheticOneof) RangeOptions(_ func(*OptionNode) bool) { -} - // MapTypeNode represents the type declaration for a map field. It defines // both the key and value types for the map. 
Example: // @@ -614,33 +535,25 @@ func NewMapFieldNode(mapType *MapTypeNode, name *IdentNode, equals *RuneNode, ta if name == nil { panic("name is nil") } - numChildren := 2 - if equals != nil { - numChildren++ + if equals == nil { + panic("equals is nil") } - if tag != nil { - numChildren++ + if tag == nil { + panic("tag is nil") } - if opts != nil { - numChildren++ + if semicolon == nil { + panic("semicolon is nil") } - if semicolon != nil { + numChildren := 5 + if opts != nil { numChildren++ } children := make([]Node, 0, numChildren) - children = append(children, mapType, name) - if equals != nil { - children = append(children, equals) - } - if tag != nil { - children = append(children, tag) - } + children = append(children, mapType, name, equals, tag) if opts != nil { children = append(children, opts) } - if semicolon != nil { - children = append(children, semicolon) - } + children = append(children, semicolon) return &MapFieldNode{ compositeNode: compositeNode{ @@ -668,9 +581,6 @@ func (n *MapFieldNode) FieldType() Node { } func (n *MapFieldNode) FieldTag() Node { - if n.Tag == nil { - return n - } return n.Tag } @@ -686,16 +596,8 @@ func (n *MapFieldNode) GetOptions() *CompactOptionsNode { return n.Options } -func (n *MapFieldNode) RangeOptions(fn func(*OptionNode) bool) { - for _, opt := range n.Options.Options { - if !fn(opt) { - return - } - } -} - -func (n *MapFieldNode) AsMessage() *SyntheticMapEntryNode { - return (*SyntheticMapEntryNode)(n) +func (n *MapFieldNode) MessageName() Node { + return n.Name } func (n *MapFieldNode) KeyField() *SyntheticMapField { @@ -706,28 +608,9 @@ func (n *MapFieldNode) ValueField() *SyntheticMapField { return NewSyntheticMapField(n.MapType.ValueType, 2) } -// SyntheticMapEntryNode is a view of a MapFieldNode that implements MessageDeclNode. -// Since a map field implicitly defines a message type for the map entry, -// this node represents that message type. -// -// This type is considered synthetic since it never appears in a file's AST, but -// is only returned from other accessors (e.g. MapFieldNode.AsMessage). -type SyntheticMapEntryNode MapFieldNode - -func (n *SyntheticMapEntryNode) MessageName() Node { - return n.Name -} - -func (n *SyntheticMapEntryNode) RangeOptions(_ func(*OptionNode) bool) { -} - // SyntheticMapField is not an actual node in the AST but a synthetic node // that implements FieldDeclNode. These are used to represent the implicit // field declarations of the "key" and "value" fields in a map entry. -// -// This type is considered synthetic since it never appears in a file's AST, -// but is only returned from other accessors and functions (e.g. -// MapFieldNode.KeyField, MapFieldNode.ValueField, and NewSyntheticMapField). type SyntheticMapField struct { Ident IdentValueNode Tag *UintLiteralNode @@ -773,9 +656,6 @@ func (n *SyntheticMapField) FieldType() Node { } func (n *SyntheticMapField) FieldTag() Node { - if n.Tag == nil { - return n - } return n.Tag } @@ -790,6 +670,3 @@ func (n *SyntheticMapField) GetGroupKeyword() Node { func (n *SyntheticMapField) GetOptions() *CompactOptionsNode { return nil } - -func (n *SyntheticMapField) RangeOptions(_ func(*OptionNode) bool) { -} diff --git a/vendor/github.com/bufbuild/protocompile/ast/file.go b/vendor/github.com/bufbuild/protocompile/ast/file.go index 50d4ca92..f12cca09 100644 --- a/vendor/github.com/bufbuild/protocompile/ast/file.go +++ b/vendor/github.com/bufbuild/protocompile/ast/file.go @@ -1,4 +1,4 @@ -// Copyright 2020-2024 Buf Technologies, Inc. 
+// Copyright 2020-2022 Buf Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -19,13 +19,14 @@ import "fmt" // FileDeclNode is a placeholder interface for AST nodes that represent files. // This allows NoSourceNode to be used in place of *FileNode for some usages. type FileDeclNode interface { - NodeWithOptions + Node Name() string + GetSyntax() Node NodeInfo(n Node) NodeInfo } var _ FileDeclNode = (*FileNode)(nil) -var _ FileDeclNode = (*NoSourceNode)(nil) +var _ FileDeclNode = NoSourceNode{} // FileNode is the root of the AST hierarchy. It represents an entire // protobuf source file. @@ -33,13 +34,8 @@ type FileNode struct { compositeNode fileInfo *FileInfo - // A file has either a Syntax or Edition node, never both. - // If both are nil, neither declaration is present and the - // file is assumed to use "proto2" syntax. - Syntax *SyntaxNode - Edition *EditionNode - - Decls []FileElement + Syntax *SyntaxNode // nil if file has no syntax declaration + Decls []FileElement // This synthetic node allows access to final comments and whitespace EOF *RuneNode @@ -51,31 +47,13 @@ type FileNode struct { // This function panics if the concrete type of any element of decls is not // from this package. func NewFileNode(info *FileInfo, syntax *SyntaxNode, decls []FileElement, eof Token) *FileNode { - return newFileNode(info, syntax, nil, decls, eof) -} - -// NewFileNodeWithEdition creates a new *FileNode. The edition parameter is required. If a file -// has no edition declaration, use NewFileNode instead. -// -// This function panics if the concrete type of any element of decls is not -// from this package. -func NewFileNodeWithEdition(info *FileInfo, edition *EditionNode, decls []FileElement, eof Token) *FileNode { - if edition == nil { - panic("edition is nil") - } - return newFileNode(info, nil, edition, decls, eof) -} - -func newFileNode(info *FileInfo, syntax *SyntaxNode, edition *EditionNode, decls []FileElement, eof Token) *FileNode { numChildren := len(decls) + 1 - if syntax != nil || edition != nil { + if syntax != nil { numChildren++ } children := make([]Node, 0, numChildren) if syntax != nil { children = append(children, syntax) - } else if edition != nil { - children = append(children, edition) } for _, decl := range decls { switch decl := decl.(type) { @@ -96,7 +74,6 @@ func newFileNode(info *FileInfo, syntax *SyntaxNode, edition *EditionNode, decls }, fileInfo: info, Syntax: syntax, - Edition: edition, Decls: decls, EOF: eofNode, } @@ -108,6 +85,10 @@ func NewEmptyFileNode(filename string) *FileNode { return NewFileNode(fileInfo, nil, nil, fileInfo.AddToken(0, 0)) } +func (f *FileNode) GetSyntax() Node { + return f.Syntax +} + func (f *FileNode) Name() string { return f.fileInfo.Name() } @@ -136,16 +117,6 @@ func (f *FileNode) Tokens() Sequence[Token] { return f.fileInfo.Tokens() } -func (f *FileNode) RangeOptions(fn func(*OptionNode) bool) { - for _, decl := range f.Decls { - if opt, ok := decl.(*OptionNode); ok { - if !fn(opt) { - return - } - } - } -} - // FileElement is an interface implemented by all AST nodes that are // allowed as top-level declarations in the file. 
type FileElement interface { @@ -191,12 +162,10 @@ func NewSyntaxNode(keyword *KeywordNode, equals *RuneNode, syntax StringValueNod if syntax == nil { panic("syntax is nil") } - var children []Node if semicolon == nil { - children = []Node{keyword, equals, syntax} - } else { - children = []Node{keyword, equals, syntax, semicolon} + panic("semicolon is nil") } + children := []Node{keyword, equals, syntax, semicolon} return &SyntaxNode{ compositeNode: compositeNode{ children: children, @@ -208,51 +177,6 @@ func NewSyntaxNode(keyword *KeywordNode, equals *RuneNode, syntax StringValueNod } } -// EditionNode represents an edition declaration, which if present must be -// the first non-comment content. Example: -// -// edition = "2023"; -// -// Files may include either an edition node or a syntax node, but not both. -// If neither are present, the file is assumed to use proto2 syntax. -type EditionNode struct { - compositeNode - Keyword *KeywordNode - Equals *RuneNode - Edition StringValueNode - Semicolon *RuneNode -} - -// NewEditionNode creates a new *EditionNode. All four arguments must be non-nil: -// - keyword: The token corresponding to the "edition" keyword. -// - equals: The token corresponding to the "=" rune. -// - edition: The actual edition value, e.g. "2023". -// - semicolon: The token corresponding to the ";" rune that ends the declaration. -func NewEditionNode(keyword *KeywordNode, equals *RuneNode, edition StringValueNode, semicolon *RuneNode) *EditionNode { - if keyword == nil { - panic("keyword is nil") - } - if equals == nil { - panic("equals is nil") - } - if edition == nil { - panic("edition is nil") - } - if semicolon == nil { - panic("semicolon is nil") - } - children := []Node{keyword, equals, edition, semicolon} - return &EditionNode{ - compositeNode: compositeNode{ - children: children, - }, - Keyword: keyword, - Equals: equals, - Edition: edition, - Semicolon: semicolon, - } -} - // ImportNode represents an import statement. Example: // // import "google/protobuf/empty.proto"; @@ -285,10 +209,10 @@ func NewImportNode(keyword *KeywordNode, public *KeywordNode, weak *KeywordNode, if name == nil { panic("name is nil") } - numChildren := 2 if semicolon == nil { - numChildren++ + panic("semicolon is nil") } + numChildren := 3 if public != nil || weak != nil { numChildren++ } @@ -299,10 +223,7 @@ func NewImportNode(keyword *KeywordNode, public *KeywordNode, weak *KeywordNode, } else if weak != nil { children = append(children, weak) } - children = append(children, name) - if semicolon != nil { - children = append(children, semicolon) - } + children = append(children, name, semicolon) return &ImportNode{ compositeNode: compositeNode{ @@ -341,12 +262,10 @@ func NewPackageNode(keyword *KeywordNode, name IdentValueNode, semicolon *RuneNo if name == nil { panic("name is nil") } - var children []Node if semicolon == nil { - children = []Node{keyword, name} - } else { - children = []Node{keyword, name, semicolon} + panic("semicolon is nil") } + children := []Node{keyword, name, semicolon} return &PackageNode{ compositeNode: compositeNode{ children: children, diff --git a/vendor/github.com/bufbuild/protocompile/ast/file_info.go b/vendor/github.com/bufbuild/protocompile/ast/file_info.go index 7c2d9049..1de93457 100644 --- a/vendor/github.com/bufbuild/protocompile/ast/file_info.go +++ b/vendor/github.com/bufbuild/protocompile/ast/file_info.go @@ -1,4 +1,4 @@ -// Copyright 2020-2024 Buf Technologies, Inc. +// Copyright 2020-2022 Buf Technologies, Inc. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -163,10 +163,10 @@ func (f *FileInfo) TokenInfo(t Token) NodeInfo { func (f *FileInfo) nodeInfo(start, end int) NodeInfo { if start < 0 || start >= len(f.items) { - return NodeInfo{fileInfo: f} + return NodeInfo{} } if end < 0 || end >= len(f.items) { - return NodeInfo{fileInfo: f} + return NodeInfo{} } return NodeInfo{fileInfo: f, startIndex: start, endIndex: end} } @@ -351,7 +351,7 @@ func (f *FileInfo) SourcePos(offset int) SourcePos { return f.lines[n] > offset }) - // If it weren't for tabs and multibyte unicode characters, we + // If it weren't for tabs and multi-byte unicode characters, we // could trivially compute the column just based on offset and the // starting offset of lineNumber :( // Wish this were more efficient... that would require also storing @@ -399,7 +399,8 @@ type Item int // ItemInfo provides details about an item's location in the source file and // its contents. type ItemInfo interface { - SourceSpan + Start() SourcePos + End() SourcePos LeadingWhitespace() string RawText() string } @@ -597,32 +598,6 @@ func (pos SourcePos) String() string { return fmt.Sprintf("%s:%d:%d", pos.Filename, pos.Line, pos.Col) } -// SourceSpan represents a range of source positions. -type SourceSpan interface { - Start() SourcePos - End() SourcePos -} - -// NewSourceSpan creates a new span that covers the given range. -func NewSourceSpan(start SourcePos, end SourcePos) SourceSpan { - return sourceSpan{StartPos: start, EndPos: end} -} - -type sourceSpan struct { - StartPos SourcePos - EndPos SourcePos -} - -func (p sourceSpan) Start() SourcePos { - return p.StartPos -} - -func (p sourceSpan) End() SourcePos { - return p.EndPos -} - -var _ SourceSpan = sourceSpan{} - // Comments represents a range of sequential comments in a source file // (e.g. no interleaving items or AST nodes). type Comments struct { diff --git a/vendor/github.com/bufbuild/protocompile/ast/identifiers.go b/vendor/github.com/bufbuild/protocompile/ast/identifiers.go index 511389d7..27599929 100644 --- a/vendor/github.com/bufbuild/protocompile/ast/identifiers.go +++ b/vendor/github.com/bufbuild/protocompile/ast/identifiers.go @@ -1,4 +1,4 @@ -// Copyright 2020-2024 Buf Technologies, Inc. +// Copyright 2020-2022 Buf Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -91,10 +91,10 @@ func NewCompoundIdentNode(leadingDot *RuneNode, components []*IdentNode, dots [] if len(components) == 0 { panic("must have at least one component") } - if len(dots) != len(components)-1 && len(dots) != len(components) { + if len(dots) != len(components)-1 { panic(fmt.Sprintf("%d components requires %d dots, not %d", len(components), len(components)-1, len(dots))) } - numChildren := len(components) + len(dots) + numChildren := len(components)*2 - 1 if leadingDot != nil { numChildren++ } @@ -113,11 +113,6 @@ func NewCompoundIdentNode(leadingDot *RuneNode, components []*IdentNode, dots [] children = append(children, comp) b.WriteString(comp.Val) } - if len(dots) == len(components) { - dot := dots[len(dots)-1] - children = append(children, dot) - b.WriteRune(dot.Rune) - } return &CompoundIdentNode{ compositeNode: compositeNode{ children: children, diff --git a/vendor/github.com/bufbuild/protocompile/ast/message.go b/vendor/github.com/bufbuild/protocompile/ast/message.go index eede28ed..00dfe45f 100644 --- a/vendor/github.com/bufbuild/protocompile/ast/message.go +++ b/vendor/github.com/bufbuild/protocompile/ast/message.go @@ -1,4 +1,4 @@ -// Copyright 2020-2024 Buf Technologies, Inc. +// Copyright 2020-2022 Buf Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -19,20 +19,20 @@ import "fmt" // MessageDeclNode is a node in the AST that defines a message type. This // includes normal message fields as well as implicit messages: // - *MessageNode -// - *SyntheticGroupMessageNode (the group is a field and inline message type) -// - *SyntheticMapEntryNode (map fields implicitly define a MapEntry message type) +// - *GroupNode (the group is a field and inline message type) +// - *MapFieldNode (map fields implicitly define a MapEntry message type) // // This also allows NoSourceNode to be used in place of one of the above // for some usages. type MessageDeclNode interface { - NodeWithOptions + Node MessageName() Node } var _ MessageDeclNode = (*MessageNode)(nil) -var _ MessageDeclNode = (*SyntheticGroupMessageNode)(nil) -var _ MessageDeclNode = (*SyntheticMapEntryNode)(nil) -var _ MessageDeclNode = (*NoSourceNode)(nil) +var _ MessageDeclNode = (*GroupNode)(nil) +var _ MessageDeclNode = (*MapFieldNode)(nil) +var _ MessageDeclNode = NoSourceNode{} // MessageNode represents a message declaration. Example: // @@ -92,16 +92,6 @@ func (n *MessageNode) MessageName() Node { return n.Name } -func (n *MessageNode) RangeOptions(fn func(*OptionNode) bool) { - for _, decl := range n.Decls { - if opt, ok := decl.(*OptionNode); ok { - if !fn(opt) { - return - } - } - } -} - // MessageBody represents the body of a message. It is used by both // MessageNodes and GroupNodes. 
type MessageBody struct { @@ -115,7 +105,7 @@ func populateMessageBody(m *MessageBody, openBrace *RuneNode, decls []MessageEle m.Decls = decls for _, decl := range decls { switch decl.(type) { - case *OptionNode, *FieldNode, *MapFieldNode, *GroupNode, *OneofNode, + case *OptionNode, *FieldNode, *MapFieldNode, *GroupNode, *OneOfNode, *MessageNode, *EnumNode, *ExtendNode, *ExtensionRangeNode, *ReservedNode, *EmptyDeclNode: default: @@ -135,7 +125,7 @@ type MessageElement interface { var _ MessageElement = (*OptionNode)(nil) var _ MessageElement = (*FieldNode)(nil) var _ MessageElement = (*MapFieldNode)(nil) -var _ MessageElement = (*OneofNode)(nil) +var _ MessageElement = (*OneOfNode)(nil) var _ MessageElement = (*GroupNode)(nil) var _ MessageElement = (*MessageNode)(nil) var _ MessageElement = (*EnumNode)(nil) diff --git a/vendor/github.com/bufbuild/protocompile/ast/no_source.go b/vendor/github.com/bufbuild/protocompile/ast/no_source.go index 44dbb714..d66d9395 100644 --- a/vendor/github.com/bufbuild/protocompile/ast/no_source.go +++ b/vendor/github.com/bufbuild/protocompile/ast/no_source.go @@ -1,4 +1,4 @@ -// Copyright 2020-2024 Buf Technologies, Inc. +// Copyright 2020-2022 Buf Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -20,123 +20,100 @@ func UnknownPos(filename string) SourcePos { return SourcePos{Filename: filename} } -// UnknownSpan is a placeholder span when only the source file -// name is known. -func UnknownSpan(filename string) SourceSpan { - return unknownSpan{filename: filename} -} - -type unknownSpan struct { - filename string -} - -func (s unknownSpan) Start() SourcePos { - return UnknownPos(s.filename) -} - -func (s unknownSpan) End() SourcePos { - return UnknownPos(s.filename) -} - // NoSourceNode is a placeholder AST node that implements numerous // interfaces in this package. It can be used to represent an AST // element for a file whose source is not available. -type NoSourceNode FileInfo +type NoSourceNode struct { + filename string +} // NewNoSourceNode creates a new NoSourceNode for the given filename. 
-func NewNoSourceNode(filename string) *NoSourceNode { - return &NoSourceNode{name: filename} +func NewNoSourceNode(filename string) NoSourceNode { + return NoSourceNode{filename: filename} } -func (n *NoSourceNode) Name() string { - return n.name +func (n NoSourceNode) Name() string { + return n.filename } -func (n *NoSourceNode) Start() Token { +func (n NoSourceNode) Start() Token { return 0 } -func (n *NoSourceNode) End() Token { +func (n NoSourceNode) End() Token { return 0 } -func (n *NoSourceNode) NodeInfo(Node) NodeInfo { +func (n NoSourceNode) NodeInfo(Node) NodeInfo { return NodeInfo{ - fileInfo: (*FileInfo)(n), + fileInfo: &FileInfo{name: n.filename}, } } -func (n *NoSourceNode) GetSyntax() Node { +func (n NoSourceNode) GetSyntax() Node { return n } -func (n *NoSourceNode) GetName() Node { +func (n NoSourceNode) GetName() Node { return n } -func (n *NoSourceNode) GetValue() ValueNode { +func (n NoSourceNode) GetValue() ValueNode { return n } -func (n *NoSourceNode) FieldLabel() Node { +func (n NoSourceNode) FieldLabel() Node { return n } -func (n *NoSourceNode) FieldName() Node { +func (n NoSourceNode) FieldName() Node { return n } -func (n *NoSourceNode) FieldType() Node { +func (n NoSourceNode) FieldType() Node { return n } -func (n *NoSourceNode) FieldTag() Node { +func (n NoSourceNode) FieldTag() Node { return n } -func (n *NoSourceNode) FieldExtendee() Node { +func (n NoSourceNode) FieldExtendee() Node { return n } -func (n *NoSourceNode) GetGroupKeyword() Node { +func (n NoSourceNode) GetGroupKeyword() Node { return n } -func (n *NoSourceNode) GetOptions() *CompactOptionsNode { +func (n NoSourceNode) GetOptions() *CompactOptionsNode { return nil } -func (n *NoSourceNode) RangeStart() Node { - return n -} - -func (n *NoSourceNode) RangeEnd() Node { +func (n NoSourceNode) RangeStart() Node { return n } -func (n *NoSourceNode) GetNumber() Node { +func (n NoSourceNode) RangeEnd() Node { return n } -func (n *NoSourceNode) MessageName() Node { +func (n NoSourceNode) GetNumber() Node { return n } -func (n *NoSourceNode) OneofName() Node { +func (n NoSourceNode) MessageName() Node { return n } -func (n *NoSourceNode) GetInputType() Node { +func (n NoSourceNode) GetInputType() Node { return n } -func (n *NoSourceNode) GetOutputType() Node { +func (n NoSourceNode) GetOutputType() Node { return n } -func (n *NoSourceNode) Value() interface{} { +func (n NoSourceNode) Value() interface{} { return nil } - -func (n *NoSourceNode) RangeOptions(func(*OptionNode) bool) { -} diff --git a/vendor/github.com/bufbuild/protocompile/ast/node.go b/vendor/github.com/bufbuild/protocompile/ast/node.go index abb76430..63a76c79 100644 --- a/vendor/github.com/bufbuild/protocompile/ast/node.go +++ b/vendor/github.com/bufbuild/protocompile/ast/node.go @@ -1,4 +1,4 @@ -// Copyright 2020-2024 Buf Technologies, Inc. +// Copyright 2020-2022 Buf Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -133,7 +133,7 @@ func NewEmptyDeclNode(semicolon *RuneNode) *EmptyDeclNode { func (e *EmptyDeclNode) fileElement() {} func (e *EmptyDeclNode) msgElement() {} func (e *EmptyDeclNode) extendElement() {} -func (e *EmptyDeclNode) oneofElement() {} +func (e *EmptyDeclNode) oneOfElement() {} func (e *EmptyDeclNode) enumElement() {} func (e *EmptyDeclNode) serviceElement() {} func (e *EmptyDeclNode) methodElement() {} diff --git a/vendor/github.com/bufbuild/protocompile/ast/options.go b/vendor/github.com/bufbuild/protocompile/ast/options.go index be31f0b4..497ad44b 100644 --- a/vendor/github.com/bufbuild/protocompile/ast/options.go +++ b/vendor/github.com/bufbuild/protocompile/ast/options.go @@ -1,4 +1,4 @@ -// Copyright 2020-2024 Buf Technologies, Inc. +// Copyright 2020-2022 Buf Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -26,7 +26,7 @@ type OptionDeclNode interface { } var _ OptionDeclNode = (*OptionNode)(nil) -var _ OptionDeclNode = (*NoSourceNode)(nil) +var _ OptionDeclNode = NoSourceNode{} // OptionNode represents the declaration of a single option for an element. // It is used both for normal option declarations (start with "option" keyword @@ -43,12 +43,12 @@ type OptionNode struct { Semicolon *RuneNode // absent for compact options } -func (*OptionNode) fileElement() {} -func (*OptionNode) msgElement() {} -func (*OptionNode) oneofElement() {} -func (*OptionNode) enumElement() {} -func (*OptionNode) serviceElement() {} -func (*OptionNode) methodElement() {} +func (n *OptionNode) fileElement() {} +func (n *OptionNode) msgElement() {} +func (n *OptionNode) oneOfElement() {} +func (n *OptionNode) enumElement() {} +func (n *OptionNode) serviceElement() {} +func (n *OptionNode) methodElement() {} // NewOptionNode creates a new *OptionNode for a full option declaration (as // used in files, messages, oneofs, enums, services, and methods). 
All arguments @@ -71,13 +71,10 @@ func NewOptionNode(keyword *KeywordNode, name *OptionNameNode, equals *RuneNode, if val == nil { panic("val is nil") } - var children []Node if semicolon == nil { - children = []Node{keyword, name, equals, val} - } else { - children = []Node{keyword, name, equals, val, semicolon} + panic("semicolon is nil") } - + children := []Node{keyword, name, equals, val, semicolon} return &OptionNode{ compositeNode: compositeNode{ children: children, @@ -100,18 +97,13 @@ func NewCompactOptionNode(name *OptionNameNode, equals *RuneNode, val ValueNode) if name == nil { panic("name is nil") } - if equals == nil && val != nil { - panic("equals is nil but val is not") - } - if val == nil && equals != nil { - panic("val is nil but equals is not") + if equals == nil { + panic("equals is nil") } - var children []Node - if equals == nil && val == nil { - children = []Node{name} - } else { - children = []Node{name, equals, val} + if val == nil { + panic("val is nil") } + children := []Node{name, equals, val} return &OptionNode{ compositeNode: compositeNode{ children: children, @@ -159,10 +151,10 @@ func NewOptionNameNode(parts []*FieldReferenceNode, dots []*RuneNode) *OptionNam if len(parts) == 0 { panic("must have at least one part") } - if len(dots) != len(parts)-1 && len(dots) != len(parts) { + if len(dots) != len(parts)-1 { panic(fmt.Sprintf("%d parts requires %d dots, not %d", len(parts), len(parts)-1, len(dots))) } - children := make([]Node, 0, len(parts)+len(dots)) + children := make([]Node, 0, len(parts)*2-1) for i, part := range parts { if part == nil { panic(fmt.Sprintf("parts[%d] is nil", i)) @@ -175,12 +167,6 @@ func NewOptionNameNode(parts []*FieldReferenceNode, dots []*RuneNode) *OptionNam } children = append(children, part) } - if len(dots) == len(parts) { // Add the erroneous, but tolerated trailing dot. - if dots[len(dots)-1] == nil { - panic(fmt.Sprintf("dots[%d] is nil", len(dots)-1)) - } - children = append(children, dots[len(dots)-1]) - } return &OptionNameNode{ compositeNode: compositeNode{ children: children, @@ -346,33 +332,25 @@ func NewCompactOptionsNode(openBracket *RuneNode, opts []*OptionNode, commas []* if closeBracket == nil { panic("closeBracket is nil") } - if len(opts) == 0 && len(commas) != 0 { - panic("opts is empty but commas is not") + if len(opts) == 0 { + panic("must have at least one part") } - if len(opts) != len(commas) && len(opts) != len(commas)+1 { + if len(commas) != len(opts)-1 { panic(fmt.Sprintf("%d opts requires %d commas, not %d", len(opts), len(opts)-1, len(commas))) } - children := make([]Node, 0, len(opts)+len(commas)+2) + children := make([]Node, 0, len(opts)*2+1) children = append(children, openBracket) - if len(opts) > 0 { - for i, opt := range opts { - if i > 0 { - if commas[i-1] == nil { - panic(fmt.Sprintf("commas[%d] is nil", i-1)) - } - children = append(children, commas[i-1]) - } - if opt == nil { - panic(fmt.Sprintf("opts[%d] is nil", i)) + for i, opt := range opts { + if i > 0 { + if commas[i-1] == nil { + panic(fmt.Sprintf("commas[%d] is nil", i-1)) } - children = append(children, opt) + children = append(children, commas[i-1]) } - if len(opts) == len(commas) { // Add the erroneous, but tolerated trailing comma. 
- if commas[len(commas)-1] == nil { - panic(fmt.Sprintf("commas[%d] is nil", len(commas)-1)) - } - children = append(children, commas[len(commas)-1]) + if opt == nil { + panic(fmt.Sprintf("opts[%d] is nil", i)) } + children = append(children, opt) } children = append(children, closeBracket) @@ -393,21 +371,3 @@ func (e *CompactOptionsNode) GetElements() []*OptionNode { } return e.Options } - -// NodeWithOptions represents a node in the AST that contains -// option statements. -type NodeWithOptions interface { - Node - RangeOptions(func(*OptionNode) bool) -} - -var _ NodeWithOptions = FileDeclNode(nil) -var _ NodeWithOptions = MessageDeclNode(nil) -var _ NodeWithOptions = OneofDeclNode(nil) -var _ NodeWithOptions = (*EnumNode)(nil) -var _ NodeWithOptions = (*ServiceNode)(nil) -var _ NodeWithOptions = RPCDeclNode(nil) -var _ NodeWithOptions = FieldDeclNode(nil) -var _ NodeWithOptions = EnumValueDeclNode(nil) -var _ NodeWithOptions = (*ExtensionRangeNode)(nil) -var _ NodeWithOptions = (*NoSourceNode)(nil) diff --git a/vendor/github.com/bufbuild/protocompile/ast/ranges.go b/vendor/github.com/bufbuild/protocompile/ast/ranges.go index c42908e1..453e5a06 100644 --- a/vendor/github.com/bufbuild/protocompile/ast/ranges.go +++ b/vendor/github.com/bufbuild/protocompile/ast/ranges.go @@ -1,4 +1,4 @@ -// Copyright 2020-2024 Buf Technologies, Inc. +// Copyright 2020-2022 Buf Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -33,7 +33,7 @@ type ExtensionRangeNode struct { Semicolon *RuneNode } -func (*ExtensionRangeNode) msgElement() {} +func (e *ExtensionRangeNode) msgElement() {} // NewExtensionRangeNode creates a new *ExtensionRangeNode. All args must be // non-nil except opts, which may be nil. @@ -90,14 +90,6 @@ func NewExtensionRangeNode(keyword *KeywordNode, ranges []*RangeNode, commas []* } } -func (e *ExtensionRangeNode) RangeOptions(fn func(*OptionNode) bool) { - for _, opt := range e.Options.Options { - if !fn(opt) { - return - } - } -} - // RangeDeclNode is a placeholder interface for AST nodes that represent // numeric values. This allows NoSourceNode to be used in place of *RangeNode // for some usages. @@ -108,7 +100,7 @@ type RangeDeclNode interface { } var _ RangeDeclNode = (*RangeNode)(nil) -var _ RangeDeclNode = (*NoSourceNode)(nil) +var _ RangeDeclNode = NoSourceNode{} // RangeNode represents a range expression, used in both extension ranges and // reserved ranges. Example: @@ -129,16 +121,16 @@ type RangeNode struct { // then so must be exactly one of end or max. If max is non-nil, it indicates a // "100 to max" style range. But if end is non-nil, the end of the range is a // literal, such as "100 to 200". 
-func NewRangeNode(start IntValueNode, to *KeywordNode, end IntValueNode, maxEnd *KeywordNode) *RangeNode { +func NewRangeNode(start IntValueNode, to *KeywordNode, end IntValueNode, max *KeywordNode) *RangeNode { if start == nil { panic("start is nil") } numChildren := 1 if to != nil { - if end == nil && maxEnd == nil { + if end == nil && max == nil { panic("to is not nil, but end and max both are") } - if end != nil && maxEnd != nil { + if end != nil && max != nil { panic("end and max cannot be both non-nil") } numChildren = 3 @@ -146,7 +138,7 @@ func NewRangeNode(start IntValueNode, to *KeywordNode, end IntValueNode, maxEnd if end != nil { panic("to is nil, but end is not") } - if maxEnd != nil { + if max != nil { panic("to is nil, but max is not") } } @@ -157,7 +149,7 @@ func NewRangeNode(start IntValueNode, to *KeywordNode, end IntValueNode, maxEnd if end != nil { children = append(children, end) } else { - children = append(children, maxEnd) + children = append(children, max) } } return &RangeNode{ @@ -167,7 +159,7 @@ func NewRangeNode(start IntValueNode, to *KeywordNode, end IntValueNode, maxEnd StartVal: start, To: to, EndVal: end, - Max: maxEnd, + Max: max, } } @@ -189,8 +181,8 @@ func (n *RangeNode) StartValue() interface{} { return n.StartVal.Value() } -func (n *RangeNode) StartValueAsInt32(minVal, maxVal int32) (int32, bool) { - return AsInt32(n.StartVal, minVal, maxVal) +func (n *RangeNode) StartValueAsInt32(min, max int32) (int32, bool) { + return AsInt32(n.StartVal, min, max) } func (n *RangeNode) EndValue() interface{} { @@ -200,14 +192,14 @@ func (n *RangeNode) EndValue() interface{} { return n.EndVal.Value() } -func (n *RangeNode) EndValueAsInt32(minVal, maxVal int32) (int32, bool) { +func (n *RangeNode) EndValueAsInt32(min, max int32) (int32, bool) { if n.Max != nil { - return maxVal, true + return max, true } if n.EndVal == nil { - return n.StartValueAsInt32(minVal, maxVal) + return n.StartValueAsInt32(min, max) } - return AsInt32(n.EndVal, minVal, maxVal) + return AsInt32(n.EndVal, min, max) } // ReservedNode represents reserved declaration, which can be used to reserve @@ -215,20 +207,13 @@ func (n *RangeNode) EndValueAsInt32(minVal, maxVal int32) (int32, bool) { // // reserved 1, 10-12, 15; // reserved "foo", "bar", "baz"; -// reserved foo, bar, baz; type ReservedNode struct { compositeNode Keyword *KeywordNode - // If non-empty, this node represents reserved ranges, and Names and Identifiers - // will be empty. + // If non-empty, this node represents reserved ranges and Names will be empty. Ranges []*RangeNode - // If non-empty, this node represents reserved names as string literals, and - // Ranges and Identifiers will be empty. String literals are used for reserved - // names in proto2 and proto3 syntax. + // If non-empty, this node represents reserved names and Ranges will be empty. Names []StringValueNode - // If non-empty, this node represents reserved names as identifiers, and Ranges - // and Names will be empty. Identifiers are used for reserved names in editions. - Identifiers []*IdentNode // Commas represent the separating ',' characters between options. The // length of this slice must be exactly len(Ranges)-1 or len(Names)-1, depending // on whether this node represents reserved ranges or reserved names. 
Each item @@ -298,17 +283,16 @@ func NewReservedNamesNode(keyword *KeywordNode, names []StringValueNode, commas if keyword == nil { panic("keyword is nil") } + if semicolon == nil { + panic("semicolon is nil") + } if len(names) == 0 { panic("must have at least one name") } if len(commas) != len(names)-1 { panic(fmt.Sprintf("%d names requires %d commas, not %d", len(names), len(names)-1, len(commas))) } - numChildren := len(names) * 2 - if semicolon != nil { - numChildren++ - } - children := make([]Node, 0, numChildren) + children := make([]Node, 0, len(names)*2+1) children = append(children, keyword) for i, name := range names { if i > 0 { @@ -322,9 +306,7 @@ func NewReservedNamesNode(keyword *KeywordNode, names []StringValueNode, commas } children = append(children, name) } - if semicolon != nil { - children = append(children, semicolon) - } + children = append(children, semicolon) return &ReservedNode{ compositeNode: compositeNode{ children: children, @@ -335,52 +317,3 @@ func NewReservedNamesNode(keyword *KeywordNode, names []StringValueNode, commas Semicolon: semicolon, } } - -// NewReservedIdentifiersNode creates a new *ReservedNode that represents reserved -// names. All args must be non-nil. -// - keyword: The token corresponding to the "reserved" keyword. -// - names: One or more names. -// - commas: Tokens that represent the "," runes that delimit the names. -// The length of commas must be one less than the length of names. -// - semicolon The token corresponding to the ";" rune that ends the declaration. -func NewReservedIdentifiersNode(keyword *KeywordNode, names []*IdentNode, commas []*RuneNode, semicolon *RuneNode) *ReservedNode { - if keyword == nil { - panic("keyword is nil") - } - if len(names) == 0 { - panic("must have at least one name") - } - if len(commas) != len(names)-1 { - panic(fmt.Sprintf("%d names requires %d commas, not %d", len(names), len(names)-1, len(commas))) - } - numChildren := len(names) * 2 - if semicolon != nil { - numChildren++ - } - children := make([]Node, 0, numChildren) - children = append(children, keyword) - for i, name := range names { - if i > 0 { - if commas[i-1] == nil { - panic(fmt.Sprintf("commas[%d] is nil", i-1)) - } - children = append(children, commas[i-1]) - } - if name == nil { - panic(fmt.Sprintf("names[%d] is nil", i)) - } - children = append(children, name) - } - if semicolon != nil { - children = append(children, semicolon) - } - return &ReservedNode{ - compositeNode: compositeNode{ - children: children, - }, - Keyword: keyword, - Identifiers: names, - Commas: commas, - Semicolon: semicolon, - } -} diff --git a/vendor/github.com/bufbuild/protocompile/ast/service.go b/vendor/github.com/bufbuild/protocompile/ast/service.go index eba22fd2..c1807488 100644 --- a/vendor/github.com/bufbuild/protocompile/ast/service.go +++ b/vendor/github.com/bufbuild/protocompile/ast/service.go @@ -1,4 +1,4 @@ -// Copyright 2020-2024 Buf Technologies, Inc. +// Copyright 2020-2022 Buf Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -76,16 +76,6 @@ func NewServiceNode(keyword *KeywordNode, name *IdentNode, openBrace *RuneNode, } } -func (n *ServiceNode) RangeOptions(fn func(*OptionNode) bool) { - for _, decl := range n.Decls { - if opt, ok := decl.(*OptionNode); ok { - if !fn(opt) { - return - } - } - } -} - // ServiceElement is an interface implemented by all AST nodes that can // appear in the body of a service declaration. 
type ServiceElement interface { @@ -101,14 +91,14 @@ var _ ServiceElement = (*EmptyDeclNode)(nil) // declarations. This allows NoSourceNode to be used in place of *RPCNode // for some usages. type RPCDeclNode interface { - NodeWithOptions + Node GetName() Node GetInputType() Node GetOutputType() Node } var _ RPCDeclNode = (*RPCNode)(nil) -var _ RPCDeclNode = (*NoSourceNode)(nil) +var _ RPCDeclNode = NoSourceNode{} // RPCNode represents an RPC declaration. Example: // @@ -151,12 +141,10 @@ func NewRPCNode(keyword *KeywordNode, name *IdentNode, input *RPCTypeNode, retur if output == nil { panic("output is nil") } - var children []Node if semicolon == nil { - children = []Node{keyword, name, input, returns, output} - } else { - children = []Node{keyword, name, input, returns, output, semicolon} + panic("semicolon is nil") } + children := []Node{keyword, name, input, returns, output, semicolon} return &RPCNode{ compositeNode: compositeNode{ children: children, @@ -241,16 +229,6 @@ func (n *RPCNode) GetOutputType() Node { return n.Output.MessageType } -func (n *RPCNode) RangeOptions(fn func(*OptionNode) bool) { - for _, decl := range n.Decls { - if opt, ok := decl.(*OptionNode); ok { - if !fn(opt) { - return - } - } - } -} - // RPCElement is an interface implemented by all AST nodes that can // appear in the body of an rpc declaration (aka method). type RPCElement interface { diff --git a/vendor/github.com/bufbuild/protocompile/ast/values.go b/vendor/github.com/bufbuild/protocompile/ast/values.go index 22bd208d..b9f050aa 100644 --- a/vendor/github.com/bufbuild/protocompile/ast/values.go +++ b/vendor/github.com/bufbuild/protocompile/ast/values.go @@ -1,4 +1,4 @@ -// Copyright 2020-2024 Buf Technologies, Inc. +// Copyright 2020-2022 Buf Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -36,10 +36,6 @@ type ValueNode interface { // literal: // * For array literals, the type returned will be []ValueNode // * For message literals, the type returned will be []*MessageFieldNode - // - // If the ValueNode is a NoSourceNode, indicating that there is no actual - // source code (and thus not AST information), then this method always - // returns nil. Value() interface{} } @@ -48,13 +44,14 @@ var _ ValueNode = (*CompoundIdentNode)(nil) var _ ValueNode = (*StringLiteralNode)(nil) var _ ValueNode = (*CompoundStringLiteralNode)(nil) var _ ValueNode = (*UintLiteralNode)(nil) +var _ ValueNode = (*PositiveUintLiteralNode)(nil) var _ ValueNode = (*NegativeIntLiteralNode)(nil) var _ ValueNode = (*FloatLiteralNode)(nil) var _ ValueNode = (*SpecialFloatLiteralNode)(nil) var _ ValueNode = (*SignedFloatLiteralNode)(nil) var _ ValueNode = (*ArrayLiteralNode)(nil) var _ ValueNode = (*MessageLiteralNode)(nil) -var _ ValueNode = (*NoSourceNode)(nil) +var _ ValueNode = NoSourceNode{} // StringValueNode is an AST node that represents a string literal. // Such a node can be a single literal (*StringLiteralNode) or a @@ -141,18 +138,19 @@ type IntValueNode interface { // AsInt32 range checks the given int value and returns its value is // in the range or 0, false if it is outside the range. 
-func AsInt32(n IntValueNode, minVal, maxVal int32) (int32, bool) { +func AsInt32(n IntValueNode, min, max int32) (int32, bool) { i, ok := n.AsInt64() if !ok { return 0, false } - if i < int64(minVal) || i > int64(maxVal) { + if i < int64(min) || i > int64(max) { return 0, false } return int32(i), true } var _ IntValueNode = (*UintLiteralNode)(nil) +var _ IntValueNode = (*PositiveUintLiteralNode)(nil) var _ IntValueNode = (*NegativeIntLiteralNode)(nil) // UintLiteralNode represents a simple integer literal with no sign character. @@ -189,6 +187,49 @@ func (n *UintLiteralNode) AsFloat() float64 { return float64(n.Val) } +// PositiveUintLiteralNode represents an integer literal with a positive (+) sign. +type PositiveUintLiteralNode struct { + compositeNode + Plus *RuneNode + Uint *UintLiteralNode + Val uint64 +} + +// NewPositiveUintLiteralNode creates a new *PositiveUintLiteralNode. Both +// arguments must be non-nil. +func NewPositiveUintLiteralNode(sign *RuneNode, i *UintLiteralNode) *PositiveUintLiteralNode { + if sign == nil { + panic("sign is nil") + } + if i == nil { + panic("i is nil") + } + children := []Node{sign, i} + return &PositiveUintLiteralNode{ + compositeNode: compositeNode{ + children: children, + }, + Plus: sign, + Uint: i, + Val: i.Val, + } +} + +func (n *PositiveUintLiteralNode) Value() interface{} { + return n.Val +} + +func (n *PositiveUintLiteralNode) AsInt64() (int64, bool) { + if n.Val > math.MaxInt64 { + return 0, false + } + return int64(n.Val), true +} + +func (n *PositiveUintLiteralNode) AsUint64() (uint64, bool) { + return n.Val, true +} + // NegativeIntLiteralNode represents an integer literal with a negative (-) sign. type NegativeIntLiteralNode struct { compositeNode @@ -275,14 +316,12 @@ type SpecialFloatLiteralNode struct { } // NewSpecialFloatLiteralNode returns a new *SpecialFloatLiteralNode for the -// given keyword. The given keyword should be "inf", "infinity", or "nan" -// in any case. +// given keyword, which must be "inf" or "nan". func NewSpecialFloatLiteralNode(name *KeywordNode) *SpecialFloatLiteralNode { var f float64 - switch strings.ToLower(name.Val) { - case "inf", "infinity": + if name.Val == "inf" { f = math.Inf(1) - default: + } else { f = math.NaN() } return &SpecialFloatLiteralNode{ diff --git a/vendor/github.com/bufbuild/protocompile/ast/walk.go b/vendor/github.com/bufbuild/protocompile/ast/walk.go index 00e71ab7..a969ed46 100644 --- a/vendor/github.com/bufbuild/protocompile/ast/walk.go +++ b/vendor/github.com/bufbuild/protocompile/ast/walk.go @@ -1,4 +1,4 @@ -// Copyright 2020-2024 Buf Technologies, Inc. +// Copyright 2020-2022 Buf Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -103,8 +103,6 @@ func Visit(n Node, v Visitor) error { return v.VisitFileNode(n) case *SyntaxNode: return v.VisitSyntaxNode(n) - case *EditionNode: - return v.VisitEditionNode(n) case *PackageNode: return v.VisitPackageNode(n) case *ImportNode: @@ -135,8 +133,8 @@ func Visit(n Node, v Visitor) error { return v.VisitMapFieldNode(n) case *MapTypeNode: return v.VisitMapTypeNode(n) - case *OneofNode: - return v.VisitOneofNode(n) + case *OneOfNode: + return v.VisitOneOfNode(n) case *EnumNode: return v.VisitEnumNode(n) case *EnumValueNode: @@ -157,6 +155,8 @@ func Visit(n Node, v Visitor) error { return v.VisitCompoundStringLiteralNode(n) case *UintLiteralNode: return v.VisitUintLiteralNode(n) + case *PositiveUintLiteralNode: + return v.VisitPositiveUintLiteralNode(n) case *NegativeIntLiteralNode: return v.VisitNegativeIntLiteralNode(n) case *FloatLiteralNode: @@ -197,7 +197,7 @@ func (t *AncestorTracker) AsWalkOptions() []WalkOption { t.ancestors = append(t.ancestors, n) return nil }), - WithAfter(func(_ Node) error { + WithAfter(func(n Node) error { t.ancestors = t.ancestors[:len(t.ancestors)-1] return nil }), @@ -242,33 +242,17 @@ func VisitChildren(n CompositeNode, v Visitor) error { // It consists of a number of functions, each of which matches a // concrete Node type. // -// NOTE: As the language evolves, new methods may be added to this -// interface to correspond to new grammar elements. That is why it -// cannot be directly implemented outside this package. Visitor -// implementations must embed NoOpVisitor and then implement the -// subset of methods of interest. If such an implementation is used -// with an AST that has newer elements, the visitor will not do -// anything in response to the new node types. -// -// An alternative to embedding NoOpVisitor is to use an instance of -// SimpleVisitor. +// Most visitor implementations will either embed NoOpVisitor (so as +// not to have to implement *all* of the methods) or will be instances +// of SimpleVisitor. // // Visitors can be supplied to a Walk operation or passed to a call // to Visit or VisitChildren. -// -// Note that there are some AST node types defined in this package -// that do not have corresponding visit methods. These are synthetic -// node types, that have specialized use from the parser, but never -// appear in an actual AST (which is always rooted at FileNode). -// These include SyntheticMapField, SyntheticOneof, -// SyntheticGroupMessageNode, and SyntheticMapEntryNode. type Visitor interface { // VisitFileNode is invoked when visiting a *FileNode in the AST. VisitFileNode(*FileNode) error // VisitSyntaxNode is invoked when visiting a *SyntaxNode in the AST. VisitSyntaxNode(*SyntaxNode) error - // VisitEditionNode is invoked when visiting an *EditionNode in the AST. - VisitEditionNode(*EditionNode) error // VisitPackageNode is invoked when visiting a *PackageNode in the AST. VisitPackageNode(*PackageNode) error // VisitImportNode is invoked when visiting an *ImportNode in the AST. @@ -299,8 +283,8 @@ type Visitor interface { VisitMapFieldNode(*MapFieldNode) error // VisitMapTypeNode is invoked when visiting a *MapTypeNode in the AST. VisitMapTypeNode(*MapTypeNode) error - // VisitOneofNode is invoked when visiting a *OneofNode in the AST. - VisitOneofNode(*OneofNode) error + // VisitOneOfNode is invoked when visiting a *OneOfNode in the AST. + VisitOneOfNode(*OneOfNode) error // VisitEnumNode is invoked when visiting an *EnumNode in the AST. 
VisitEnumNode(*EnumNode) error // VisitEnumValueNode is invoked when visiting an *EnumValueNode in the AST. @@ -321,6 +305,8 @@ type Visitor interface { VisitCompoundStringLiteralNode(*CompoundStringLiteralNode) error // VisitUintLiteralNode is invoked when visiting a *UintLiteralNode in the AST. VisitUintLiteralNode(*UintLiteralNode) error + // VisitPositiveUintLiteralNode is invoked when visiting a *PositiveUintLiteralNode in the AST. + VisitPositiveUintLiteralNode(*PositiveUintLiteralNode) error // VisitNegativeIntLiteralNode is invoked when visiting a *NegativeIntLiteralNode in the AST. VisitNegativeIntLiteralNode(*NegativeIntLiteralNode) error // VisitFloatLiteralNode is invoked when visiting a *FloatLiteralNode in the AST. @@ -341,9 +327,6 @@ type Visitor interface { VisitRuneNode(*RuneNode) error // VisitEmptyDeclNode is invoked when visiting a *EmptyDeclNode in the AST. VisitEmptyDeclNode(*EmptyDeclNode) error - - // Unexported method prevents callers from directly implementing. - isVisitor() } // NoOpVisitor is a visitor implementation that does nothing. All methods @@ -354,8 +337,6 @@ type NoOpVisitor struct{} var _ Visitor = NoOpVisitor{} -func (n NoOpVisitor) isVisitor() {} - func (n NoOpVisitor) VisitFileNode(_ *FileNode) error { return nil } @@ -364,10 +345,6 @@ func (n NoOpVisitor) VisitSyntaxNode(_ *SyntaxNode) error { return nil } -func (n NoOpVisitor) VisitEditionNode(_ *EditionNode) error { - return nil -} - func (n NoOpVisitor) VisitPackageNode(_ *PackageNode) error { return nil } @@ -428,7 +405,7 @@ func (n NoOpVisitor) VisitMapTypeNode(_ *MapTypeNode) error { return nil } -func (n NoOpVisitor) VisitOneofNode(_ *OneofNode) error { +func (n NoOpVisitor) VisitOneOfNode(_ *OneOfNode) error { return nil } @@ -472,6 +449,10 @@ func (n NoOpVisitor) VisitUintLiteralNode(_ *UintLiteralNode) error { return nil } +func (n NoOpVisitor) VisitPositiveUintLiteralNode(_ *PositiveUintLiteralNode) error { + return nil +} + func (n NoOpVisitor) VisitNegativeIntLiteralNode(_ *NegativeIntLiteralNode) error { return nil } @@ -541,7 +522,6 @@ func (n NoOpVisitor) VisitEmptyDeclNode(_ *EmptyDeclNode) error { type SimpleVisitor struct { DoVisitFileNode func(*FileNode) error DoVisitSyntaxNode func(*SyntaxNode) error - DoVisitEditionNode func(*EditionNode) error DoVisitPackageNode func(*PackageNode) error DoVisitImportNode func(*ImportNode) error DoVisitOptionNode func(*OptionNode) error @@ -557,7 +537,7 @@ type SimpleVisitor struct { DoVisitGroupNode func(*GroupNode) error DoVisitMapFieldNode func(*MapFieldNode) error DoVisitMapTypeNode func(*MapTypeNode) error - DoVisitOneofNode func(*OneofNode) error + DoVisitOneOfNode func(*OneOfNode) error DoVisitEnumNode func(*EnumNode) error DoVisitEnumValueNode func(*EnumValueNode) error DoVisitServiceNode func(*ServiceNode) error @@ -568,6 +548,7 @@ type SimpleVisitor struct { DoVisitStringLiteralNode func(*StringLiteralNode) error DoVisitCompoundStringLiteralNode func(*CompoundStringLiteralNode) error DoVisitUintLiteralNode func(*UintLiteralNode) error + DoVisitPositiveUintLiteralNode func(*PositiveUintLiteralNode) error DoVisitNegativeIntLiteralNode func(*NegativeIntLiteralNode) error DoVisitFloatLiteralNode func(*FloatLiteralNode) error DoVisitSpecialFloatLiteralNode func(*SpecialFloatLiteralNode) error @@ -595,337 +576,335 @@ type SimpleVisitor struct { var _ Visitor = (*SimpleVisitor)(nil) -func (v *SimpleVisitor) isVisitor() {} - -func (v *SimpleVisitor) visitInterface(node Node) error { +func (b *SimpleVisitor) visitInterface(node Node) error { 
switch n := node.(type) { case FieldDeclNode: - if v.DoVisitFieldDeclNode != nil { - return v.DoVisitFieldDeclNode(n) + if b.DoVisitFieldDeclNode != nil { + return b.DoVisitFieldDeclNode(n) } // *MapFieldNode and *GroupNode both implement both FieldDeclNode and // MessageDeclNode, so handle other case here - if fn, ok := n.(MessageDeclNode); ok && v.DoVisitMessageDeclNode != nil { - return v.DoVisitMessageDeclNode(fn) + if fn, ok := n.(MessageDeclNode); ok && b.DoVisitMessageDeclNode != nil { + return b.DoVisitMessageDeclNode(fn) } case MessageDeclNode: - if v.DoVisitMessageDeclNode != nil { - return v.DoVisitMessageDeclNode(n) + if b.DoVisitMessageDeclNode != nil { + return b.DoVisitMessageDeclNode(n) } case IdentValueNode: - if v.DoVisitIdentValueNode != nil { - return v.DoVisitIdentValueNode(n) + if b.DoVisitIdentValueNode != nil { + return b.DoVisitIdentValueNode(n) } case StringValueNode: - if v.DoVisitStringValueNode != nil { - return v.DoVisitStringValueNode(n) + if b.DoVisitStringValueNode != nil { + return b.DoVisitStringValueNode(n) } case IntValueNode: - if v.DoVisitIntValueNode != nil { - return v.DoVisitIntValueNode(n) + if b.DoVisitIntValueNode != nil { + return b.DoVisitIntValueNode(n) } // *UintLiteralNode implements both IntValueNode and FloatValueNode, // so handle other case here - if fn, ok := n.(FloatValueNode); ok && v.DoVisitFloatValueNode != nil { - return v.DoVisitFloatValueNode(fn) + if fn, ok := n.(FloatValueNode); ok && b.DoVisitFloatValueNode != nil { + return b.DoVisitFloatValueNode(fn) } case FloatValueNode: - if v.DoVisitFloatValueNode != nil { - return v.DoVisitFloatValueNode(n) + if b.DoVisitFloatValueNode != nil { + return b.DoVisitFloatValueNode(n) } } - if n, ok := node.(ValueNode); ok && v.DoVisitValueNode != nil { - return v.DoVisitValueNode(n) + if n, ok := node.(ValueNode); ok && b.DoVisitValueNode != nil { + return b.DoVisitValueNode(n) } switch n := node.(type) { case TerminalNode: - if v.DoVisitTerminalNode != nil { - return v.DoVisitTerminalNode(n) + if b.DoVisitTerminalNode != nil { + return b.DoVisitTerminalNode(n) } case CompositeNode: - if v.DoVisitCompositeNode != nil { - return v.DoVisitCompositeNode(n) + if b.DoVisitCompositeNode != nil { + return b.DoVisitCompositeNode(n) } } - if v.DoVisitNode != nil { - return v.DoVisitNode(node) + if b.DoVisitNode != nil { + return b.DoVisitNode(node) } return nil } -func (v *SimpleVisitor) VisitFileNode(node *FileNode) error { - if v.DoVisitFileNode != nil { - return v.DoVisitFileNode(node) +func (b *SimpleVisitor) VisitFileNode(node *FileNode) error { + if b.DoVisitFileNode != nil { + return b.DoVisitFileNode(node) } - return v.visitInterface(node) + return b.visitInterface(node) } -func (v *SimpleVisitor) VisitSyntaxNode(node *SyntaxNode) error { - if v.DoVisitSyntaxNode != nil { - return v.DoVisitSyntaxNode(node) +func (b *SimpleVisitor) VisitSyntaxNode(node *SyntaxNode) error { + if b.DoVisitSyntaxNode != nil { + return b.DoVisitSyntaxNode(node) } - return v.visitInterface(node) + return b.visitInterface(node) } -func (v *SimpleVisitor) VisitEditionNode(node *EditionNode) error { - if v.DoVisitEditionNode != nil { - return v.DoVisitEditionNode(node) +func (b *SimpleVisitor) VisitPackageNode(node *PackageNode) error { + if b.DoVisitPackageNode != nil { + return b.DoVisitPackageNode(node) } - return v.visitInterface(node) + return b.visitInterface(node) } -func (v *SimpleVisitor) VisitPackageNode(node *PackageNode) error { - if v.DoVisitPackageNode != nil { - return v.DoVisitPackageNode(node) +func 
(b *SimpleVisitor) VisitImportNode(node *ImportNode) error { + if b.DoVisitImportNode != nil { + return b.DoVisitImportNode(node) } - return v.visitInterface(node) + return b.visitInterface(node) } -func (v *SimpleVisitor) VisitImportNode(node *ImportNode) error { - if v.DoVisitImportNode != nil { - return v.DoVisitImportNode(node) +func (b *SimpleVisitor) VisitOptionNode(node *OptionNode) error { + if b.DoVisitOptionNode != nil { + return b.DoVisitOptionNode(node) } - return v.visitInterface(node) + return b.visitInterface(node) } -func (v *SimpleVisitor) VisitOptionNode(node *OptionNode) error { - if v.DoVisitOptionNode != nil { - return v.DoVisitOptionNode(node) +func (b *SimpleVisitor) VisitOptionNameNode(node *OptionNameNode) error { + if b.DoVisitOptionNameNode != nil { + return b.DoVisitOptionNameNode(node) } - return v.visitInterface(node) + return b.visitInterface(node) } -func (v *SimpleVisitor) VisitOptionNameNode(node *OptionNameNode) error { - if v.DoVisitOptionNameNode != nil { - return v.DoVisitOptionNameNode(node) +func (b *SimpleVisitor) VisitFieldReferenceNode(node *FieldReferenceNode) error { + if b.DoVisitFieldReferenceNode != nil { + return b.DoVisitFieldReferenceNode(node) } - return v.visitInterface(node) + return b.visitInterface(node) } -func (v *SimpleVisitor) VisitFieldReferenceNode(node *FieldReferenceNode) error { - if v.DoVisitFieldReferenceNode != nil { - return v.DoVisitFieldReferenceNode(node) +func (b *SimpleVisitor) VisitCompactOptionsNode(node *CompactOptionsNode) error { + if b.DoVisitCompactOptionsNode != nil { + return b.DoVisitCompactOptionsNode(node) } - return v.visitInterface(node) + return b.visitInterface(node) } -func (v *SimpleVisitor) VisitCompactOptionsNode(node *CompactOptionsNode) error { - if v.DoVisitCompactOptionsNode != nil { - return v.DoVisitCompactOptionsNode(node) +func (b *SimpleVisitor) VisitMessageNode(node *MessageNode) error { + if b.DoVisitMessageNode != nil { + return b.DoVisitMessageNode(node) } - return v.visitInterface(node) + return b.visitInterface(node) } -func (v *SimpleVisitor) VisitMessageNode(node *MessageNode) error { - if v.DoVisitMessageNode != nil { - return v.DoVisitMessageNode(node) +func (b *SimpleVisitor) VisitExtendNode(node *ExtendNode) error { + if b.DoVisitExtendNode != nil { + return b.DoVisitExtendNode(node) } - return v.visitInterface(node) + return b.visitInterface(node) } -func (v *SimpleVisitor) VisitExtendNode(node *ExtendNode) error { - if v.DoVisitExtendNode != nil { - return v.DoVisitExtendNode(node) +func (b *SimpleVisitor) VisitExtensionRangeNode(node *ExtensionRangeNode) error { + if b.DoVisitExtensionRangeNode != nil { + return b.DoVisitExtensionRangeNode(node) } - return v.visitInterface(node) + return b.visitInterface(node) } -func (v *SimpleVisitor) VisitExtensionRangeNode(node *ExtensionRangeNode) error { - if v.DoVisitExtensionRangeNode != nil { - return v.DoVisitExtensionRangeNode(node) +func (b *SimpleVisitor) VisitReservedNode(node *ReservedNode) error { + if b.DoVisitReservedNode != nil { + return b.DoVisitReservedNode(node) } - return v.visitInterface(node) + return b.visitInterface(node) } -func (v *SimpleVisitor) VisitReservedNode(node *ReservedNode) error { - if v.DoVisitReservedNode != nil { - return v.DoVisitReservedNode(node) +func (b *SimpleVisitor) VisitRangeNode(node *RangeNode) error { + if b.DoVisitRangeNode != nil { + return b.DoVisitRangeNode(node) } - return v.visitInterface(node) + return b.visitInterface(node) } -func (v *SimpleVisitor) VisitRangeNode(node 
*RangeNode) error { - if v.DoVisitRangeNode != nil { - return v.DoVisitRangeNode(node) +func (b *SimpleVisitor) VisitFieldNode(node *FieldNode) error { + if b.DoVisitFieldNode != nil { + return b.DoVisitFieldNode(node) } - return v.visitInterface(node) + return b.visitInterface(node) } -func (v *SimpleVisitor) VisitFieldNode(node *FieldNode) error { - if v.DoVisitFieldNode != nil { - return v.DoVisitFieldNode(node) +func (b *SimpleVisitor) VisitGroupNode(node *GroupNode) error { + if b.DoVisitGroupNode != nil { + return b.DoVisitGroupNode(node) } - return v.visitInterface(node) + return b.visitInterface(node) } -func (v *SimpleVisitor) VisitGroupNode(node *GroupNode) error { - if v.DoVisitGroupNode != nil { - return v.DoVisitGroupNode(node) +func (b *SimpleVisitor) VisitMapFieldNode(node *MapFieldNode) error { + if b.DoVisitMapFieldNode != nil { + return b.DoVisitMapFieldNode(node) } - return v.visitInterface(node) + return b.visitInterface(node) } -func (v *SimpleVisitor) VisitMapFieldNode(node *MapFieldNode) error { - if v.DoVisitMapFieldNode != nil { - return v.DoVisitMapFieldNode(node) +func (b *SimpleVisitor) VisitMapTypeNode(node *MapTypeNode) error { + if b.DoVisitMapTypeNode != nil { + return b.DoVisitMapTypeNode(node) } - return v.visitInterface(node) + return b.visitInterface(node) } -func (v *SimpleVisitor) VisitMapTypeNode(node *MapTypeNode) error { - if v.DoVisitMapTypeNode != nil { - return v.DoVisitMapTypeNode(node) +func (b *SimpleVisitor) VisitOneOfNode(node *OneOfNode) error { + if b.DoVisitOneOfNode != nil { + return b.DoVisitOneOfNode(node) } - return v.visitInterface(node) + return b.visitInterface(node) } -func (v *SimpleVisitor) VisitOneofNode(node *OneofNode) error { - if v.DoVisitOneofNode != nil { - return v.DoVisitOneofNode(node) +func (b *SimpleVisitor) VisitEnumNode(node *EnumNode) error { + if b.DoVisitEnumNode != nil { + return b.DoVisitEnumNode(node) } - return v.visitInterface(node) + return b.visitInterface(node) } -func (v *SimpleVisitor) VisitEnumNode(node *EnumNode) error { - if v.DoVisitEnumNode != nil { - return v.DoVisitEnumNode(node) +func (b *SimpleVisitor) VisitEnumValueNode(node *EnumValueNode) error { + if b.DoVisitEnumValueNode != nil { + return b.DoVisitEnumValueNode(node) } - return v.visitInterface(node) + return b.visitInterface(node) } -func (v *SimpleVisitor) VisitEnumValueNode(node *EnumValueNode) error { - if v.DoVisitEnumValueNode != nil { - return v.DoVisitEnumValueNode(node) +func (b *SimpleVisitor) VisitServiceNode(node *ServiceNode) error { + if b.DoVisitServiceNode != nil { + return b.DoVisitServiceNode(node) } - return v.visitInterface(node) + return b.visitInterface(node) } -func (v *SimpleVisitor) VisitServiceNode(node *ServiceNode) error { - if v.DoVisitServiceNode != nil { - return v.DoVisitServiceNode(node) +func (b *SimpleVisitor) VisitRPCNode(node *RPCNode) error { + if b.DoVisitRPCNode != nil { + return b.DoVisitRPCNode(node) } - return v.visitInterface(node) + return b.visitInterface(node) } -func (v *SimpleVisitor) VisitRPCNode(node *RPCNode) error { - if v.DoVisitRPCNode != nil { - return v.DoVisitRPCNode(node) +func (b *SimpleVisitor) VisitRPCTypeNode(node *RPCTypeNode) error { + if b.DoVisitRPCTypeNode != nil { + return b.DoVisitRPCTypeNode(node) } - return v.visitInterface(node) + return b.visitInterface(node) } -func (v *SimpleVisitor) VisitRPCTypeNode(node *RPCTypeNode) error { - if v.DoVisitRPCTypeNode != nil { - return v.DoVisitRPCTypeNode(node) +func (b *SimpleVisitor) VisitIdentNode(node *IdentNode) error { + 
if b.DoVisitIdentNode != nil { + return b.DoVisitIdentNode(node) } - return v.visitInterface(node) + return b.visitInterface(node) } -func (v *SimpleVisitor) VisitIdentNode(node *IdentNode) error { - if v.DoVisitIdentNode != nil { - return v.DoVisitIdentNode(node) +func (b *SimpleVisitor) VisitCompoundIdentNode(node *CompoundIdentNode) error { + if b.DoVisitCompoundIdentNode != nil { + return b.DoVisitCompoundIdentNode(node) } - return v.visitInterface(node) + return b.visitInterface(node) } -func (v *SimpleVisitor) VisitCompoundIdentNode(node *CompoundIdentNode) error { - if v.DoVisitCompoundIdentNode != nil { - return v.DoVisitCompoundIdentNode(node) +func (b *SimpleVisitor) VisitStringLiteralNode(node *StringLiteralNode) error { + if b.DoVisitStringLiteralNode != nil { + return b.DoVisitStringLiteralNode(node) } - return v.visitInterface(node) + return b.visitInterface(node) } -func (v *SimpleVisitor) VisitStringLiteralNode(node *StringLiteralNode) error { - if v.DoVisitStringLiteralNode != nil { - return v.DoVisitStringLiteralNode(node) +func (b *SimpleVisitor) VisitCompoundStringLiteralNode(node *CompoundStringLiteralNode) error { + if b.DoVisitCompoundStringLiteralNode != nil { + return b.DoVisitCompoundStringLiteralNode(node) } - return v.visitInterface(node) + return b.visitInterface(node) } -func (v *SimpleVisitor) VisitCompoundStringLiteralNode(node *CompoundStringLiteralNode) error { - if v.DoVisitCompoundStringLiteralNode != nil { - return v.DoVisitCompoundStringLiteralNode(node) +func (b *SimpleVisitor) VisitUintLiteralNode(node *UintLiteralNode) error { + if b.DoVisitUintLiteralNode != nil { + return b.DoVisitUintLiteralNode(node) } - return v.visitInterface(node) + return b.visitInterface(node) } -func (v *SimpleVisitor) VisitUintLiteralNode(node *UintLiteralNode) error { - if v.DoVisitUintLiteralNode != nil { - return v.DoVisitUintLiteralNode(node) +func (b *SimpleVisitor) VisitPositiveUintLiteralNode(node *PositiveUintLiteralNode) error { + if b.DoVisitPositiveUintLiteralNode != nil { + return b.DoVisitPositiveUintLiteralNode(node) } - return v.visitInterface(node) + return b.visitInterface(node) } -func (v *SimpleVisitor) VisitNegativeIntLiteralNode(node *NegativeIntLiteralNode) error { - if v.DoVisitNegativeIntLiteralNode != nil { - return v.DoVisitNegativeIntLiteralNode(node) +func (b *SimpleVisitor) VisitNegativeIntLiteralNode(node *NegativeIntLiteralNode) error { + if b.DoVisitNegativeIntLiteralNode != nil { + return b.DoVisitNegativeIntLiteralNode(node) } - return v.visitInterface(node) + return b.visitInterface(node) } -func (v *SimpleVisitor) VisitFloatLiteralNode(node *FloatLiteralNode) error { - if v.DoVisitFloatLiteralNode != nil { - return v.DoVisitFloatLiteralNode(node) +func (b *SimpleVisitor) VisitFloatLiteralNode(node *FloatLiteralNode) error { + if b.DoVisitFloatLiteralNode != nil { + return b.DoVisitFloatLiteralNode(node) } - return v.visitInterface(node) + return b.visitInterface(node) } -func (v *SimpleVisitor) VisitSpecialFloatLiteralNode(node *SpecialFloatLiteralNode) error { - if v.DoVisitSpecialFloatLiteralNode != nil { - return v.DoVisitSpecialFloatLiteralNode(node) +func (b *SimpleVisitor) VisitSpecialFloatLiteralNode(node *SpecialFloatLiteralNode) error { + if b.DoVisitSpecialFloatLiteralNode != nil { + return b.DoVisitSpecialFloatLiteralNode(node) } - return v.visitInterface(node) + return b.visitInterface(node) } -func (v *SimpleVisitor) VisitSignedFloatLiteralNode(node *SignedFloatLiteralNode) error { - if v.DoVisitSignedFloatLiteralNode != 
nil { - return v.DoVisitSignedFloatLiteralNode(node) +func (b *SimpleVisitor) VisitSignedFloatLiteralNode(node *SignedFloatLiteralNode) error { + if b.DoVisitSignedFloatLiteralNode != nil { + return b.DoVisitSignedFloatLiteralNode(node) } - return v.visitInterface(node) + return b.visitInterface(node) } -func (v *SimpleVisitor) VisitArrayLiteralNode(node *ArrayLiteralNode) error { - if v.DoVisitArrayLiteralNode != nil { - return v.DoVisitArrayLiteralNode(node) +func (b *SimpleVisitor) VisitArrayLiteralNode(node *ArrayLiteralNode) error { + if b.DoVisitArrayLiteralNode != nil { + return b.DoVisitArrayLiteralNode(node) } - return v.visitInterface(node) + return b.visitInterface(node) } -func (v *SimpleVisitor) VisitMessageLiteralNode(node *MessageLiteralNode) error { - if v.DoVisitMessageLiteralNode != nil { - return v.DoVisitMessageLiteralNode(node) +func (b *SimpleVisitor) VisitMessageLiteralNode(node *MessageLiteralNode) error { + if b.DoVisitMessageLiteralNode != nil { + return b.DoVisitMessageLiteralNode(node) } - return v.visitInterface(node) + return b.visitInterface(node) } -func (v *SimpleVisitor) VisitMessageFieldNode(node *MessageFieldNode) error { - if v.DoVisitMessageFieldNode != nil { - return v.DoVisitMessageFieldNode(node) +func (b *SimpleVisitor) VisitMessageFieldNode(node *MessageFieldNode) error { + if b.DoVisitMessageFieldNode != nil { + return b.DoVisitMessageFieldNode(node) } - return v.visitInterface(node) + return b.visitInterface(node) } -func (v *SimpleVisitor) VisitKeywordNode(node *KeywordNode) error { - if v.DoVisitKeywordNode != nil { - return v.DoVisitKeywordNode(node) +func (b *SimpleVisitor) VisitKeywordNode(node *KeywordNode) error { + if b.DoVisitKeywordNode != nil { + return b.DoVisitKeywordNode(node) } - return v.visitInterface(node) + return b.visitInterface(node) } -func (v *SimpleVisitor) VisitRuneNode(node *RuneNode) error { - if v.DoVisitRuneNode != nil { - return v.DoVisitRuneNode(node) +func (b *SimpleVisitor) VisitRuneNode(node *RuneNode) error { + if b.DoVisitRuneNode != nil { + return b.DoVisitRuneNode(node) } - return v.visitInterface(node) + return b.visitInterface(node) } -func (v *SimpleVisitor) VisitEmptyDeclNode(node *EmptyDeclNode) error { - if v.DoVisitEmptyDeclNode != nil { - return v.DoVisitEmptyDeclNode(node) +func (b *SimpleVisitor) VisitEmptyDeclNode(node *EmptyDeclNode) error { + if b.DoVisitEmptyDeclNode != nil { + return b.DoVisitEmptyDeclNode(node) } - return v.visitInterface(node) + return b.visitInterface(node) } diff --git a/vendor/github.com/bufbuild/protocompile/compiler.go b/vendor/github.com/bufbuild/protocompile/compiler.go index b9a6d15e..0bb7f182 100644 --- a/vendor/github.com/bufbuild/protocompile/compiler.go +++ b/vendor/github.com/bufbuild/protocompile/compiler.go @@ -1,4 +1,4 @@ -// Copyright 2020-2024 Buf Technologies, Inc. +// Copyright 2020-2022 Buf Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -17,7 +17,6 @@ package protocompile import ( "bytes" "context" - "errors" "fmt" "io" "runtime" @@ -90,15 +89,6 @@ type Compiler struct { // will be removed as soon as it's no longer needed. This can help reduce // total memory usage for operations involving a large number of files. RetainASTs bool - - // If non-nil, the set of symbols already known. Any symbols in the current - // compilation will be added to it. 
If the compilation tries to redefine any - // of these symbols, it will be reported as a collision. - // - // This allows a large compilation to be split up into multiple, smaller - // operations and still be able to identify naming collisions and extension - // number collisions across all operations. - Symbols *linker.Symbols } // SourceInfoMode indicates how source code info is generated by a Compiler. @@ -106,19 +96,14 @@ type SourceInfoMode int const ( // SourceInfoNone indicates that no source code info is generated. - SourceInfoNone = SourceInfoMode(0) + SourceInfoNone = SourceInfoMode(iota) // SourceInfoStandard indicates that the standard source code info is // generated, which includes comments only for complete declarations. - SourceInfoStandard = SourceInfoMode(1) + SourceInfoStandard // SourceInfoExtraComments indicates that source code info is generated // and will include comments for all elements (more comments than would // be found in a descriptor produced by protoc). - SourceInfoExtraComments = SourceInfoMode(2) - // SourceInfoExtraOptionLocations indicates that source code info is - // generated with additional locations for elements inside of message - // literals in option values. This can be combined with the above by - // bitwise-OR'ing it with SourceInfoExtraComments. - SourceInfoExtraOptionLocations = SourceInfoMode(4) + SourceInfoExtraComments ) // Compile compiles the given file names into fully-linked descriptors. The @@ -150,16 +135,12 @@ func (c *Compiler) Compile(ctx context.Context, files ...string) (linker.Files, h := reporter.NewHandler(c.Reporter) - sym := c.Symbols - if sym == nil { - sym = &linker.Symbols{} - } e := executor{ c: c, h: h, s: semaphore.NewWeighted(int64(par)), cancel: cancel, - sym: sym, + sym: &linker.Symbols{}, results: map[string]*result{}, } @@ -454,28 +435,31 @@ func (t *task) asFile(ctx context.Context, name string, r SearchResult) (linker. } } - var overrideDescriptorProto linker.File if len(imports) > 0 { t.r.setBlockedOn(imports) results := make([]*result, len(fileDescriptorProto.Dependency)) checked := map[string]struct{}{} for i, dep := range fileDescriptorProto.Dependency { - span := findImportSpan(parseRes, dep) + pos := findImportPos(parseRes, dep) if name == dep { // doh! file imports itself - handleImportCycle(t.h, span, []string{name}, dep) + handleImportCycle(t.h, pos, []string{name}, dep) return nil, t.h.Error() } res := t.e.compile(ctx, dep) // check for dependency cycle to prevent deadlock - if err := t.e.checkForDependencyCycle(res, []string{name, dep}, span, checked); err != nil { + if err := t.e.checkForDependencyCycle(res, []string{name, dep}, pos, checked); err != nil { return nil, err } results[i] = res } - deps = make([]linker.File, len(results)) + capacity := len(results) + if wantsDescriptorProto { + capacity++ + } + deps = make([]linker.File, len(results), capacity) var descriptorProtoRes *result if wantsDescriptorProto { descriptorProtoRes = t.e.compile(ctx, descriptorProtoPath) @@ -495,7 +479,7 @@ func (t *task) asFile(ctx context.Context, name string, r SearchResult) (linker. // it's usually considered immediately fatal. However, if the reason // we were resolving is due to an import, turn this into an error with // source position that pinpoints the import statement and report it. 
- return nil, reporter.Error(findImportSpan(parseRes, res.name), rerr) + return nil, reporter.Error(findImportPos(parseRes, res.name), rerr) } return nil, res.err } @@ -509,12 +493,13 @@ func (t *task) asFile(ctx context.Context, name string, r SearchResult) (linker. case <-descriptorProtoRes.ready: // descriptor.proto wasn't explicitly imported, so we can ignore a failure if descriptorProtoRes.err == nil { - overrideDescriptorProto = descriptorProtoRes.res + deps = append(deps, descriptorProtoRes.res) } case <-ctx.Done(): return nil, ctx.Err() } } + // all deps resolved t.r.setBlockedOn(nil) // reacquire semaphore so we can proceed @@ -524,10 +509,10 @@ func (t *task) asFile(ctx context.Context, name string, r SearchResult) (linker. t.released = false } - return t.link(parseRes, deps, overrideDescriptorProto) + return t.link(parseRes, deps) } -func (e *executor) checkForDependencyCycle(res *result, sequence []string, span ast.SourceSpan, checked map[string]struct{}) error { +func (e *executor) checkForDependencyCycle(res *result, sequence []string, pos ast.SourcePos, checked map[string]struct{}) error { if _, ok := checked[res.name]; ok { // already checked this one return nil @@ -538,7 +523,7 @@ func (e *executor) checkForDependencyCycle(res *result, sequence []string, span // is this a cycle? for _, file := range sequence { if file == dep { - handleImportCycle(e.h, span, sequence, dep) + handleImportCycle(e.h, pos, sequence, dep) return e.h.Error() } } @@ -549,83 +534,64 @@ func (e *executor) checkForDependencyCycle(res *result, sequence []string, span if depRes == nil { continue } - if err := e.checkForDependencyCycle(depRes, append(sequence, dep), span, checked); err != nil { + if err := e.checkForDependencyCycle(depRes, append(sequence, dep), pos, checked); err != nil { return err } } return nil } -func handleImportCycle(h *reporter.Handler, span ast.SourceSpan, importSequence []string, dep string) { +func handleImportCycle(h *reporter.Handler, pos ast.SourcePos, importSequence []string, dep string) { var buf bytes.Buffer buf.WriteString("cycle found in imports: ") for _, imp := range importSequence { - _, _ = fmt.Fprintf(&buf, "%q -> ", imp) + fmt.Fprintf(&buf, "%q -> ", imp) } - _, _ = fmt.Fprintf(&buf, "%q", dep) + fmt.Fprintf(&buf, "%q", dep) // error is saved and returned in caller - _ = h.HandleErrorWithPos(span, errors.New(buf.String())) + h.HandleErrorf(pos, buf.String()) //nolint:errcheck } -func findImportSpan(res parser.Result, dep string) ast.SourceSpan { +func findImportPos(res parser.Result, dep string) ast.SourcePos { root := res.AST() if root == nil { - return ast.UnknownSpan(res.FileNode().Name()) + return ast.UnknownPos(res.FileNode().Name()) } for _, decl := range root.Decls { if imp, ok := decl.(*ast.ImportNode); ok { if imp.Name.AsString() == dep { - return root.NodeInfo(imp.Name) + return root.NodeInfo(imp.Name).Start() } } } // this should never happen... 
- return ast.UnknownSpan(res.FileNode().Name()) + return ast.UnknownPos(res.FileNode().Name()) } -func (t *task) link(parseRes parser.Result, deps linker.Files, overrideDescriptorProtoRes linker.File) (linker.File, error) { +func (t *task) link(parseRes parser.Result, deps linker.Files) (linker.File, error) { file, err := linker.Link(parseRes, deps, t.e.sym, t.h) if err != nil { return nil, err } - - var interpretOpts []options.InterpreterOption - if overrideDescriptorProtoRes != nil { - interpretOpts = []options.InterpreterOption{options.WithOverrideDescriptorProto(overrideDescriptorProtoRes)} - } - - optsIndex, err := options.InterpretOptions(file, t.h, interpretOpts...) + optsIndex, err := options.InterpretOptions(file, t.h) if err != nil { return nil, err } // now that options are interpreted, we can do some additional checks - if err := file.ValidateOptions(t.h, t.e.sym); err != nil { + if err := file.ValidateOptions(t.h); err != nil { return nil, err } if t.r.explicitFile { file.CheckForUnusedImports(t.h) } - if err := t.h.Error(); err != nil { - return nil, err - } if needsSourceInfo(parseRes, t.e.c.SourceInfoMode) { - var srcInfoOpts []sourceinfo.GenerateOption - if t.e.c.SourceInfoMode&SourceInfoExtraComments != 0 { - srcInfoOpts = append(srcInfoOpts, sourceinfo.WithExtraComments()) - } - if t.e.c.SourceInfoMode&SourceInfoExtraOptionLocations != 0 { - srcInfoOpts = append(srcInfoOpts, sourceinfo.WithExtraOptionLocations()) + switch t.e.c.SourceInfoMode { + case SourceInfoStandard: + parseRes.FileDescriptorProto().SourceCodeInfo = sourceinfo.GenerateSourceInfo(parseRes.AST(), optsIndex) + case SourceInfoExtraComments: + parseRes.FileDescriptorProto().SourceCodeInfo = sourceinfo.GenerateSourceInfoWithExtraComments(parseRes.AST(), optsIndex) } - parseRes.FileDescriptorProto().SourceCodeInfo = sourceinfo.GenerateSourceInfo(parseRes.AST(), optsIndex, srcInfoOpts...) - } else if t.e.c.SourceInfoMode == SourceInfoNone { - // If results came from unlinked FileDescriptorProto, it could have - // source info that we should strip. - parseRes.FileDescriptorProto().SourceCodeInfo = nil - } - if len(parseRes.FileDescriptorProto().GetSourceCodeInfo().GetLocation()) > 0 { - // If we have source code info in the descriptor proto at this point, - // we have to build the index of locations. file.PopulateSourceCodeInfo() } diff --git a/vendor/github.com/bufbuild/protocompile/doc.go b/vendor/github.com/bufbuild/protocompile/doc.go index 40067b30..661ccc45 100644 --- a/vendor/github.com/bufbuild/protocompile/doc.go +++ b/vendor/github.com/bufbuild/protocompile/doc.go @@ -1,4 +1,4 @@ -// Copyright 2020-2024 Buf Technologies, Inc. +// Copyright 2020-2022 Buf Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/bufbuild/protocompile/go.work b/vendor/github.com/bufbuild/protocompile/go.work index ba2d9c0d..de1c41a8 100644 --- a/vendor/github.com/bufbuild/protocompile/go.work +++ b/vendor/github.com/bufbuild/protocompile/go.work @@ -1,6 +1,7 @@ -go 1.21 +go 1.19 use ( . 
./internal/benchmarks + ./internal/tools ) diff --git a/vendor/github.com/bufbuild/protocompile/go.work.sum b/vendor/github.com/bufbuild/protocompile/go.work.sum index d977cf05..6411d166 100644 --- a/vendor/github.com/bufbuild/protocompile/go.work.sum +++ b/vendor/github.com/bufbuild/protocompile/go.work.sum @@ -1,4 +1,3 @@ -cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ= cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= @@ -11,40 +10,186 @@ cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= +cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= +cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= +cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= +cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= +cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= +cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= +cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= +cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= +cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= +cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc= +cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno= +cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= +cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= +cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= +cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= +cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ= +cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= +cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= +cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= +cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= +cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc= +cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE= +cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= +cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= +cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= +cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= +cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= 
+cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= +cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg= +cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= +cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= +cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= +cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= +cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= +cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= +cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= +cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= +cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= +cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= +cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE= +cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= +cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= +cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= +cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= +cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= +cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= +cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= +cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= +cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= +cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= +cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= +cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek= +cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= +cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= +cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= +cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= +cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= +cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= 
+cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= +cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= +cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= +cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= +cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= +cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= +cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= +cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60= +cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= +cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= +cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= +cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= +cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= +cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= +cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= +cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= +cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= +cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= +cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= +cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= +cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= +cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= +cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= +cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= +cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= +cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA= +cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= +cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8= +cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= +cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= +cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= +cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= +cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= +cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= +cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA= +cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4= +cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA= +cloud.google.com/go/orgpolicy v1.4.0/go.mod 
h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE= +cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= +cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo= +cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= +cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70= +cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= +cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg= +cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= +cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= +cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE= +cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= +cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= +cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs= +cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= +cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA= +cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA= +cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU= +cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= +cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc= +cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do= +cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= +cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk= +cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= +cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4= +cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= +cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q= +cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= +cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk= +cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU= +cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= +cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4= +cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco= +cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E= +cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4= +cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= +cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= +cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= +cloud.google.com/go/storage v1.23.0/go.mod 
h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= +cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= +cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= +cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= +cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM= +cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8= +cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ= +cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28= +cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= +cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk= +cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= +cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M= +cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo= +cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY= +cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE= +cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w= +cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg= +cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc= +cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo= +cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= +cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/bufbuild/protocompile v0.2.1-0.20230123224550-da57cd758c2f/go.mod h1:tleDrpPTlLUVmgnEoN6qBliKWqJaZFJXqZdFjTd+ocU= -github.com/bufbuild/protocompile v0.13.0/go.mod h1:dr++fGGeMPWHv7jPeT06ZKukm45NJscd7rUxQVzEKRk= -github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403 h1:cqQfy1jclcSy/FwLjemeg3SR1yaINm74aQyupQ0Bl8M= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d 
h1:QyzYnTnPE15SQyUeqU6qLbWxMkwyAyu+vGksa0b7j00= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= -github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= -github.com/golang/mock v1.1.1 h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= @@ -52,53 +197,53 @@ github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= +github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= +github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= +github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod 
h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/jhump/gopoet v0.1.0 h1:gYjOPnzHd2nzB37xYQZxj4EIQNpBrBskRqQQ3q4ZgSg= -github.com/jhump/goprotoc v0.5.0 h1:Y1UgUX+txUznfqcGdDef8ZOVlyQvnV0pKWZH08RmZuo= -github.com/jhump/protoreflect v1.15.0 h1:U5T5/2LF0AZQFP9T4W5GfBjBaTruomrKobiR4E+oA/Q= -github.com/jhump/protoreflect v1.15.0/go.mod h1:qww51KYjD2hoCl/ohxw5cK2LSssFczrbO1t8Ld2TENs= -github.com/kr/pty v1.1.1 h1:VkoXIwSboBpnk99O/KFauAEILuNHv5DVFKZMBN/gUgw= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM= +github.com/jhump/protoreflect v1.13.1-0.20220928232736-101791cb1b4c h1:XImQJfpJLmGEEd8ll5yPVyL/aEvmgGHW4WYTyNseLOM= +github.com/jhump/protoreflect v1.13.1-0.20220928232736-101791cb1b4c/go.mod h1:JytZfP5d0r8pVNLZvai7U/MCuTWITgrI4tTg7puQFKI= +github.com/jhump/protoreflect v1.14.0 h1:MBbQK392K3u8NTLbKOCIi3XdI+y+c6yt5oMq0X3xviw= +github.com/jhump/protoreflect v1.14.0/go.mod h1:JytZfP5d0r8pVNLZvai7U/MCuTWITgrI4tTg7puQFKI= +github.com/jhump/protoreflect v1.14.1 h1:N88q7JkxTHWFEqReuTsYH1dPIwXxA0ITNQp7avLY10s= +github.com/jhump/protoreflect v1.14.1/go.mod h1:JytZfP5d0r8pVNLZvai7U/MCuTWITgrI4tTg7puQFKI= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= -github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= -golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= -golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= -golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 h1:XQyxROzUlZH+WIQwySDgnISgOivlhjIEwaQaJEJrrN0= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57 h1:LQmS1nU0twXLA96Kt7U9qtHJEbBk3z6Q0V4UXjZkpr4= -golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net 
v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220812174116-3211cb980234/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= @@ -108,7 +253,15 @@ golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/sync v0.0.0-20220513210516-0976fa681c29/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -128,29 +281,23 @@ golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= -golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= -golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= -golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= -golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= -golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20220722155259-a9ba230a4035/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023 h1:0c3L82FDQ5rt1bjTBlchS8t6RQ6299/+5bWMnRLh+uI= -golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 
h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= @@ -169,13 +316,23 @@ google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= +google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= google.golang.org/api v0.81.0/go.mod h1:FA6Mb/bZxj706H2j+j2d6mHEEaHBmbbWnkfvmorOCko= -google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= +google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= +google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= +google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= +google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= @@ -210,7 +367,23 @@ google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto 
v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd h1:e0TwkXOdbnH/1x5rc5MZ/VYyiZ4v+RdVfrGMqEwT68I= google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= +google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= +google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a h1:GH6UPn3ixhWcKDhpnEC55S75cerLPdpp3hrhfKYjZgw= +google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c h1:QgY/XxIAIeccR+Ca/rDdKubLIU9rcJ3xfy1DC/Wd2Oo= +google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= @@ -224,12 +397,12 @@ google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ5 google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.28.2-0.20220831092852-f930b1dc76e8/go.mod 
h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc h1:/hemPrYIhOhy8zYrNj+069zDB68us2sMGsfkFJO0iZs= diff --git a/vendor/github.com/bufbuild/protocompile/internal/editions/editions.go b/vendor/github.com/bufbuild/protocompile/internal/editions/editions.go deleted file mode 100644 index ee054fa7..00000000 --- a/vendor/github.com/bufbuild/protocompile/internal/editions/editions.go +++ /dev/null @@ -1,420 +0,0 @@ -// Copyright 2020-2024 Buf Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package editions contains helpers related to resolving features for -// Protobuf editions. These are lower-level helpers. Higher-level helpers -// (which use this package under the hood) can be found in the exported -// protoutil package. -package editions - -import ( - "fmt" - "strings" - "sync" - - "google.golang.org/protobuf/encoding/prototext" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" - "google.golang.org/protobuf/types/descriptorpb" - "google.golang.org/protobuf/types/dynamicpb" -) - -const ( - // MinSupportedEdition is the earliest edition supported by this module. - // It should be 2023 (the first edition) for the indefinite future. - MinSupportedEdition = descriptorpb.Edition_EDITION_2023 - - // MaxSupportedEdition is the most recent edition supported by this module. - MaxSupportedEdition = descriptorpb.Edition_EDITION_2023 -) - -var ( - // SupportedEditions is the exhaustive set of editions that protocompile - // can support. We don't allow it to compile future/unknown editions, to - // make sure we don't generate incorrect descriptors, in the event that - // a future edition introduces a change or new feature that requires - // new logic in the compiler. - SupportedEditions = computeSupportedEditions(MinSupportedEdition, MaxSupportedEdition) - - // FeatureSetDescriptor is the message descriptor for the compiled-in - // version (in the descriptorpb package) of the google.protobuf.FeatureSet - // message type. - FeatureSetDescriptor = (*descriptorpb.FeatureSet)(nil).ProtoReflect().Descriptor() - // FeatureSetType is the message type for the compiled-in version (in - // the descriptorpb package) of google.protobuf.FeatureSet. 
- FeatureSetType = (*descriptorpb.FeatureSet)(nil).ProtoReflect().Type() - - editionDefaults map[descriptorpb.Edition]*descriptorpb.FeatureSet - editionDefaultsInit sync.Once -) - -// HasFeatures is implemented by all options messages and provides a -// nil-receiver-safe way of accessing the features explicitly configured -// in those options. -type HasFeatures interface { - GetFeatures() *descriptorpb.FeatureSet -} - -var _ HasFeatures = (*descriptorpb.FileOptions)(nil) -var _ HasFeatures = (*descriptorpb.MessageOptions)(nil) -var _ HasFeatures = (*descriptorpb.FieldOptions)(nil) -var _ HasFeatures = (*descriptorpb.OneofOptions)(nil) -var _ HasFeatures = (*descriptorpb.ExtensionRangeOptions)(nil) -var _ HasFeatures = (*descriptorpb.EnumOptions)(nil) -var _ HasFeatures = (*descriptorpb.EnumValueOptions)(nil) -var _ HasFeatures = (*descriptorpb.ServiceOptions)(nil) -var _ HasFeatures = (*descriptorpb.MethodOptions)(nil) - -// ResolveFeature resolves a feature for the given descriptor. This simple -// helper examines the given element and its ancestors, searching for an -// override. If there is no overridden value, it returns a zero value. -func ResolveFeature( - element protoreflect.Descriptor, - fields ...protoreflect.FieldDescriptor, -) (protoreflect.Value, error) { - for { - var features *descriptorpb.FeatureSet - if withFeatures, ok := element.Options().(HasFeatures); ok { - // It should not really be possible for 'ok' to ever be false... - features = withFeatures.GetFeatures() - } - - // TODO: adaptFeatureSet is only looking at the first field. But if we needed to - // support an extension field inside a custom feature, we'd really need - // to check all fields. That gets particularly complicated if the traversal - // path of fields includes list and map values. Luckily, features are not - // supposed to be repeated and not supposed to themselves have extensions. - // So this should be fine, at least for now. - msgRef, err := adaptFeatureSet(features, fields[0]) - if err != nil { - return protoreflect.Value{}, err - } - // Navigate the fields to find the value - var val protoreflect.Value - for i, field := range fields { - if i > 0 { - msgRef = val.Message() - } - if !msgRef.Has(field) { - val = protoreflect.Value{} - break - } - val = msgRef.Get(field) - } - if val.IsValid() { - // All fields were set! - return val, nil - } - - parent := element.Parent() - if parent == nil { - // We've reached the end of the inheritance chain. - return protoreflect.Value{}, nil - } - element = parent - } -} - -// HasEdition should be implemented by values that implement -// [protoreflect.FileDescriptor], to provide access to the file's -// edition when its syntax is [protoreflect.Editions]. -type HasEdition interface { - // Edition returns the numeric value of a google.protobuf.Edition enum - // value that corresponds to the edition of this file. If the file does - // not use editions, it should return the enum value that corresponds - // to the syntax level, EDITION_PROTO2 or EDITION_PROTO3. - Edition() int32 -} - -// GetEdition returns the edition for a given element. It returns -// EDITION_PROTO2 or EDITION_PROTO3 if the element is in a file that -// uses proto2 or proto3 syntax, respectively. It returns EDITION_UNKNOWN -// if the syntax of the given element is not recognized or if the edition -// cannot be ascertained from the element's [protoreflect.FileDescriptor]. 
-func GetEdition(d protoreflect.Descriptor) descriptorpb.Edition { - switch d.ParentFile().Syntax() { - case protoreflect.Proto2: - return descriptorpb.Edition_EDITION_PROTO2 - case protoreflect.Proto3: - return descriptorpb.Edition_EDITION_PROTO3 - case protoreflect.Editions: - withEdition, ok := d.ParentFile().(HasEdition) - if !ok { - // The parent file should always be a *result, so we should - // never be able to actually get in here. If we somehow did - // have another implementation of protoreflect.FileDescriptor, - // it doesn't provide a way to get the edition, other than the - // potentially expensive step of generating a FileDescriptorProto - // and then querying for the edition from that. :/ - return descriptorpb.Edition_EDITION_UNKNOWN - } - return descriptorpb.Edition(withEdition.Edition()) - default: - return descriptorpb.Edition_EDITION_UNKNOWN - } -} - -// GetEditionDefaults returns the default feature values for the given edition. -// It returns nil if the given edition is not known. -// -// This only populates known features, those that are fields of [*descriptorpb.FeatureSet]. -// It does not populate any extension fields. -// -// The returned value must not be mutated as it references shared package state. -func GetEditionDefaults(edition descriptorpb.Edition) *descriptorpb.FeatureSet { - editionDefaultsInit.Do(func() { - editionDefaults = make(map[descriptorpb.Edition]*descriptorpb.FeatureSet, len(descriptorpb.Edition_name)) - // Compute default for all known editions in descriptorpb. - for editionInt := range descriptorpb.Edition_name { - edition := descriptorpb.Edition(editionInt) - defaults := &descriptorpb.FeatureSet{} - defaultsRef := defaults.ProtoReflect() - fields := defaultsRef.Descriptor().Fields() - // Note: we are not computing defaults for extensions. Those are not needed - // by anything in the compiler, so we can get away with just computing - // defaults for these static, non-extension fields. - for i, length := 0, fields.Len(); i < length; i++ { - field := fields.Get(i) - val, err := GetFeatureDefault(edition, FeatureSetType, field) - if err != nil { - // should we fail somehow?? - continue - } - defaultsRef.Set(field, val) - } - editionDefaults[edition] = defaults - } - }) - return editionDefaults[edition] -} - -// GetFeatureDefault computes the default value for a feature. The given container -// is the message type that contains the field. This should usually be the descriptor -// for google.protobuf.FeatureSet, but can be a different message for computing the -// default value of custom features. -// -// Note that this always re-computes the default. For known fields of FeatureSet, -// it is more efficient to query from the statically computed default messages, -// like so: -// -// editions.GetEditionDefaults(edition).ProtoReflect().Get(feature) -func GetFeatureDefault(edition descriptorpb.Edition, container protoreflect.MessageType, feature protoreflect.FieldDescriptor) (protoreflect.Value, error) { - opts, ok := feature.Options().(*descriptorpb.FieldOptions) - if !ok { - // this is most likely impossible except for contrived use cases... 
- return protoreflect.Value{}, fmt.Errorf("options is %T instead of *descriptorpb.FieldOptions", feature.Options()) - } - maxEdition := descriptorpb.Edition(-1) - var maxVal string - for _, def := range opts.EditionDefaults { - if def.GetEdition() <= edition && def.GetEdition() > maxEdition { - maxEdition = def.GetEdition() - maxVal = def.GetValue() - } - } - if maxEdition == -1 { - // no matching default found - return protoreflect.Value{}, fmt.Errorf("no relevant default for edition %s", edition) - } - // We use a typed nil so that it won't fall back to the global registry. Features - // should not use extensions or google.protobuf.Any, so a nil *Types is fine. - unmarshaler := prototext.UnmarshalOptions{Resolver: (*protoregistry.Types)(nil)} - // The string value is in the text format: either a field value literal or a - // message literal. (Repeated and map features aren't supported, so there's no - // array or map literal syntax to worry about.) - if feature.Kind() == protoreflect.MessageKind || feature.Kind() == protoreflect.GroupKind { - fldVal := container.Zero().NewField(feature) - err := unmarshaler.Unmarshal([]byte(maxVal), fldVal.Message().Interface()) - if err != nil { - return protoreflect.Value{}, err - } - return fldVal, nil - } - // The value is the textformat for the field. But prototext doesn't provide a way - // to unmarshal a single field value. To work around, we unmarshal into an enclosing - // message, which means we must prefix the value with the field name. - if feature.IsExtension() { - maxVal = fmt.Sprintf("[%s]: %s", feature.FullName(), maxVal) - } else { - maxVal = fmt.Sprintf("%s: %s", feature.Name(), maxVal) - } - empty := container.New() - err := unmarshaler.Unmarshal([]byte(maxVal), empty.Interface()) - if err != nil { - return protoreflect.Value{}, err - } - return empty.Get(feature), nil -} - -func adaptFeatureSet(msg *descriptorpb.FeatureSet, field protoreflect.FieldDescriptor) (protoreflect.Message, error) { - msgRef := msg.ProtoReflect() - var actualField protoreflect.FieldDescriptor - switch { - case field.IsExtension(): - // Extensions can be used directly with the feature set, even if - // field.ContainingMessage() != FeatureSetDescriptor. But only if - // the value is either not a message or is a message with the - // right descriptor, i.e. val.Descriptor() == field.Message(). - if actualField = actualDescriptor(msgRef, field); actualField == nil || actualField == field { - if msgRef.Has(field) || len(msgRef.GetUnknown()) == 0 { - return msgRef, nil - } - // The field is not present, but the message has unrecognized values. So - // let's try to parse the unrecognized bytes, just in case they contain - // this extension. - temp := &descriptorpb.FeatureSet{} - unmarshaler := proto.UnmarshalOptions{ - AllowPartial: true, - Resolver: resolverForExtension{field}, - } - if err := unmarshaler.Unmarshal(msgRef.GetUnknown(), temp); err != nil { - return nil, fmt.Errorf("failed to parse unrecognized fields of FeatureSet: %w", err) - } - return temp.ProtoReflect(), nil - } - case field.ContainingMessage() == FeatureSetDescriptor: - // Known field, not dynamically generated. Can directly use with the feature set. - return msgRef, nil - default: - actualField = FeatureSetDescriptor.Fields().ByNumber(field.Number()) - } - - // If we get here, we have a dynamic field descriptor or an extension - // descriptor whose message type does not match the descriptor of the - // stored value. 
We need to copy its value into a dynamic message, - // which requires marshalling/unmarshalling. - // We only need to copy over the unrecognized bytes (if any) - // and the same field (if present). - data := msgRef.GetUnknown() - if actualField != nil && msgRef.Has(actualField) { - subset := &descriptorpb.FeatureSet{} - subset.ProtoReflect().Set(actualField, msgRef.Get(actualField)) - var err error - data, err = proto.MarshalOptions{AllowPartial: true}.MarshalAppend(data, subset) - if err != nil { - return nil, fmt.Errorf("failed to marshal FeatureSet field %s to bytes: %w", field.Name(), err) - } - } - if len(data) == 0 { - // No relevant data to copy over, so we can just return - // a zero value message - return dynamicpb.NewMessageType(field.ContainingMessage()).Zero(), nil - } - - other := dynamicpb.NewMessage(field.ContainingMessage()) - // We don't need to use a resolver for this step because we know that - // field is not an extension. And features are not allowed to themselves - // have extensions. - if err := (proto.UnmarshalOptions{AllowPartial: true}).Unmarshal(data, other); err != nil { - return nil, fmt.Errorf("failed to marshal FeatureSet field %s to bytes: %w", field.Name(), err) - } - return other, nil -} - -type resolverForExtension struct { - ext protoreflect.ExtensionDescriptor -} - -func (r resolverForExtension) FindMessageByName(_ protoreflect.FullName) (protoreflect.MessageType, error) { - return nil, protoregistry.NotFound -} - -func (r resolverForExtension) FindMessageByURL(_ string) (protoreflect.MessageType, error) { - return nil, protoregistry.NotFound -} - -func (r resolverForExtension) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { - if field == r.ext.FullName() { - return asExtensionType(r.ext), nil - } - return nil, protoregistry.NotFound -} - -func (r resolverForExtension) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { - if message == r.ext.ContainingMessage().FullName() && field == r.ext.Number() { - return asExtensionType(r.ext), nil - } - return nil, protoregistry.NotFound -} - -func asExtensionType(ext protoreflect.ExtensionDescriptor) protoreflect.ExtensionType { - if xtd, ok := ext.(protoreflect.ExtensionTypeDescriptor); ok { - return xtd.Type() - } - return dynamicpb.NewExtensionType(ext) -} - -func computeSupportedEditions(minEdition, maxEdition descriptorpb.Edition) map[string]descriptorpb.Edition { - supportedEditions := map[string]descriptorpb.Edition{} - for editionNum := range descriptorpb.Edition_name { - edition := descriptorpb.Edition(editionNum) - if edition >= minEdition && edition <= maxEdition { - name := strings.TrimPrefix(edition.String(), "EDITION_") - supportedEditions[name] = edition - } - } - return supportedEditions -} - -// actualDescriptor returns the actual field descriptor referenced by msg that -// corresponds to the given ext (i.e. same number). It returns nil if msg has -// no reference, if the actual descriptor is the same as ext, or if ext is -// otherwise safe to use as is. 
-func actualDescriptor(msg protoreflect.Message, ext protoreflect.ExtensionDescriptor) protoreflect.FieldDescriptor { - if !msg.Has(ext) || ext.Message() == nil { - // nothing to match; safe as is - return nil - } - val := msg.Get(ext) - switch { - case ext.IsMap(): // should not actually be possible - expectedDescriptor := ext.MapValue().Message() - if expectedDescriptor == nil { - return nil // nothing to match - } - // We know msg.Has(field) is true, from above, so there's at least one entry. - var matches bool - val.Map().Range(func(_ protoreflect.MapKey, val protoreflect.Value) bool { - matches = val.Message().Descriptor() == expectedDescriptor - return false - }) - if matches { - return nil - } - case ext.IsList(): - // We know msg.Has(field) is true, from above, so there's at least one entry. - if val.List().Get(0).Message().Descriptor() == ext.Message() { - return nil - } - case !ext.IsMap(): - if val.Message().Descriptor() == ext.Message() { - return nil - } - } - // The underlying message descriptors do not match. So we need to return - // the actual field descriptor. Sadly, protoreflect.Message provides no way - // to query the field descriptor in a message by number. For non-extensions, - // one can query the associated message descriptor. But for extensions, we - // have to do the slow thing, and range through all fields looking for it. - var actualField protoreflect.FieldDescriptor - msg.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { - if fd.Number() == ext.Number() { - actualField = fd - return false - } - return true - }) - return actualField -} diff --git a/vendor/github.com/bufbuild/protocompile/internal/featuresext/cpp_features.protoset b/vendor/github.com/bufbuild/protocompile/internal/featuresext/cpp_features.protoset deleted file mode 100644 index 106ad8e4ae5e5b446c5da3a137d886518b93c99a..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 605 zcmZ8f!EVz)5bY+RPB#)*%Kjy#8n1u z*r1}LmqU2SlW0?uYo|=h8vW|>!9?q~Tdnxk;iAA*l3vAWp)AT6%W54kU@!W|f7n;X zu37Jzu^YB0dd^fB{Pk`a7mu=}uo(RFez2dczw!KyH~h{P7YMBzv~EzlfiqNYjirOU zgvtIsIAu(UJ&Q}_N z9vl^JN=eM}vLV|VTw5$I(Sk|nVhBmfnx5rYNE=3e+w7#7d`hi~j1R0cdAW)mSqB}; z;qiDF>ScH@wQ$~07Q{Gyc`u@Y*qtw;li9l_Bns@)$@Dz9rHr~1QN&y4S~i+;{@+AA z<4;>*y!d=JjX%!b&rZ*0{*LJL#=~dzX>uC9@jHU^Z56+Jc*MK(6~yzfU3|M_`XJ$U T!Eh-&2>e!#*P diff --git a/vendor/github.com/bufbuild/protocompile/internal/featuresext/featuresext.go b/vendor/github.com/bufbuild/protocompile/internal/featuresext/featuresext.go deleted file mode 100644 index 892524e6..00000000 --- a/vendor/github.com/bufbuild/protocompile/internal/featuresext/featuresext.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2020-2024 Buf Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package featuresext provides file descriptors for the -// "google/protobuf/cpp_features.proto" and "google/protobuf/java_features.proto" -// standard import files. 
Unlike the other standard/well-known -// imports, these files have no standard Go package in their -// runtime with generated code. So in order to make them available -// as "standard imports" to compiler users, we must embed these -// descriptors into a Go package. -package featuresext - -import ( - _ "embed" - "fmt" - "sync" - - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protodesc" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" - "google.golang.org/protobuf/types/descriptorpb" -) - -var ( - //go:embed cpp_features.protoset - cppFeatures []byte - - //go:embed java_features.protoset - javaFeatures []byte - - initOnce sync.Once - initCppFeatures protoreflect.FileDescriptor - initCppErr error - initJavaFeatures protoreflect.FileDescriptor - initJavaErr error -) - -func initDescriptors() { - initOnce.Do(func() { - initCppFeatures, initCppErr = buildDescriptor("google/protobuf/cpp_features.proto", cppFeatures) - initJavaFeatures, initJavaErr = buildDescriptor("google/protobuf/java_features.proto", javaFeatures) - }) -} - -func CppFeaturesDescriptor() (protoreflect.FileDescriptor, error) { - initDescriptors() - return initCppFeatures, initCppErr -} - -func JavaFeaturesDescriptor() (protoreflect.FileDescriptor, error) { - initDescriptors() - return initJavaFeatures, initJavaErr -} - -func buildDescriptor(name string, data []byte) (protoreflect.FileDescriptor, error) { - var files descriptorpb.FileDescriptorSet - err := proto.Unmarshal(data, &files) - if err != nil { - return nil, fmt.Errorf("failed to load descriptor for %q: %w", name, err) - } - if len(files.File) != 1 { - return nil, fmt.Errorf("failed to load descriptor for %q: expected embedded descriptor set to contain exactly one file but it instead has %d", name, len(files.File)) - } - if files.File[0].GetName() != name { - return nil, fmt.Errorf("failed to load descriptor for %q: embedded descriptor contains wrong file %q", name, files.File[0].GetName()) - } - descriptor, err := protodesc.NewFile(files.File[0], protoregistry.GlobalFiles) - if err != nil { - return nil, fmt.Errorf("failed to load descriptor for %q: %w", name, err) - } - return descriptor, nil -} diff --git a/vendor/github.com/bufbuild/protocompile/internal/featuresext/java_features.protoset b/vendor/github.com/bufbuild/protocompile/internal/featuresext/java_features.protoset deleted file mode 100644 index 60de3eb758e1fbfef09eca85deb2a370af727d6a..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 856 zcmbtTF>ljA6y~C|F4w9UGZ3`{Pi5&+7ezv<*b``K~1_^i8gr2YdY z=Cbw=fVwfU@&}k%5R`wx*-mK4%6RX-_kG`e?;ZWNP9MfX#2gPZDYWqO=K=wD< z-^n{dE=c`?kV|sgscV^I* z)aMz3xzI+r)Cw_5(h{tS;#47@p#sUwL7ECn3wV?u50yKxE4NR}r+183=VOiD$#rz( z%@|KYR;V^=CXMap12^;C6i@ER_hd>IY4meBWoW)q0S_)xd z${lPMc5j}&*QMM42I1_|`=ep^$Q!p#ddIC{@3ilY`p^5Pulx2bXOk|1v57G;vCv93kGqE_kZnehM%9?$5=iB-Ub>p{XX0h3{S7$LOd)xc;ULdBc1ns-aV|!VA Fqdy8TAUXg5 diff --git a/vendor/github.com/bufbuild/protocompile/internal/message_context.go b/vendor/github.com/bufbuild/protocompile/internal/message_context.go index 52acbdfd..134a05d0 100644 --- a/vendor/github.com/bufbuild/protocompile/internal/message_context.go +++ b/vendor/github.com/bufbuild/protocompile/internal/message_context.go @@ -1,4 +1,4 @@ -// Copyright 2020-2024 Buf Technologies, Inc. +// Copyright 2020-2022 Buf Technologies, Inc. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/bufbuild/protocompile/internal/messageset/messageset.go b/vendor/github.com/bufbuild/protocompile/internal/messageset/messageset.go deleted file mode 100644 index 850a0c66..00000000 --- a/vendor/github.com/bufbuild/protocompile/internal/messageset/messageset.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2020-2024 Buf Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package messageset - -import ( - "math" - "sync" - - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protodesc" - "google.golang.org/protobuf/types/descriptorpb" -) - -var ( - messageSetSupport bool - messageSetSupportInit sync.Once -) - -// CanSupportMessageSets returns true if the protobuf-go runtime supports -// serializing messages with the message set wire format. -func CanSupportMessageSets() bool { - messageSetSupportInit.Do(func() { - // We check using the protodesc package, instead of just relying - // on protolegacy build tag, in case someone links in a fork of - // the protobuf-go runtime that supports legacy proto1 features - // or in case the protobuf-go runtime adds another mechanism to - // enable or disable it (such as environment variable). - _, err := protodesc.NewFile(&descriptorpb.FileDescriptorProto{ - Name: proto.String("test.proto"), - MessageType: []*descriptorpb.DescriptorProto{ - { - Name: proto.String("MessageSet"), - Options: &descriptorpb.MessageOptions{ - MessageSetWireFormat: proto.Bool(true), - }, - ExtensionRange: []*descriptorpb.DescriptorProto_ExtensionRange{ - { - Start: proto.Int32(1), - End: proto.Int32(math.MaxInt32), - }, - }, - }, - }, - }, nil) - // When message sets are not supported, the above returns an error: - // message "MessageSet" is a MessageSet, which is a legacy proto1 feature that is no longer supported - messageSetSupport = err == nil - }) - return messageSetSupport -} diff --git a/vendor/github.com/bufbuild/protocompile/internal/norace.go b/vendor/github.com/bufbuild/protocompile/internal/norace.go index 2acf4e46..ada2f5cb 100644 --- a/vendor/github.com/bufbuild/protocompile/internal/norace.go +++ b/vendor/github.com/bufbuild/protocompile/internal/norace.go @@ -1,4 +1,4 @@ -// Copyright 2020-2024 Buf Technologies, Inc. +// Copyright 2020-2022 Buf Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/bufbuild/protocompile/internal/options.go b/vendor/github.com/bufbuild/protocompile/internal/options.go index 4eaa0f6a..5586802b 100644 --- a/vendor/github.com/bufbuild/protocompile/internal/options.go +++ b/vendor/github.com/bufbuild/protocompile/internal/options.go @@ -1,4 +1,4 @@ -// Copyright 2020-2024 Buf Technologies, Inc. +// Copyright 2020-2022 Buf Technologies, Inc. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -18,6 +18,7 @@ import ( "google.golang.org/protobuf/types/descriptorpb" "github.com/bufbuild/protocompile/ast" + "github.com/bufbuild/protocompile/reporter" ) type hasOptionNode interface { @@ -25,34 +26,21 @@ type hasOptionNode interface { FileNode() ast.FileDeclNode // needed in order to query for NodeInfo } -type errorHandler func(span ast.SourceSpan, format string, args ...interface{}) error - -func FindFirstOption(res hasOptionNode, handler errorHandler, scope string, opts []*descriptorpb.UninterpretedOption, name string) (int, error) { - return findOption(res, handler, scope, opts, name, false, true) -} - -func FindOption(res hasOptionNode, handler errorHandler, scope string, opts []*descriptorpb.UninterpretedOption, name string) (int, error) { - return findOption(res, handler, scope, opts, name, true, false) -} - -func findOption(res hasOptionNode, handler errorHandler, scope string, opts []*descriptorpb.UninterpretedOption, name string, exact, first bool) (int, error) { +func FindOption(res hasOptionNode, handler *reporter.Handler, scope string, opts []*descriptorpb.UninterpretedOption, name string) (int, error) { found := -1 for i, opt := range opts { - if exact && len(opt.Name) != 1 { + if len(opt.Name) != 1 { continue } if opt.Name[0].GetIsExtension() || opt.Name[0].GetNamePart() != name { continue } - if first { - return i, nil - } if found >= 0 { optNode := res.OptionNode(opt) fn := res.FileNode() node := optNode.GetName() nodeInfo := fn.NodeInfo(node) - return -1, handler(nodeInfo, "%s: option %s cannot be defined more than once", scope, name) + return -1, handler.HandleErrorf(nodeInfo.Start(), "%s: option %s cannot be defined more than once", scope, name) } found = i } diff --git a/vendor/github.com/bufbuild/protocompile/internal/race.go b/vendor/github.com/bufbuild/protocompile/internal/race.go index e70e414d..4a458a3f 100644 --- a/vendor/github.com/bufbuild/protocompile/internal/race.go +++ b/vendor/github.com/bufbuild/protocompile/internal/race.go @@ -1,4 +1,4 @@ -// Copyright 2020-2024 Buf Technologies, Inc. +// Copyright 2020-2022 Buf Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/bufbuild/protocompile/internal/tags.go b/vendor/github.com/bufbuild/protocompile/internal/tags.go index 179728f0..7e674c17 100644 --- a/vendor/github.com/bufbuild/protocompile/internal/tags.go +++ b/vendor/github.com/bufbuild/protocompile/internal/tags.go @@ -1,4 +1,4 @@ -// Copyright 2020-2024 Buf Technologies, Inc. +// Copyright 2020-2022 Buf Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -61,12 +61,6 @@ const ( // FileOptionsTag is the tag number of the options element in a file // descriptor proto. FileOptionsTag = 8 - // FileOptionsJavaStringCheckUTF8Tag is the tag number of the java_string_check_utf8 - // field in the FileOptions proto. - FileOptionsJavaStringCheckUTF8Tag = 27 - // FileOptionsFeaturesTag is the tag number of the features field in the - // FileOptions proto. - FileOptionsFeaturesTag = 50 // FileSourceCodeInfoTag is the tag number of the source code info element // in a file descriptor proto. 
FileSourceCodeInfoTag = 9 @@ -79,9 +73,6 @@ const ( // FileSyntaxTag is the tag number of the syntax element in a file // descriptor proto. FileSyntaxTag = 12 - // FileEditionTag is the tag number of the edition element in a file - // descriptor proto. - FileEditionTag = 14 // MessageNameTag is the tag number of the name element in a message // descriptor proto. MessageNameTag = 1 @@ -103,12 +94,9 @@ const ( // MessageOptionsTag is the tag number of the options element in a message // descriptor proto. MessageOptionsTag = 7 - // MessageOptionsFeaturesTag is the tag number of the features field in the - // MessageOptions proto. - MessageOptionsFeaturesTag = 12 - // MessageOneofsTag is the tag number of the one-ofs element in a message + // MessageOneOfsTag is the tag number of the one-ofs element in a message // descriptor proto. - MessageOneofsTag = 8 + MessageOneOfsTag = 8 // MessageReservedRangesTag is the tag number of the reserved ranges element // in a message descriptor proto. MessageReservedRangesTag = 9 @@ -124,30 +112,6 @@ const ( // ExtensionRangeOptionsTag is the tag number of the options element in an // extension range proto. ExtensionRangeOptionsTag = 3 - // ExtensionRangeOptionsDeclarationTag is the tag number of the declaration - // field in the ExtensionRangeOptions proto. - ExtensionRangeOptionsDeclarationTag = 2 - // ExtensionRangeOptionsVerificationTag is the tag number of the verification - // field in the ExtensionRangeOptions proto. - ExtensionRangeOptionsVerificationTag = 3 - // ExtensionRangeOptionsDeclarationNumberTag is the tag number of the number - // field in the ExtensionRangeOptions.Declaration proto. - ExtensionRangeOptionsDeclarationNumberTag = 1 - // ExtensionRangeOptionsDeclarationFullNameTag is the tag number of the full_name - // field in the ExtensionRangeOptions.Declaration proto. - ExtensionRangeOptionsDeclarationFullNameTag = 2 - // ExtensionRangeOptionsDeclarationTypeTag is the tag number of the type - // field in the ExtensionRangeOptions.Declaration proto. - ExtensionRangeOptionsDeclarationTypeTag = 3 - // ExtensionRangeOptionsDeclarationReservedTag is the tag number of the reserved - // field in the ExtensionRangeOptions.Declaration proto. - ExtensionRangeOptionsDeclarationReservedTag = 5 - // ExtensionRangeOptionsDeclarationRepeatedTag is the tag number of the repeated - // field in the ExtensionRangeOptions.Declaration proto. - ExtensionRangeOptionsDeclarationRepeatedTag = 6 - // ExtensionRangeOptionsFeaturesTag is the tag number of the features field in the - // ExtensionRangeOptions proto. - ExtensionRangeOptionsFeaturesTag = 50 // ReservedRangeStartTag is the tag number of the start index in a reserved // range proto. This field number is the same for both "flavors" of reserved // ranges: DescriptorProto.ReservedRange and EnumDescriptorProto.EnumReservedRange. @@ -180,24 +144,6 @@ const ( // FieldOptionsTag is the tag number of the options element in a field // descriptor proto. FieldOptionsTag = 8 - // FieldOptionsCTypeTag is the number of the ctype field in the - // FieldOptions proto. - FieldOptionsCTypeTag = 1 - // FieldOptionsPackedTag is the number of the packed field in the - // FieldOptions proto. - FieldOptionsPackedTag = 2 - // FieldOptionsLazyTag is the number of the lazy field in the - // FieldOptions proto. - FieldOptionsLazyTag = 5 - // FieldOptionsJSTypeTag is the number of the jstype field in the - // FieldOptions proto. 
- FieldOptionsJSTypeTag = 6 - // FieldOptionsUnverifiedLazyTag is the number of the unverified_lazy - // field in the FieldOptions proto. - FieldOptionsUnverifiedLazyTag = 15 - // FieldOptionsFeaturesTag is the tag number of the features field in the - // FieldOptions proto. - FieldOptionsFeaturesTag = 21 // FieldOneofIndexTag is the tag number of the oneof index element in a // field descriptor proto. FieldOneofIndexTag = 9 @@ -207,15 +153,12 @@ const ( // FieldProto3OptionalTag is the tag number of the proto3_optional element // in a descriptor proto. FieldProto3OptionalTag = 17 - // OneofNameTag is the tag number of the name element in a one-of + // OneOfNameTag is the tag number of the name element in a one-of // descriptor proto. - OneofNameTag = 1 - // OneofOptionsTag is the tag number of the options element in a one-of + OneOfNameTag = 1 + // OneOfOptionsTag is the tag number of the options element in a one-of // descriptor proto. - OneofOptionsTag = 2 - // OneofOptionsFeaturesTag is the tag number of the features field in the - // OneofOptions proto. - OneofOptionsFeaturesTag = 1 + OneOfOptionsTag = 2 // EnumNameTag is the tag number of the name element in an enum descriptor // proto. EnumNameTag = 1 @@ -225,9 +168,6 @@ const ( // EnumOptionsTag is the tag number of the options element in an enum // descriptor proto. EnumOptionsTag = 3 - // EnumOptionsFeaturesTag is the tag number of the features field in the - // EnumOptions proto. - EnumOptionsFeaturesTag = 7 // EnumReservedRangesTag is the tag number of the reserved ranges element in // an enum descriptor proto. EnumReservedRangesTag = 4 @@ -243,9 +183,6 @@ const ( // EnumValOptionsTag is the tag number of the options element in an enum // value descriptor proto. EnumValOptionsTag = 3 - // EnumValOptionsFeaturesTag is the tag number of the features field in the - // EnumValueOptions proto. - EnumValOptionsFeaturesTag = 2 // ServiceNameTag is the tag number of the name element in a service // descriptor proto. ServiceNameTag = 1 @@ -255,9 +192,6 @@ const ( // ServiceOptionsTag is the tag number of the options element in a service // descriptor proto. ServiceOptionsTag = 3 - // ServiceOptionsFeaturesTag is the tag number of the features field in the - // ServiceOptions proto. - ServiceOptionsFeaturesTag = 34 // MethodNameTag is the tag number of the name element in a method // descriptor proto. MethodNameTag = 1 @@ -270,9 +204,6 @@ const ( // MethodOptionsTag is the tag number of the options element in a method // descriptor proto. MethodOptionsTag = 4 - // MethodOptionsFeaturesTag is the tag number of the features field in the - // MethodOptions proto. - MethodOptionsFeaturesTag = 35 // MethodInputStreamTag is the tag number of the input stream flag in a // method descriptor proto. MethodInputStreamTag = 5 @@ -309,28 +240,4 @@ const ( // UninterpretedNameNameTag is the tag number of the name element in an // uninterpreted option name proto. UninterpretedNameNameTag = 1 - - // AnyTypeURLTag is the tag number of the type_url field of the Any proto. - AnyTypeURLTag = 1 - // AnyValueTag is the tag number of the value field of the Any proto. - AnyValueTag = 2 - - // FeatureSetFieldPresenceTag is the tag number of the field_presence field - // in the FeatureSet proto. - FeatureSetFieldPresenceTag = 1 - // FeatureSetEnumTypeTag is the tag number of the enum_type field in the - // FeatureSet proto. 
- FeatureSetEnumTypeTag = 2 - // FeatureSetRepeatedFieldEncodingTag is the tag number of the repeated_field_encoding - // field in the FeatureSet proto. - FeatureSetRepeatedFieldEncodingTag = 3 - // FeatureSetUTF8ValidationTag is the tag number of the utf8_validation field - // in the FeatureSet proto. - FeatureSetUTF8ValidationTag = 4 - // FeatureSetMessageEncodingTag is the tag number of the message_encoding - // field in the FeatureSet proto. - FeatureSetMessageEncodingTag = 5 - // FeatureSetJSONFormatTag is the tag number of the json_format field in - // the FeatureSet proto. - FeatureSetJSONFormatTag = 6 ) diff --git a/vendor/github.com/bufbuild/protocompile/internal/types.go b/vendor/github.com/bufbuild/protocompile/internal/types.go index 04090a82..5e54e4af 100644 --- a/vendor/github.com/bufbuild/protocompile/internal/types.go +++ b/vendor/github.com/bufbuild/protocompile/internal/types.go @@ -1,4 +1,4 @@ -// Copyright 2020-2024 Buf Technologies, Inc. +// Copyright 2020-2022 Buf Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/bufbuild/protocompile/internal/util.go b/vendor/github.com/bufbuild/protocompile/internal/util.go index 569cb3f1..f01513ea 100644 --- a/vendor/github.com/bufbuild/protocompile/internal/util.go +++ b/vendor/github.com/bufbuild/protocompile/internal/util.go @@ -1,4 +1,4 @@ -// Copyright 2020-2024 Buf Technologies, Inc. +// Copyright 2020-2022 Buf Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -18,8 +18,6 @@ import ( "bytes" "unicode" "unicode/utf8" - - "google.golang.org/protobuf/reflect/protoreflect" ) // JSONName returns the default JSON name for a field with the given name. @@ -119,126 +117,3 @@ func WriteEscapedBytes(buf *bytes.Buffer, b []byte) { } } } - -// IsZeroLocation returns true if the given loc is a zero value -// (which is returned from queries that have no result). -func IsZeroLocation(loc protoreflect.SourceLocation) bool { - return loc.Path == nil && - loc.StartLine == 0 && - loc.StartColumn == 0 && - loc.EndLine == 0 && - loc.EndColumn == 0 && - loc.LeadingDetachedComments == nil && - loc.LeadingComments == "" && - loc.TrailingComments == "" && - loc.Next == 0 -} - -// ComputePath computes the source location path for the given descriptor. -// The boolean value indicates whether the result is valid. If the path -// cannot be computed for d, the function returns nil, false. 
-func ComputePath(d protoreflect.Descriptor) (protoreflect.SourcePath, bool) { - _, ok := d.(protoreflect.FileDescriptor) - if ok { - return nil, true - } - var path protoreflect.SourcePath - for { - p := d.Parent() - switch d := d.(type) { - case protoreflect.FileDescriptor: - return reverse(path), true - case protoreflect.MessageDescriptor: - path = append(path, int32(d.Index())) - switch p.(type) { - case protoreflect.FileDescriptor: - path = append(path, FileMessagesTag) - case protoreflect.MessageDescriptor: - path = append(path, MessageNestedMessagesTag) - default: - return nil, false - } - case protoreflect.FieldDescriptor: - path = append(path, int32(d.Index())) - switch p.(type) { - case protoreflect.FileDescriptor: - if d.IsExtension() { - path = append(path, FileExtensionsTag) - } else { - return nil, false - } - case protoreflect.MessageDescriptor: - if d.IsExtension() { - path = append(path, MessageExtensionsTag) - } else { - path = append(path, MessageFieldsTag) - } - default: - return nil, false - } - case protoreflect.OneofDescriptor: - path = append(path, int32(d.Index())) - if _, ok := p.(protoreflect.MessageDescriptor); ok { - path = append(path, MessageOneofsTag) - } else { - return nil, false - } - case protoreflect.EnumDescriptor: - path = append(path, int32(d.Index())) - switch p.(type) { - case protoreflect.FileDescriptor: - path = append(path, FileEnumsTag) - case protoreflect.MessageDescriptor: - path = append(path, MessageEnumsTag) - default: - return nil, false - } - case protoreflect.EnumValueDescriptor: - path = append(path, int32(d.Index())) - if _, ok := p.(protoreflect.EnumDescriptor); ok { - path = append(path, EnumValuesTag) - } else { - return nil, false - } - case protoreflect.ServiceDescriptor: - path = append(path, int32(d.Index())) - if _, ok := p.(protoreflect.FileDescriptor); ok { - path = append(path, FileServicesTag) - } else { - return nil, false - } - case protoreflect.MethodDescriptor: - path = append(path, int32(d.Index())) - if _, ok := p.(protoreflect.ServiceDescriptor); ok { - path = append(path, ServiceMethodsTag) - } else { - return nil, false - } - } - d = p - } -} - -// CanPack returns true if a repeated field of the given kind -// can use packed encoding. -func CanPack(k protoreflect.Kind) bool { - switch k { - case protoreflect.MessageKind, protoreflect.GroupKind, protoreflect.StringKind, protoreflect.BytesKind: - return false - default: - return true - } -} - -func ClonePath(path protoreflect.SourcePath) protoreflect.SourcePath { - clone := make(protoreflect.SourcePath, len(path)) - copy(clone, path) - return clone -} - -func reverse(p protoreflect.SourcePath) protoreflect.SourcePath { - for i, j := 0, len(p)-1; i < j; i, j = i+1, j-1 { - p[i], p[j] = p[j], p[i] - } - return p -} diff --git a/vendor/github.com/bufbuild/protocompile/linker/descriptors.go b/vendor/github.com/bufbuild/protocompile/linker/descriptors.go index cd43dcce..20968936 100644 --- a/vendor/github.com/bufbuild/protocompile/linker/descriptors.go +++ b/vendor/github.com/bufbuild/protocompile/linker/descriptors.go @@ -1,4 +1,4 @@ -// Copyright 2020-2024 Buf Technologies, Inc. +// Copyright 2020-2022 Buf Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -15,130 +15,23 @@ package linker import ( + "bytes" "fmt" "strconv" "strings" "unicode/utf8" "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protodesc" "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" "google.golang.org/protobuf/types/descriptorpb" "google.golang.org/protobuf/types/dynamicpb" "github.com/bufbuild/protocompile/ast" "github.com/bufbuild/protocompile/internal" - "github.com/bufbuild/protocompile/internal/editions" "github.com/bufbuild/protocompile/parser" "github.com/bufbuild/protocompile/protoutil" ) -var ( - // These "noOp*" values are all descriptors. The protoreflect.Descriptor - // interface and its sub-interfaces are all marked with an unexported - // method so that they cannot be implemented outside of the google.golang.org/protobuf - // module. So, to provide implementations from this package, we must embed - // them. If we simply left the embedded interface field nil, then if/when - // new methods are added to the interfaces, it could induce panics in this - // package or users of this module (since trying to invoke one of these new - // methods would end up trying to call a method on a nil interface value). - // - // So instead of leaving the embedded interface fields nil, we embed an actual - // value. While new methods are unlikely to return the correct value (since - // the calls will be delegated to these no-op instances), it is a less - // dangerous latent bug than inducing a nil-dereference panic. - - noOpFile protoreflect.FileDescriptor - noOpMessage protoreflect.MessageDescriptor - noOpOneof protoreflect.OneofDescriptor - noOpField protoreflect.FieldDescriptor - noOpEnum protoreflect.EnumDescriptor - noOpEnumValue protoreflect.EnumValueDescriptor - noOpExtension protoreflect.ExtensionDescriptor - noOpService protoreflect.ServiceDescriptor - noOpMethod protoreflect.MethodDescriptor -) - -var ( - fieldPresenceField = editions.FeatureSetDescriptor.Fields().ByName("field_presence") - repeatedFieldEncodingField = editions.FeatureSetDescriptor.Fields().ByName("repeated_field_encoding") - messageEncodingField = editions.FeatureSetDescriptor.Fields().ByName("message_encoding") - enumTypeField = editions.FeatureSetDescriptor.Fields().ByName("enum_type") - jsonFormatField = editions.FeatureSetDescriptor.Fields().ByName("json_format") -) - -func init() { - noOpFile, _ = protodesc.NewFile( - &descriptorpb.FileDescriptorProto{ - Name: proto.String("no-op.proto"), - Syntax: proto.String("proto2"), - Dependency: []string{"google/protobuf/descriptor.proto"}, - MessageType: []*descriptorpb.DescriptorProto{ - { - Name: proto.String("NoOpMsg"), - Field: []*descriptorpb.FieldDescriptorProto{ - { - Name: proto.String("no_op"), - Type: descriptorpb.FieldDescriptorProto_TYPE_STRING.Enum(), - Label: descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL.Enum(), - Number: proto.Int32(1), - JsonName: proto.String("noOp"), - OneofIndex: proto.Int32(0), - }, - }, - OneofDecl: []*descriptorpb.OneofDescriptorProto{ - { - Name: proto.String("no_op_oneof"), - }, - }, - }, - }, - EnumType: []*descriptorpb.EnumDescriptorProto{ - { - Name: proto.String("NoOpEnum"), - Value: []*descriptorpb.EnumValueDescriptorProto{ - { - Name: proto.String("NO_OP"), - Number: proto.Int32(0), - }, - }, - }, - }, - Extension: []*descriptorpb.FieldDescriptorProto{ - { - Extendee: proto.String(".google.protobuf.FileOptions"), - Name: proto.String("no_op"), - Type: descriptorpb.FieldDescriptorProto_TYPE_STRING.Enum(), - Label: 
descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL.Enum(), - Number: proto.Int32(50000), - }, - }, - Service: []*descriptorpb.ServiceDescriptorProto{ - { - Name: proto.String("NoOpService"), - Method: []*descriptorpb.MethodDescriptorProto{ - { - Name: proto.String("NoOp"), - InputType: proto.String(".NoOpMsg"), - OutputType: proto.String(".NoOpMsg"), - }, - }, - }, - }, - }, - protoregistry.GlobalFiles, - ) - noOpMessage = noOpFile.Messages().Get(0) - noOpOneof = noOpMessage.Oneofs().Get(0) - noOpField = noOpMessage.Fields().Get(0) - noOpEnum = noOpFile.Enums().Get(0) - noOpEnumValue = noOpEnum.Values().Get(0) - noOpExtension = noOpFile.Extensions().Get(0) - noOpService = noOpFile.Services().Get(0) - noOpMethod = noOpService.Methods().Get(0) -} - // This file contains implementations of protoreflect.Descriptor. Note that // this is a hack since those interfaces have a "doNotImplement" tag // interface therein. We do just enough to make dynamicpb happy; constructing @@ -160,6 +53,11 @@ type result struct { // interpreting options. usedImports map[string]struct{} + // A map of descriptor options messages to their pre-serialized bytes (using + // a canonical serialization format based on how protoc renders options to + // bytes). + optionBytes map[proto.Message][]byte + // A map of AST nodes that represent identifiers in ast.FieldReferenceNodes // to their fully-qualified name. The identifiers are for field names in // message literals (in option values) that are extension fields. These names @@ -177,7 +75,6 @@ type result struct { var _ protoreflect.FileDescriptor = (*result)(nil) var _ Result = (*result)(nil) var _ protoutil.DescriptorProtoWrapper = (*result)(nil) -var _ editions.HasEdition = (*result)(nil) func (r *result) RemoveAST() { r.Result = parser.ResultWithoutAST(r.FileDescriptorProto()) @@ -206,26 +103,11 @@ func (r *result) Syntax() protoreflect.Syntax { return protoreflect.Proto2 case "proto3": return protoreflect.Proto3 - case "editions": - return protoreflect.Editions default: return 0 // ??? } } -func (r *result) Edition() int32 { - switch r.Syntax() { - case protoreflect.Proto2: - return int32(descriptorpb.Edition_EDITION_PROTO2) - case protoreflect.Proto3: - return int32(descriptorpb.Edition_EDITION_PROTO3) - case protoreflect.Editions: - return int32(r.FileDescriptorProto().GetEdition()) - default: - return int32(descriptorpb.Edition_EDITION_UNKNOWN) // ??? 
- } -} - func (r *result) Name() protoreflect.Name { return "" } @@ -292,7 +174,7 @@ func computeSourceLocIndex(locs []protoreflect.SourceLocation) map[interface{}]i func asSourceLocations(srcInfoProtos []*descriptorpb.SourceCodeInfo_Location) []protoreflect.SourceLocation { locs := make([]protoreflect.SourceLocation, len(srcInfoProtos)) - prev := map[any]*protoreflect.SourceLocation{} + prev := map[string]*protoreflect.SourceLocation{} for i, loc := range srcInfoProtos { var stLin, stCol, enLin, enCol int if len(loc.Span) == 3 { @@ -311,7 +193,7 @@ func asSourceLocations(srcInfoProtos []*descriptorpb.SourceCodeInfo_Location) [] EndLine: enLin, EndColumn: enCol, } - str := pathKey(loc.Path) + str := pathStr(loc.Path) pr := prev[str] if pr != nil { pr.Next = i @@ -321,6 +203,148 @@ func asSourceLocations(srcInfoProtos []*descriptorpb.SourceCodeInfo_Location) [] return locs } +func pathStr(p protoreflect.SourcePath) string { + var buf bytes.Buffer + for _, v := range p { + fmt.Fprintf(&buf, "%x:", v) + } + return buf.String() +} + +// AddOptionBytes associates the given opts (an options message encoded in the +// binary format) with the given options protobuf message. The protobuf message +// should exist in the hierarchy of this result's FileDescriptorProto. This +// allows the FileDescriptorProto to be marshaled to bytes in a way that +// preserves the way options are defined in source (just as is done by protoc, +// but not possible when only using the generated Go types and standard +// marshaling APIs in the protobuf runtime). +func (r *result) AddOptionBytes(pm proto.Message, opts []byte) { + if r.optionBytes == nil { + r.optionBytes = map[proto.Message][]byte{} + } + r.optionBytes[pm] = append(r.optionBytes[pm], opts...) +} + +func (r *result) CanonicalProto() *descriptorpb.FileDescriptorProto { + origFd := r.FileDescriptorProto() + // make a copy that we can mutate + fd := proto.Clone(origFd).(*descriptorpb.FileDescriptorProto) //nolint:errcheck + + r.storeOptionBytesInFile(fd, origFd) + + return fd +} + +func (r *result) storeOptionBytesInFile(fd, origFd *descriptorpb.FileDescriptorProto) { + if fd.Options != nil { + fd.Options.Reset() + fd.Options.ProtoReflect().SetUnknown(r.optionBytes[origFd.Options]) + } + + for i, md := range fd.MessageType { + origMd := origFd.MessageType[i] + r.storeOptionBytesInMessage(md, origMd) + } + + for i, ed := range fd.EnumType { + origEd := origFd.EnumType[i] + r.storeOptionBytesInEnum(ed, origEd) + } + + for i, exd := range fd.Extension { + origExd := origFd.Extension[i] + r.storeOptionBytesInField(exd, origExd) + } + + for i, sd := range fd.Service { + origSd := origFd.Service[i] + if sd.Options != nil { + sd.Options.Reset() + sd.Options.ProtoReflect().SetUnknown(r.optionBytes[origSd.Options]) + } + + for j, mtd := range sd.Method { + origMtd := origSd.Method[j] + if mtd.Options != nil { + mtd.Options.Reset() + mtd.Options.ProtoReflect().SetUnknown(r.optionBytes[origMtd.Options]) + } + } + } +} + +func (r *result) storeOptionBytesInMessage(md, origMd *descriptorpb.DescriptorProto) { + if md.GetOptions().GetMapEntry() { + // Map entry messages are synthesized. They won't have any option bytes + // since they don't actually appear in the source and thus have any option + // declarations in the source. 
+ return + } + + if md.Options != nil { + md.Options.Reset() + md.Options.ProtoReflect().SetUnknown(r.optionBytes[origMd.Options]) + } + + for i, fld := range md.Field { + origFld := origMd.Field[i] + r.storeOptionBytesInField(fld, origFld) + } + + for i, ood := range md.OneofDecl { + origOod := origMd.OneofDecl[i] + if ood.Options != nil { + ood.Options.Reset() + ood.Options.ProtoReflect().SetUnknown(r.optionBytes[origOod.Options]) + } + } + + for i, exr := range md.ExtensionRange { + origExr := origMd.ExtensionRange[i] + if exr.Options != nil { + exr.Options.Reset() + exr.Options.ProtoReflect().SetUnknown(r.optionBytes[origExr.Options]) + } + } + + for i, nmd := range md.NestedType { + origNmd := origMd.NestedType[i] + r.storeOptionBytesInMessage(nmd, origNmd) + } + + for i, ed := range md.EnumType { + origEd := origMd.EnumType[i] + r.storeOptionBytesInEnum(ed, origEd) + } + + for i, exd := range md.Extension { + origExd := origMd.Extension[i] + r.storeOptionBytesInField(exd, origExd) + } +} + +func (r *result) storeOptionBytesInEnum(ed, origEd *descriptorpb.EnumDescriptorProto) { + if ed.Options != nil { + ed.Options.Reset() + ed.Options.ProtoReflect().SetUnknown(r.optionBytes[origEd.Options]) + } + + for i, evd := range ed.Value { + origEvd := origEd.Value[i] + if evd.Options != nil { + evd.Options.Reset() + evd.Options.ProtoReflect().SetUnknown(r.optionBytes[origEvd.Options]) + } + } +} + +func (r *result) storeOptionBytesInField(fld, origFld *descriptorpb.FieldDescriptorProto) { + if fld.Options != nil { + fld.Options.Reset() + fld.Options.ProtoReflect().SetUnknown(r.optionBytes[origFld.Options]) + } +} + type fileImports struct { protoreflect.FileImports files []protoreflect.FileImport @@ -331,7 +355,7 @@ func (r *result) createImports() fileImports { imps := make([]protoreflect.FileImport, len(fd.Dependency)) for i, dep := range fd.Dependency { desc := r.deps.FindFileByPath(dep) - imps[i] = protoreflect.FileImport{FileDescriptor: unwrap(desc)} + imps[i] = protoreflect.FileImport{FileDescriptor: desc} } for _, publicIndex := range fd.PublicDependency { imps[int(publicIndex)].IsPublic = true @@ -342,20 +366,6 @@ func (r *result) createImports() fileImports { return fileImports{files: imps} } -func unwrap(descriptor protoreflect.FileDescriptor) protoreflect.FileDescriptor { - wrapped, ok := descriptor.(interface { - Unwrap() protoreflect.FileDescriptor - }) - if !ok { - return descriptor - } - unwrapped := wrapped.Unwrap() - if unwrapped == nil { - return descriptor // shouldn't ever happen - } - return unwrapped -} - func (f *fileImports) Len() int { return len(f.files) } @@ -391,22 +401,111 @@ func (s *srcLocs) ByDescriptor(d protoreflect.Descriptor) protoreflect.SourceLoc if d.ParentFile() != s.file { return protoreflect.SourceLocation{} } - path, ok := internal.ComputePath(d) + path, ok := computePath(d) if !ok { return protoreflect.SourceLocation{} } return s.ByPath(path) } +func computePath(d protoreflect.Descriptor) (protoreflect.SourcePath, bool) { + _, ok := d.(protoreflect.FileDescriptor) + if ok { + return nil, true + } + var path protoreflect.SourcePath + for { + p := d.Parent() + switch d := d.(type) { + case protoreflect.FileDescriptor: + return reverse(path), true + case protoreflect.MessageDescriptor: + path = append(path, int32(d.Index())) + switch p.(type) { + case protoreflect.FileDescriptor: + path = append(path, internal.FileMessagesTag) + case protoreflect.MessageDescriptor: + path = append(path, internal.MessageNestedMessagesTag) + default: + return nil, false + 
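The AddOptionBytes/CanonicalProto machinery restored here keeps a pre-serialized copy of each options message and, on a cloned FileDescriptorProto, clears the typed options and re-attaches those bytes as unknown fields, so the clone marshals options byte-for-byte the way protoc renders them. A small sketch of that unknown-fields trick using only the protobuf runtime (the file name and java_package value are invented for illustration):

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	// Pretend these are the canonical option bytes recorded at interpretation
	// time (here just a marshaled FileOptions with java_package set).
	opts := &descriptorpb.FileOptions{JavaPackage: proto.String("com.example")}
	canonical, err := proto.Marshal(opts)
	if err != nil {
		panic(err)
	}

	fd := &descriptorpb.FileDescriptorProto{
		Name:    proto.String("demo.proto"),
		Options: &descriptorpb.FileOptions{},
	}
	// Same move as storeOptionBytesInFile: wipe the typed fields and carry the
	// recorded bytes as unknown fields so they round-trip verbatim.
	fd.Options.Reset()
	fd.Options.ProtoReflect().SetUnknown(canonical)

	out, err := proto.Marshal(fd)
	if err != nil {
		panic(err)
	}
	fmt.Printf("file descriptor marshaled to %d bytes, options preserved as unknown fields\n", len(out))
}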
} + case protoreflect.FieldDescriptor: + path = append(path, int32(d.Index())) + switch p.(type) { + case protoreflect.FileDescriptor: + if d.IsExtension() { + path = append(path, internal.FileExtensionsTag) + } else { + return nil, false + } + case protoreflect.MessageDescriptor: + if d.IsExtension() { + path = append(path, internal.MessageExtensionsTag) + } else { + path = append(path, internal.MessageFieldsTag) + } + default: + return nil, false + } + case protoreflect.OneofDescriptor: + path = append(path, int32(d.Index())) + if _, ok := p.(protoreflect.MessageDescriptor); ok { + path = append(path, internal.MessageOneOfsTag) + } else { + return nil, false + } + case protoreflect.EnumDescriptor: + path = append(path, int32(d.Index())) + switch p.(type) { + case protoreflect.FileDescriptor: + path = append(path, internal.FileEnumsTag) + case protoreflect.MessageDescriptor: + path = append(path, internal.MessageEnumsTag) + default: + return nil, false + } + case protoreflect.EnumValueDescriptor: + path = append(path, int32(d.Index())) + if _, ok := p.(protoreflect.EnumDescriptor); ok { + path = append(path, internal.EnumValuesTag) + } else { + return nil, false + } + case protoreflect.ServiceDescriptor: + path = append(path, int32(d.Index())) + if _, ok := p.(protoreflect.FileDescriptor); ok { + path = append(path, internal.FileServicesTag) + } else { + return nil, false + } + case protoreflect.MethodDescriptor: + path = append(path, int32(d.Index())) + if _, ok := p.(protoreflect.ServiceDescriptor); ok { + path = append(path, internal.ServiceMethodsTag) + } else { + return nil, false + } + } + d = p + } +} + +func reverse(p protoreflect.SourcePath) protoreflect.SourcePath { + for i, j := 0, len(p)-1; i < j; i, j = i+1, j-1 { + p[i], p[j] = p[j], p[i] + } + return p +} + type msgDescriptors struct { protoreflect.MessageDescriptors - msgs []msgDescriptor + msgs []*msgDescriptor } -func (r *result) createMessages(prefix string, parent protoreflect.Descriptor, msgProtos []*descriptorpb.DescriptorProto, pool *allocPool) msgDescriptors { - msgs := pool.getMessages(len(msgProtos)) +func (r *result) createMessages(prefix string, parent protoreflect.Descriptor, msgProtos []*descriptorpb.DescriptorProto) msgDescriptors { + msgs := make([]*msgDescriptor, len(msgProtos)) for i, msgProto := range msgProtos { - r.createMessageDescriptor(&msgs[i], msgProto, parent, i, prefix+msgProto.GetName(), pool) + msgs[i] = r.createMessageDescriptor(msgProto, parent, i, prefix+msgProto.GetName()) } return msgDescriptors{msgs: msgs} } @@ -416,12 +515,11 @@ func (m *msgDescriptors) Len() int { } func (m *msgDescriptors) Get(i int) protoreflect.MessageDescriptor { - return &m.msgs[i] + return m.msgs[i] } func (m *msgDescriptors) ByName(s protoreflect.Name) protoreflect.MessageDescriptor { - for i := range m.msgs { - msg := &m.msgs[i] + for _, msg := range m.msgs { if msg.Name() == s { return msg } @@ -451,27 +549,23 @@ type msgDescriptor struct { var _ protoreflect.MessageDescriptor = (*msgDescriptor)(nil) var _ protoutil.DescriptorProtoWrapper = (*msgDescriptor)(nil) -func (r *result) createMessageDescriptor(ret *msgDescriptor, md *descriptorpb.DescriptorProto, parent protoreflect.Descriptor, index int, fqn string, pool *allocPool) { +func (r *result) createMessageDescriptor(md *descriptorpb.DescriptorProto, parent protoreflect.Descriptor, index int, fqn string) *msgDescriptor { + ret := &msgDescriptor{file: r, parent: parent, index: index, proto: md, fqn: fqn} r.descriptors[fqn] = ret - ret.MessageDescriptor = 
noOpMessage - ret.file = r - ret.parent = parent - ret.index = index - ret.proto = md - ret.fqn = fqn - prefix := fqn + "." // NB: We MUST create fields before oneofs so that we can populate the // set of fields that belong to the oneof - ret.fields = r.createFields(prefix, ret, md.Field, pool) - ret.oneofs = r.createOneofs(prefix, ret, md.OneofDecl, pool) - ret.nestedMessages = r.createMessages(prefix, ret, md.NestedType, pool) - ret.nestedEnums = r.createEnums(prefix, ret, md.EnumType, pool) - ret.nestedExtensions = r.createExtensions(prefix, ret, md.Extension, pool) + ret.fields = r.createFields(prefix, ret, md.Field) + ret.oneofs = r.createOneOfs(prefix, ret, md.OneofDecl) + ret.nestedMessages = r.createMessages(prefix, ret, md.NestedType) + ret.nestedEnums = r.createEnums(prefix, ret, md.EnumType) + ret.nestedExtensions = r.createExtensions(prefix, ret, md.Extension) ret.extRanges = createFieldRanges(md.ExtensionRange) ret.rsvdRanges = createFieldRanges(md.ReservedRange) ret.rsvdNames = names{s: md.ReservedName} + + return ret } func (m *msgDescriptor) MessageDescriptorProto() *descriptorpb.DescriptorProto { @@ -648,13 +742,13 @@ func (f fieldRanges) Has(n protoreflect.FieldNumber) bool { type enumDescriptors struct { protoreflect.EnumDescriptors - enums []enumDescriptor + enums []*enumDescriptor } -func (r *result) createEnums(prefix string, parent protoreflect.Descriptor, enumProtos []*descriptorpb.EnumDescriptorProto, pool *allocPool) enumDescriptors { - enums := pool.getEnums(len(enumProtos)) +func (r *result) createEnums(prefix string, parent protoreflect.Descriptor, enumProtos []*descriptorpb.EnumDescriptorProto) enumDescriptors { + enums := make([]*enumDescriptor, len(enumProtos)) for i, enumProto := range enumProtos { - r.createEnumDescriptor(&enums[i], enumProto, parent, i, prefix+enumProto.GetName(), pool) + enums[i] = r.createEnumDescriptor(enumProto, parent, i, prefix+enumProto.GetName()) } return enumDescriptors{enums: enums} } @@ -664,14 +758,13 @@ func (e *enumDescriptors) Len() int { } func (e *enumDescriptors) Get(i int) protoreflect.EnumDescriptor { - return &e.enums[i] + return e.enums[i] } func (e *enumDescriptors) ByName(s protoreflect.Name) protoreflect.EnumDescriptor { - for i := range e.enums { - enum := &e.enums[i] - if enum.Name() == s { - return enum + for _, en := range e.enums { + if en.Name() == s { + return en } } return nil @@ -694,24 +787,19 @@ type enumDescriptor struct { var _ protoreflect.EnumDescriptor = (*enumDescriptor)(nil) var _ protoutil.DescriptorProtoWrapper = (*enumDescriptor)(nil) -func (r *result) createEnumDescriptor(ret *enumDescriptor, ed *descriptorpb.EnumDescriptorProto, parent protoreflect.Descriptor, index int, fqn string, pool *allocPool) { +func (r *result) createEnumDescriptor(ed *descriptorpb.EnumDescriptorProto, parent protoreflect.Descriptor, index int, fqn string) *enumDescriptor { + ret := &enumDescriptor{file: r, parent: parent, index: index, proto: ed, fqn: fqn} r.descriptors[fqn] = ret - ret.EnumDescriptor = noOpEnum - ret.file = r - ret.parent = parent - ret.index = index - ret.proto = ed - ret.fqn = fqn - - // Unlike all other elements, the fully-qualified names of enum values - // are NOT scoped to their parent element (the enum), but rather to + // Unlike all other elements, the fully-qualified name of enum values + // is NOT scoped to their parent element (the enum), but rather to // the enum's parent element. This follows C++ scoping rules for // enum values. 
prefix := strings.TrimSuffix(fqn, ed.GetName()) - ret.values = r.createEnumValues(prefix, ret, ed.Value, pool) + ret.values = r.createEnumValues(prefix, ret, ed.Value) ret.rsvdRanges = createEnumRanges(ed.ReservedRange) ret.rsvdNames = names{s: ed.ReservedName} + return ret } func (e *enumDescriptor) EnumDescriptorProto() *descriptorpb.EnumDescriptorProto { @@ -766,11 +854,6 @@ func (e *enumDescriptor) ReservedRanges() protoreflect.EnumRanges { return e.rsvdRanges } -func (e *enumDescriptor) IsClosed() bool { - enumType := resolveFeature(e, enumTypeField) - return descriptorpb.FeatureSet_EnumType(enumType.Enum()) == descriptorpb.FeatureSet_CLOSED -} - type enumRanges struct { protoreflect.EnumRanges ranges [][2]protoreflect.EnumNumber @@ -806,13 +889,13 @@ func (e enumRanges) Has(n protoreflect.EnumNumber) bool { type enValDescriptors struct { protoreflect.EnumValueDescriptors - vals []enValDescriptor + vals []*enValDescriptor } -func (r *result) createEnumValues(prefix string, parent *enumDescriptor, enValProtos []*descriptorpb.EnumValueDescriptorProto, pool *allocPool) enValDescriptors { - vals := pool.getEnumValues(len(enValProtos)) +func (r *result) createEnumValues(prefix string, parent *enumDescriptor, enValProtos []*descriptorpb.EnumValueDescriptorProto) enValDescriptors { + vals := make([]*enValDescriptor, len(enValProtos)) for i, enValProto := range enValProtos { - r.createEnumValueDescriptor(&vals[i], enValProto, parent, i, prefix+enValProto.GetName()) + vals[i] = r.createEnumValueDescriptor(enValProto, parent, i, prefix+enValProto.GetName()) } return enValDescriptors{vals: vals} } @@ -822,12 +905,11 @@ func (e *enValDescriptors) Len() int { } func (e *enValDescriptors) Get(i int) protoreflect.EnumValueDescriptor { - return &e.vals[i] + return e.vals[i] } func (e *enValDescriptors) ByName(s protoreflect.Name) protoreflect.EnumValueDescriptor { - for i := range e.vals { - val := &e.vals[i] + for _, val := range e.vals { if val.Name() == s { return val } @@ -836,8 +918,7 @@ func (e *enValDescriptors) ByName(s protoreflect.Name) protoreflect.EnumValueDes } func (e *enValDescriptors) ByNumber(n protoreflect.EnumNumber) protoreflect.EnumValueDescriptor { - for i := range e.vals { - val := &e.vals[i] + for _, val := range e.vals { if val.Number() == n { return val } @@ -857,14 +938,10 @@ type enValDescriptor struct { var _ protoreflect.EnumValueDescriptor = (*enValDescriptor)(nil) var _ protoutil.DescriptorProtoWrapper = (*enValDescriptor)(nil) -func (r *result) createEnumValueDescriptor(ret *enValDescriptor, ed *descriptorpb.EnumValueDescriptorProto, parent *enumDescriptor, index int, fqn string) { +func (r *result) createEnumValueDescriptor(ed *descriptorpb.EnumValueDescriptorProto, parent *enumDescriptor, index int, fqn string) *enValDescriptor { + ret := &enValDescriptor{file: r, parent: parent, index: index, proto: ed, fqn: fqn} r.descriptors[fqn] = ret - ret.EnumValueDescriptor = noOpEnumValue - ret.file = r - ret.parent = parent - ret.index = index - ret.proto = ed - ret.fqn = fqn + return ret } func (e *enValDescriptor) EnumValueDescriptorProto() *descriptorpb.EnumValueDescriptorProto { @@ -913,13 +990,13 @@ func (e *enValDescriptor) Number() protoreflect.EnumNumber { type extDescriptors struct { protoreflect.ExtensionDescriptors - exts []extTypeDescriptor + exts []*extTypeDescriptor } -func (r *result) createExtensions(prefix string, parent protoreflect.Descriptor, extProtos []*descriptorpb.FieldDescriptorProto, pool *allocPool) extDescriptors { - exts := 
pool.getExtensions(len(extProtos)) +func (r *result) createExtensions(prefix string, parent protoreflect.Descriptor, extProtos []*descriptorpb.FieldDescriptorProto) extDescriptors { + exts := make([]*extTypeDescriptor, len(extProtos)) for i, extProto := range extProtos { - r.createExtTypeDescriptor(&exts[i], extProto, parent, i, prefix+extProto.GetName()) + exts[i] = r.createExtTypeDescriptor(extProto, parent, i, prefix+extProto.GetName()) } return extDescriptors{exts: exts} } @@ -929,12 +1006,11 @@ func (e *extDescriptors) Len() int { } func (e *extDescriptors) Get(i int) protoreflect.ExtensionDescriptor { - return &e.exts[i] + return e.exts[i] } func (e *extDescriptors) ByName(s protoreflect.Name) protoreflect.ExtensionDescriptor { - for i := range e.exts { - ext := &e.exts[i] + for _, ext := range e.exts { if ext.Name() == s { return ext } @@ -944,15 +1020,15 @@ func (e *extDescriptors) ByName(s protoreflect.Name) protoreflect.ExtensionDescr type extTypeDescriptor struct { protoreflect.ExtensionTypeDescriptor - field fldDescriptor + field *fldDescriptor } var _ protoutil.DescriptorProtoWrapper = &extTypeDescriptor{} -func (r *result) createExtTypeDescriptor(ret *extTypeDescriptor, fd *descriptorpb.FieldDescriptorProto, parent protoreflect.Descriptor, index int, fqn string) { +func (r *result) createExtTypeDescriptor(fd *descriptorpb.FieldDescriptorProto, parent protoreflect.Descriptor, index int, fqn string) *extTypeDescriptor { + ret := &fldDescriptor{file: r, parent: parent, index: index, proto: fd, fqn: fqn} r.descriptors[fqn] = ret - ret.field = fldDescriptor{FieldDescriptor: noOpExtension, file: r, parent: parent, index: index, proto: fd, fqn: fqn} - ret.ExtensionTypeDescriptor = dynamicpb.NewExtensionType(&ret.field).TypeDescriptor() + return &extTypeDescriptor{ExtensionTypeDescriptor: dynamicpb.NewExtensionType(ret).TypeDescriptor(), field: ret} } func (e *extTypeDescriptor) FieldDescriptorProto() *descriptorpb.FieldDescriptorProto { @@ -965,23 +1041,15 @@ func (e *extTypeDescriptor) AsProto() proto.Message { type fldDescriptors struct { protoreflect.FieldDescriptors - // We use pointers here, instead of flattened slice, because oneofs - // also have fields, but need to point to values in the parent - // message's fields. Even though they are pointers, in the containing - // message, we always allocate a flattened slice and then point into - // that, so we're still doing fewer allocations (2 per set of fields - // instead of 1 per each field). 
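createExtTypeDescriptor hands a freshly built field descriptor to dynamicpb to obtain an ExtensionTypeDescriptor, and the resolvers later in this file use dynamicpb.NewMessageType the same way. A sketch of that wrap-a-descriptor-in-a-dynamic-type pattern against a stock descriptor (nothing below comes from the vendored code itself):

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/prototext"
	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/types/descriptorpb"
	"google.golang.org/protobuf/types/dynamicpb"
)

func main() {
	// Wrap an existing message descriptor in a dynamic type, then build and
	// populate an instance of it purely through reflection.
	md := (&descriptorpb.FileDescriptorProto{}).ProtoReflect().Descriptor()
	mt := dynamicpb.NewMessageType(md)

	msg := mt.New()
	msg.Set(md.Fields().ByName("name"), protoreflect.ValueOfString("demo.proto"))
	fmt.Println(prototext.Format(msg.Interface())) // roughly: name:"demo.proto"
}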
fields []*fldDescriptor } -func (r *result) createFields(prefix string, parent *msgDescriptor, fldProtos []*descriptorpb.FieldDescriptorProto, pool *allocPool) fldDescriptors { - fields := pool.getFields(len(fldProtos)) - fieldPtrs := make([]*fldDescriptor, len(fldProtos)) +func (r *result) createFields(prefix string, parent *msgDescriptor, fldProtos []*descriptorpb.FieldDescriptorProto) fldDescriptors { + fields := make([]*fldDescriptor, len(fldProtos)) for i, fldProto := range fldProtos { - r.createFieldDescriptor(&fields[i], fldProto, parent, i, prefix+fldProto.GetName()) - fieldPtrs[i] = &fields[i] + fields[i] = r.createFieldDescriptor(fldProto, parent, i, prefix+fldProto.GetName()) } - return fldDescriptors{fields: fieldPtrs} + return fldDescriptors{fields: fields} } func (f *fldDescriptors) Len() int { @@ -1011,17 +1079,7 @@ func (f *fldDescriptors) ByJSONName(s string) protoreflect.FieldDescriptor { } func (f *fldDescriptors) ByTextName(s string) protoreflect.FieldDescriptor { - fld := f.ByName(protoreflect.Name(s)) - if fld != nil { - return fld - } - // Groups use type name instead, so we fallback to slow search - for _, fld := range f.fields { - if fld.TextName() == s { - return fld - } - } - return nil + return f.ByName(protoreflect.Name(s)) } func (f *fldDescriptors) ByNumber(n protoreflect.FieldNumber) protoreflect.FieldDescriptor { @@ -1050,14 +1108,10 @@ type fldDescriptor struct { var _ protoreflect.FieldDescriptor = (*fldDescriptor)(nil) var _ protoutil.DescriptorProtoWrapper = (*fldDescriptor)(nil) -func (r *result) createFieldDescriptor(ret *fldDescriptor, fd *descriptorpb.FieldDescriptorProto, parent *msgDescriptor, index int, fqn string) { +func (r *result) createFieldDescriptor(fd *descriptorpb.FieldDescriptorProto, parent *msgDescriptor, index int, fqn string) *fldDescriptor { + ret := &fldDescriptor{file: r, parent: parent, index: index, proto: fd, fqn: fqn} r.descriptors[fqn] = ret - ret.FieldDescriptor = noOpField - ret.file = r - ret.parent = parent - ret.index = index - ret.proto = fd - ret.fqn = fqn + return ret } func (f *fldDescriptor) FieldDescriptorProto() *descriptorpb.FieldDescriptorProto { @@ -1111,14 +1165,6 @@ func (f *fldDescriptor) Cardinality() protoreflect.Cardinality { case descriptorpb.FieldDescriptorProto_LABEL_REQUIRED: return protoreflect.Required case descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL: - if f.Syntax() == protoreflect.Editions { - // Editions does not use label to indicate required. It instead - // uses a feature, and label is always optional. - fieldPresence := descriptorpb.FeatureSet_FieldPresence(resolveFeature(f, fieldPresenceField).Enum()) - if fieldPresence == descriptorpb.FeatureSet_LEGACY_REQUIRED { - return protoreflect.Required - } - } return protoreflect.Optional default: return 0 @@ -1126,15 +1172,6 @@ func (f *fldDescriptor) Cardinality() protoreflect.Cardinality { } func (f *fldDescriptor) Kind() protoreflect.Kind { - if f.proto.GetType() == descriptorpb.FieldDescriptorProto_TYPE_MESSAGE && f.Syntax() == protoreflect.Editions && - !f.IsMap() && !f.parentIsMap() { - // In editions, "group encoding" (aka "delimited encoding") is toggled - // via a feature. So we report group kind when that feature is enabled. 
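The field logic being restored in this hunk drops the editions feature lookups and falls back to syntax-based rules; presence, for example, again depends only on proto2 vs proto3, message/group kinds, extensions, and oneof membership. A quick reflective check of those rules against stock descriptors (this assumes timestamppb is available alongside descriptorpb in the vendor tree):

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/descriptorpb"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// proto2: an optional scalar field tracks explicit presence.
	fileDesc := (&descriptorpb.FileDescriptorProto{}).ProtoReflect().Descriptor()
	fmt.Println(fileDesc.Fields().ByName("name").HasPresence()) // true

	// proto3: a plain scalar field (not optional, not in a oneof) does not.
	tsDesc := (&timestamppb.Timestamp{}).ProtoReflect().Descriptor()
	fmt.Println(tsDesc.Fields().ByName("seconds").HasPresence()) // false
}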
- messageEncoding := resolveFeature(f, messageEncodingField) - if descriptorpb.FeatureSet_MessageEncoding(messageEncoding.Enum()) == descriptorpb.FeatureSet_DELIMITED { - return protoreflect.GroupKind - } - } return protoreflect.Kind(f.proto.GetType()) } @@ -1153,32 +1190,17 @@ func (f *fldDescriptor) TextName() string { if f.IsExtension() { return fmt.Sprintf("[%s]", f.FullName()) } - if f.looksLikeGroup() { - // groups use the type name - return string(protoreflect.FullName(f.proto.GetTypeName()).Name()) - } return string(f.Name()) } -func (f *fldDescriptor) looksLikeGroup() bool { - // It looks like a group if it uses group/delimited encoding (checked via f.Kind) - // and the message type is a sibling whose name is a mixed-case version of the field name. - return f.Kind() == protoreflect.GroupKind && - f.Message().FullName().Parent() == f.FullName().Parent() && - string(f.Name()) == strings.ToLower(string(f.Message().Name())) -} - func (f *fldDescriptor) HasPresence() bool { if f.proto.GetLabel() == descriptorpb.FieldDescriptorProto_LABEL_REPEATED { return false } - if f.IsExtension() || + return f.IsExtension() || + f.Syntax() == protoreflect.Proto2 || f.Kind() == protoreflect.MessageKind || f.Kind() == protoreflect.GroupKind || - f.proto.OneofIndex != nil { - return true - } - fieldPresence := descriptorpb.FeatureSet_FieldPresence(resolveFeature(f, fieldPresenceField).Enum()) - return fieldPresence == descriptorpb.FeatureSet_EXPLICIT || fieldPresence == descriptorpb.FeatureSet_LEGACY_REQUIRED + f.proto.OneofIndex != nil } func (f *fldDescriptor) IsExtension() bool { @@ -1205,16 +1227,30 @@ func (f *fldDescriptor) IsWeak() bool { } func (f *fldDescriptor) IsPacked() bool { - if f.Cardinality() != protoreflect.Repeated || !internal.CanPack(f.Kind()) { - return false - } opts := f.proto.GetOptions() + if opts.GetPacked() { + return true + } if opts != nil && opts.Packed != nil { - // packed option is set explicitly - return *opts.Packed + // explicitly not packed + return false + } + + // proto3 defaults to packed for repeated scalar numeric fields + if f.file.Syntax() != protoreflect.Proto3 { + return false + } + if f.proto.GetLabel() != descriptorpb.FieldDescriptorProto_LABEL_REPEATED { + return false + } + switch f.proto.GetType() { + case descriptorpb.FieldDescriptorProto_TYPE_GROUP, descriptorpb.FieldDescriptorProto_TYPE_MESSAGE, + descriptorpb.FieldDescriptorProto_TYPE_BYTES, descriptorpb.FieldDescriptorProto_TYPE_STRING: + return false + default: + // all others can be packed + return true } - fieldEncoding := resolveFeature(f, repeatedFieldEncodingField) - return descriptorpb.FeatureSet_RepeatedFieldEncoding(fieldEncoding.Enum()) == descriptorpb.FeatureSet_PACKED } func (f *fldDescriptor) IsList() bool { @@ -1241,11 +1277,6 @@ func (f *fldDescriptor) isMapEntry() bool { return f.Message().IsMapEntry() } -func (f *fldDescriptor) parentIsMap() bool { - parent, ok := f.parent.(protoreflect.MessageDescriptor) - return ok && parent.IsMapEntry() -} - func (f *fldDescriptor) MapKey() protoreflect.FieldDescriptor { if !f.IsMap() { return nil @@ -1512,13 +1543,13 @@ func (f *fldDescriptor) Message() protoreflect.MessageDescriptor { type oneofDescriptors struct { protoreflect.OneofDescriptors - oneofs []oneofDescriptor + oneofs []*oneofDescriptor } -func (r *result) createOneofs(prefix string, parent *msgDescriptor, ooProtos []*descriptorpb.OneofDescriptorProto, pool *allocPool) oneofDescriptors { - oos := pool.getOneofs(len(ooProtos)) +func (r *result) createOneOfs(prefix string, parent 
*msgDescriptor, ooProtos []*descriptorpb.OneofDescriptorProto) oneofDescriptors { + oos := make([]*oneofDescriptor, len(ooProtos)) for i, fldProto := range ooProtos { - r.createOneofDescriptor(&oos[i], fldProto, parent, i, prefix+fldProto.GetName()) + oos[i] = r.createOneOfDescriptor(fldProto, parent, i, prefix+fldProto.GetName()) } return oneofDescriptors{oneofs: oos} } @@ -1528,12 +1559,11 @@ func (o *oneofDescriptors) Len() int { } func (o *oneofDescriptors) Get(i int) protoreflect.OneofDescriptor { - return &o.oneofs[i] + return o.oneofs[i] } func (o *oneofDescriptors) ByName(s protoreflect.Name) protoreflect.OneofDescriptor { - for i := range o.oneofs { - oo := &o.oneofs[i] + for _, oo := range o.oneofs { if oo.Name() == s { return oo } @@ -1555,14 +1585,9 @@ type oneofDescriptor struct { var _ protoreflect.OneofDescriptor = (*oneofDescriptor)(nil) var _ protoutil.DescriptorProtoWrapper = (*oneofDescriptor)(nil) -func (r *result) createOneofDescriptor(ret *oneofDescriptor, ood *descriptorpb.OneofDescriptorProto, parent *msgDescriptor, index int, fqn string) { +func (r *result) createOneOfDescriptor(ood *descriptorpb.OneofDescriptorProto, parent *msgDescriptor, index int, fqn string) *oneofDescriptor { + ret := &oneofDescriptor{file: r, parent: parent, index: index, proto: ood, fqn: fqn} r.descriptors[fqn] = ret - ret.OneofDescriptor = noOpOneof - ret.file = r - ret.parent = parent - ret.index = index - ret.proto = ood - ret.fqn = fqn var fields []*fldDescriptor for _, fld := range parent.fields.fields { @@ -1571,9 +1596,11 @@ func (r *result) createOneofDescriptor(ret *oneofDescriptor, ood *descriptorpb.O } } ret.fields = fldDescriptors{fields: fields} + + return ret } -func (o *oneofDescriptor) OneofDescriptorProto() *descriptorpb.OneofDescriptorProto { +func (o *oneofDescriptor) OneOfDescriptorProto() *descriptorpb.OneofDescriptorProto { return o.proto } @@ -1628,13 +1655,13 @@ func (o *oneofDescriptor) Fields() protoreflect.FieldDescriptors { type svcDescriptors struct { protoreflect.ServiceDescriptors - svcs []svcDescriptor + svcs []*svcDescriptor } -func (r *result) createServices(prefix string, svcProtos []*descriptorpb.ServiceDescriptorProto, pool *allocPool) svcDescriptors { - svcs := pool.getServices(len(svcProtos)) +func (r *result) createServices(prefix string, svcProtos []*descriptorpb.ServiceDescriptorProto) svcDescriptors { + svcs := make([]*svcDescriptor, len(svcProtos)) for i, svcProto := range svcProtos { - r.createServiceDescriptor(&svcs[i], svcProto, i, prefix+svcProto.GetName(), pool) + svcs[i] = r.createServiceDescriptor(svcProto, i, prefix+svcProto.GetName()) } return svcDescriptors{svcs: svcs} } @@ -1644,12 +1671,11 @@ func (s *svcDescriptors) Len() int { } func (s *svcDescriptors) Get(i int) protoreflect.ServiceDescriptor { - return &s.svcs[i] + return s.svcs[i] } func (s *svcDescriptors) ByName(n protoreflect.Name) protoreflect.ServiceDescriptor { - for i := range s.svcs { - svc := &s.svcs[i] + for _, svc := range s.svcs { if svc.Name() == n { return svc } @@ -1670,16 +1696,14 @@ type svcDescriptor struct { var _ protoreflect.ServiceDescriptor = (*svcDescriptor)(nil) var _ protoutil.DescriptorProtoWrapper = (*svcDescriptor)(nil) -func (r *result) createServiceDescriptor(ret *svcDescriptor, sd *descriptorpb.ServiceDescriptorProto, index int, fqn string, pool *allocPool) { +func (r *result) createServiceDescriptor(sd *descriptorpb.ServiceDescriptorProto, index int, fqn string) *svcDescriptor { + ret := &svcDescriptor{file: r, index: index, proto: sd, fqn: fqn} 
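The IsPacked rewrite a bit further up reinstates the pre-editions behavior: an explicit packed option wins, and otherwise only proto3 repeated scalar numeric fields default to packed, since length-delimited kinds can never be packed. A sketch of that kind-based rule next to what reflection reports for two fields of descriptor.proto:

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/types/descriptorpb"
)

// canPack mirrors the rule in the patch: every kind may be packed except the
// length-delimited ones (strings, bytes, messages, groups).
func canPack(k protoreflect.Kind) bool {
	switch k {
	case protoreflect.MessageKind, protoreflect.GroupKind,
		protoreflect.StringKind, protoreflect.BytesKind:
		return false
	default:
		return true
	}
}

func main() {
	// SourceCodeInfo.Location.path is "repeated int32 path = 1 [packed = true]"
	// in descriptor.proto, so reflection reports it as packed.
	loc := (&descriptorpb.SourceCodeInfo_Location{}).ProtoReflect().Descriptor()
	pathFld := loc.Fields().ByName("path")
	fmt.Println(pathFld.IsPacked(), canPack(pathFld.Kind())) // true true

	// FileDescriptorProto.dependency is "repeated string": never packable.
	fileDesc := (&descriptorpb.FileDescriptorProto{}).ProtoReflect().Descriptor()
	depFld := fileDesc.Fields().ByName("dependency")
	fmt.Println(depFld.IsPacked(), canPack(depFld.Kind())) // false false
}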
r.descriptors[fqn] = ret - ret.ServiceDescriptor = noOpService - ret.file = r - ret.index = index - ret.proto = sd - ret.fqn = fqn prefix := fqn + "." - ret.methods = r.createMethods(prefix, ret, sd.Method, pool) + ret.methods = r.createMethods(prefix, ret, sd.Method) + + return ret } func (s *svcDescriptor) ServiceDescriptorProto() *descriptorpb.ServiceDescriptorProto { @@ -1728,13 +1752,13 @@ func (s *svcDescriptor) Methods() protoreflect.MethodDescriptors { type mtdDescriptors struct { protoreflect.MethodDescriptors - mtds []mtdDescriptor + mtds []*mtdDescriptor } -func (r *result) createMethods(prefix string, parent *svcDescriptor, mtdProtos []*descriptorpb.MethodDescriptorProto, pool *allocPool) mtdDescriptors { - mtds := pool.getMethods(len(mtdProtos)) +func (r *result) createMethods(prefix string, parent *svcDescriptor, mtdProtos []*descriptorpb.MethodDescriptorProto) mtdDescriptors { + mtds := make([]*mtdDescriptor, len(mtdProtos)) for i, mtdProto := range mtdProtos { - r.createMethodDescriptor(&mtds[i], mtdProto, parent, i, prefix+mtdProto.GetName()) + mtds[i] = r.createMethodDescriptor(mtdProto, parent, i, prefix+mtdProto.GetName()) } return mtdDescriptors{mtds: mtds} } @@ -1744,12 +1768,11 @@ func (m *mtdDescriptors) Len() int { } func (m *mtdDescriptors) Get(i int) protoreflect.MethodDescriptor { - return &m.mtds[i] + return m.mtds[i] } func (m *mtdDescriptors) ByName(n protoreflect.Name) protoreflect.MethodDescriptor { - for i := range m.mtds { - mtd := &m.mtds[i] + for _, mtd := range m.mtds { if mtd.Name() == n { return mtd } @@ -1771,14 +1794,10 @@ type mtdDescriptor struct { var _ protoreflect.MethodDescriptor = (*mtdDescriptor)(nil) var _ protoutil.DescriptorProtoWrapper = (*mtdDescriptor)(nil) -func (r *result) createMethodDescriptor(ret *mtdDescriptor, mtd *descriptorpb.MethodDescriptorProto, parent *svcDescriptor, index int, fqn string) { +func (r *result) createMethodDescriptor(mtd *descriptorpb.MethodDescriptorProto, parent *svcDescriptor, index int, fqn string) *mtdDescriptor { + ret := &mtdDescriptor{file: r, parent: parent, index: index, proto: mtd, fqn: fqn} r.descriptors[fqn] = ret - ret.MethodDescriptor = noOpMethod - ret.file = r - ret.parent = parent - ret.index = index - ret.proto = mtd - ret.fqn = fqn + return ret } func (m *mtdDescriptor) MethodDescriptorProto() *descriptorpb.MethodDescriptorProto { @@ -1850,35 +1869,12 @@ func (r *result) FindDescriptorByName(name protoreflect.FullName) protoreflect.D return r.descriptors[fqn] } +func (r *result) importsAsFiles() Files { + return r.deps +} + func (r *result) hasSource() bool { n := r.FileNode() _, ok := n.(*ast.FileNode) return ok } - -// resolveFeature resolves a feature for the given descriptor. If the given element -// is in a proto2 or proto3 syntax file, this skips resolution and just returns the -// relevant default (since such files are not allowed to override features). -// -// If neither the given element nor any of its ancestors override the given feature, -// the relevant default is returned. 
-func resolveFeature(element protoreflect.Descriptor, feature protoreflect.FieldDescriptor) protoreflect.Value { - edition := editions.GetEdition(element) - if edition == descriptorpb.Edition_EDITION_PROTO2 || edition == descriptorpb.Edition_EDITION_PROTO3 { - // these syntax levels can't specify features, so we can short-circuit the search - // through the descriptor hierarchy for feature overrides - defaults := editions.GetEditionDefaults(edition) - return defaults.ProtoReflect().Get(feature) // returns default value if field is not present - } - val, err := editions.ResolveFeature(element, feature) - if err == nil && val.IsValid() { - return val - } - defaults := editions.GetEditionDefaults(edition) - return defaults.ProtoReflect().Get(feature) -} - -func isJSONCompliant(d protoreflect.Descriptor) bool { - jsonFormat := resolveFeature(d, jsonFormatField) - return descriptorpb.FeatureSet_JsonFormat(jsonFormat.Enum()) == descriptorpb.FeatureSet_ALLOW -} diff --git a/vendor/github.com/bufbuild/protocompile/linker/doc.go b/vendor/github.com/bufbuild/protocompile/linker/doc.go index 93e68d9c..455c2ef6 100644 --- a/vendor/github.com/bufbuild/protocompile/linker/doc.go +++ b/vendor/github.com/bufbuild/protocompile/linker/doc.go @@ -1,4 +1,4 @@ -// Copyright 2020-2024 Buf Technologies, Inc. +// Copyright 2020-2022 Buf Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/bufbuild/protocompile/linker/files.go b/vendor/github.com/bufbuild/protocompile/linker/files.go index 51ce3a8b..3acdcade 100644 --- a/vendor/github.com/bufbuild/protocompile/linker/files.go +++ b/vendor/github.com/bufbuild/protocompile/linker/files.go @@ -1,4 +1,4 @@ -// Copyright 2020-2024 Buf Technologies, Inc. +// Copyright 2020-2022 Buf Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -28,7 +28,7 @@ import ( // File is like a super-powered protoreflect.FileDescriptor. It includes helpful // methods for looking up elements in the descriptor and can be used to create a -// resolver for the entire transitive closure of the file's dependencies. (See +// resolver for all of the file's transitive closure of dependencies. (See // ResolverFromFile.) type File interface { protoreflect.FileDescriptor @@ -42,6 +42,10 @@ type File interface { // that extends the given message name. If no such extension is defined in this // file, nil is returned. FindExtensionByNumber(message protoreflect.FullName, tag protoreflect.FieldNumber) protoreflect.ExtensionTypeDescriptor + // Imports returns this file's imports. These are only the files directly + // imported by the file. Indirect transitive dependencies will not be in + // the returned slice. + importsAsFiles() Files } // NewFile converts a protoreflect.FileDescriptor to a File. The given deps must @@ -131,8 +135,6 @@ type file struct { deps Files } -var _ File = (*file)(nil) - func (f *file) FindDescriptorByName(name protoreflect.FullName) protoreflect.Descriptor { return f.descs[name] } @@ -145,10 +147,12 @@ func (f *file) FindExtensionByNumber(msg protoreflect.FullName, tag protoreflect return findExtension(f, msg, tag) } -func (f *file) Unwrap() protoreflect.FileDescriptor { - return f.FileDescriptor +func (f *file) importsAsFiles() Files { + return f.deps } +var _ File = (*file)(nil) + // Files represents a set of protobuf files. 
It is a slice of File values, but // also provides a method for easily looking up files by path and name. type Files []File @@ -183,53 +187,58 @@ type Resolver interface { protoregistry.ExtensionTypeResolver } -// ResolverFromFile returns a Resolver that can resolve any element that is -// visible to the given file. It will search the given file, its imports, and -// any transitive public imports. +// ResolverFromFile returns a Resolver that uses the given file plus all of its +// imports as the source of descriptors. If a given query cannot be answered with +// these files, the query will fail with a protoregistry.NotFound error. This +// does not recursively search the entire transitive closure; it only searches +// the given file and its immediate dependencies. This is useful for resolving +// elements visible to the file. +// +// If the given file is the result of a call to Link, then all dependencies +// provided in the call to Link are searched (which could actually include more +// than just the file's direct imports). // // Note that this function does not compute any additional indexes for efficient // search, so queries generally take linear time, O(n) where n is the number of -// files whose elements are visible to the given file. Queries for an extension -// by number have runtime complexity that is linear with the number of messages -// and extensions defined across those files. +// files in the transitive closure of the given file. Queries for an extension +// by number are linear with the number of messages and extensions defined across +// all the files. func ResolverFromFile(f File) Resolver { - return fileResolver{f: f} + return fileResolver{ + f: f, + deps: f.importsAsFiles().AsResolver(), + } } type fileResolver struct { - f File + f File + deps Resolver } func (r fileResolver) FindFileByPath(path string) (protoreflect.FileDescriptor, error) { - return resolveInFile(r.f, false, nil, func(f File) (protoreflect.FileDescriptor, error) { - if f.Path() == path { - return f, nil - } - return nil, protoregistry.NotFound - }) + if r.f.Path() == path { + return r.f, nil + } + return r.deps.FindFileByPath(path) } func (r fileResolver) FindDescriptorByName(name protoreflect.FullName) (protoreflect.Descriptor, error) { - return resolveInFile(r.f, false, nil, func(f File) (protoreflect.Descriptor, error) { - if d := f.FindDescriptorByName(name); d != nil { - return d, nil - } - return nil, protoregistry.NotFound - }) + d := r.f.FindDescriptorByName(name) + if d != nil { + return d, nil + } + return r.deps.FindDescriptorByName(name) } func (r fileResolver) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) { - return resolveInFile(r.f, false, nil, func(f File) (protoreflect.MessageType, error) { - d := f.FindDescriptorByName(message) - if d != nil { - md, ok := d.(protoreflect.MessageDescriptor) - if !ok { - return nil, fmt.Errorf("%q is %s, not a message", message, descriptorTypeWithArticle(d)) - } + d := r.f.FindDescriptorByName(message) + if d != nil { + if md, ok := d.(protoreflect.MessageDescriptor); ok { return dynamicpb.NewMessageType(md), nil } return nil, protoregistry.NotFound - }) + } + return r.deps.FindMessageByName(message) } func (r fileResolver) FindMessageByURL(url string) (protoreflect.MessageType, error) { @@ -239,34 +248,35 @@ func (r fileResolver) FindMessageByURL(url string) (protoreflect.MessageType, er func messageNameFromURL(url string) string { lastSlash := strings.LastIndexByte(url, '/') - return url[lastSlash+1:] + var 
fullName string + if lastSlash >= 0 { + fullName = url[lastSlash+1:] + } else { + fullName = url + } + return fullName } func (r fileResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { - return resolveInFile(r.f, false, nil, func(f File) (protoreflect.ExtensionType, error) { - d := f.FindDescriptorByName(field) - if d != nil { - fld, ok := d.(protoreflect.FieldDescriptor) - if !ok || !fld.IsExtension() { - return nil, fmt.Errorf("%q is %s, not an extension", field, descriptorTypeWithArticle(d)) - } - if extd, ok := fld.(protoreflect.ExtensionTypeDescriptor); ok { - return extd.Type(), nil - } + d := r.f.FindDescriptorByName(field) + if d != nil { + if extd, ok := d.(protoreflect.ExtensionTypeDescriptor); ok { + return extd.Type(), nil + } + if fld, ok := d.(protoreflect.FieldDescriptor); ok && fld.IsExtension() { return dynamicpb.NewExtensionType(fld), nil } return nil, protoregistry.NotFound - }) + } + return r.deps.FindExtensionByName(field) } func (r fileResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { - return resolveInFile(r.f, false, nil, func(f File) (protoreflect.ExtensionType, error) { - ext := findExtension(f, message, field) - if ext != nil { - return ext.Type(), nil - } - return nil, protoregistry.NotFound - }) + ext := findExtension(r.f, message, field) + if ext != nil { + return ext.Type(), nil + } + return r.deps.FindExtensionByNumber(message, field) } type filesResolver []File diff --git a/vendor/github.com/bufbuild/protocompile/linker/linker.go b/vendor/github.com/bufbuild/protocompile/linker/linker.go index 6d878838..d8d1c9a6 100644 --- a/vendor/github.com/bufbuild/protocompile/linker/linker.go +++ b/vendor/github.com/bufbuild/protocompile/linker/linker.go @@ -1,4 +1,4 @@ -// Copyright 2020-2024 Buf Technologies, Inc. +// Copyright 2020-2022 Buf Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -18,6 +18,7 @@ import ( "fmt" "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/types/descriptorpb" "github.com/bufbuild/protocompile/ast" "github.com/bufbuild/protocompile/parser" @@ -59,7 +60,6 @@ func Link(parsed parser.Result, dependencies Files, symbols *Symbols, handler *r } r := &result{ - FileDescriptor: noOpFile, Result: parsed, deps: dependencies, descriptors: map[string]protoreflect.Descriptor{}, @@ -67,10 +67,8 @@ func Link(parsed parser.Result, dependencies Files, symbols *Symbols, handler *r prefix: prefix, optionQualifiedNames: map[ast.IdentValueNode]string{}, } - // First, we create the hierarchy of descendant descriptors. - r.createDescendants() - // Then we can put all symbols into a single pool, which lets us ensure there + // First, we put all symbols into a single pool, which lets us ensure there // are no duplicate symbols and will also let us resolve and revise all type // references in next step. if err := symbols.importResult(r, handler); err != nil { @@ -108,7 +106,24 @@ func Link(parsed parser.Result, dependencies Files, symbols *Symbols, handler *r type Result interface { File parser.Result - + // ResolveEnumType returns an enum descriptor for the given named enum that + // is available in this file. If no such element is available or if the + // named element is not an enum, nil is returned. 
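ResolverFromFile, in the form vendored here, answers lookups from the file itself and then falls back to a resolver over its imports. A sketch of driving it end to end through the protocompile compiler; the file name and proto contents are invented, and the SourceResolver/SourceAccessorFromMap usage is assumed from the vendored package's API:

package main

import (
	"context"
	"fmt"

	"github.com/bufbuild/protocompile"
	"github.com/bufbuild/protocompile/linker"
)

const demoSource = `
syntax = "proto3";
package demo;
message Ping { string payload = 1; }
service Echo { rpc Send(Ping) returns (Ping); }
`

func main() {
	compiler := protocompile.Compiler{
		Resolver: &protocompile.SourceResolver{
			Accessor: protocompile.SourceAccessorFromMap(map[string]string{
				"demo.proto": demoSource,
			}),
		},
	}
	files, err := compiler.Compile(context.Background(), "demo.proto")
	if err != nil {
		panic(err)
	}

	// The compiled file is a linker.File; build a Resolver over it and look
	// up a message that is visible to demo.proto.
	res := linker.ResolverFromFile(files[0])
	mt, err := res.FindMessageByName("demo.Ping")
	if err != nil {
		panic(err)
	}
	fmt.Println(mt.Descriptor().FullName()) // demo.Ping
}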
+ ResolveEnumType(protoreflect.FullName) protoreflect.EnumDescriptor + // ResolveMessageType returns a message descriptor for the given named + // message that is available in this file. If no such element is available + // or if the named element is not a message, nil is returned. + ResolveMessageType(protoreflect.FullName) protoreflect.MessageDescriptor + // ResolveOptionsType returns a message descriptor for the given options + // type. This is like ResolveMessageType but searches the result's entire + // set of transitive dependencies without regard for visibility. If no + // such element is available or if the named element is not a message, nil + // is returned. + ResolveOptionsType(protoreflect.FullName) protoreflect.MessageDescriptor + // ResolveExtension returns an extension descriptor for the given named + // extension that is available in this file. If no such element is available + // or if the named element is not an extension, nil is returned. + ResolveExtension(protoreflect.FullName) protoreflect.ExtensionTypeDescriptor // ResolveMessageLiteralExtensionName returns the fully qualified name for // an identifier for extension field names in message literals. ResolveMessageLiteralExtensionName(ast.IdentValueNode) string @@ -116,7 +131,7 @@ type Result interface { // be done after options are interpreted. Any errors or warnings encountered // will be reported via the given handler. If any error is reported, this // function returns a non-nil error. - ValidateOptions(handler *reporter.Handler, symbols *Symbols) error + ValidateOptions(handler *reporter.Handler) error // CheckForUnusedImports is used to report warnings for unused imports. This // should be called after options have been interpreted. Otherwise, the logic // could incorrectly report imports as unused if the only symbol used were a @@ -129,6 +144,21 @@ type Result interface { // interpreting options (which is done after linking). PopulateSourceCodeInfo() + // CanonicalProto returns the file descriptor proto in a form that + // will be serialized in a canonical way. The "canonical" way matches + // the way that "protoc" emits option values, which is a way that + // mostly matches the way options are defined in source, including + // ordering and de-structuring. Unlike the FileDescriptorProto() method, this + // method is more expensive and results in a new descriptor proto + // being constructed with each call. + // + // The returned value will have all options (fields of the various + // descriptorpb.*Options message types) represented via unrecognized + // fields. So the returned value will serialize as desired, but it + // is otherwise not useful since all option values are treated as + // unknown. + CanonicalProto() *descriptorpb.FileDescriptorProto + // RemoveAST drops the AST information from this result. RemoveAST() } diff --git a/vendor/github.com/bufbuild/protocompile/linker/pathkey_no_unsafe.go b/vendor/github.com/bufbuild/protocompile/linker/pathkey_no_unsafe.go index e00debc6..89475e69 100644 --- a/vendor/github.com/bufbuild/protocompile/linker/pathkey_no_unsafe.go +++ b/vendor/github.com/bufbuild/protocompile/linker/pathkey_no_unsafe.go @@ -1,4 +1,4 @@ -// Copyright 2020-2024 Buf Technologies, Inc. +// Copyright 2020-2022 Buf Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/vendor/github.com/bufbuild/protocompile/linker/pathkey_unsafe.go b/vendor/github.com/bufbuild/protocompile/linker/pathkey_unsafe.go index aa33e74f..cf0d0c26 100644 --- a/vendor/github.com/bufbuild/protocompile/linker/pathkey_unsafe.go +++ b/vendor/github.com/bufbuild/protocompile/linker/pathkey_unsafe.go @@ -1,4 +1,4 @@ -// Copyright 2020-2024 Buf Technologies, Inc. +// Copyright 2020-2022 Buf Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -30,10 +30,6 @@ import ( var pathElementType = reflect.TypeOf(protoreflect.SourcePath{}).Elem() func pathKey(p protoreflect.SourcePath) interface{} { - if p == nil { - // Reflection code below doesn't work with nil slices - return [0]int32{} - } hdr := (*reflect.SliceHeader)(unsafe.Pointer(reflect.ValueOf(&p).Pointer())) array := reflect.NewAt(reflect.ArrayOf(hdr.Len, pathElementType), unsafe.Pointer(hdr.Data)) return array.Elem().Interface() diff --git a/vendor/github.com/bufbuild/protocompile/linker/pool.go b/vendor/github.com/bufbuild/protocompile/linker/pool.go deleted file mode 100644 index 3609edcb..00000000 --- a/vendor/github.com/bufbuild/protocompile/linker/pool.go +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright 2020-2024 Buf Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package linker - -import "google.golang.org/protobuf/types/descriptorpb" - -// allocPool helps allocate descriptor instances. Instead of allocating -// them one at a time, we allocate a pool -- a large, flat slice to hold -// all descriptors of a particular kind for a file. We then use capacity -// in the pool when we need space for individual descriptors. 
-type allocPool struct { - numMessages int - numFields int - numOneofs int - numEnums int - numEnumValues int - numExtensions int - numServices int - numMethods int - - messages []msgDescriptor - fields []fldDescriptor - oneofs []oneofDescriptor - enums []enumDescriptor - enumVals []enValDescriptor - extensions []extTypeDescriptor - services []svcDescriptor - methods []mtdDescriptor -} - -func newAllocPool(file *descriptorpb.FileDescriptorProto) *allocPool { - var pool allocPool - pool.countElements(file) - pool.messages = make([]msgDescriptor, pool.numMessages) - pool.fields = make([]fldDescriptor, pool.numFields) - pool.oneofs = make([]oneofDescriptor, pool.numOneofs) - pool.enums = make([]enumDescriptor, pool.numEnums) - pool.enumVals = make([]enValDescriptor, pool.numEnumValues) - pool.extensions = make([]extTypeDescriptor, pool.numExtensions) - pool.services = make([]svcDescriptor, pool.numServices) - pool.methods = make([]mtdDescriptor, pool.numMethods) - return &pool -} - -func (p *allocPool) getMessages(count int) []msgDescriptor { - allocated := p.messages[:count] - p.messages = p.messages[count:] - return allocated -} - -func (p *allocPool) getFields(count int) []fldDescriptor { - allocated := p.fields[:count] - p.fields = p.fields[count:] - return allocated -} - -func (p *allocPool) getOneofs(count int) []oneofDescriptor { - allocated := p.oneofs[:count] - p.oneofs = p.oneofs[count:] - return allocated -} - -func (p *allocPool) getEnums(count int) []enumDescriptor { - allocated := p.enums[:count] - p.enums = p.enums[count:] - return allocated -} - -func (p *allocPool) getEnumValues(count int) []enValDescriptor { - allocated := p.enumVals[:count] - p.enumVals = p.enumVals[count:] - return allocated -} - -func (p *allocPool) getExtensions(count int) []extTypeDescriptor { - allocated := p.extensions[:count] - p.extensions = p.extensions[count:] - return allocated -} - -func (p *allocPool) getServices(count int) []svcDescriptor { - allocated := p.services[:count] - p.services = p.services[count:] - return allocated -} - -func (p *allocPool) getMethods(count int) []mtdDescriptor { - allocated := p.methods[:count] - p.methods = p.methods[count:] - return allocated -} - -func (p *allocPool) countElements(file *descriptorpb.FileDescriptorProto) { - p.countElementsInMessages(file.MessageType) - p.countElementsInEnums(file.EnumType) - p.numExtensions += len(file.Extension) - p.numServices += len(file.Service) - for _, svc := range file.Service { - p.numMethods += len(svc.Method) - } -} - -func (p *allocPool) countElementsInMessages(msgs []*descriptorpb.DescriptorProto) { - p.numMessages += len(msgs) - for _, msg := range msgs { - p.numFields += len(msg.Field) - p.numOneofs += len(msg.OneofDecl) - p.countElementsInMessages(msg.NestedType) - p.countElementsInEnums(msg.EnumType) - p.numExtensions += len(msg.Extension) - } -} - -func (p *allocPool) countElementsInEnums(enums []*descriptorpb.EnumDescriptorProto) { - p.numEnums += len(enums) - for _, enum := range enums { - p.numEnumValues += len(enum.Value) - } -} diff --git a/vendor/github.com/bufbuild/protocompile/linker/resolve.go b/vendor/github.com/bufbuild/protocompile/linker/resolve.go index cf30148c..485aba05 100644 --- a/vendor/github.com/bufbuild/protocompile/linker/resolve.go +++ b/vendor/github.com/bufbuild/protocompile/linker/resolve.go @@ -1,4 +1,4 @@ -// Copyright 2020-2024 Buf Technologies, Inc. +// Copyright 2020-2022 Buf Technologies, Inc. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,14 +15,13 @@ package linker import ( - "errors" "fmt" "strings" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" "google.golang.org/protobuf/types/descriptorpb" + "google.golang.org/protobuf/types/dynamicpb" "github.com/bufbuild/protocompile/ast" "github.com/bufbuild/protocompile/internal" @@ -30,65 +29,58 @@ import ( "github.com/bufbuild/protocompile/walk" ) -func (r *result) ResolveMessageLiteralExtensionName(node ast.IdentValueNode) string { - return r.optionQualifiedNames[node] -} - -func (r *result) resolveElement(name protoreflect.FullName, checkedCache []string) protoreflect.Descriptor { - if len(name) > 0 && name[0] == '.' { - name = name[1:] +func (r *result) ResolveMessageType(name protoreflect.FullName) protoreflect.MessageDescriptor { + d := r.resolveElement(name) + if md, ok := d.(protoreflect.MessageDescriptor); ok { + return md } - res, _ := resolveInFile(r, false, checkedCache[:0], func(f File) (protoreflect.Descriptor, error) { - d := resolveElementInFile(name, f) - if d != nil { - return d, nil - } - return nil, protoregistry.NotFound - }) - return res + return nil } -func resolveInFile[T any](f File, publicImportsOnly bool, checked []string, fn func(File) (T, error)) (T, error) { - var zero T - path := f.Path() - for _, str := range checked { - if str == path { - // already checked - return zero, protoregistry.NotFound - } +func (r *result) ResolveOptionsType(name protoreflect.FullName) protoreflect.MessageDescriptor { + d, _ := ResolverFromFile(r).FindDescriptorByName(name) + md, _ := d.(protoreflect.MessageDescriptor) + if md != nil && md.ParentFile() != nil { + r.markUsed(md.ParentFile().Path()) } - checked = append(checked, path) + return md +} - res, err := fn(f) - if err == nil { - // found it - return res, nil - } - if !errors.Is(err, protoregistry.NotFound) { - return zero, err +func (r *result) ResolveEnumType(name protoreflect.FullName) protoreflect.EnumDescriptor { + d := r.resolveElement(name) + if ed, ok := d.(protoreflect.EnumDescriptor); ok { + return ed } + return nil +} - imports := f.Imports() - for i, l := 0, imports.Len(); i < l; i++ { - imp := imports.Get(i) - if publicImportsOnly && !imp.IsPublic { - continue - } - res, err := resolveInFile(f.FindImportByPath(imp.Path()), true, checked, fn) - if errors.Is(err, protoregistry.NotFound) { - continue +func (r *result) ResolveExtension(name protoreflect.FullName) protoreflect.ExtensionTypeDescriptor { + d := r.resolveElement(name) + if ed, ok := d.(protoreflect.ExtensionDescriptor); ok { + if !ed.IsExtension() { + return nil } - if err != nil { - return zero, err + if td, ok := ed.(protoreflect.ExtensionTypeDescriptor); ok { + return td } - if !imp.IsPublic { - if r, ok := f.(*result); ok { - r.markUsed(imp.Path()) - } - } - return res, nil + return dynamicpb.NewExtensionType(ed).TypeDescriptor() } - return zero, err + return nil +} + +func (r *result) ResolveMessageLiteralExtensionName(node ast.IdentValueNode) string { + return r.optionQualifiedNames[node] +} + +func (r *result) resolveElement(name protoreflect.FullName) protoreflect.Descriptor { + if len(name) > 0 && name[0] == '.' 
{ + name = name[1:] + } + importedFd, res := resolveElement(r, name, false, nil) + if importedFd != nil { + r.markUsed(importedFd.Path()) + } + return res } func (r *result) markUsed(importPath string) { @@ -111,18 +103,50 @@ func (r *result) CheckForUnusedImports(handler *reporter.Handler) { if isPublic { continue } - span := ast.UnknownSpan(fd.GetName()) + pos := ast.UnknownPos(fd.GetName()) if file != nil { for _, decl := range file.Decls { imp, ok := decl.(*ast.ImportNode) if ok && imp.Name.AsString() == dep { - span = file.NodeInfo(imp) + pos = file.NodeInfo(imp).Start() } } } - handler.HandleWarningWithPos(span, errUnusedImport(dep)) + handler.HandleWarningWithPos(pos, errUnusedImport(dep)) + } + } +} + +func resolveElement(f File, fqn protoreflect.FullName, publicImportsOnly bool, checked []string) (imported File, d protoreflect.Descriptor) { + path := f.Path() + for _, str := range checked { + if str == path { + // already checked + return nil, nil + } + } + checked = append(checked, path) + + r := resolveElementInFile(fqn, f) + if r != nil { + // not imported, but present in f + return nil, r + } + + // When publicImportsOnly = false, we are searching only directly imported symbols. But + // we also need to search transitive public imports due to semantics of public imports. + for i := 0; i < f.Imports().Len(); i++ { + dep := f.Imports().Get(i) + if dep.IsPublic || !publicImportsOnly { + depFile := f.FindImportByPath(dep.Path()) + _, d := resolveElement(depFile, fqn, true, checked) + if d != nil { + return depFile, d + } } } + + return nil, nil } func descriptorTypeWithArticle(d protoreflect.Descriptor) string { @@ -152,35 +176,27 @@ func descriptorTypeWithArticle(d protoreflect.Descriptor) string { } } -func (r *result) createDescendants() { +func (r *result) resolveReferences(handler *reporter.Handler, s *Symbols) error { + // first create the full descriptor hierarchy fd := r.FileDescriptorProto() - pool := newAllocPool(fd) prefix := "" if fd.GetPackage() != "" { prefix = fd.GetPackage() + "." } r.imports = r.createImports() - r.messages = r.createMessages(prefix, r, fd.MessageType, pool) - r.enums = r.createEnums(prefix, r, fd.EnumType, pool) - r.extensions = r.createExtensions(prefix, r, fd.Extension, pool) - r.services = r.createServices(prefix, fd.Service, pool) -} + r.messages = r.createMessages(prefix, r, fd.MessageType) + r.enums = r.createEnums(prefix, r, fd.EnumType) + r.extensions = r.createExtensions(prefix, r, fd.Extension) + r.services = r.createServices(prefix, fd.Service) -func (r *result) resolveReferences(handler *reporter.Handler, s *Symbols) error { - fd := r.FileDescriptorProto() - checkedCache := make([]string, 0, 16) - scopes := []scope{fileScope(r, checkedCache)} + // then resolve symbol references + scopes := []scope{fileScope(r)} if fd.Options != nil { - if err := r.resolveOptions(handler, "file", protoreflect.FullName(fd.GetName()), fd.Options.UninterpretedOption, scopes, checkedCache); err != nil { + if err := r.resolveOptions(handler, "file", protoreflect.FullName(fd.GetName()), fd.Options.UninterpretedOption, scopes); err != nil { return err } } - // This is to de-dupe extendee-releated error messages when the same - // extendee is referenced from multiple extension field definitions. - // We leave it nil if there's no AST. 
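Both versions of resolveElement in this hunk implement the same lookup strategy: try the current file first, then recurse into its imports, where only public imports remain visible transitively, and keep a list of already-visited paths so diamond-shaped import graphs are not walked twice. Below is a hedged sketch of that traversal over a hypothetical minimal fileNode type (not the package's File interface):

package main

import "fmt"

// fileNode is a hypothetical stand-in for the linker's File interface.
type fileNode struct {
	path    string
	symbols map[string]bool // names defined directly in this file
	imports []importEdge
}

type importEdge struct {
	file     *fileNode
	isPublic bool
}

// resolveIn looks for name in f, then in its imports. At depth > 0 only
// public imports are searched, matching protobuf's public-import semantics.
// visited prevents re-checking a file reachable via multiple import paths.
func resolveIn(f *fileNode, name string, publicOnly bool, visited map[string]bool) *fileNode {
	if visited[f.path] {
		return nil
	}
	visited[f.path] = true
	if f.symbols[name] {
		return f
	}
	for _, imp := range f.imports {
		if publicOnly && !imp.isPublic {
			continue
		}
		if found := resolveIn(imp.file, name, true, visited); found != nil {
			return found
		}
	}
	return nil
}

func main() {
	base := &fileNode{path: "base.proto", symbols: map[string]bool{"pkg.Base": true}}
	mid := &fileNode{path: "mid.proto", symbols: map[string]bool{},
		imports: []importEdge{{file: base, isPublic: true}}} // re-exports base
	root := &fileNode{path: "root.proto", symbols: map[string]bool{},
		imports: []importEdge{{file: mid, isPublic: false}}}
	f := resolveIn(root, "pkg.Base", false, map[string]bool{})
	fmt.Println(f != nil && f.path == "base.proto") // true: visible via the public import
}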
- var extendeeNodes map[ast.Node]struct{} - return walk.DescriptorsEnterAndExit(r, func(d protoreflect.Descriptor) error { fqn := d.FullName() @@ -191,7 +207,7 @@ func (r *result) resolveReferences(handler *reporter.Handler, s *Symbols) error // an option cannot refer to it as simply "i" but must qualify it (at a minimum "Msg.i"). // So we don't add this messages scope to our scopes slice until *after* we do options. if d.proto.Options != nil { - if err := r.resolveOptions(handler, "message", fqn, d.proto.Options.UninterpretedOption, scopes, checkedCache); err != nil { + if err := r.resolveOptions(handler, "message", fqn, d.proto.Options.UninterpretedOption, scopes); err != nil { return err } } @@ -200,60 +216,57 @@ func (r *result) resolveReferences(handler *reporter.Handler, s *Symbols) error for _, er := range d.proto.ExtensionRange { if er.Options != nil { erName := protoreflect.FullName(fmt.Sprintf("%s:%d-%d", fqn, er.GetStart(), er.GetEnd()-1)) - if err := r.resolveOptions(handler, "extension range", erName, er.Options.UninterpretedOption, scopes, checkedCache); err != nil { + if err := r.resolveOptions(handler, "extension range", erName, er.Options.UninterpretedOption, scopes); err != nil { return err } } } case *extTypeDescriptor: if d.field.proto.Options != nil { - if err := r.resolveOptions(handler, "extension", fqn, d.field.proto.Options.UninterpretedOption, scopes, checkedCache); err != nil { + if err := r.resolveOptions(handler, "extension", fqn, d.field.proto.Options.UninterpretedOption, scopes); err != nil { return err } } - if extendeeNodes == nil && r.AST() != nil { - extendeeNodes = map[ast.Node]struct{}{} - } - if err := resolveFieldTypes(&d.field, handler, extendeeNodes, s, scopes, checkedCache); err != nil { + if err := resolveFieldTypes(d.field, handler, s, scopes); err != nil { return err } if r.Syntax() == protoreflect.Proto3 && !allowedProto3Extendee(d.field.proto.GetExtendee()) { file := r.FileNode() node := r.FieldNode(d.field.proto).FieldExtendee() - if err := handler.HandleErrorf(file.NodeInfo(node), "extend blocks in proto3 can only be used to define custom options"); err != nil { + if err := handler.HandleErrorf(file.NodeInfo(node).Start(), "extend blocks in proto3 can only be used to define custom options"); err != nil { return err } } case *fldDescriptor: if d.proto.Options != nil { - if err := r.resolveOptions(handler, "field", fqn, d.proto.Options.UninterpretedOption, scopes, checkedCache); err != nil { + if err := r.resolveOptions(handler, "field", fqn, d.proto.Options.UninterpretedOption, scopes); err != nil { return err } } - if err := resolveFieldTypes(d, handler, nil, s, scopes, checkedCache); err != nil { + if err := resolveFieldTypes(d, handler, s, scopes); err != nil { return err } case *oneofDescriptor: if d.proto.Options != nil { - if err := r.resolveOptions(handler, "oneof", fqn, d.proto.Options.UninterpretedOption, scopes, checkedCache); err != nil { + if err := r.resolveOptions(handler, "oneof", fqn, d.proto.Options.UninterpretedOption, scopes); err != nil { return err } } case *enumDescriptor: if d.proto.Options != nil { - if err := r.resolveOptions(handler, "enum", fqn, d.proto.Options.UninterpretedOption, scopes, checkedCache); err != nil { + if err := r.resolveOptions(handler, "enum", fqn, d.proto.Options.UninterpretedOption, scopes); err != nil { return err } } case *enValDescriptor: if d.proto.Options != nil { - if err := r.resolveOptions(handler, "enum value", fqn, d.proto.Options.UninterpretedOption, scopes, checkedCache); err != nil 
{ + if err := r.resolveOptions(handler, "enum value", fqn, d.proto.Options.UninterpretedOption, scopes); err != nil { return err } } case *svcDescriptor: if d.proto.Options != nil { - if err := r.resolveOptions(handler, "service", fqn, d.proto.Options.UninterpretedOption, scopes, checkedCache); err != nil { + if err := r.resolveOptions(handler, "service", fqn, d.proto.Options.UninterpretedOption, scopes); err != nil { return err } } @@ -261,11 +274,11 @@ func (r *result) resolveReferences(handler *reporter.Handler, s *Symbols) error scopes = append(scopes, messageScope(r, fqn)) // push new scope on entry case *mtdDescriptor: if d.proto.Options != nil { - if err := r.resolveOptions(handler, "method", fqn, d.proto.Options.UninterpretedOption, scopes, checkedCache); err != nil { + if err := r.resolveOptions(handler, "method", fqn, d.proto.Options.UninterpretedOption, scopes); err != nil { return err } } - if err := resolveMethodTypes(d, handler, scopes, checkedCache); err != nil { + if err := resolveMethodTypes(d, handler, scopes); err != nil { return err } } @@ -302,54 +315,25 @@ func allowedProto3Extendee(n string) bool { return ok } -func resolveFieldTypes(f *fldDescriptor, handler *reporter.Handler, extendees map[ast.Node]struct{}, s *Symbols, scopes []scope, checkedCache []string) error { +func resolveFieldTypes(f *fldDescriptor, handler *reporter.Handler, s *Symbols, scopes []scope) error { r := f.file fld := f.proto file := r.FileNode() node := r.FieldNode(fld) - kind := "field" + scope := fmt.Sprintf("field %s", f.fqn) if fld.GetExtendee() != "" { - kind = "extension" - var alreadyReported bool - if extendees != nil { - _, alreadyReported = extendees[node.FieldExtendee()] - if !alreadyReported { - extendees[node.FieldExtendee()] = struct{}{} - } - } - dsc := r.resolve(fld.GetExtendee(), false, scopes, checkedCache) + scope := fmt.Sprintf("extension %s", f.fqn) + dsc := r.resolve(fld.GetExtendee(), false, scopes) if dsc == nil { - if alreadyReported { - return nil - } - var extendeePrefix string - if extendees == nil { - extendeePrefix = kind + " " + f.fqn + ": " - } - return handler.HandleErrorf(file.NodeInfo(node.FieldExtendee()), "%sunknown extendee type %s", extendeePrefix, fld.GetExtendee()) + return handler.HandleErrorf(file.NodeInfo(node.FieldExtendee()).Start(), "unknown extendee type %s", fld.GetExtendee()) } if isSentinelDescriptor(dsc) { - if alreadyReported { - return nil - } - var extendeePrefix string - if extendees == nil { - extendeePrefix = kind + " " + f.fqn + ": " - } - return handler.HandleErrorf(file.NodeInfo(node.FieldExtendee()), "%sunknown extendee type %s; resolved to %s which is not defined; consider using a leading dot", extendeePrefix, fld.GetExtendee(), dsc.FullName()) + return handler.HandleErrorf(file.NodeInfo(node.FieldExtendee()).Start(), "unknown extendee type %s; resolved to %s which is not defined; consider using a leading dot", fld.GetExtendee(), dsc.FullName()) } extd, ok := dsc.(protoreflect.MessageDescriptor) if !ok { - if alreadyReported { - return nil - } - var extendeePrefix string - if extendees == nil { - extendeePrefix = kind + " " + f.fqn + ": " - } - return handler.HandleErrorf(file.NodeInfo(node.FieldExtendee()), "%sextendee is invalid: %s is %s, not a message", extendeePrefix, dsc.FullName(), descriptorTypeWithArticle(dsc)) + return handler.HandleErrorf(file.NodeInfo(node.FieldExtendee()).Start(), "extendee is invalid: %s is %s, not a message", dsc.FullName(), descriptorTypeWithArticle(dsc)) } - f.extendee = extd extendeeName := "." 
+ string(dsc.FullName()) if fld.GetExtendee() != extendeeName { @@ -366,12 +350,12 @@ func resolveFieldTypes(f *fldDescriptor, handler *reporter.Handler, extendees ma } } if !found { - if err := handler.HandleErrorf(file.NodeInfo(node.FieldTag()), "%s %s: tag %d is not in valid range for extended type %s", kind, f.fqn, tag, dsc.FullName()); err != nil { + if err := handler.HandleErrorf(file.NodeInfo(node.FieldTag()).Start(), "%s: tag %d is not in valid range for extended type %s", scope, tag, dsc.FullName()); err != nil { return err } } else { // make sure tag is not a duplicate - if err := s.AddExtension(packageFor(dsc), dsc.FullName(), tag, file.NodeInfo(node.FieldTag()), handler); err != nil { + if err := s.AddExtension(packageFor(dsc), dsc.FullName(), tag, file.NodeInfo(node.FieldTag()).Start(), handler); err != nil { return err } } @@ -386,12 +370,12 @@ func resolveFieldTypes(f *fldDescriptor, handler *reporter.Handler, extendees ma return nil } - dsc := r.resolve(fld.GetTypeName(), true, scopes, checkedCache) + dsc := r.resolve(fld.GetTypeName(), true, scopes) if dsc == nil { - return handler.HandleErrorf(file.NodeInfo(node.FieldType()), "%s %s: unknown type %s", kind, f.fqn, fld.GetTypeName()) + return handler.HandleErrorf(file.NodeInfo(node.FieldType()).Start(), "%s: unknown type %s", scope, fld.GetTypeName()) } if isSentinelDescriptor(dsc) { - return handler.HandleErrorf(file.NodeInfo(node.FieldType()), "%s %s: unknown type %s; resolved to %s which is not defined; consider using a leading dot", kind, f.fqn, fld.GetTypeName(), dsc.FullName()) + return handler.HandleErrorf(file.NodeInfo(node.FieldType()).Start(), "%s: unknown type %s; resolved to %s which is not defined; consider using a leading dot", scope, fld.GetTypeName(), dsc.FullName()) } switch dsc := dsc.(type) { case protoreflect.MessageDescriptor: @@ -401,7 +385,7 @@ func resolveFieldTypes(f *fldDescriptor, handler *reporter.Handler, extendees ma case *ast.MapFieldNode: // We have an AST for this file and can see this field is from a map declaration isValid = true - case *ast.NoSourceNode: + case ast.NoSourceNode: // We don't have an AST for the file (it came from a provided descriptor). So we // need to validate that it's not an illegal reference. To be valid, the field // must be repeated and the entry type must be nested in the same enclosing @@ -419,7 +403,7 @@ func resolveFieldTypes(f *fldDescriptor, handler *reporter.Handler, extendees ma } } if !isValid { - return handler.HandleErrorf(file.NodeInfo(node.FieldType()), "%s %s: %s is a synthetic map entry and may not be referenced explicitly", kind, f.fqn, dsc.FullName()) + return handler.HandleErrorf(file.NodeInfo(node.FieldType()).Start(), "%s: %s is a synthetic map entry and may not be referenced explicitly", scope, dsc.FullName()) } } typeName := "." 
+ string(dsc.FullName()) @@ -430,10 +414,16 @@ func resolveFieldTypes(f *fldDescriptor, handler *reporter.Handler, extendees ma // if type was tentatively unset, we now know it's actually a message fld.Type = descriptorpb.FieldDescriptorProto_TYPE_MESSAGE.Enum() } else if fld.GetType() != descriptorpb.FieldDescriptorProto_TYPE_MESSAGE && fld.GetType() != descriptorpb.FieldDescriptorProto_TYPE_GROUP { - return handler.HandleErrorf(file.NodeInfo(node.FieldType()), "%s %s: descriptor proto indicates type %v but should be %v", kind, f.fqn, fld.GetType(), descriptorpb.FieldDescriptorProto_TYPE_MESSAGE) + return handler.HandleErrorf(file.NodeInfo(node.FieldType()).Start(), "%s: descriptor proto indicates type %v but should be %v", scope, fld.GetType(), descriptorpb.FieldDescriptorProto_TYPE_MESSAGE) } f.msgType = dsc case protoreflect.EnumDescriptor: + proto3 := r.Syntax() == protoreflect.Proto3 + enumIsProto3 := dsc.Syntax() == protoreflect.Proto3 + if fld.GetExtendee() == "" && proto3 && !enumIsProto3 { + // fields in a proto3 message cannot refer to proto2 enums + return handler.HandleErrorf(file.NodeInfo(node.FieldType()).Start(), "%s: cannot use proto2 enum %s in a proto3 message", scope, fld.GetTypeName()) + } typeName := "." + string(dsc.FullName()) if fld.GetTypeName() != typeName { fld.TypeName = proto.String(typeName) @@ -442,11 +432,11 @@ func resolveFieldTypes(f *fldDescriptor, handler *reporter.Handler, extendees ma // the type was tentatively unset, but now we know it's actually an enum fld.Type = descriptorpb.FieldDescriptorProto_TYPE_ENUM.Enum() } else if fld.GetType() != descriptorpb.FieldDescriptorProto_TYPE_ENUM { - return handler.HandleErrorf(file.NodeInfo(node.FieldType()), "%s %s: descriptor proto indicates type %v but should be %v", kind, f.fqn, fld.GetType(), descriptorpb.FieldDescriptorProto_TYPE_ENUM) + return handler.HandleErrorf(file.NodeInfo(node.FieldType()).Start(), "%s: descriptor proto indicates type %v but should be %v", scope, fld.GetType(), descriptorpb.FieldDescriptorProto_TYPE_ENUM) } f.enumType = dsc default: - return handler.HandleErrorf(file.NodeInfo(node.FieldType()), "%s %s: invalid type: %s is %s, not a message or enum", kind, f.fqn, dsc.FullName(), descriptorTypeWithArticle(dsc)) + return handler.HandleErrorf(file.NodeInfo(node.FieldType()).Start(), "%s: invalid type: %s is %s, not a message or enum", scope, dsc.FullName(), descriptorTypeWithArticle(dsc)) } return nil } @@ -466,23 +456,23 @@ func isValidMap(mapField protoreflect.FieldDescriptor, mapEntry protoreflect.Mes string(mapEntry.Name()) == internal.InitCap(internal.JSONName(string(mapField.Name())))+"Entry" } -func resolveMethodTypes(m *mtdDescriptor, handler *reporter.Handler, scopes []scope, checkedCache []string) error { - scope := "method " + m.fqn +func resolveMethodTypes(m *mtdDescriptor, handler *reporter.Handler, scopes []scope) error { + scope := fmt.Sprintf("method %s", m.fqn) r := m.file mtd := m.proto file := r.FileNode() node := r.MethodNode(mtd) - dsc := r.resolve(mtd.GetInputType(), false, scopes, checkedCache) + dsc := r.resolve(mtd.GetInputType(), false, scopes) if dsc == nil { - if err := handler.HandleErrorf(file.NodeInfo(node.GetInputType()), "%s: unknown request type %s", scope, mtd.GetInputType()); err != nil { + if err := handler.HandleErrorf(file.NodeInfo(node.GetInputType()).Start(), "%s: unknown request type %s", scope, mtd.GetInputType()); err != nil { return err } } else if isSentinelDescriptor(dsc) { - if err := 
handler.HandleErrorf(file.NodeInfo(node.GetInputType()), "%s: unknown request type %s; resolved to %s which is not defined; consider using a leading dot", scope, mtd.GetInputType(), dsc.FullName()); err != nil { + if err := handler.HandleErrorf(file.NodeInfo(node.GetInputType()).Start(), "%s: unknown request type %s; resolved to %s which is not defined; consider using a leading dot", scope, mtd.GetInputType(), dsc.FullName()); err != nil { return err } } else if msg, ok := dsc.(protoreflect.MessageDescriptor); !ok { - if err := handler.HandleErrorf(file.NodeInfo(node.GetInputType()), "%s: invalid request type: %s is %s, not a message", scope, dsc.FullName(), descriptorTypeWithArticle(dsc)); err != nil { + if err := handler.HandleErrorf(file.NodeInfo(node.GetInputType()).Start(), "%s: invalid request type: %s is %s, not a message", scope, dsc.FullName(), descriptorTypeWithArticle(dsc)); err != nil { return err } } else { @@ -494,17 +484,17 @@ func resolveMethodTypes(m *mtdDescriptor, handler *reporter.Handler, scopes []sc } // TODO: make input and output type resolution more DRY - dsc = r.resolve(mtd.GetOutputType(), false, scopes, checkedCache) + dsc = r.resolve(mtd.GetOutputType(), false, scopes) if dsc == nil { - if err := handler.HandleErrorf(file.NodeInfo(node.GetOutputType()), "%s: unknown response type %s", scope, mtd.GetOutputType()); err != nil { + if err := handler.HandleErrorf(file.NodeInfo(node.GetOutputType()).Start(), "%s: unknown response type %s", scope, mtd.GetOutputType()); err != nil { return err } } else if isSentinelDescriptor(dsc) { - if err := handler.HandleErrorf(file.NodeInfo(node.GetOutputType()), "%s: unknown response type %s; resolved to %s which is not defined; consider using a leading dot", scope, mtd.GetOutputType(), dsc.FullName()); err != nil { + if err := handler.HandleErrorf(file.NodeInfo(node.GetOutputType()).Start(), "%s: unknown response type %s; resolved to %s which is not defined; consider using a leading dot", scope, mtd.GetOutputType(), dsc.FullName()); err != nil { return err } } else if msg, ok := dsc.(protoreflect.MessageDescriptor); !ok { - if err := handler.HandleErrorf(file.NodeInfo(node.GetOutputType()), "%s: invalid response type: %s is %s, not a message", scope, dsc.FullName(), descriptorTypeWithArticle(dsc)); err != nil { + if err := handler.HandleErrorf(file.NodeInfo(node.GetOutputType()).Start(), "%s: invalid response type: %s is %s, not a message", scope, dsc.FullName(), descriptorTypeWithArticle(dsc)); err != nil { return err } } else { @@ -518,7 +508,7 @@ func resolveMethodTypes(m *mtdDescriptor, handler *reporter.Handler, scopes []sc return nil } -func (r *result) resolveOptions(handler *reporter.Handler, elemType string, elemName protoreflect.FullName, opts []*descriptorpb.UninterpretedOption, scopes []scope, checkedCache []string) error { +func (r *result) resolveOptions(handler *reporter.Handler, elemType string, elemName protoreflect.FullName, opts []*descriptorpb.UninterpretedOption, scopes []scope) error { mc := &internal.MessageContext{ File: r, ElementName: string(elemName), @@ -531,9 +521,9 @@ opts: for _, nm := range opt.Name { if nm.GetIsExtension() { node := r.OptionNamePartNode(nm) - fqn, err := r.resolveExtensionName(nm.GetNamePart(), scopes, checkedCache) + fqn, err := r.resolveExtensionName(nm.GetNamePart(), scopes) if err != nil { - if err := handler.HandleErrorf(file.NodeInfo(node), "%v%v", mc, err); err != nil { + if err := handler.HandleErrorf(file.NodeInfo(node).Start(), "%v%v", mc, err); err != nil { return err 
} continue opts @@ -544,7 +534,7 @@ opts: // also resolve any extension names found inside message literals in option values mc.Option = opt optVal := r.OptionNode(opt).GetValue() - if err := r.resolveOptionValue(handler, mc, optVal, scopes, checkedCache); err != nil { + if err := r.resolveOptionValue(handler, mc, optVal, scopes); err != nil { return err } mc.Option = nil @@ -552,7 +542,7 @@ opts: return nil } -func (r *result) resolveOptionValue(handler *reporter.Handler, mc *internal.MessageContext, val ast.ValueNode, scopes []scope, checkedCache []string) error { +func (r *result) resolveOptionValue(handler *reporter.Handler, mc *internal.MessageContext, val ast.ValueNode, scopes []scope) error { optVal := val.Value() switch optVal := optVal.(type) { case []ast.ValueNode: @@ -562,7 +552,7 @@ func (r *result) resolveOptionValue(handler *reporter.Handler, mc *internal.Mess }() for i, v := range optVal { mc.OptAggPath = fmt.Sprintf("%s[%d]", origPath, i) - if err := r.resolveOptionValue(handler, mc, v, scopes, checkedCache); err != nil { + if err := r.resolveOptionValue(handler, mc, v, scopes); err != nil { return err } } @@ -581,9 +571,9 @@ func (r *result) resolveOptionValue(handler *reporter.Handler, mc *internal.Mess // likely due to how it re-uses C++ text format implementation, and normal text // format doesn't expect that kind of relative reference.) scopes := scopes[:1] // first scope is file, the rest are enclosing messages - fqn, err := r.resolveExtensionName(string(fld.Name.Name.AsIdentifier()), scopes, checkedCache) + fqn, err := r.resolveExtensionName(string(fld.Name.Name.AsIdentifier()), scopes) if err != nil { - if err := handler.HandleErrorf(r.FileNode().NodeInfo(fld.Name.Name), "%v%v", mc, err); err != nil { + if err := handler.HandleErrorf(r.FileNode().NodeInfo(fld.Name.Name).Start(), "%v%v", mc, err); err != nil { return err } } else { @@ -602,7 +592,7 @@ func (r *result) resolveOptionValue(handler *reporter.Handler, mc *internal.Mess mc.OptAggPath = fmt.Sprintf("%s%s", mc.OptAggPath, string(fld.Name.Name.AsIdentifier())) } - if err := r.resolveOptionValue(handler, mc, fld.Val, scopes, checkedCache); err != nil { + if err := r.resolveOptionValue(handler, mc, fld.Val, scopes); err != nil { return err } } @@ -610,8 +600,8 @@ func (r *result) resolveOptionValue(handler *reporter.Handler, mc *internal.Mess return nil } -func (r *result) resolveExtensionName(name string, scopes []scope, checkedCache []string) (string, error) { - dsc := r.resolve(name, false, scopes, checkedCache) +func (r *result) resolveExtensionName(name string, scopes []scope) (string, error) { + dsc := r.resolve(name, false, scopes) if dsc == nil { return "", fmt.Errorf("unknown extension %s", name) } @@ -626,10 +616,10 @@ func (r *result) resolveExtensionName(name string, scopes []scope, checkedCache return string("." + dsc.FullName()), nil } -func (r *result) resolve(name string, onlyTypes bool, scopes []scope, checkedCache []string) protoreflect.Descriptor { +func (r *result) resolve(name string, onlyTypes bool, scopes []scope) protoreflect.Descriptor { if strings.HasPrefix(name, ".") { // already fully-qualified - return r.resolveElement(protoreflect.FullName(name[1:]), checkedCache) + return r.resolveElement(protoreflect.FullName(name[1:])) } // unqualified, so we look in the enclosing (last) scope first and move // towards outermost (first) scope, trying to resolve the symbol @@ -674,13 +664,13 @@ func isType(d protoreflect.Descriptor) bool { // can be declared. 
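The resolve helper in this hunk walks a stack of scopes from the innermost enclosing message out to the file scope, and the fileScope that follows tries the name against every prefix of the file's package (protobuf packages nest like C++ namespaces). Here is a small hedged sketch of that two-level lookup; prefixesOf and the flat symbol map are illustrative stand-ins loosely modeled on internal.CreatePrefixList and the real symbol table:

package main

import (
	"fmt"
	"strings"
)

// prefixesOf("a.b.c") -> ["a.b.c.", "a.b.", "a.", ""], longest first, so the
// most specific package prefix wins when a name is visible at several levels.
func prefixesOf(pkg string) []string {
	var out []string
	for pkg != "" {
		out = append(out, pkg+".")
		if i := strings.LastIndexByte(pkg, '.'); i >= 0 {
			pkg = pkg[:i]
		} else {
			pkg = ""
		}
	}
	return append(out, "")
}

// scope resolves a name or reports "" if it is not visible in that scope.
type scope func(name string) string

func fileScope(pkg string, symbols map[string]bool) scope {
	prefixes := prefixesOf(pkg)
	return func(name string) string {
		for _, p := range prefixes {
			if fqn := p + name; symbols[fqn] {
				return fqn
			}
		}
		return ""
	}
}

func messageScope(msgFQN string, symbols map[string]bool) scope {
	return func(name string) string {
		if fqn := msgFQN + "." + name; symbols[fqn] {
			return fqn
		}
		return ""
	}
}

// resolve tries the innermost (last) scope first, moving outward.
func resolve(name string, scopes []scope) string {
	for i := len(scopes) - 1; i >= 0; i-- {
		if fqn := scopes[i](name); fqn != "" {
			return fqn
		}
	}
	return ""
}

func main() {
	syms := map[string]bool{"foo.bar.Outer.Inner": true, "foo.Other": true}
	scopes := []scope{fileScope("foo.bar", syms), messageScope("foo.bar.Outer", syms)}
	fmt.Println(resolve("Inner", scopes)) // foo.bar.Outer.Inner (enclosing message wins)
	fmt.Println(resolve("Other", scopes)) // foo.Other (found via package prefix "foo.")
}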
type scope func(firstName, fullName string) protoreflect.Descriptor -func fileScope(r *result, checkedCache []string) scope { +func fileScope(r *result) scope { // we search symbols in this file, but also symbols in other files that have // the same package as this file or a "parent" package (in protobuf, // packages are a hierarchy like C++ namespaces) prefixes := internal.CreatePrefixList(r.FileDescriptorProto().GetPackage()) querySymbol := func(n string) protoreflect.Descriptor { - return r.resolveElement(protoreflect.FullName(n), checkedCache) + return r.resolveElement(protoreflect.FullName(n)) } return func(firstName, fullName string) protoreflect.Descriptor { for _, prefix := range prefixes { diff --git a/vendor/github.com/bufbuild/protocompile/linker/symbols.go b/vendor/github.com/bufbuild/protocompile/linker/symbols.go index c8db762b..54923218 100644 --- a/vendor/github.com/bufbuild/protocompile/linker/symbols.go +++ b/vendor/github.com/bufbuild/protocompile/linker/symbols.go @@ -1,4 +1,4 @@ -// Copyright 2020-2024 Buf Technologies, Inc. +// Copyright 2020-2022 Buf Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -18,11 +18,12 @@ import ( "strings" "sync" + "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/types/descriptorpb" "github.com/bufbuild/protocompile/ast" "github.com/bufbuild/protocompile/internal" - "github.com/bufbuild/protocompile/protoutil" "github.com/bufbuild/protocompile/reporter" "github.com/bufbuild/protocompile/walk" ) @@ -37,14 +38,6 @@ const unknownFilePath = "" // This type is thread-safe. type Symbols struct { pkgTrie packageSymbols - - // We don't know the packages for these symbols, so we can't - // keep them in the pkgTrie. In vast majority of cases, this - // will always be empty/unused. When used, it ensures that - // multiple extension declarations don't refer to the same - // extension. - extDeclsMu sync.Mutex - extDecls map[protoreflect.FullName]extDecl } type packageSymbols struct { @@ -52,7 +45,7 @@ type packageSymbols struct { children map[protoreflect.FullName]*packageSymbols files map[protoreflect.FileDescriptor]struct{} symbols map[protoreflect.FullName]symbolEntry - exts map[extNumber]ast.SourceSpan + exts map[extNumber]ast.SourcePos } type extNumber struct { @@ -61,17 +54,11 @@ type extNumber struct { } type symbolEntry struct { - span ast.SourceSpan + pos ast.SourcePos isEnumValue bool isPackage bool } -type extDecl struct { - span ast.SourceSpan - extendee protoreflect.FullName - tag protoreflect.FieldNumber -} - // Import populates the symbol table with all symbols/elements and extension // tags present in the given file descriptor. If s is nil or if fd has already // been imported into s, this returns immediately without doing anything. 
If any @@ -82,22 +69,18 @@ func (s *Symbols) Import(fd protoreflect.FileDescriptor, handler *reporter.Handl return nil } - if f, ok := fd.(protoreflect.FileImport); ok { - // unwrap any import instance - fd = f.FileDescriptor - } if f, ok := fd.(*file); ok { // unwrap any file instance fd = f.FileDescriptor } - var pkgSpan ast.SourceSpan + var pkgPos ast.SourcePos if res, ok := fd.(*result); ok { - pkgSpan = packageNameSpan(res) + pkgPos = packageNameStart(res) } else { - pkgSpan = sourceSpanForPackage(fd) + pkgPos = sourcePositionForPackage(fd) } - pkg, err := s.importPackages(pkgSpan, fd.Package(), handler) + pkg, err := s.importPackages(pkgPos, fd.Package(), handler) if err != nil || pkg == nil { return err } @@ -138,9 +121,12 @@ func (s *Symbols) importFileWithExtensions(pkg *packageSymbols, fd protoreflect. if !ok || !fld.IsExtension() { return nil } - span := sourceSpanForNumber(fld) + pos := sourcePositionForNumber(fld) extendee := fld.ContainingMessage() - return s.AddExtension(packageFor(extendee), extendee.FullName(), fld.Number(), span, handler) + if err := s.AddExtension(packageFor(extendee), extendee.FullName(), fld.Number(), pos, handler); err != nil { + return err + } + return nil }) } @@ -168,20 +154,20 @@ func (s *packageSymbols) importFile(fd protoreflect.FileDescriptor, handler *rep return true, nil } -func (s *Symbols) importPackages(pkgSpan ast.SourceSpan, pkg protoreflect.FullName, handler *reporter.Handler) (*packageSymbols, error) { +func (s *Symbols) importPackages(pkgPos ast.SourcePos, pkg protoreflect.FullName, handler *reporter.Handler) (*packageSymbols, error) { if pkg == "" { return &s.pkgTrie, nil } + parts := strings.Split(string(pkg), ".") + for i := 1; i < len(parts); i++ { + parts[i] = parts[i-1] + "." + parts[i] + } + cur := &s.pkgTrie - enumerator := nameEnumerator{name: pkg} - for { - p, ok := enumerator.next() - if !ok { - return cur, nil - } + for _, p := range parts { var err error - cur, err = cur.importPackage(pkgSpan, p, handler) + cur, err = cur.importPackage(pkgPos, protoreflect.FullName(p), handler) if err != nil { return nil, err } @@ -189,9 +175,11 @@ func (s *Symbols) importPackages(pkgSpan ast.SourceSpan, pkg protoreflect.FullNa return nil, nil } } + + return cur, nil } -func (s *packageSymbols) importPackage(pkgSpan ast.SourceSpan, pkg protoreflect.FullName, handler *reporter.Handler) (*packageSymbols, error) { +func (s *packageSymbols) importPackage(pkgPos ast.SourcePos, pkg protoreflect.FullName, handler *reporter.Handler) (*packageSymbols, error) { s.mu.RLock() existing, ok := s.symbols[pkg] var child *packageSymbols @@ -204,7 +192,7 @@ func (s *packageSymbols) importPackage(pkgSpan ast.SourceSpan, pkg protoreflect. // package already exists return child, nil } else if ok { - return nil, reportSymbolCollision(pkgSpan, pkg, false, existing, handler) + return nil, reportSymbolCollision(pkgPos, pkg, false, existing, handler) } s.mu.Lock() @@ -215,12 +203,12 @@ func (s *packageSymbols) importPackage(pkgSpan ast.SourceSpan, pkg protoreflect. 
// package already exists return s.children[pkg], nil } else if ok { - return nil, reportSymbolCollision(pkgSpan, pkg, false, existing, handler) + return nil, reportSymbolCollision(pkgPos, pkg, false, existing, handler) } if s.symbols == nil { s.symbols = map[protoreflect.FullName]symbolEntry{} } - s.symbols[pkg] = symbolEntry{span: pkgSpan, isPackage: true} + s.symbols[pkg] = symbolEntry{pos: pkgPos, isPackage: true} child = &packageSymbols{} if s.children == nil { s.children = map[protoreflect.FullName]*packageSymbols{} @@ -229,32 +217,32 @@ func (s *packageSymbols) importPackage(pkgSpan ast.SourceSpan, pkg protoreflect. return child, nil } -func (s *Symbols) getPackage(pkg protoreflect.FullName, exact bool) *packageSymbols { +func (s *Symbols) getPackage(pkg protoreflect.FullName) *packageSymbols { if pkg == "" { return &s.pkgTrie } + + parts := strings.Split(string(pkg), ".") + for i := 1; i < len(parts); i++ { + parts[i] = parts[i-1] + "." + parts[i] + } + cur := &s.pkgTrie - enumerator := nameEnumerator{name: pkg} - for { - p, ok := enumerator.next() - if !ok { - return cur - } + for _, p := range parts { cur.mu.RLock() - next := cur.children[p] + next := cur.children[protoreflect.FullName(p)] cur.mu.RUnlock() if next == nil { - if exact { - return nil - } - return cur + return nil } cur = next } + + return cur } -func reportSymbolCollision(span ast.SourceSpan, fqn protoreflect.FullName, additionIsEnumVal bool, existing symbolEntry, handler *reporter.Handler) error { +func reportSymbolCollision(pos ast.SourcePos, fqn protoreflect.FullName, additionIsEnumVal bool, existing symbolEntry, handler *reporter.Handler) error { // because of weird scoping for enum values, provide more context in error message // if this conflict is with an enum value var isPkg, suffix string @@ -264,12 +252,12 @@ func reportSymbolCollision(span ast.SourceSpan, fqn protoreflect.FullName, addit if existing.isPackage { isPkg = " as a package" } - orig := existing.span - conflict := span - if posLess(conflict.Start(), orig.Start()) { + orig := existing.pos + conflict := pos + if posLess(conflict, orig) { orig, conflict = conflict, orig } - return handler.HandleErrorf(conflict, "symbol %q already defined%s at %v%s", fqn, isPkg, orig.Start(), suffix) + return handler.HandleErrorf(conflict, "symbol %q already defined%s at %v%s", fqn, isPkg, orig, suffix) } func posLess(a, b ast.SourcePos) bool { @@ -284,10 +272,10 @@ func posLess(a, b ast.SourcePos) bool { func (s *packageSymbols) checkFileLocked(f protoreflect.FileDescriptor, handler *reporter.Handler) error { return walk.Descriptors(f, func(d protoreflect.Descriptor) error { - span := sourceSpanFor(d) + pos := sourcePositionFor(d) if existing, ok := s.symbols[d.FullName()]; ok { _, isEnumVal := d.(protoreflect.EnumValueDescriptor) - if err := reportSymbolCollision(span, d.FullName(), isEnumVal, existing, handler); err != nil { + if err := reportSymbolCollision(pos, d.FullName(), isEnumVal, existing, handler); err != nil { return err } } @@ -295,36 +283,26 @@ func (s *packageSymbols) checkFileLocked(f protoreflect.FileDescriptor, handler }) } -func sourceSpanForPackage(fd protoreflect.FileDescriptor) ast.SourceSpan { +func sourcePositionForPackage(fd protoreflect.FileDescriptor) ast.SourcePos { loc := fd.SourceLocations().ByPath([]int32{internal.FilePackageTag}) - if internal.IsZeroLocation(loc) { - return ast.UnknownSpan(fd.Path()) - } - return ast.NewSourceSpan( - ast.SourcePos{ - Filename: fd.Path(), - Line: loc.StartLine, - Col: loc.StartColumn, - }, - 
ast.SourcePos{ - Filename: fd.Path(), - Line: loc.EndLine, - Col: loc.EndColumn, - }, - ) + if isZeroLoc(loc) { + return ast.UnknownPos(fd.Path()) + } + return ast.SourcePos{ + Filename: fd.Path(), + Line: loc.StartLine, + Col: loc.StartColumn, + } } -func sourceSpanFor(d protoreflect.Descriptor) ast.SourceSpan { +func sourcePositionFor(d protoreflect.Descriptor) ast.SourcePos { file := d.ParentFile() if file == nil { - return ast.UnknownSpan(unknownFilePath) - } - if result, ok := file.(*result); ok { - return nameSpan(result.FileNode(), result.Node(protoutil.ProtoFromDescriptor(d))) + return ast.UnknownPos(unknownFilePath) } - path, ok := internal.ComputePath(d) + path, ok := computePath(d) if !ok { - return ast.UnknownSpan(file.Path()) + return ast.UnknownPos(file.Path()) } namePath := path switch d.(type) { @@ -333,7 +311,7 @@ func sourceSpanFor(d protoreflect.Descriptor) ast.SourceSpan { case protoreflect.MessageDescriptor: namePath = append(namePath, internal.MessageNameTag) case protoreflect.OneofDescriptor: - namePath = append(namePath, internal.OneofNameTag) + namePath = append(namePath, internal.OneOfNameTag) case protoreflect.EnumDescriptor: namePath = append(namePath, internal.EnumNameTag) case protoreflect.EnumValueDescriptor: @@ -347,57 +325,50 @@ func sourceSpanFor(d protoreflect.Descriptor) ast.SourceSpan { // descriptor, sans name field } loc := file.SourceLocations().ByPath(namePath) - if internal.IsZeroLocation(loc) { + if isZeroLoc(loc) { loc = file.SourceLocations().ByPath(path) - if internal.IsZeroLocation(loc) { - return ast.UnknownSpan(file.Path()) + if isZeroLoc(loc) { + return ast.UnknownPos(file.Path()) } } - - return ast.NewSourceSpan( - ast.SourcePos{ - Filename: file.Path(), - Line: loc.StartLine, - Col: loc.StartColumn, - }, - ast.SourcePos{ - Filename: file.Path(), - Line: loc.EndLine, - Col: loc.EndColumn, - }, - ) + return ast.SourcePos{ + Filename: file.Path(), + Line: loc.StartLine, + Col: loc.StartColumn, + } } -func sourceSpanForNumber(fd protoreflect.FieldDescriptor) ast.SourceSpan { +func sourcePositionForNumber(fd protoreflect.FieldDescriptor) ast.SourcePos { file := fd.ParentFile() if file == nil { - return ast.UnknownSpan(unknownFilePath) + return ast.UnknownPos(unknownFilePath) } - path, ok := internal.ComputePath(fd) + path, ok := computePath(fd) if !ok { - return ast.UnknownSpan(file.Path()) + return ast.UnknownPos(file.Path()) } numberPath := path numberPath = append(numberPath, internal.FieldNumberTag) loc := file.SourceLocations().ByPath(numberPath) - if internal.IsZeroLocation(loc) { + if isZeroLoc(loc) { loc = file.SourceLocations().ByPath(path) - if internal.IsZeroLocation(loc) { - return ast.UnknownSpan(file.Path()) + if isZeroLoc(loc) { + return ast.UnknownPos(file.Path()) } } - return ast.NewSourceSpan( - ast.SourcePos{ - Filename: file.Path(), - Line: loc.StartLine, - Col: loc.StartColumn, - }, - ast.SourcePos{ - Filename: file.Path(), - Line: loc.EndLine, - Col: loc.EndColumn, - }, - ) + return ast.SourcePos{ + Filename: file.Path(), + Line: loc.StartLine, + Col: loc.StartColumn, + } +} + +func isZeroLoc(loc protoreflect.SourceLocation) bool { + return loc.Path == nil && + loc.StartLine == 0 && + loc.StartColumn == 0 && + loc.EndLine == 0 && + loc.EndColumn == 0 } func (s *packageSymbols) commitFileLocked(f protoreflect.FileDescriptor) { @@ -405,13 +376,13 @@ func (s *packageSymbols) commitFileLocked(f protoreflect.FileDescriptor) { s.symbols = map[protoreflect.FullName]symbolEntry{} } if s.exts == nil { - s.exts = 
map[extNumber]ast.SourceSpan{} + s.exts = map[extNumber]ast.SourcePos{} } _ = walk.Descriptors(f, func(d protoreflect.Descriptor) error { - span := sourceSpanFor(d) + pos := sourcePositionFor(d) name := d.FullName() _, isEnumValue := d.(protoreflect.EnumValueDescriptor) - s.symbols[name] = symbolEntry{span: span, isEnumValue: isEnumValue} + s.symbols[name] = symbolEntry{pos: pos, isEnumValue: isEnumValue} return nil }) @@ -438,14 +409,18 @@ func (s *Symbols) importResultWithExtensions(pkg *packageSymbols, r *result, han } file := r.FileNode() node := r.FieldNode(fd.FieldDescriptorProto()) - info := file.NodeInfo(node.FieldTag()) + pos := file.NodeInfo(node.FieldTag()).Start() extendee := fd.ContainingMessage() - return s.AddExtension(packageFor(extendee), extendee.FullName(), fd.Number(), info, handler) + if err := s.AddExtension(packageFor(extendee), extendee.FullName(), fd.Number(), pos, handler); err != nil { + return err + } + + return nil }) } func (s *Symbols) importResult(r *result, handler *reporter.Handler) error { - pkg, err := s.importPackages(packageNameSpan(r), r.Package(), handler) + pkg, err := s.importPackages(packageNameStart(r), r.Package(), handler) if err != nil || pkg == nil { return err } @@ -471,34 +446,33 @@ func (s *packageSymbols) importResult(r *result, handler *reporter.Handler) (boo } // second pass: commit all symbols - s.commitFileLocked(r) + s.commitResultLocked(r) return true, nil } func (s *packageSymbols) checkResultLocked(r *result, handler *reporter.Handler) error { resultSyms := map[protoreflect.FullName]symbolEntry{} - return walk.Descriptors(r, func(d protoreflect.Descriptor) error { - _, isEnumVal := d.(protoreflect.EnumValueDescriptor) + return walk.DescriptorProtos(r.FileDescriptorProto(), func(fqn protoreflect.FullName, d proto.Message) error { + _, isEnumVal := d.(*descriptorpb.EnumValueDescriptorProto) file := r.FileNode() - name := d.FullName() - node := r.Node(protoutil.ProtoFromDescriptor(d)) - span := nameSpan(file, node) + node := r.Node(d) + pos := nameStart(file, node) // check symbols already in this symbol table - if existing, ok := s.symbols[name]; ok { - if err := reportSymbolCollision(span, name, isEnumVal, existing, handler); err != nil { + if existing, ok := s.symbols[fqn]; ok { + if err := reportSymbolCollision(pos, fqn, isEnumVal, existing, handler); err != nil { return err } } // also check symbols from this result (that are not yet in symbol table) - if existing, ok := resultSyms[name]; ok { - if err := reportSymbolCollision(span, name, isEnumVal, existing, handler); err != nil { + if existing, ok := resultSyms[fqn]; ok { + if err := reportSymbolCollision(pos, fqn, isEnumVal, existing, handler); err != nil { return err } } - resultSyms[name] = symbolEntry{ - span: span, + resultSyms[fqn] = symbolEntry{ + pos: pos, isEnumValue: isEnumVal, } @@ -506,130 +480,88 @@ func (s *packageSymbols) checkResultLocked(r *result, handler *reporter.Handler) }) } -func packageNameSpan(r *result) ast.SourceSpan { +func packageNameStart(r *result) ast.SourcePos { if node, ok := r.FileNode().(*ast.FileNode); ok { for _, decl := range node.Decls { if pkgNode, ok := decl.(*ast.PackageNode); ok { - return r.FileNode().NodeInfo(pkgNode.Name) + return r.FileNode().NodeInfo(pkgNode.Name).Start() } } } - return ast.UnknownSpan(r.Path()) + return ast.UnknownPos(r.Path()) } -func nameSpan(file ast.FileDeclNode, n ast.Node) ast.SourceSpan { +func nameStart(file ast.FileDeclNode, n ast.Node) ast.SourcePos { // TODO: maybe ast package needs a NamedNode 
interface to simplify this? switch n := n.(type) { case ast.FieldDeclNode: - return file.NodeInfo(n.FieldName()) + return file.NodeInfo(n.FieldName()).Start() case ast.MessageDeclNode: - return file.NodeInfo(n.MessageName()) - case ast.OneofDeclNode: - return file.NodeInfo(n.OneofName()) + return file.NodeInfo(n.MessageName()).Start() + case ast.OneOfDeclNode: + return file.NodeInfo(n.OneOfName()).Start() case ast.EnumValueDeclNode: - return file.NodeInfo(n.GetName()) + return file.NodeInfo(n.GetName()).Start() case *ast.EnumNode: - return file.NodeInfo(n.Name) + return file.NodeInfo(n.Name).Start() case *ast.ServiceNode: - return file.NodeInfo(n.Name) + return file.NodeInfo(n.Name).Start() case ast.RPCDeclNode: - return file.NodeInfo(n.GetName()) + return file.NodeInfo(n.GetName()).Start() default: - return file.NodeInfo(n) + return file.NodeInfo(n).Start() + } +} + +func (s *packageSymbols) commitResultLocked(r *result) { + if s.symbols == nil { + s.symbols = map[protoreflect.FullName]symbolEntry{} + } + if s.exts == nil { + s.exts = map[extNumber]ast.SourcePos{} } + _ = walk.DescriptorProtos(r.FileDescriptorProto(), func(fqn protoreflect.FullName, d proto.Message) error { + pos := nameStart(r.FileNode(), r.Node(d)) + _, isEnumValue := d.(protoreflect.EnumValueDescriptor) + s.symbols[fqn] = symbolEntry{pos: pos, isEnumValue: isEnumValue} + return nil + }) + + if s.files == nil { + s.files = map[protoreflect.FileDescriptor]struct{}{} + } + s.files[r] = struct{}{} } -// AddExtension records the given extension, which is used to ensure that no two files -// attempt to extend the same message using the same tag. The given pkg should be the -// package that defines extendee. -func (s *Symbols) AddExtension(pkg, extendee protoreflect.FullName, tag protoreflect.FieldNumber, span ast.SourceSpan, handler *reporter.Handler) error { +func (s *Symbols) AddExtension(pkg, extendee protoreflect.FullName, tag protoreflect.FieldNumber, pos ast.SourcePos, handler *reporter.Handler) error { if pkg != "" { if !strings.HasPrefix(string(extendee), string(pkg)+".") { - return handler.HandleErrorf(span, "could not register extension: extendee %q does not match package %q", extendee, pkg) + return handler.HandleErrorf(pos, "could not register extension: extendee %q does not match package %q", extendee, pkg) } } - pkgSyms := s.getPackage(pkg, true) + pkgSyms := s.getPackage(pkg) if pkgSyms == nil { // should never happen - return handler.HandleErrorf(span, "could not register extension: missing package symbols for %q", pkg) + return handler.HandleErrorf(pos, "could not register extension: missing package symbols for %q", pkg) } - return pkgSyms.addExtension(extendee, tag, span, handler) + return pkgSyms.addExtension(extendee, tag, pos, handler) } -func (s *packageSymbols) addExtension(extendee protoreflect.FullName, tag protoreflect.FieldNumber, span ast.SourceSpan, handler *reporter.Handler) error { +func (s *packageSymbols) addExtension(extendee protoreflect.FullName, tag protoreflect.FieldNumber, pos ast.SourcePos, handler *reporter.Handler) error { s.mu.Lock() defer s.mu.Unlock() - extNum := extNumber{extendee: extendee, tag: tag} - if existing, ok := s.exts[extNum]; ok { - return handler.HandleErrorf(span, "extension with tag %d for message %s already defined at %v", tag, extendee, existing.Start()) - } - if s.exts == nil { - s.exts = map[extNumber]ast.SourceSpan{} + s.exts = map[extNumber]ast.SourcePos{} } - s.exts[extNum] = span - return nil -} -// AddExtensionDeclaration records the given extension 
declaration, which is used to -// ensure that no two declarations refer to the same extension. -func (s *Symbols) AddExtensionDeclaration(extension, extendee protoreflect.FullName, tag protoreflect.FieldNumber, span ast.SourceSpan, handler *reporter.Handler) error { - s.extDeclsMu.Lock() - defer s.extDeclsMu.Unlock() - existing, ok := s.extDecls[extension] - if ok { - if existing.extendee == extendee && existing.tag == tag { - // This is a declaration that has already been added. Ignore. - return nil + extNum := extNumber{extendee: extendee, tag: tag} + if existing, ok := s.exts[extNum]; ok { + if err := handler.HandleErrorf(pos, "extension with tag %d for message %s already defined at %v", tag, extendee, existing); err != nil { + return err } - return handler.HandleErrorf(span, "extension %s already declared as extending %s with tag %d at %v", extension, existing.extendee, existing.tag, existing.span.Start()) - } - if s.extDecls == nil { - s.extDecls = map[protoreflect.FullName]extDecl{} - } - s.extDecls[extension] = extDecl{ - span: span, - extendee: extendee, - tag: tag, - } - return nil -} - -// Lookup finds the registered location of the given name. If the given name has -// not been seen/registered, nil is returned. -func (s *Symbols) Lookup(name protoreflect.FullName) ast.SourceSpan { - // note: getPackage never returns nil when exact=false - pkgSyms := s.getPackage(name, false) - if entry, ok := pkgSyms.symbols[name]; ok { - return entry.span + } else { + s.exts[extNum] = pos } return nil } - -// LookupExtension finds the registered location of the given extension. If the given -// extension has not been seen/registered, nil is returned. -func (s *Symbols) LookupExtension(messageName protoreflect.FullName, extensionNumber protoreflect.FieldNumber) ast.SourceSpan { - // note: getPackage never returns nil when exact=false - pkgSyms := s.getPackage(messageName, false) - return pkgSyms.exts[extNumber{messageName, extensionNumber}] -} - -type nameEnumerator struct { - name protoreflect.FullName - start int -} - -func (e *nameEnumerator) next() (protoreflect.FullName, bool) { - if e.start < 0 { - return "", false - } - pos := strings.IndexByte(string(e.name[e.start:]), '.') - if pos == -1 { - e.start = -1 - return e.name, true - } - pos += e.start - e.start = pos + 1 - return e.name[:pos], true -} diff --git a/vendor/github.com/bufbuild/protocompile/linker/validate.go b/vendor/github.com/bufbuild/protocompile/linker/validate.go index 6633a9f3..f30c5718 100644 --- a/vendor/github.com/bufbuild/protocompile/linker/validate.go +++ b/vendor/github.com/bufbuild/protocompile/linker/validate.go @@ -1,4 +1,4 @@ -// Copyright 2020-2024 Buf Technologies, Inc. +// Copyright 2020-2022 Buf Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,7 +16,6 @@ package linker import ( "fmt" - "math" "strings" "unicode" "unicode/utf8" @@ -24,503 +23,117 @@ import ( "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/types/descriptorpb" - "github.com/bufbuild/protocompile/ast" "github.com/bufbuild/protocompile/internal" - "github.com/bufbuild/protocompile/protoutil" "github.com/bufbuild/protocompile/reporter" - "github.com/bufbuild/protocompile/walk" ) // ValidateOptions runs some validation checks on the result that can only // be done after options are interpreted. 
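Both variants of the Symbols code above locate a package's node in the trie by enumerating progressively longer prefixes of the dotted package name: the strings.Split loop and the removed nameEnumerator produce the same sequence. A hedged sketch of that enumeration and of creating one trie node per prefix, with a simplified trieNode standing in for packageSymbols:

package main

import (
	"fmt"
	"strings"
)

// packagePrefixes enumerates the progressively longer prefixes of a dotted
// package name, e.g. "foo.bar.baz" -> foo, foo.bar, foo.bar.baz. This is the
// path of nodes to visit or create in the package trie.
func packagePrefixes(pkg string) []string {
	if pkg == "" {
		return nil
	}
	parts := strings.Split(pkg, ".")
	prefixes := make([]string, len(parts))
	for i, p := range parts {
		if i == 0 {
			prefixes[i] = p
		} else {
			prefixes[i] = prefixes[i-1] + "." + p
		}
	}
	return prefixes
}

// trieNode is a hypothetical, simplified stand-in for packageSymbols.
type trieNode struct {
	children map[string]*trieNode
}

// ensurePath creates one trie node per package prefix and returns the leaf.
func (n *trieNode) ensurePath(pkg string) *trieNode {
	cur := n
	for _, p := range packagePrefixes(pkg) {
		if cur.children == nil {
			cur.children = map[string]*trieNode{}
		}
		child, ok := cur.children[p]
		if !ok {
			child = &trieNode{}
			cur.children[p] = child
		}
		cur = child
	}
	return cur
}

func main() {
	root := &trieNode{}
	root.ensurePath("foo.bar.baz")
	fmt.Println(packagePrefixes("foo.bar.baz")) // [foo foo.bar foo.bar.baz]
	fmt.Println(len(root.children))             // 1 ("foo")
}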
-func (r *result) ValidateOptions(handler *reporter.Handler, symbols *Symbols) error { - if err := r.validateFile(handler); err != nil { +func (r *result) ValidateOptions(handler *reporter.Handler) error { + if err := r.validateExtensions(r, handler); err != nil { return err } - return walk.Descriptors(r, func(d protoreflect.Descriptor) error { - switch d := d.(type) { - case protoreflect.FieldDescriptor: - if err := r.validateField(d, handler); err != nil { - return err - } - case protoreflect.MessageDescriptor: - if symbols == nil { - symbols = &Symbols{} - } - if err := r.validateMessage(d, handler, symbols); err != nil { - return err - } - case protoreflect.EnumDescriptor: - if err := r.validateEnum(d, handler); err != nil { - return err - } - } - return nil - }) + return r.validateJSONNamesInFile(handler) } -func (r *result) validateFile(handler *reporter.Handler) error { - opts := r.FileDescriptorProto().GetOptions() - if opts.GetOptimizeFor() != descriptorpb.FileOptions_LITE_RUNTIME { - // Non-lite files may not import lite files. - imports := r.Imports() - for i, length := 0, imports.Len(); i < length; i++ { - dep := imports.Get(i) - depOpts, ok := dep.Options().(*descriptorpb.FileOptions) - if !ok { - continue // what else to do? - } - if depOpts.GetOptimizeFor() == descriptorpb.FileOptions_LITE_RUNTIME { - err := handler.HandleErrorf(r.getImportLocation(dep.Path()), "a file that does not use optimize_for=LITE_RUNTIME may not import file %q that does", dep.Path()) - if err != nil { - return err - } - } - } - } - if isEditions(r) { - // Validate features - if opts.GetFeatures().GetFieldPresence() == descriptorpb.FeatureSet_LEGACY_REQUIRED { - span := r.findOptionSpan(r, internal.FileOptionsFeaturesTag, internal.FeatureSetFieldPresenceTag) - err := handler.HandleErrorf(span, "LEGACY_REQUIRED field presence cannot be set as the default for a file") - if err != nil { - return err - } - } - if opts != nil && opts.JavaStringCheckUtf8 != nil { - span := r.findOptionSpan(r, internal.FileOptionsJavaStringCheckUTF8Tag) - err := handler.HandleErrorf(span, `file option java_string_check_utf8 is not allowed with editions; import "google/protobuf/java_features.proto" and use (pb.java).utf8_validation instead`) - if err != nil { - return err - } - } - } - return nil -} - -func (r *result) validateField(fld protoreflect.FieldDescriptor, handler *reporter.Handler) error { - if xtd, ok := fld.(protoreflect.ExtensionTypeDescriptor); ok { - fld = xtd.Descriptor() - } - fd, ok := fld.(*fldDescriptor) - if !ok { - // should not be possible - return fmt.Errorf("field descriptor is wrong type: expecting %T, got %T", (*fldDescriptor)(nil), fld) - } - - if err := r.validatePacked(fd, handler); err != nil { - return err - } - if fd.Kind() == protoreflect.EnumKind { - requiresOpen := !fd.IsList() && !fd.HasPresence() - if requiresOpen && fd.Enum().IsClosed() { - // Fields in a proto3 message cannot refer to proto2 enums. - // In editions, this translates to implicit presence fields - // not being able to refer to closed enums. - // TODO: This really should be based solely on whether the enum's first - // value is zero, NOT based on if it's open vs closed. 
- // https://github.com/protocolbuffers/protobuf/issues/16249 - file := r.FileNode() - info := file.NodeInfo(r.FieldNode(fd.proto).FieldType()) - if err := handler.HandleErrorf(info, "cannot use closed enum %s in a field with implicit presence", fd.Enum().FullName()); err != nil { - return err - } - } - } - if fd.HasDefault() && !fd.HasPresence() { - span := r.findScalarOptionSpan(r.FieldNode(fd.proto), "default") - err := handler.HandleErrorf(span, "default value is not allowed on fields with implicit presence") - if err != nil { +func (r *result) validateExtensions(d hasExtensionsAndMessages, handler *reporter.Handler) error { + for i := 0; i < d.Extensions().Len(); i++ { + if err := r.validateExtension(d.Extensions().Get(i), handler); err != nil { return err } } - if fd.proto.Options != nil && fd.proto.Options.Ctype != nil { - if descriptorpb.Edition(r.Edition()) >= descriptorpb.Edition_EDITION_2024 { - // We don't support edition 2024 yet, but we went ahead and mimic'ed this check - // from protoc, which currently has experimental support for 2024. - span := r.findOptionSpan(fd, internal.FieldOptionsCTypeTag) - if err := handler.HandleErrorf(span, "ctype option cannot be used as of edition 2024; use features.string_type instead"); err != nil { - return err - } - } else if descriptorpb.Edition(r.Edition()) == descriptorpb.Edition_EDITION_2023 { - if fld.Kind() != protoreflect.StringKind && fld.Kind() != protoreflect.BytesKind { - span := r.findOptionSpan(fd, internal.FieldOptionsCTypeTag) - if err := handler.HandleErrorf(span, "ctype option can only be used on string and bytes fields"); err != nil { - return err - } - } - if fd.proto.Options.GetCtype() == descriptorpb.FieldOptions_CORD && fd.IsExtension() { - span := r.findOptionSpan(fd, internal.FieldOptionsCTypeTag) - if err := handler.HandleErrorf(span, "ctype option cannot be CORD for extension fields"); err != nil { - return err - } - } - } - } - if (fd.proto.Options.GetLazy() || fd.proto.Options.GetUnverifiedLazy()) && fd.Kind() != protoreflect.MessageKind { - var span ast.SourceSpan - var optionName string - if fd.proto.Options.GetLazy() { - span = r.findOptionSpan(fd, internal.FieldOptionsLazyTag) - optionName = "lazy" - } else { - span = r.findOptionSpan(fd, internal.FieldOptionsUnverifiedLazyTag) - optionName = "unverified_lazy" - } - var suffix string - if fd.Kind() == protoreflect.GroupKind { - if isEditions(r) { - suffix = " that use length-prefixed encoding" - } else { - suffix = ", not groups" - } - } - if err := handler.HandleErrorf(span, "%s option can only be used with message fields%s", optionName, suffix); err != nil { + for i := 0; i < d.Messages().Len(); i++ { + if err := r.validateExtensions(d.Messages().Get(i), handler); err != nil { return err } } - if fd.proto.Options.GetJstype() != descriptorpb.FieldOptions_JS_NORMAL { - switch fd.Kind() { - case protoreflect.Int64Kind, protoreflect.Uint64Kind, protoreflect.Sint64Kind, - protoreflect.Fixed64Kind, protoreflect.Sfixed64Kind: - // allowed only for 64-bit integer types - default: - span := r.findOptionSpan(fd, internal.FieldOptionsJSTypeTag) - err := handler.HandleErrorf(span, "only 64-bit integer fields (int64, uint64, sint64, fixed64, and sfixed64) can specify a jstype other than JS_NORMAL") - if err != nil { - return err - } - } - } - if isEditions(r) { - if err := r.validateFieldFeatures(fd, handler); err != nil { - return err - } - } - - if fld.IsExtension() { - // More checks if this is an extension field. 
- if err := r.validateExtension(fd, handler); err != nil { - return err - } - } - return nil } -func (r *result) validateExtension(fd *fldDescriptor, handler *reporter.Handler) error { +func (r *result) validateExtension(fld protoreflect.FieldDescriptor, handler *reporter.Handler) error { // NB: It's a little gross that we don't enforce these in validateBasic(). // But it requires linking to resolve the extendee, so we can interrogate // its descriptor. - msg := fd.ContainingMessage() - if msg.Options().(*descriptorpb.MessageOptions).GetMessageSetWireFormat() { + if xtd, ok := fld.(protoreflect.ExtensionTypeDescriptor); ok { + fld = xtd.Descriptor() + } + fd := fld.(*fldDescriptor) //nolint:errcheck + if fld.ContainingMessage().Options().(*descriptorpb.MessageOptions).GetMessageSetWireFormat() { // Message set wire format requires that all extensions be messages // themselves (no scalar extensions) - if fd.Kind() != protoreflect.MessageKind { + if fld.Kind() != protoreflect.MessageKind { file := r.FileNode() - info := file.NodeInfo(r.FieldNode(fd.proto).FieldType()) - err := handler.HandleErrorf(info, "messages with message-set wire format cannot contain scalar extensions, only messages") - if err != nil { - return err - } + pos := file.NodeInfo(r.FieldNode(fd.proto).FieldType()).Start() + return handler.HandleErrorf(pos, "messages with message-set wire format cannot contain scalar extensions, only messages") } - if fd.Cardinality() == protoreflect.Repeated { + if fld.Cardinality() == protoreflect.Repeated { file := r.FileNode() - info := file.NodeInfo(r.FieldNode(fd.proto).FieldLabel()) - err := handler.HandleErrorf(info, "messages with message-set wire format cannot contain repeated extensions, only optional") - if err != nil { - return err - } + pos := file.NodeInfo(r.FieldNode(fd.proto).FieldLabel()).Start() + return handler.HandleErrorf(pos, "messages with message-set wire format cannot contain repeated extensions, only optional") } - } else if fd.Number() > internal.MaxNormalTag { + } else if fld.Number() > internal.MaxNormalTag { // In validateBasic() we just made sure these were within bounds for any message. But // now that things are linked, we can check if the extendee is messageset wire format // and, if not, enforce tighter limit. file := r.FileNode() - info := file.NodeInfo(r.FieldNode(fd.proto).FieldTag()) - err := handler.HandleErrorf(info, "tag number %d is higher than max allowed tag number (%d)", fd.Number(), internal.MaxNormalTag) - if err != nil { - return err - } - } - - fileOpts := r.FileDescriptorProto().GetOptions() - if fileOpts.GetOptimizeFor() == descriptorpb.FileOptions_LITE_RUNTIME { - extendeeFileOpts, _ := msg.ParentFile().Options().(*descriptorpb.FileOptions) - if extendeeFileOpts.GetOptimizeFor() != descriptorpb.FileOptions_LITE_RUNTIME { - file := r.FileNode() - info := file.NodeInfo(r.FieldNode(fd.proto)) - err := handler.HandleErrorf(info, "extensions in a file that uses optimize_for=LITE_RUNTIME may not extend messages in file %q which does not", msg.ParentFile().Path()) - if err != nil { - return err - } - } - } - - // If the extendee uses extension declarations, make sure this extension matches. 
- md := protoutil.ProtoFromMessageDescriptor(msg) - for i, extRange := range md.ExtensionRange { - if int32(fd.Number()) < extRange.GetStart() || int32(fd.Number()) >= extRange.GetEnd() { - continue - } - extRangeOpts := extRange.GetOptions() - if extRangeOpts == nil { - break - } - if len(extRangeOpts.Declaration) == 0 && extRangeOpts.GetVerification() != descriptorpb.ExtensionRangeOptions_DECLARATION { - break - } - var found bool - for j, extDecl := range extRangeOpts.Declaration { - if extDecl.GetNumber() != int32(fd.Number()) { - continue - } - found = true - if extDecl.GetReserved() { - file := r.FileNode() - info := file.NodeInfo(r.FieldNode(fd.proto).FieldTag()) - span, _ := findExtensionRangeOptionSpan(msg.ParentFile(), msg, i, extRange, - internal.ExtensionRangeOptionsDeclarationTag, int32(j), internal.ExtensionRangeOptionsDeclarationReservedTag) - err := handler.HandleErrorf(info, "cannot use field number %d for an extension because it is reserved in declaration at %v", - fd.Number(), span.Start()) - if err != nil { - return err - } - break - } - if extDecl.GetFullName() != "."+string(fd.FullName()) { - file := r.FileNode() - info := file.NodeInfo(r.FieldNode(fd.proto).FieldName()) - span, _ := findExtensionRangeOptionSpan(msg.ParentFile(), msg, i, extRange, - internal.ExtensionRangeOptionsDeclarationTag, int32(j), internal.ExtensionRangeOptionsDeclarationFullNameTag) - err := handler.HandleErrorf(info, "expected extension with number %d to be named %s, not %s, per declaration at %v", - fd.Number(), strings.TrimPrefix(extDecl.GetFullName(), "."), fd.FullName(), span.Start()) - if err != nil { - return err - } - } - if extDecl.GetType() != getTypeName(fd) { - file := r.FileNode() - info := file.NodeInfo(r.FieldNode(fd.proto).FieldType()) - span, _ := findExtensionRangeOptionSpan(msg.ParentFile(), msg, i, extRange, - internal.ExtensionRangeOptionsDeclarationTag, int32(j), internal.ExtensionRangeOptionsDeclarationTypeTag) - err := handler.HandleErrorf(info, "expected extension with number %d to have type %s, not %s, per declaration at %v", - fd.Number(), strings.TrimPrefix(extDecl.GetType(), "."), getTypeName(fd), span.Start()) - if err != nil { - return err - } - } - if extDecl.GetRepeated() != (fd.Cardinality() == protoreflect.Repeated) { - expected, actual := "repeated", "optional" - if !extDecl.GetRepeated() { - expected, actual = actual, expected - } - file := r.FileNode() - info := file.NodeInfo(r.FieldNode(fd.proto).FieldLabel()) - span, _ := findExtensionRangeOptionSpan(msg.ParentFile(), msg, i, extRange, - internal.ExtensionRangeOptionsDeclarationTag, int32(j), internal.ExtensionRangeOptionsDeclarationRepeatedTag) - err := handler.HandleErrorf(info, "expected extension with number %d to be %s, not %s, per declaration at %v", - fd.Number(), expected, actual, span.Start()) - if err != nil { - return err - } - } - break - } - if !found { - file := r.FileNode() - info := file.NodeInfo(r.FieldNode(fd.proto).FieldTag()) - span, _ := findExtensionRangeOptionSpan(fd.ParentFile(), msg, i, extRange, - internal.ExtensionRangeOptionsVerificationTag) - err := handler.HandleErrorf(info, "expected extension with number %d to be declared in type %s, but no declaration found at %v", - fd.Number(), fd.ContainingMessage().FullName(), span.Start()) - if err != nil { - return err - } - } + pos := file.NodeInfo(r.FieldNode(fd.proto).FieldTag()).Start() + return handler.HandleErrorf(pos, "tag number %d is higher than max allowed tag number (%d)", fld.Number(), internal.MaxNormalTag) } return nil 
} -func (r *result) validatePacked(fd *fldDescriptor, handler *reporter.Handler) error { - if fd.proto.Options != nil && fd.proto.Options.Packed != nil && isEditions(r) { - span := r.findOptionSpan(fd, internal.FieldOptionsPackedTag) - err := handler.HandleErrorf(span, "packed option cannot be used with editions; use features.repeated_field_encoding=PACKED instead") - if err != nil { +func (r *result) validateJSONNamesInFile(handler *reporter.Handler) error { + for _, md := range r.FileDescriptorProto().GetMessageType() { + if err := r.validateJSONNamesInMessage(md, handler); err != nil { return err } } - if !fd.proto.GetOptions().GetPacked() { - // if packed isn't true, nothing to validate - return nil - } - if fd.proto.GetLabel() != descriptorpb.FieldDescriptorProto_LABEL_REPEATED { - file := r.FileNode() - info := file.NodeInfo(r.FieldNode(fd.proto).FieldLabel()) - err := handler.HandleErrorf(info, "packed option is only allowed on repeated fields") - if err != nil { - return err - } - } - switch fd.proto.GetType() { - case descriptorpb.FieldDescriptorProto_TYPE_STRING, descriptorpb.FieldDescriptorProto_TYPE_BYTES, - descriptorpb.FieldDescriptorProto_TYPE_MESSAGE, descriptorpb.FieldDescriptorProto_TYPE_GROUP: - file := r.FileNode() - info := file.NodeInfo(r.FieldNode(fd.proto).FieldType()) - err := handler.HandleErrorf(info, "packed option is only allowed on numeric, boolean, and enum fields") - if err != nil { + for _, ed := range r.FileDescriptorProto().GetEnumType() { + if err := r.validateJSONNamesInEnum(ed, handler); err != nil { return err } } return nil } -func (r *result) validateFieldFeatures(fld *fldDescriptor, handler *reporter.Handler) error { - if msg, ok := fld.Parent().(*msgDescriptor); ok && msg.proto.GetOptions().GetMapEntry() { - // Skip validating features on fields of synthetic map entry messages. - // We blindly propagate them from the map field's features, but some may - // really only apply to the map field and not to a key or value entry field. - return nil - } - features := fld.proto.GetOptions().GetFeatures() - if features == nil { - // No features to validate. 
- return nil - } - if features.FieldPresence != nil { - switch { - case fld.proto.OneofIndex != nil: - span := r.findOptionSpan(fld, internal.FieldOptionsFeaturesTag, internal.FeatureSetFieldPresenceTag) - if err := handler.HandleErrorf(span, "oneof fields may not specify field presence"); err != nil { - return err - } - case fld.Cardinality() == protoreflect.Repeated: - span := r.findOptionSpan(fld, internal.FieldOptionsFeaturesTag, internal.FeatureSetFieldPresenceTag) - if err := handler.HandleErrorf(span, "repeated fields may not specify field presence"); err != nil { - return err - } - case fld.IsExtension(): - span := r.findOptionSpan(fld, internal.FieldOptionsFeaturesTag, internal.FeatureSetFieldPresenceTag) - if err := handler.HandleErrorf(span, "extension fields may not specify field presence"); err != nil { - return err - } - case fld.Message() != nil && features.GetFieldPresence() == descriptorpb.FeatureSet_IMPLICIT: - span := r.findOptionSpan(fld, internal.FieldOptionsFeaturesTag, internal.FeatureSetFieldPresenceTag) - if err := handler.HandleErrorf(span, "message fields may not specify implicit presence"); err != nil { - return err - } - } - } - if features.RepeatedFieldEncoding != nil { - if fld.Cardinality() != protoreflect.Repeated { - span := r.findOptionSpan(fld, internal.FieldOptionsFeaturesTag, internal.FeatureSetRepeatedFieldEncodingTag) - if err := handler.HandleErrorf(span, "only repeated fields may specify repeated field encoding"); err != nil { - return err - } - } else if !internal.CanPack(fld.Kind()) && features.GetRepeatedFieldEncoding() == descriptorpb.FeatureSet_PACKED { - span := r.findOptionSpan(fld, internal.FieldOptionsFeaturesTag, internal.FeatureSetRepeatedFieldEncodingTag) - if err := handler.HandleErrorf(span, "only repeated primitive fields may specify packed encoding"); err != nil { - return err - } - } - } - if features.Utf8Validation != nil { - isMap := fld.IsMap() - if (!isMap && fld.Kind() != protoreflect.StringKind) || - (isMap && - fld.MapKey().Kind() != protoreflect.StringKind && - fld.MapValue().Kind() != protoreflect.StringKind) { - span := r.findOptionSpan(fld, internal.FieldOptionsFeaturesTag, internal.FeatureSetUTF8ValidationTag) - if err := handler.HandleErrorf(span, "only string fields may specify UTF8 validation"); err != nil { - return err - } - } - } - if features.MessageEncoding != nil { - if fld.Message() == nil || fld.IsMap() { - span := r.findOptionSpan(fld, internal.FieldOptionsFeaturesTag, internal.FeatureSetMessageEncodingTag) - if err := handler.HandleErrorf(span, "only message fields may specify message encoding"); err != nil { - return err - } - } - } - return nil -} - -func (r *result) validateMessage(d protoreflect.MessageDescriptor, handler *reporter.Handler, symbols *Symbols) error { - md, ok := d.(*msgDescriptor) - if !ok { - // should not be possible - return fmt.Errorf("message descriptor is wrong type: expecting %T, got %T", (*msgDescriptor)(nil), d) - } - - if err := r.validateJSONNamesInMessage(md, handler); err != nil { - return err - } - - return r.validateExtensionDeclarations(md, handler, symbols) -} - -func (r *result) validateJSONNamesInMessage(md *msgDescriptor, handler *reporter.Handler) error { +func (r *result) validateJSONNamesInMessage(md *descriptorpb.DescriptorProto, handler *reporter.Handler) error { if err := r.validateFieldJSONNames(md, false, handler); err != nil { return err } if err := r.validateFieldJSONNames(md, true, handler); err != nil { return err } - return nil -} - -func (r *result) 
validateEnum(d protoreflect.EnumDescriptor, handler *reporter.Handler) error { - ed, ok := d.(*enumDescriptor) - if !ok { - // should not be possible - return fmt.Errorf("enum descriptor is wrong type: expecting %T, got %T", (*enumDescriptor)(nil), d) - } - firstValue := ed.Values().Get(0) - if !ed.IsClosed() && firstValue.Number() != 0 { - // TODO: This check doesn't really belong here. Whether the - // first value is zero s/b orthogonal to whether the - // allowed values are open or closed. - // https://github.com/protocolbuffers/protobuf/issues/16249 - file := r.FileNode() - evd, ok := firstValue.(*enValDescriptor) - if !ok { - // should not be possible - return fmt.Errorf("enum value descriptor is wrong type: expecting %T, got %T", (*enValDescriptor)(nil), firstValue) - } - info := file.NodeInfo(r.EnumValueNode(evd.proto).GetNumber()) - if err := handler.HandleErrorf(info, "first value of open enum %s must have numeric value zero", ed.FullName()); err != nil { + for _, nmd := range md.GetNestedType() { + if err := r.validateJSONNamesInMessage(nmd, handler); err != nil { return err } } - - if err := r.validateJSONNamesInEnum(ed, handler); err != nil { - return err + for _, ed := range md.GetEnumType() { + if err := r.validateJSONNamesInEnum(ed, handler); err != nil { + return err + } } - return nil } -func (r *result) validateJSONNamesInEnum(ed *enumDescriptor, handler *reporter.Handler) error { +func (r *result) validateJSONNamesInEnum(ed *descriptorpb.EnumDescriptorProto, handler *reporter.Handler) error { seen := map[string]*descriptorpb.EnumValueDescriptorProto{} - for _, evd := range ed.proto.GetValue() { - scope := "enum value " + ed.proto.GetName() + "." + evd.GetName() + for _, evd := range ed.GetValue() { + scope := "enum value " + ed.GetName() + "." + evd.GetName() - name := canonicalEnumValueName(evd.GetName(), ed.proto.GetName()) + name := canonicalEnumValueName(evd.GetName(), ed.GetName()) if existing, ok := seen[name]; ok && evd.GetNumber() != existing.GetNumber() { fldNode := r.EnumValueNode(evd) existingNode := r.EnumValueNode(existing) conflictErr := fmt.Errorf("%s: camel-case name (with optional enum name prefix removed) %q conflicts with camel-case name of enum value %s, defined at %v", scope, name, existing.GetName(), r.FileNode().NodeInfo(existingNode).Start()) - // Since proto2 did not originally have a JSON format, we report conflicts as just warnings. - // With editions, not fully supporting JSON is allowed via feature: json_format == BEST_EFFORT - if !isJSONCompliant(ed) { - handler.HandleWarningWithPos(r.FileNode().NodeInfo(fldNode), conflictErr) - } else if err := handler.HandleErrorWithPos(r.FileNode().NodeInfo(fldNode), conflictErr); err != nil { + // Since proto2 did not originally have a JSON format, we report conflicts as just warnings + if r.Syntax() != protoreflect.Proto3 { + handler.HandleWarningWithPos(r.FileNode().NodeInfo(fldNode).Start(), conflictErr) + } else if err := handler.HandleErrorf(r.FileNode().NodeInfo(fldNode).Start(), conflictErr.Error()); err != nil { return err } } else { @@ -530,7 +143,7 @@ func (r *result) validateJSONNamesInEnum(ed *enumDescriptor, handler *reporter.H return nil } -func (r *result) validateFieldJSONNames(md *msgDescriptor, useCustom bool, handler *reporter.Handler) error { +func (r *result) validateFieldJSONNames(md *descriptorpb.DescriptorProto, useCustom bool, handler *reporter.Handler) error { type jsonName struct { source *descriptorpb.FieldDescriptorProto // true if orig is a custom JSON name (vs. 
the field's default JSON name) @@ -538,8 +151,8 @@ func (r *result) validateFieldJSONNames(md *msgDescriptor, useCustom bool, handl } seen := map[string]jsonName{} - for _, fd := range md.proto.GetField() { - scope := "field " + md.proto.GetName() + "." + fd.GetName() + for _, fd := range md.GetField() { + scope := "field " + md.GetName() + "." + fd.GetName() defaultName := internal.JSONName(fd.GetName()) name := defaultName custom := false @@ -563,14 +176,13 @@ func (r *result) validateFieldJSONNames(md *msgDescriptor, useCustom bool, handl if !existing.custom { srcCustomStr = "default" } - info := r.FileNode().NodeInfo(fldNode) - conflictErr := reporter.Errorf(info, "%s: %s JSON name %q conflicts with %s JSON name of field %s, defined at %v", + pos := r.FileNode().NodeInfo(fldNode).Start() + conflictErr := reporter.Errorf(pos, "%s: %s JSON name %q conflicts with %s JSON name of field %s, defined at %v", scope, customStr, name, srcCustomStr, existing.source.GetName(), r.FileNode().NodeInfo(r.FieldNode(existing.source)).Start()) // Since proto2 did not originally have default JSON names, we report conflicts // between default names (neither is a custom name) as just warnings. - // With editions, not fully supporting JSON is allowed via feature: json_format == BEST_EFFORT - if !isJSONCompliant(md) && !custom && !existing.custom { + if r.Syntax() != protoreflect.Proto3 && !custom && !existing.custom { handler.HandleWarning(conflictErr) } else if err := handler.HandleError(conflictErr); err != nil { return err @@ -583,146 +195,6 @@ func (r *result) validateFieldJSONNames(md *msgDescriptor, useCustom bool, handl return nil } -func (r *result) validateExtensionDeclarations(md *msgDescriptor, handler *reporter.Handler, symbols *Symbols) error { - for i, extRange := range md.proto.ExtensionRange { - opts := extRange.GetOptions() - if len(opts.GetDeclaration()) == 0 { - // nothing to check - continue - } - // If any declarations are present, verification is assumed to be - // DECLARATION. It's an error for declarations to be present but the - // verification field explicitly set to something other than that. - if opts.Verification != nil && opts.GetVerification() != descriptorpb.ExtensionRangeOptions_DECLARATION { - span, ok := findExtensionRangeOptionSpan(r, md, i, extRange, internal.ExtensionRangeOptionsVerificationTag) - if !ok { - span, _ = findExtensionRangeOptionSpan(r, md, i, extRange, internal.ExtensionRangeOptionsDeclarationTag, 0) - } - if err := handler.HandleErrorf(span, "extension range cannot have declarations and have verification of %s", opts.GetVerification()); err != nil { - return err - } - } - declsByTag := map[int32]ast.SourcePos{} - for i, extDecl := range extRange.GetOptions().GetDeclaration() { - if extDecl.Number == nil { - span, _ := findExtensionRangeOptionSpan(r, md, i, extRange, internal.ExtensionRangeOptionsDeclarationTag, int32(i)) - if err := handler.HandleErrorf(span, "extension declaration is missing required field number"); err != nil { - return err - } - } else { - extensionNumberSpan, _ := findExtensionRangeOptionSpan(r, md, i, extRange, - internal.ExtensionRangeOptionsDeclarationTag, int32(i), internal.ExtensionRangeOptionsDeclarationNumberTag) - if extDecl.GetNumber() < extRange.GetStart() || extDecl.GetNumber() >= extRange.GetEnd() { - // Number is out of range. - // See if one of the other ranges on the same extends statement includes the number, - // so we can provide a helpful message. 
- var suffix string - if extRange, ok := r.ExtensionsNode(extRange).(*ast.ExtensionRangeNode); ok { - for _, rng := range extRange.Ranges { - start, _ := rng.StartVal.AsInt64() - var end int64 - switch { - case rng.Max != nil: - end = math.MaxInt64 - case rng.EndVal != nil: - end, _ = rng.EndVal.AsInt64() - default: - end = start - } - if int64(extDecl.GetNumber()) >= start && int64(extDecl.GetNumber()) <= end { - // Found another range that matches - suffix = "; when using declarations, extends statements should indicate only a single span of field numbers" - break - } - } - } - err := handler.HandleErrorf(extensionNumberSpan, "extension declaration has number outside the range: %d not in [%d,%d]%s", - extDecl.GetNumber(), extRange.GetStart(), extRange.GetEnd()-1, suffix) - if err != nil { - return err - } - } else { - // Valid number; make sure it's not a duplicate - if existing, ok := declsByTag[extDecl.GetNumber()]; ok { - err := handler.HandleErrorf(extensionNumberSpan, "extension for tag number %d already declared at %v", - extDecl.GetNumber(), existing) - if err != nil { - return err - } - } else { - declsByTag[extDecl.GetNumber()] = extensionNumberSpan.Start() - } - } - } - - if extDecl.FullName == nil && !extDecl.GetReserved() { - span, _ := findExtensionRangeOptionSpan(r, md, i, extRange, internal.ExtensionRangeOptionsDeclarationTag, int32(i)) - if err := handler.HandleErrorf(span, "extension declaration that is not marked reserved must have a full_name"); err != nil { - return err - } - } else if extDecl.FullName != nil { - var extensionFullName protoreflect.FullName - extensionNameSpan, _ := findExtensionRangeOptionSpan(r, md, i, extRange, - internal.ExtensionRangeOptionsDeclarationTag, int32(i), internal.ExtensionRangeOptionsDeclarationFullNameTag) - if !strings.HasPrefix(extDecl.GetFullName(), ".") { - if err := handler.HandleErrorf(extensionNameSpan, "extension declaration full name %q should start with a leading dot (.)", extDecl.GetFullName()); err != nil { - return err - } - extensionFullName = protoreflect.FullName(extDecl.GetFullName()) - } else { - extensionFullName = protoreflect.FullName(extDecl.GetFullName()[1:]) - } - if !extensionFullName.IsValid() { - if err := handler.HandleErrorf(extensionNameSpan, "extension declaration full name %q is not a valid qualified name", extDecl.GetFullName()); err != nil { - return err - } - } - if err := symbols.AddExtensionDeclaration(extensionFullName, md.FullName(), protoreflect.FieldNumber(extDecl.GetNumber()), extensionNameSpan, handler); err != nil { - return err - } - } - - if extDecl.Type == nil && !extDecl.GetReserved() { - span, _ := findExtensionRangeOptionSpan(r, md, i, extRange, internal.ExtensionRangeOptionsDeclarationTag, int32(i)) - if err := handler.HandleErrorf(span, "extension declaration that is not marked reserved must have a type"); err != nil { - return err - } - } else if extDecl.Type != nil { - if strings.HasPrefix(extDecl.GetType(), ".") { - if !protoreflect.FullName(extDecl.GetType()[1:]).IsValid() { - span, _ := findExtensionRangeOptionSpan(r, md, i, extRange, - internal.ExtensionRangeOptionsDeclarationTag, int32(i), internal.ExtensionRangeOptionsDeclarationTypeTag) - if err := handler.HandleErrorf(span, "extension declaration type %q is not a valid qualified name", extDecl.GetType()); err != nil { - return err - } - } - } else if !isBuiltinTypeName(extDecl.GetType()) { - span, _ := findExtensionRangeOptionSpan(r, md, i, extRange, - internal.ExtensionRangeOptionsDeclarationTag, int32(i), 
internal.ExtensionRangeOptionsDeclarationTypeTag) - if err := handler.HandleErrorf(span, "extension declaration type %q must be a builtin type or start with a leading dot (.)", extDecl.GetType()); err != nil { - return err - } - } - } - - if extDecl.GetReserved() && (extDecl.FullName == nil) != (extDecl.Type == nil) { - var fieldTag int32 - if extDecl.FullName != nil { - fieldTag = internal.ExtensionRangeOptionsDeclarationFullNameTag - } else { - fieldTag = internal.ExtensionRangeOptionsDeclarationTypeTag - } - span, _ := findExtensionRangeOptionSpan(r, md, i, extRange, - internal.ExtensionRangeOptionsDeclarationTag, int32(i), fieldTag) - if err := handler.HandleErrorf(span, "extension declarations that are reserved should specify both full_name and type or neither"); err != nil { - return err - } - } - } - } - return nil -} - func (r *result) hasCustomJSONName(fdProto *descriptorpb.FieldDescriptorProto) bool { // if we have the AST, we can more precisely determine if there was a custom // JSON named defined, even if it is explicitly configured to tbe the same @@ -809,345 +281,3 @@ func enumValCamelCase(name string) string { } return string(js) } - -func isBuiltinTypeName(typeName string) bool { - switch typeName { - case "int32", "int64", "uint32", "uint64", "sint32", "sint64", - "fixed32", "fixed64", "sfixed32", "sfixed64", - "bool", "double", "float", "string", "bytes": - return true - default: - return false - } -} - -func getTypeName(fd protoreflect.FieldDescriptor) string { - switch fd.Kind() { - case protoreflect.MessageKind, protoreflect.GroupKind: - return "." + string(fd.Message().FullName()) - case protoreflect.EnumKind: - return "." + string(fd.Enum().FullName()) - default: - return fd.Kind().String() - } -} - -func findExtensionRangeOptionSpan( - file protoreflect.FileDescriptor, - extended protoreflect.MessageDescriptor, - extRangeIndex int, - extRange *descriptorpb.DescriptorProto_ExtensionRange, - path ...int32, -) (ast.SourceSpan, bool) { - // NB: Typically, we have an AST for a file and NOT source code info, because the - // compiler validates options before computing source code info. However, we might - // be validating an extension (whose source/AST we have), but whose extendee (and - // thus extension range options for declarations) could be in some other file, which - // could be provided to the compiler as an already-compiled descriptor. So this - // function can fallback to using source code info if an AST is not available. - - if r, ok := file.(Result); ok && r.AST() != nil { - // Find the location using the AST, which will generally be higher fidelity - // than what we might find in a file descriptor's source code info. - exts := r.ExtensionsNode(extRange) - return findOptionSpan(r.FileNode(), exts, extRange.Options.ProtoReflect().Descriptor(), path...) - } - - srcLocs := file.SourceLocations() - if srcLocs.Len() == 0 { - // no source code info, can't do any better than the filename. We - // return true as the boolean so the caller doesn't try again with - // an alternate path, since we won't be able to do any better. - return ast.UnknownSpan(file.Path()), true - } - msgPath, ok := internal.ComputePath(extended) - if !ok { - // Same as above: return true since no subsequent query can do better. 
- return ast.UnknownSpan(file.Path()), true - } - - //nolint:gocritic // intentionally assigning to different slice variables - extRangePath := append(msgPath, internal.MessageExtensionRangesTag, int32(extRangeIndex)) - optsPath := append(extRangePath, internal.ExtensionRangeOptionsTag) //nolint:gocritic - fullPath := append(optsPath, path...) //nolint:gocritic - srcLoc := srcLocs.ByPath(fullPath) - if srcLoc.Path != nil { - // found it - return asSpan(file.Path(), srcLoc), true - } - - // Slow path to find closest match :/ - // We look for longest matching path that is at least len(extRangePath) - // long. If we find a path that is longer (meaning a path that points INSIDE - // the request element), accept the first such location. - var bestMatch protoreflect.SourceLocation - var bestMatchPathLen int - for i, length := 0, srcLocs.Len(); i < length; i++ { - srcLoc := srcLocs.Get(i) - if len(srcLoc.Path) >= len(extRangePath) && - isDescendantPath(fullPath, srcLoc.Path) && - len(srcLoc.Path) > bestMatchPathLen { - bestMatch = srcLoc - bestMatchPathLen = len(srcLoc.Path) - } else if isDescendantPath(srcLoc.Path, path) { - return asSpan(file.Path(), srcLoc), false - } - } - if bestMatchPathLen > 0 { - return asSpan(file.Path(), bestMatch), false - } - return ast.UnknownSpan(file.Path()), false -} - -func (r *result) findScalarOptionSpan( - root ast.NodeWithOptions, - name string, -) ast.SourceSpan { - match := ast.Node(root) - root.RangeOptions(func(n *ast.OptionNode) bool { - if len(n.Name.Parts) == 1 && !n.Name.Parts[0].IsExtension() && - string(n.Name.Parts[0].Name.AsIdentifier()) == name { - match = n - return false - } - return true - }) - return r.FileNode().NodeInfo(match) -} - -func (r *result) findOptionSpan( - d protoutil.DescriptorProtoWrapper, - path ...int32, -) ast.SourceSpan { - node := r.Node(d.AsProto()) - nodeWithOpts, ok := node.(ast.NodeWithOptions) - if !ok { - return r.FileNode().NodeInfo(node) - } - span, _ := findOptionSpan(r.FileNode(), nodeWithOpts, d.Options().ProtoReflect().Descriptor(), path...) - return span -} - -func findOptionSpan( - file ast.FileDeclNode, - root ast.NodeWithOptions, - md protoreflect.MessageDescriptor, - path ...int32, -) (ast.SourceSpan, bool) { - bestMatch := ast.Node(root) - var bestMatchLen int - var repeatedIndices []int - root.RangeOptions(func(n *ast.OptionNode) bool { - desc := md - limit := len(n.Name.Parts) - if limit > len(path) { - limit = len(path) - } - var nextIsIndex bool - for i := 0; i < limit; i++ { - if desc == nil || nextIsIndex { - // Can't match anymore. Try next option. - return true - } - wantField := desc.Fields().ByNumber(protoreflect.FieldNumber(path[i])) - if wantField == nil { - // Should not be possible... next option won't fare any better since - // it's a disagreement between given path and given descriptor so bail. - return false - } - if n.Name.Parts[i].Open != nil || - string(n.Name.Parts[i].Name.AsIdentifier()) != string(wantField.Name()) { - // This is an extension/custom option or indicates the wrong name. - // Try the next one. - return true - } - desc = wantField.Message() - nextIsIndex = wantField.Cardinality() == protoreflect.Repeated - } - // If we made it this far, we've matched everything so far. - if len(n.Name.Parts) >= len(path) { - // Either an exact match (if equal) or this option points *inside* the - // item we care about (if greater). Either way, the first such result - // is a keeper. 
- bestMatch = n.Name.Parts[len(path)-1] - bestMatchLen = len(n.Name.Parts) - return false - } - // We've got more path elements to try to match with the value. - match, matchLen := findMatchingValueNode( - desc, - path[len(n.Name.Parts):], - nextIsIndex, - 0, - &repeatedIndices, - n, - n.Val) - if match != nil { - totalMatchLen := matchLen + len(n.Name.Parts) - if totalMatchLen > bestMatchLen { - bestMatch, bestMatchLen = match, totalMatchLen - } - } - return bestMatchLen != len(path) // no exact match, so keep looking - }) - return file.NodeInfo(bestMatch), bestMatchLen == len(path) -} - -func findMatchingValueNode( - md protoreflect.MessageDescriptor, - path protoreflect.SourcePath, - currIsRepeated bool, - repeatedCount int, - repeatedIndices *[]int, - node ast.Node, - val ast.ValueNode, -) (ast.Node, int) { - var matchLen int - var index int - if currIsRepeated { - // Compute the index of the current value (or, if an array literal, the - // index of the first value in the array). - if len(*repeatedIndices) > repeatedCount { - (*repeatedIndices)[repeatedCount]++ - index = (*repeatedIndices)[repeatedCount] - } else { - *repeatedIndices = append(*repeatedIndices, 0) - index = 0 - } - repeatedCount++ - } - - if arrayVal, ok := val.(*ast.ArrayLiteralNode); ok { - if !currIsRepeated { - // This should not happen. - return nil, 0 - } - offset := int(path[0]) - index - if offset >= len(arrayVal.Elements) { - // The index we are looking for is not in this array. - return nil, 0 - } - elem := arrayVal.Elements[offset] - // We've matched the index! - matchLen++ - path = path[1:] - // Recurse into array element. - nextMatch, nextMatchLen := findMatchingValueNode( - md, - path, - false, - repeatedCount, - repeatedIndices, - elem, - elem, - ) - return nextMatch, nextMatchLen + matchLen - } - - if currIsRepeated { - if index != int(path[0]) { - // Not a match! - return nil, 0 - } - // We've matched the index! - matchLen++ - path = path[1:] - if len(path) == 0 { - // We're done matching! - return node, matchLen - } - } - - msgValue, ok := val.(*ast.MessageLiteralNode) - if !ok { - // We can't go any further - return node, matchLen - } - - var wantField protoreflect.FieldDescriptor - if md != nil { - wantField = md.Fields().ByNumber(protoreflect.FieldNumber(path[0])) - } - if wantField == nil { - // Should not be possible... next option won't fare any better since - // it's a disagreement between given path and given descriptor so bail. - return nil, 0 - } - for _, field := range msgValue.Elements { - if field.Name.Open != nil || - string(field.Name.Name.AsIdentifier()) != string(wantField.Name()) { - // This is an extension/custom option or indicates the wrong name. - // Try the next one. - continue - } - // We've matched this field. - matchLen++ - path = path[1:] - if len(path) == 0 { - // Perfect match! - return field, matchLen - } - nextMatch, nextMatchLen := findMatchingValueNode( - wantField.Message(), - path, - wantField.Cardinality() == protoreflect.Repeated, - repeatedCount, - repeatedIndices, - field, - field.Val, - ) - return nextMatch, nextMatchLen + matchLen - } - - // If we didn't find the right field, just return what we have so far. 
- return node, matchLen -} - -func isDescendantPath(descendant, ancestor protoreflect.SourcePath) bool { - if len(descendant) < len(ancestor) { - return false - } - for i := range ancestor { - if descendant[i] != ancestor[i] { - return false - } - } - return true -} - -func asSpan(file string, srcLoc protoreflect.SourceLocation) ast.SourceSpan { - return ast.NewSourceSpan( - ast.SourcePos{ - Filename: file, - Line: srcLoc.StartLine + 1, - Col: srcLoc.StartColumn + 1, - }, - ast.SourcePos{ - Filename: file, - Line: srcLoc.EndLine + 1, - Col: srcLoc.EndColumn + 1, - }, - ) -} - -func (r *result) getImportLocation(path string) ast.SourceSpan { - node, ok := r.FileNode().(*ast.FileNode) - if !ok { - return ast.UnknownSpan(path) - } - for _, decl := range node.Decls { - imp, ok := decl.(*ast.ImportNode) - if !ok { - continue - } - if imp.Name.AsString() == path { - return node.NodeInfo(imp.Name) - } - } - // Couldn't find it? Should never happen... - return ast.UnknownSpan(path) -} - -func isEditions(r *result) bool { - return descriptorpb.Edition(r.Edition()) >= descriptorpb.Edition_EDITION_2023 -} diff --git a/vendor/github.com/bufbuild/protocompile/options/options.go b/vendor/github.com/bufbuild/protocompile/options/options.go index e22bb2af..c26f136c 100644 --- a/vendor/github.com/bufbuild/protocompile/options/options.go +++ b/vendor/github.com/bufbuild/protocompile/options/options.go @@ -1,4 +1,4 @@ -// Copyright 2020-2024 Buf Technologies, Inc. +// Copyright 2020-2022 Buf Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -27,46 +27,45 @@ package options import ( "bytes" - "errors" "fmt" "math" + "sort" "strings" - "google.golang.org/protobuf/encoding/prototext" + "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" "google.golang.org/protobuf/types/descriptorpb" "google.golang.org/protobuf/types/dynamicpb" "github.com/bufbuild/protocompile/ast" "github.com/bufbuild/protocompile/internal" - "github.com/bufbuild/protocompile/internal/messageset" "github.com/bufbuild/protocompile/linker" "github.com/bufbuild/protocompile/parser" "github.com/bufbuild/protocompile/reporter" - "github.com/bufbuild/protocompile/sourceinfo" ) -type interpreter struct { - file file - resolver linker.Resolver - overrideDescriptorProto linker.File - - index sourceinfo.OptionIndex - pathBuffer []int32 +// Index is a mapping of AST nodes that define options to a corresponding path +// into the containing file descriptor. The path is a sequence of field tags +// and indexes that define a traversal path from the root (the file descriptor) +// to the resolved option field. 
+type Index map[*ast.OptionNode][]int32 - reporter *reporter.Handler - lenient bool - - // lenienceEnabled is set to true when errors reported to reporter - // should be lenient - lenienceEnabled bool - lenientErrReported bool +type interpreter struct { + file file + resolver linker.Resolver + container optionsContainer + lenient bool + reporter *reporter.Handler + index Index } type file interface { parser.Result + ResolveEnumType(protoreflect.FullName) protoreflect.EnumDescriptor + ResolveMessageType(protoreflect.FullName) protoreflect.MessageDescriptor + ResolveOptionsType(protoreflect.FullName) protoreflect.MessageDescriptor + ResolveExtension(protoreflect.FullName) protoreflect.ExtensionTypeDescriptor ResolveMessageLiteralExtensionName(ast.IdentValueNode) string } @@ -74,23 +73,24 @@ type noResolveFile struct { parser.Result } -func (n noResolveFile) ResolveMessageLiteralExtensionName(ast.IdentValueNode) string { - return "" +func (n noResolveFile) ResolveEnumType(name protoreflect.FullName) protoreflect.EnumDescriptor { + return nil } -// InterpreterOption is an option that can be passed to InterpretOptions and -// its variants. -type InterpreterOption func(*interpreter) +func (n noResolveFile) ResolveMessageType(name protoreflect.FullName) protoreflect.MessageDescriptor { + return nil +} -// WithOverrideDescriptorProto returns an option that indicates that the given file -// should be consulted when looking up a definition for an option type. The given -// file should usually have the path "google/protobuf/descriptor.proto". The given -// file will only be consulted if the option type is otherwise not visible to the -// file whose options are being interpreted. -func WithOverrideDescriptorProto(f linker.File) InterpreterOption { - return func(interp *interpreter) { - interp.overrideDescriptorProto = f - } +func (n noResolveFile) ResolveOptionsType(name protoreflect.FullName) protoreflect.MessageDescriptor { + return nil +} + +func (n noResolveFile) ResolveExtension(name protoreflect.FullName) protoreflect.ExtensionTypeDescriptor { + return nil +} + +func (n noResolveFile) ResolveMessageLiteralExtensionName(ast.IdentValueNode) string { + return "" } // InterpretOptions interprets options in the given linked result, returning @@ -100,8 +100,8 @@ func WithOverrideDescriptorProto(f linker.File) InterpreterOption { // // The given handler is used to report errors and warnings. If any errors are // reported, this function returns a non-nil error. -func InterpretOptions(linked linker.Result, handler *reporter.Handler, opts ...InterpreterOption) (sourceinfo.OptionIndex, error) { - return interpretOptions(false, linked, linker.ResolverFromFile(linked), handler, opts) +func InterpretOptions(linked linker.Result, handler *reporter.Handler) (Index, error) { + return interpretOptions(false, linked, handler) } // InterpretOptionsLenient interprets options in a lenient/best-effort way in @@ -113,8 +113,8 @@ func InterpretOptions(linked linker.Result, handler *reporter.Handler, opts ...I // In lenient more, errors resolving option names and type errors are ignored. // Any options that are uninterpretable (due to such errors) will remain in the // "uninterpreted_option" fields. 
-func InterpretOptionsLenient(linked linker.Result, opts ...InterpreterOption) (sourceinfo.OptionIndex, error) { - return interpretOptions(true, linked, linker.ResolverFromFile(linked), reporter.NewHandler(nil), opts) +func InterpretOptionsLenient(linked linker.Result) (Index, error) { + return interpretOptions(true, linked, reporter.NewHandler(nil)) } // InterpretUnlinkedOptions does a best-effort attempt to interpret options in @@ -128,326 +128,217 @@ func InterpretOptionsLenient(linked linker.Result, opts ...InterpreterOption) (s // interpreted. Other errors resolving option names or type errors will be // effectively ignored. Any options that are uninterpretable (due to such // errors) will remain in the "uninterpreted_option" fields. -func InterpretUnlinkedOptions(parsed parser.Result, opts ...InterpreterOption) (sourceinfo.OptionIndex, error) { - return interpretOptions(true, noResolveFile{parsed}, nil, reporter.NewHandler(nil), opts) +func InterpretUnlinkedOptions(parsed parser.Result) (Index, error) { + return interpretOptions(true, noResolveFile{parsed}, reporter.NewHandler(nil)) } -func interpretOptions(lenient bool, file file, res linker.Resolver, handler *reporter.Handler, interpOpts []InterpreterOption) (sourceinfo.OptionIndex, error) { - interp := &interpreter{ - file: file, - resolver: res, - lenient: lenient, - reporter: handler, - index: sourceinfo.OptionIndex{}, - pathBuffer: make([]int32, 0, 16), - } - for _, opt := range interpOpts { - opt(interp) - } - // We have to do this in two phases. First we interpret non-custom options. - // This allows us to handle standard options and features that may needed to - // correctly reference the custom options in the second phase. - if err := interp.interpretFileOptions(file, false); err != nil { - return nil, err +func interpretOptions(lenient bool, file file, handler *reporter.Handler) (Index, error) { + interp := interpreter{ + file: file, + lenient: lenient, + reporter: handler, + index: Index{}, } - // Now we can do custom options. - if err := interp.interpretFileOptions(file, true); err != nil { - return nil, err + interp.container, _ = file.(optionsContainer) + if f, ok := file.(linker.File); ok { + interp.resolver = linker.ResolverFromFile(f) } - return interp.index, nil -} -func (interp *interpreter) handleErrorf(span ast.SourceSpan, msg string, args ...interface{}) error { - if interp.lenienceEnabled { - interp.lenientErrReported = true - return nil - } - return interp.reporter.HandleErrorf(span, msg, args...) -} - -func (interp *interpreter) handleErrorWithPos(span ast.SourceSpan, err error) error { - if interp.lenienceEnabled { - interp.lenientErrReported = true - return nil - } - return interp.reporter.HandleErrorWithPos(span, err) -} - -func (interp *interpreter) handleError(err error) error { - if interp.lenienceEnabled { - interp.lenientErrReported = true - return nil - } - return interp.reporter.HandleError(err) -} - -func (interp *interpreter) interpretFileOptions(file file, customOpts bool) error { fd := file.FileDescriptorProto() prefix := fd.GetPackage() if prefix != "" { prefix += "." 
} - err := interpretElementOptions(interp, fd.GetName(), targetTypeFile, fd, customOpts) - if err != nil { - return err + opts := fd.GetOptions() + if opts != nil { + if len(opts.UninterpretedOption) > 0 { + remain, err := interp.interpretOptions(fd.GetName(), fd, opts, opts.UninterpretedOption) + if err != nil { + return nil, err + } + opts.UninterpretedOption = remain + } } for _, md := range fd.GetMessageType() { fqn := prefix + md.GetName() - if err := interp.interpretMessageOptions(fqn, md, customOpts); err != nil { - return err + if err := interp.interpretMessageOptions(fqn, md); err != nil { + return nil, err } } for _, fld := range fd.GetExtension() { fqn := prefix + fld.GetName() - if err := interp.interpretFieldOptions(fqn, fld, customOpts); err != nil { - return err + if err := interp.interpretFieldOptions(fqn, fld); err != nil { + return nil, err } } for _, ed := range fd.GetEnumType() { fqn := prefix + ed.GetName() - if err := interp.interpretEnumOptions(fqn, ed, customOpts); err != nil { - return err + if err := interp.interpretEnumOptions(fqn, ed); err != nil { + return nil, err } } for _, sd := range fd.GetService() { fqn := prefix + sd.GetName() - err := interpretElementOptions(interp, fqn, targetTypeService, sd, customOpts) - if err != nil { - return err + opts := sd.GetOptions() + if len(opts.GetUninterpretedOption()) > 0 { + remain, err := interp.interpretOptions(fqn, sd, opts, opts.UninterpretedOption) + if err != nil { + return nil, err + } + opts.UninterpretedOption = remain } for _, mtd := range sd.GetMethod() { mtdFqn := fqn + "." + mtd.GetName() - err := interpretElementOptions(interp, mtdFqn, targetTypeMethod, mtd, customOpts) - if err != nil { - return err + opts := mtd.GetOptions() + if len(opts.GetUninterpretedOption()) > 0 { + remain, err := interp.interpretOptions(mtdFqn, mtd, opts, opts.UninterpretedOption) + if err != nil { + return nil, err + } + opts.UninterpretedOption = remain } } } - return nil -} - -func resolveDescriptor[T protoreflect.Descriptor](res linker.Resolver, name string) T { - var zero T - if res == nil { - return zero - } - if len(name) > 0 && name[0] == '.' { - name = name[1:] - } - desc, _ := res.FindDescriptorByName(protoreflect.FullName(name)) - typedDesc, ok := desc.(T) - if ok { - return typedDesc - } - return zero -} - -func (interp *interpreter) resolveExtensionType(name string) (protoreflect.ExtensionTypeDescriptor, error) { - if interp.resolver == nil { - return nil, protoregistry.NotFound - } - if len(name) > 0 && name[0] == '.' { - name = name[1:] - } - ext, err := interp.resolver.FindExtensionByName(protoreflect.FullName(name)) - if err != nil { - return nil, err - } - return ext.TypeDescriptor(), nil -} - -func (interp *interpreter) resolveOptionsType(name string) protoreflect.MessageDescriptor { - md := resolveDescriptor[protoreflect.MessageDescriptor](interp.resolver, name) - if md != nil { - return md - } - if interp.overrideDescriptorProto == nil { - return nil - } - if len(name) > 0 && name[0] == '.' 
{ - name = name[1:] - } - desc := interp.overrideDescriptorProto.FindDescriptorByName(protoreflect.FullName(name)) - if md, ok := desc.(protoreflect.MessageDescriptor); ok { - return md - } - return nil + return interp.index, nil } func (interp *interpreter) nodeInfo(n ast.Node) ast.NodeInfo { return interp.file.FileNode().NodeInfo(n) } -func (interp *interpreter) interpretMessageOptions(fqn string, md *descriptorpb.DescriptorProto, customOpts bool) error { - err := interpretElementOptions(interp, fqn, targetTypeMessage, md, customOpts) - if err != nil { - return err +func (interp *interpreter) interpretMessageOptions(fqn string, md *descriptorpb.DescriptorProto) error { + opts := md.GetOptions() + if opts != nil { + if len(opts.UninterpretedOption) > 0 { + remain, err := interp.interpretOptions(fqn, md, opts, opts.UninterpretedOption) + if err != nil { + return err + } + opts.UninterpretedOption = remain + } } for _, fld := range md.GetField() { fldFqn := fqn + "." + fld.GetName() - if err := interp.interpretFieldOptions(fldFqn, fld, customOpts); err != nil { + if err := interp.interpretFieldOptions(fldFqn, fld); err != nil { return err } } for _, ood := range md.GetOneofDecl() { oodFqn := fqn + "." + ood.GetName() - err := interpretElementOptions(interp, oodFqn, targetTypeOneof, ood, customOpts) - if err != nil { - return err + opts := ood.GetOptions() + if len(opts.GetUninterpretedOption()) > 0 { + remain, err := interp.interpretOptions(oodFqn, ood, opts, opts.UninterpretedOption) + if err != nil { + return err + } + opts.UninterpretedOption = remain } } for _, fld := range md.GetExtension() { fldFqn := fqn + "." + fld.GetName() - if err := interp.interpretFieldOptions(fldFqn, fld, customOpts); err != nil { + if err := interp.interpretFieldOptions(fldFqn, fld); err != nil { return err } } for _, er := range md.GetExtensionRange() { erFqn := fmt.Sprintf("%s.%d-%d", fqn, er.GetStart(), er.GetEnd()) - err := interpretElementOptions(interp, erFqn, targetTypeExtensionRange, er, customOpts) - if err != nil { - return err + opts := er.GetOptions() + if len(opts.GetUninterpretedOption()) > 0 { + remain, err := interp.interpretOptions(erFqn, er, opts, opts.UninterpretedOption) + if err != nil { + return err + } + opts.UninterpretedOption = remain } } for _, nmd := range md.GetNestedType() { nmdFqn := fqn + "." + nmd.GetName() - if err := interp.interpretMessageOptions(nmdFqn, nmd, customOpts); err != nil { + if err := interp.interpretMessageOptions(nmdFqn, nmd); err != nil { return err } } for _, ed := range md.GetEnumType() { edFqn := fqn + "." + ed.GetName() - if err := interp.interpretEnumOptions(edFqn, ed, customOpts); err != nil { + if err := interp.interpretEnumOptions(edFqn, ed); err != nil { return err } } - - // We also copy features for map fields down to their synthesized key and value fields. 
- for _, fld := range md.GetField() { - entryName := internal.InitCap(internal.JSONName(fld.GetName())) + "Entry" - if fld.GetLabel() != descriptorpb.FieldDescriptorProto_LABEL_REPEATED || - fld.GetType() != descriptorpb.FieldDescriptorProto_TYPE_MESSAGE && - fld.GetTypeName() != "."+fqn+"."+entryName { - // can't be a map field - continue - } - if fld.Options == nil || fld.Options.Features == nil { - // no features to propagate - continue - } - for _, nmd := range md.GetNestedType() { - if nmd.GetName() == entryName { - // found the entry message - if !nmd.GetOptions().GetMapEntry() { - break // not a map - } - for _, mapField := range nmd.Field { - if mapField.Options == nil { - mapField.Options = &descriptorpb.FieldOptions{} - } - features := proto.Clone(fld.Options.Features).(*descriptorpb.FeatureSet) //nolint:errcheck - if mapField.Options.Features != nil { - proto.Merge(features, mapField.Options.Features) - } - mapField.Options.Features = features - } - break - } - } - } - return nil } -var emptyFieldOptions = &descriptorpb.FieldOptions{} - -func (interp *interpreter) interpretFieldOptions(fqn string, fld *descriptorpb.FieldDescriptorProto, customOpts bool) error { +func (interp *interpreter) interpretFieldOptions(fqn string, fld *descriptorpb.FieldDescriptorProto) error { opts := fld.GetOptions() - emptyOptionsAlreadyPresent := opts != nil && len(opts.GetUninterpretedOption()) == 0 - - // For non-custom phase, first process pseudo-options - if len(opts.GetUninterpretedOption()) > 0 && !customOpts { - interp.enableLenience(true) - err := interp.interpretFieldPseudoOptions(fqn, fld, opts) - interp.enableLenience(false) - if err != nil { - return err - } - } - - // Must re-check length of uninterpreted options since above step could remove some. if len(opts.GetUninterpretedOption()) == 0 { - // If the message has no other interpreted options, we clear it out. But don't - // do that if the descriptor came in with empty options or if it already has - // interpreted option fields. - if opts != nil && !emptyOptionsAlreadyPresent && proto.Equal(fld.Options, emptyFieldOptions) { - fld.Options = nil - } return nil } - - // Then process actual options. - return interpretElementOptions(interp, fqn, targetTypeField, fld, customOpts) -} - -func (interp *interpreter) interpretFieldPseudoOptions(fqn string, fld *descriptorpb.FieldDescriptorProto, opts *descriptorpb.FieldOptions) error { - scope := "field " + fqn uo := opts.UninterpretedOption + scope := fmt.Sprintf("field %s", fqn) // process json_name pseudo-option - if index, err := internal.FindOption(interp.file, interp.handleErrorf, scope, uo, "json_name"); err != nil { + index, err := internal.FindOption(interp.file, interp.reporter, scope, uo, "json_name") + if err != nil && !interp.lenient { return err - } else if index >= 0 { + } + if index >= 0 { opt := uo[index] optNode := interp.file.OptionNode(opt) - if opt.StringValue == nil { - return interp.handleErrorf(interp.nodeInfo(optNode.GetValue()), "%s: expecting string value for json_name option", scope) - } - jsonName := string(opt.StringValue) - // Extensions don't support custom json_name values. - // If the value is already set (via the descriptor) and doesn't match the default value, return an error. 
- if fld.GetExtendee() != "" && jsonName != "" && jsonName != internal.JSONName(fld.GetName()) { - return interp.handleErrorf(interp.nodeInfo(optNode.GetName()), "%s: option json_name is not allowed on extensions", scope) + if fld.GetExtendee() != "" { + return interp.reporter.HandleErrorf(interp.nodeInfo(optNode.GetName()).Start(), "%s: option json_name is not allowed on extensions", scope) } // attribute source code info if on, ok := optNode.(*ast.OptionNode); ok { - interp.index[on] = &sourceinfo.OptionSourceInfo{Path: []int32{-1, internal.FieldJSONNameTag}} + interp.index[on] = []int32{-1, internal.FieldJSONNameTag} } uo = internal.RemoveOption(uo, index) - if strings.HasPrefix(jsonName, "[") && strings.HasSuffix(jsonName, "]") { - return interp.handleErrorf(interp.nodeInfo(optNode.GetValue()), "%s: option json_name value cannot start with '[' and end with ']'; that is reserved for representing extensions", scope) + if opt.StringValue == nil { + return interp.reporter.HandleErrorf(interp.nodeInfo(optNode.GetValue()).Start(), "%s: expecting string value for json_name option", scope) + } + name := string(opt.StringValue) + if strings.HasPrefix(name, "[") && strings.HasSuffix(name, "]") { + return interp.reporter.HandleErrorf(interp.nodeInfo(optNode.GetValue()).Start(), "%s: option json_name value cannot start with '[' and end with ']'; that is reserved for representing extensions", scope) } - fld.JsonName = proto.String(jsonName) + fld.JsonName = proto.String(name) } // and process default pseudo-option - if index, err := interp.processDefaultOption(scope, fqn, fld, uo); err != nil { + if index, err := interp.processDefaultOption(scope, fqn, fld, uo); err != nil && !interp.lenient { return err } else if index >= 0 { // attribute source code info optNode := interp.file.OptionNode(uo[index]) if on, ok := optNode.(*ast.OptionNode); ok { - interp.index[on] = &sourceinfo.OptionSourceInfo{Path: []int32{-1, internal.FieldDefaultTag}} + interp.index[on] = []int32{-1, internal.FieldDefaultTag} } uo = internal.RemoveOption(uo, index) } - opts.UninterpretedOption = uo + if len(uo) == 0 { + // no real options, only pseudo-options above? 
clear out options + fld.Options = nil + } else if remain, err := interp.interpretOptions(fqn, fld, opts, uo); err != nil { + return err + } else { + opts.UninterpretedOption = remain + } return nil } func (interp *interpreter) processDefaultOption(scope string, fqn string, fld *descriptorpb.FieldDescriptorProto, uos []*descriptorpb.UninterpretedOption) (defaultIndex int, err error) { - found, err := internal.FindOption(interp.file, interp.handleErrorf, scope, uos, "default") + found, err := internal.FindOption(interp.file, interp.reporter, scope, uos, "default") if err != nil || found == -1 { return -1, err } opt := uos[found] optNode := interp.file.OptionNode(opt) if fld.GetLabel() == descriptorpb.FieldDescriptorProto_LABEL_REPEATED { - return -1, interp.handleErrorf(interp.nodeInfo(optNode.GetName()), "%s: default value cannot be set because field is repeated", scope) + return -1, interp.reporter.HandleErrorf(interp.nodeInfo(optNode.GetName()).Start(), "%s: default value cannot be set because field is repeated", scope) } if fld.GetType() == descriptorpb.FieldDescriptorProto_TYPE_GROUP || fld.GetType() == descriptorpb.FieldDescriptorProto_TYPE_MESSAGE { - return -1, interp.handleErrorf(interp.nodeInfo(optNode.GetName()), "%s: default value cannot be set because field is a message", scope) + return -1, interp.reporter.HandleErrorf(interp.nodeInfo(optNode.GetName()).Start(), "%s: default value cannot be set because field is a message", scope) + } + val := optNode.GetValue() + if _, ok := val.(*ast.MessageLiteralNode); ok { + return -1, interp.reporter.HandleErrorf(interp.nodeInfo(val).Start(), "%s: default value cannot be a message", scope) } mc := &internal.MessageContext{ File: interp.file, @@ -455,20 +346,20 @@ func (interp *interpreter) processDefaultOption(scope string, fqn string, fld *d ElementType: descriptorType(fld), Option: opt, } - - val := optNode.GetValue() var v interface{} - if val.Value() == nil { - // no value in the AST, so we dig the value out of the uninterpreted option proto - v, err = interp.defaultValueFromProto(mc, fld, opt, val) + if fld.GetType() == descriptorpb.FieldDescriptorProto_TYPE_ENUM { + ed := interp.file.ResolveEnumType(protoreflect.FullName(fld.GetTypeName())) + _, name, err := interp.enumFieldValue(mc, ed, val, false) + if err != nil { + return -1, interp.reporter.HandleError(err) + } + v = string(name) } else { - // compute value from AST - v, err = interp.defaultValue(mc, fld, val) - } - if err != nil { - return -1, interp.handleError(err) + v, err = interp.scalarFieldValue(mc, fld.GetType(), val, false) + if err != nil { + return -1, interp.reporter.HandleError(err) + } } - if str, ok := v.(string); ok { fld.DefaultValue = proto.String(str) } else if b, ok := v.([]byte); ok { @@ -500,130 +391,309 @@ func (interp *interpreter) processDefaultOption(scope string, fqn string, fld *d return found, nil } -func (interp *interpreter) defaultValue(mc *internal.MessageContext, fld *descriptorpb.FieldDescriptorProto, val ast.ValueNode) (interface{}, error) { - if _, ok := val.(*ast.MessageLiteralNode); ok { - return -1, reporter.Errorf(interp.nodeInfo(val), "%vdefault value cannot be a message", mc) - } - if fld.GetType() == descriptorpb.FieldDescriptorProto_TYPE_ENUM { - ed := resolveDescriptor[protoreflect.EnumDescriptor](interp.resolver, fld.GetTypeName()) - if ed == nil { - return -1, reporter.Errorf(interp.nodeInfo(val), "%vunable to resolve enum type %q for field %q", mc, fld.GetTypeName(), fld.GetName()) +func encodeDefaultBytes(b []byte) string { + var 
buf bytes.Buffer + internal.WriteEscapedBytes(&buf, b) + return buf.String() +} + +func (interp *interpreter) interpretEnumOptions(fqn string, ed *descriptorpb.EnumDescriptorProto) error { + opts := ed.GetOptions() + if opts != nil { + if len(opts.UninterpretedOption) > 0 { + remain, err := interp.interpretOptions(fqn, ed, opts, opts.UninterpretedOption) + if err != nil { + return err + } + opts.UninterpretedOption = remain } - _, name, err := interp.enumFieldValue(mc, ed, val, false) - if err != nil { - return -1, err + } + for _, evd := range ed.GetValue() { + evdFqn := fqn + "." + evd.GetName() + opts := evd.GetOptions() + if len(opts.GetUninterpretedOption()) > 0 { + remain, err := interp.interpretOptions(evdFqn, evd, opts, opts.UninterpretedOption) + if err != nil { + return err + } + opts.UninterpretedOption = remain } - return string(name), nil } - return interp.scalarFieldValue(mc, fld.GetType(), val, false) + return nil } -func (interp *interpreter) defaultValueFromProto(mc *internal.MessageContext, fld *descriptorpb.FieldDescriptorProto, opt *descriptorpb.UninterpretedOption, node ast.Node) (interface{}, error) { - if opt.AggregateValue != nil { - return -1, reporter.Errorf(interp.nodeInfo(node), "%vdefault value cannot be a message", mc) - } - if fld.GetType() == descriptorpb.FieldDescriptorProto_TYPE_ENUM { - ed := resolveDescriptor[protoreflect.EnumDescriptor](interp.resolver, fld.GetTypeName()) - if ed == nil { - return -1, reporter.Errorf(interp.nodeInfo(node), "%vunable to resolve enum type %q for field %q", mc, fld.GetTypeName(), fld.GetName()) - } - _, name, err := interp.enumFieldValueFromProto(mc, ed, opt, node) - if err != nil { - return nil, err - } - return string(name), nil +// interpretedOption represents the result of interpreting an option. +// This includes metadata that allows the option to be serialized to +// bytes in a way that is deterministic and can preserve the structure +// of the source (the way the options are de-structured and the order in +// which options appear). +type interpretedOption struct { + unknown bool + pathPrefix []int32 + interpretedField +} + +func (o *interpretedOption) path() []int32 { + path := o.pathPrefix + path = append(path, o.number) + if o.repeated { + path = append(path, o.index) } - return interp.scalarFieldValueFromProto(mc, fld.GetType(), opt, node) + return path } -func encodeDefaultBytes(b []byte) string { - var buf bytes.Buffer - internal.WriteEscapedBytes(&buf, b) - return buf.String() +func (o *interpretedOption) appendOptionBytes(b []byte) ([]byte, error) { + return o.appendOptionBytesWithPath(b, o.pathPrefix) } -func (interp *interpreter) interpretEnumOptions(fqn string, ed *descriptorpb.EnumDescriptorProto, customOpts bool) error { - err := interpretElementOptions(interp, fqn, targetTypeEnum, ed, customOpts) +func (o *interpretedOption) appendOptionBytesWithPath(b []byte, path []int32) ([]byte, error) { + if len(path) == 0 { + return appendOptionBytesSingle(b, &o.interpretedField) + } + // NB: if we add functions to compute sizes of the options first, we could + // allocate precisely sized slice up front, which would be more efficient than + // repeated creation/growing/concatenation. + enclosed, err := o.appendOptionBytesWithPath(nil, path[1:]) if err != nil { - return err + return nil, err } - for _, evd := range ed.GetValue() { - evdFqn := fqn + "." 
+ evd.GetName() - err := interpretElementOptions(interp, evdFqn, targetTypeEnumValue, evd, customOpts) - if err != nil { - return err + b = protowire.AppendTag(b, protowire.Number(path[0]), protowire.BytesType) + return protowire.AppendBytes(b, enclosed), nil +} + +// interpretedField represents a field in an options message that is the +// result of interpreting an option. This is used for the option value +// itself as well as for subfields when an option value is a message +// literal. +type interpretedField struct { + // field number + number int32 + // index of this element inside a repeated field; only set if repeated == true + index int32 + // true if this is a repeated field + repeated bool + // true if this is a repeated field that stores scalar values in packed form + packed bool + // the field's kind + kind protoreflect.Kind + + value interpretedFieldValue +} + +// interpretedFieldValue is a wrapper around protoreflect.Value that +// includes extra metadata. +type interpretedFieldValue struct { + // the field value + val protoreflect.Value + // if true, this value is a list of values, not a singular value + isList bool + // non-nil for singular message values + msgVal []*interpretedField + // non-nil for non-empty lists of message values + msgListVal [][]*interpretedField +} + +func appendOptionBytes(b []byte, flds []*interpretedField) ([]byte, error) { + // protoc emits messages sorted by field number + if len(flds) > 1 { + sort.SliceStable(flds, func(i, j int) bool { + return flds[i].number < flds[j].number + }) + } + + for i := 0; i < len(flds); i++ { + f := flds[i] + switch { + case f.packed && canPack(f.kind): + // for packed repeated numeric fields, all runs of values are merged into one packed list + num := f.number + j := i + for j < len(flds) && flds[j].number == num { + j++ + } + // now flds[i:j] is the range of contiguous fields for the same field number + enclosed, err := appendOptionBytesPacked(nil, f.kind, flds[i:j]) + if err != nil { + return nil, err + } + b = protowire.AppendTag(b, protowire.Number(f.number), protowire.BytesType) + b = protowire.AppendBytes(b, enclosed) + // skip over the other subsequent fields we just serialized + i = j - 1 + case f.value.isList: + // if not packed, then emit one value at a time + single := *f + single.value.isList = false + single.value.msgListVal = nil + l := f.value.val.List() + for i := 0; i < l.Len(); i++ { + single.value.val = l.Get(i) + if f.kind == protoreflect.MessageKind || f.kind == protoreflect.GroupKind { + single.value.msgVal = f.value.msgListVal[i] + } + var err error + b, err = appendOptionBytesSingle(b, &single) + if err != nil { + return nil, err + } + } + default: + // simple singular value + var err error + b, err = appendOptionBytesSingle(b, f) + if err != nil { + return nil, err + } } } - return nil + + return b, nil } -func interpretElementOptions[Elem elementType[OptsStruct, Opts], OptsStruct any, Opts optionsType[OptsStruct]]( - interp *interpreter, - fqn string, - target *targetType[Elem, OptsStruct, Opts], - elem Elem, - customOpts bool, -) error { - opts := elem.GetOptions() - uninterpreted := opts.GetUninterpretedOption() - if len(uninterpreted) > 0 { - remain, err := interp.interpretOptions(fqn, target.t, elem, opts, uninterpreted, customOpts) - if err != nil { - return err - } - target.setUninterpretedOptions(opts, remain) - } else if customOpts { - // If customOpts is true, we are in second pass of interpreting. 
- // For second pass, even if there are no options to interpret, we still - // need to verify feature usage. - features := opts.GetFeatures() - var msg protoreflect.Message - if len(features.ProtoReflect().GetUnknown()) > 0 { - // We need to first convert to a message that uses the sources' definition - // of FeatureSet. - optsDesc := opts.ProtoReflect().Descriptor() - optsFqn := string(optsDesc.FullName()) - if md := interp.resolveOptionsType(optsFqn); md != nil { - dm := dynamicpb.NewMessage(md) - if err := cloneInto(dm, opts, interp.resolver); err != nil { - node := interp.file.Node(elem) - return interp.handleError(reporter.Error(interp.nodeInfo(node), err)) - } - msg = dm +func canPack(k protoreflect.Kind) bool { + switch k { + case protoreflect.MessageKind, protoreflect.GroupKind, protoreflect.StringKind, protoreflect.BytesKind: + return false + default: + return true + } +} + +func appendOptionBytesPacked(b []byte, k protoreflect.Kind, flds []*interpretedField) ([]byte, error) { + for i := range flds { + val := flds[i].value + if val.isList { + l := val.val.List() + var err error + b, err = appendNumericValueBytesPacked(b, k, l) + if err != nil { + return nil, err + } + } else { + var err error + b, err = appendNumericValueBytes(b, k, val.val) + if err != nil { + return nil, err } } - if msg == nil { - msg = opts.ProtoReflect() + } + return b, nil +} + +func appendOptionBytesSingle(b []byte, f *interpretedField) ([]byte, error) { + num := protowire.Number(f.number) + switch f.kind { + case protoreflect.MessageKind: + enclosed, err := appendOptionBytes(nil, f.value.msgVal) + if err != nil { + return nil, err } - err := interp.validateRecursive(false, msg, "", elem, nil, false, false, false) + b = protowire.AppendTag(b, num, protowire.BytesType) + return protowire.AppendBytes(b, enclosed), nil + + case protoreflect.GroupKind: + b = protowire.AppendTag(b, num, protowire.StartGroupType) + var err error + b, err = appendOptionBytes(b, f.value.msgVal) if err != nil { - return err + return nil, err } + return protowire.AppendTag(b, num, protowire.EndGroupType), nil + + case protoreflect.StringKind: + b = protowire.AppendTag(b, num, protowire.BytesType) + return protowire.AppendString(b, f.value.val.String()), nil + + case protoreflect.BytesKind: + b = protowire.AppendTag(b, num, protowire.BytesType) + return protowire.AppendBytes(b, f.value.val.Bytes()), nil + + case protoreflect.Int32Kind, protoreflect.Int64Kind, protoreflect.Uint32Kind, protoreflect.Uint64Kind, + protoreflect.Sint32Kind, protoreflect.Sint64Kind, protoreflect.EnumKind, protoreflect.BoolKind: + b = protowire.AppendTag(b, num, protowire.VarintType) + return appendNumericValueBytes(b, f.kind, f.value.val) + + case protoreflect.Fixed32Kind, protoreflect.Sfixed32Kind, protoreflect.FloatKind: + b = protowire.AppendTag(b, num, protowire.Fixed32Type) + return appendNumericValueBytes(b, f.kind, f.value.val) + + case protoreflect.Fixed64Kind, protoreflect.Sfixed64Kind, protoreflect.DoubleKind: + b = protowire.AppendTag(b, num, protowire.Fixed64Type) + return appendNumericValueBytes(b, f.kind, f.value.val) + + default: + return nil, fmt.Errorf("unknown field kind: %v", f.kind) } - return nil +} + +func appendNumericValueBytesPacked(b []byte, k protoreflect.Kind, l protoreflect.List) ([]byte, error) { + for i := 0; i < l.Len(); i++ { + var err error + b, err = appendNumericValueBytes(b, k, l.Get(i)) + if err != nil { + return nil, err + } + } + return b, nil +} + +func appendNumericValueBytes(b []byte, k protoreflect.Kind, v 
protoreflect.Value) ([]byte, error) { + switch k { + case protoreflect.Int32Kind, protoreflect.Int64Kind: + return protowire.AppendVarint(b, uint64(v.Int())), nil + case protoreflect.Uint32Kind, protoreflect.Uint64Kind: + return protowire.AppendVarint(b, v.Uint()), nil + case protoreflect.Sint32Kind, protoreflect.Sint64Kind: + return protowire.AppendVarint(b, protowire.EncodeZigZag(v.Int())), nil + case protoreflect.Fixed32Kind: + return protowire.AppendFixed32(b, uint32(v.Uint())), nil + case protoreflect.Fixed64Kind: + return protowire.AppendFixed64(b, v.Uint()), nil + case protoreflect.Sfixed32Kind: + return protowire.AppendFixed32(b, uint32(v.Int())), nil + case protoreflect.Sfixed64Kind: + return protowire.AppendFixed64(b, uint64(v.Int())), nil + case protoreflect.FloatKind: + return protowire.AppendFixed32(b, math.Float32bits(float32(v.Float()))), nil + case protoreflect.DoubleKind: + return protowire.AppendFixed64(b, math.Float64bits(v.Float())), nil + case protoreflect.BoolKind: + return protowire.AppendVarint(b, protowire.EncodeBool(v.Bool())), nil + case protoreflect.EnumKind: + return protowire.AppendVarint(b, uint64(v.Enum())), nil + default: + return nil, fmt.Errorf("unknown field kind: %v", k) + } +} + +// optionsContainer may be optionally implemented by a linker.Result. It is +// not part of the linker.Result interface as it is meant only for internal use. +// This allows the option interpreter step to store extra metadata about the +// serialized structure of options. +type optionsContainer interface { + // AddOptionBytes adds the given pre-serialized option bytes to a file, + // associated with the given options message. The type of the given message + // should be an options message, for example *descriptorpb.MessageOptions. + // This value should be part of the message hierarchy whose root is the + // *descriptorpb.FileDescriptorProto that corresponds to this result. + AddOptionBytes(pm proto.Message, opts []byte) } // interpretOptions processes the options in uninterpreted, which are interpreted as fields -// of the given opts message. The first return value is the features to use for child elements. -// On success, the latter two return values will usually be nil, nil. But if the current +// of the given opts message. On success, it will usually return nil, nil. But if the current // operation is lenient, it may return a non-nil slice of uninterpreted options on success. -// In such a case, the returned slice contains the options which could not be interpreted. -func (interp *interpreter) interpretOptions( - fqn string, - targetType descriptorpb.FieldOptions_OptionTargetType, - element, opts proto.Message, - uninterpreted []*descriptorpb.UninterpretedOption, - customOpts bool, -) ([]*descriptorpb.UninterpretedOption, error) { +// In such a case, the returned value is the remaining slice of options which could not be +// interpreted. 
+func (interp *interpreter) interpretOptions(fqn string, element, opts proto.Message, uninterpreted []*descriptorpb.UninterpretedOption) ([]*descriptorpb.UninterpretedOption, error) { optsDesc := opts.ProtoReflect().Descriptor() optsFqn := string(optsDesc.FullName()) var msg protoreflect.Message // see if the parse included an override copy for these options - if md := interp.resolveOptionsType(optsFqn); md != nil { + if md := interp.file.ResolveOptionsType(protoreflect.FullName(optsFqn)); md != nil { dm := dynamicpb.NewMessage(md) - if err := cloneInto(dm, opts, interp.resolver); err != nil { + if err := cloneInto(dm, opts, nil); err != nil { node := interp.file.Node(element) - return nil, interp.handleError(reporter.Error(interp.nodeInfo(node), err)) + return nil, interp.reporter.HandleError(reporter.Error(interp.nodeInfo(node).Start(), err)) } msg = dm } else { @@ -636,58 +706,33 @@ func (interp *interpreter) interpretOptions( ElementType: descriptorType(element), } var remain []*descriptorpb.UninterpretedOption + results := make([]*interpretedOption, 0, len(uninterpreted)) for _, uo := range uninterpreted { - isCustom := uo.Name[0].GetIsExtension() - if isCustom != customOpts { - // We're not looking at these this phase. - remain = append(remain, uo) - continue - } - firstName := uo.Name[0].GetNamePart() - if targetType == descriptorpb.FieldOptions_TARGET_TYPE_FIELD && - !isCustom && (firstName == "default" || firstName == "json_name") { - // Field pseudo-option that we can skip and is handled elsewhere. - remain = append(remain, uo) - continue - } node := interp.file.OptionNode(uo) - if !isCustom && firstName == "uninterpreted_option" { + if !uo.Name[0].GetIsExtension() && uo.Name[0].GetNamePart() == "uninterpreted_option" { if interp.lenient { remain = append(remain, uo) continue } // uninterpreted_option might be found reflectively, but is not actually valid for use - if err := interp.handleErrorf(interp.nodeInfo(node.GetName()), "%vinvalid option 'uninterpreted_option'", mc); err != nil { + if err := interp.reporter.HandleErrorf(interp.nodeInfo(node.GetName()).Start(), "%vinvalid option 'uninterpreted_option'", mc); err != nil { return nil, err } } mc.Option = uo - interp.enableLenience(true) - srcInfo, err := interp.interpretField(targetType, mc, msg, uo, 0, interp.pathBuffer) - interp.enableLenience(false) + res, err := interp.interpretField(mc, msg, uo, 0, nil) if err != nil { - return nil, err - } - if interp.lenientErrReported { - remain = append(remain, uo) - continue - } - - if srcInfo != nil { - if optn, ok := node.(*ast.OptionNode); ok { - interp.index[optn] = srcInfo + if interp.lenient { + remain = append(remain, uo) + continue } - } - } - - // customOpts is true for the second pass, which is also when we want to validate feature usage. - doValidation := customOpts - if doValidation { - validateRequiredFields := !interp.lenient - err := interp.validateRecursive(validateRequiredFields, msg, "", element, nil, false, false, false) - if err != nil { return nil, err } + res.unknown = !isKnownField(optsDesc, res) + results = append(results, res) + if optn, ok := node.(*ast.OptionNode); ok { + interp.index[optn] = res.path() + } } if interp.lenient { @@ -700,452 +745,208 @@ func (interp *interpreter) interpretOptions( // the work we've done so far. return uninterpreted, nil } - if doValidation { - if err := proto.CheckInitialized(optsClone); err != nil { - // Conversion from dynamic message failed to set some required fields. - // TODO above applies here as well... 
- return uninterpreted, nil - } - } // conversion from dynamic message above worked, so now // it is safe to overwrite the passed in message proto.Reset(opts) proto.Merge(opts, optsClone) + if interp.container != nil { + b, err := interp.toOptionBytes(mc, results) + if err != nil { + return nil, err + } + interp.container.AddOptionBytes(opts, b) + } + return remain, nil } + if err := validateRecursive(msg, ""); err != nil { + node := interp.file.Node(element) + if err := interp.reporter.HandleErrorf(interp.nodeInfo(node).Start(), "error in %s options: %v", descriptorType(element), err); err != nil { + return nil, err + } + } + // now try to convert into the passed in message and fail if not successful if err := cloneInto(opts, msg.Interface(), interp.resolver); err != nil { node := interp.file.Node(element) - return nil, interp.handleError(reporter.Error(interp.nodeInfo(node), err)) + return nil, interp.reporter.HandleError(reporter.Error(interp.nodeInfo(node).Start(), err)) } - - return remain, nil -} - -// checkFieldUsage verifies that the given option field can be used -// for the given target type. It reports an error if not and returns -// a non-nil error if the handler returned a non-nil error. -func (interp *interpreter) checkFieldUsage( - targetType descriptorpb.FieldOptions_OptionTargetType, - fld protoreflect.FieldDescriptor, - node ast.Node, -) error { - msgOpts, _ := fld.ContainingMessage().Options().(*descriptorpb.MessageOptions) - if msgOpts.GetMessageSetWireFormat() && !messageset.CanSupportMessageSets() { - err := interp.handleErrorf(interp.nodeInfo(node), "field %q may not be used in an option: it uses 'message set wire format' legacy proto1 feature which is not supported", fld.FullName()) + if interp.container != nil { + b, err := interp.toOptionBytes(mc, results) if err != nil { - return err + return nil, err } + interp.container.AddOptionBytes(opts, b) } - opts, ok := fld.Options().(*descriptorpb.FieldOptions) - if !ok { - return nil - } - targetTypes := opts.GetTargets() - if len(targetTypes) == 0 { - return nil + return nil, nil +} + +// isKnownField returns true if the given option is for a known field of the +// given options message descriptor and will be serialized using the expected +// wire type for that known field. +func isKnownField(desc protoreflect.MessageDescriptor, opt *interpretedOption) bool { + var num int32 + if len(opt.pathPrefix) > 0 { + num = opt.pathPrefix[0] + } else { + num = opt.number } - for _, allowedType := range targetTypes { - if allowedType == targetType { - return nil - } + fd := desc.Fields().ByNumber(protoreflect.FieldNumber(num)) + if fd == nil { + return false } - allowedTypes := make([]string, len(targetTypes)) - for i, t := range targetTypes { - allowedTypes[i] = targetTypeString(t) + + // Before the full wire type check, we do a quick check that will usually pass + // and allow us to short-circuit the logic below. + if fd.IsList() == opt.repeated && fd.Kind() == opt.kind { + return true } - if len(targetTypes) == 1 && targetTypes[0] == descriptorpb.FieldOptions_TARGET_TYPE_UNKNOWN { - return interp.handleErrorf(interp.nodeInfo(node), "field %q may not be used in an option (it declares no allowed target types)", fld.FullName()) + + // We figure out the wire type this interpreted field will use when serialized. + var wireType protowire.Type + switch { + case len(opt.pathPrefix) > 0: + // If path prefix exists, this field is nested inside a message. + // And messages use bytes wire type. 
+ wireType = protowire.BytesType + case opt.repeated && opt.packed && canPack(opt.kind): + // Packed repeated numeric scalars use bytes wire type. + wireType = protowire.BytesType + default: + wireType = wireTypeForKind(opt.kind) } - return interp.handleErrorf(interp.nodeInfo(node), "field %q is allowed on [%s], not on %s", fld.FullName(), strings.Join(allowedTypes, ","), targetTypeString(targetType)) -} -func targetTypeString(t descriptorpb.FieldOptions_OptionTargetType) string { - return strings.ToLower(strings.ReplaceAll(strings.TrimPrefix(t.String(), "TARGET_TYPE_"), "_", " ")) + // And then we see if the wire type we just determined is compatible with + // the field descriptor we found. + if fd.IsList() && canPack(fd.Kind()) && wireType == protowire.BytesType { + // Even if fd.IsPacked() is false, bytes type is still accepted for + // repeated scalar numerics, so that changing a repeated field from + // packed to not-packed (or vice versa) is a compatible change. + return true + } + return wireType == wireTypeForKind(fd.Kind()) } -func editionString(t descriptorpb.Edition) string { - return strings.ToLower(strings.ReplaceAll(strings.TrimPrefix(t.String(), "EDITION_"), "_", "-")) +func wireTypeForKind(kind protoreflect.Kind) protowire.Type { + switch kind { + case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind: + return protowire.BytesType + case protoreflect.GroupKind: + return protowire.StartGroupType + case protoreflect.Fixed32Kind, protoreflect.Sfixed32Kind, protoreflect.FloatKind: + return protowire.Fixed32Type + case protoreflect.Fixed64Kind, protoreflect.Sfixed64Kind, protoreflect.DoubleKind: + return protowire.Fixed64Type + default: + // everything else uses varint + return protowire.VarintType + } } func cloneInto(dest proto.Message, src proto.Message, res linker.Resolver) error { if dest.ProtoReflect().Descriptor() == src.ProtoReflect().Descriptor() { proto.Reset(dest) proto.Merge(dest, src) + if err := proto.CheckInitialized(dest); err != nil { + return err + } return nil } // If descriptors are not the same, we could have field descriptors in src that // don't match the ones in dest. There's no easy/sane way to handle that. So we // just marshal to bytes and back to do this - marshaler := proto.MarshalOptions{ - // We've already validated required fields before this point, - // so we can allow partial here. - AllowPartial: true, - } - data, err := marshaler.Marshal(src) + data, err := proto.Marshal(src) if err != nil { return err } - unmarshaler := proto.UnmarshalOptions{AllowPartial: true} - if res != nil { - unmarshaler.Resolver = res - } else { - // Use a typed nil, which returns "not found" to all queries - // and prevents fallback to protoregistry.GlobalTypes. 
- unmarshaler.Resolver = (*protoregistry.Types)(nil) - } - return unmarshaler.Unmarshal(data, dest) + return proto.UnmarshalOptions{Resolver: res}.Unmarshal(data, dest) } -func (interp *interpreter) validateRecursive( - validateRequiredFields bool, - msg protoreflect.Message, - prefix string, - element proto.Message, - path []int32, - isFeatures bool, - inFeatures bool, - inMap bool, -) error { - if validateRequiredFields { - flds := msg.Descriptor().Fields() - var missingFields []string - for i := 0; i < flds.Len(); i++ { - fld := flds.Get(i) - if fld.Cardinality() == protoreflect.Required && !msg.Has(fld) { - missingFields = append(missingFields, fmt.Sprintf("%s%s", prefix, fld.Name())) - } +func (interp *interpreter) toOptionBytes(mc *internal.MessageContext, results []*interpretedOption) ([]byte, error) { + // protoc emits non-custom options in tag order and then + // the rest are emitted in the order they are defined in source + sort.SliceStable(results, func(i, j int) bool { + if !results[i].unknown && results[j].unknown { + return true } - if len(missingFields) > 0 { - node := interp.findOptionNode(path, element) - err := interp.handleErrorf(interp.nodeInfo(node), "error in %s options: some required fields missing: %v", descriptorType(element), strings.Join(missingFields, ", ")) - if err != nil { - return err - } + if !results[i].unknown && !results[j].unknown { + return results[i].number < results[j].number } - } - - var err error - msg.Range(func(fld protoreflect.FieldDescriptor, val protoreflect.Value) bool { - chpath := path - if !inMap { - chpath = append(chpath, int32(fld.Number())) - } - chInFeatures := isFeatures || inFeatures - chIsFeatures := !chInFeatures && len(path) == 0 && fld.Name() == "features" - - if (isFeatures || (inFeatures && fld.IsExtension())) && - interp.file.FileNode().Name() == fld.ParentFile().Path() { - var what, name string - if fld.IsExtension() { - what = "custom feature" - name = "(" + string(fld.FullName()) + ")" - } else { - what = "feature" - name = string(fld.Name()) + return false + }) + var b []byte + for _, res := range results { + var err error + b, err = res.appendOptionBytes(b) + if err != nil { + if _, ok := err.(reporter.ErrorWithPos); !ok { + pos := ast.SourcePos{Filename: interp.file.AST().Name()} + err = reporter.Errorf(pos, "%sfailed to encode options: %w", mc, err) } - node := interp.findOptionNode(path, element) - err = interp.handleErrorf(interp.nodeInfo(node), "%s %s cannot be used from the same file in which it is defined", what, name) - if err != nil { - return false + if err := interp.reporter.HandleError(err); err != nil { + return nil, err } } + } + return b, nil +} - if chInFeatures { - // Validate feature usage against feature settings. +func validateRecursive(msg protoreflect.Message, prefix string) error { + flds := msg.Descriptor().Fields() + var missingFields []string + for i := 0; i < flds.Len(); i++ { + fld := flds.Get(i) + if fld.Cardinality() == protoreflect.Required && !msg.Has(fld) { + missingFields = append(missingFields, fmt.Sprintf("%s%s", prefix, fld.Name())) + } + } + if len(missingFields) > 0 { + return fmt.Errorf("some required fields missing: %v", strings.Join(missingFields, ", ")) + } - // First, check the feature support settings of the field. 
- opts, _ := fld.Options().(*descriptorpb.FieldOptions) - edition := interp.file.FileDescriptorProto().GetEdition() - if opts != nil && opts.FeatureSupport != nil { - err = interp.validateFeatureSupport(edition, opts.FeatureSupport, "field", string(fld.FullName()), chpath, element) + var err error + msg.Range(func(fld protoreflect.FieldDescriptor, val protoreflect.Value) bool { + if fld.IsMap() { + md := fld.MapValue().Message() + if md != nil { + val.Map().Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { + chprefix := fmt.Sprintf("%s%s[%v].", prefix, fieldName(fld), k) + err = validateRecursive(v.Message(), chprefix) + return err == nil + }) if err != nil { return false } } - // Then, if it's an enum or has an enum, check the feature support settings of the enum values. - var enum protoreflect.EnumDescriptor - if fld.Enum() != nil { - enum = fld.Enum() - } else if fld.IsMap() && fld.MapValue().Enum() != nil { - enum = fld.MapValue().Enum() - } - if enum != nil { - switch { - case fld.IsMap(): - val.Map().Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool { - // Can't construct path to particular map entry since we don't this entry's index. - // So we leave chpath alone, and it will have to point to the whole map value (or - // the first entry if the map is de-structured across multiple option statements). - err = interp.validateEnumValueFeatureSupport(edition, enum, v.Enum(), chpath, element) - return err == nil - }) - if err != nil { - return false - } - case fld.IsList(): + } else { + md := fld.Message() + if md != nil { + if fld.IsList() { sl := val.List() for i := 0; i < sl.Len(); i++ { v := sl.Get(i) - err = interp.validateEnumValueFeatureSupport(edition, enum, v.Enum(), append(chpath, int32(i)), element) + chprefix := fmt.Sprintf("%s%s[%d].", prefix, fieldName(fld), i) + err = validateRecursive(v.Message(), chprefix) if err != nil { return false } } - default: - err = interp.validateEnumValueFeatureSupport(edition, enum, val.Enum(), chpath, element) + } else { + chprefix := fmt.Sprintf("%s%s.", prefix, fieldName(fld)) + err = validateRecursive(val.Message(), chprefix) if err != nil { return false } } - } - } - - // If it's a message or contains a message, recursively validate fields in those messages. 
- switch { - case fld.IsMap() && fld.MapValue().Message() != nil: - val.Map().Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { - chprefix := fmt.Sprintf("%s%s[%v].", prefix, fieldName(fld), k) - err = interp.validateRecursive(validateRequiredFields, v.Message(), chprefix, element, chpath, chIsFeatures, chInFeatures, true) - return err == nil - }) - if err != nil { - return false - } - case fld.IsList() && fld.Message() != nil: - sl := val.List() - for i := 0; i < sl.Len(); i++ { - v := sl.Get(i) - chprefix := fmt.Sprintf("%s%s[%d].", prefix, fieldName(fld), i) - if !inMap { - chpath = append(chpath, int32(i)) - } - err = interp.validateRecursive(validateRequiredFields, v.Message(), chprefix, element, chpath, chIsFeatures, chInFeatures, inMap) - if err != nil { - return false - } - } - case !fld.IsMap() && fld.Message() != nil: - chprefix := fmt.Sprintf("%s%s.", prefix, fieldName(fld)) - err = interp.validateRecursive(validateRequiredFields, val.Message(), chprefix, element, chpath, chIsFeatures, chInFeatures, inMap) - if err != nil { - return false - } - } - return true - }) - return err -} - -func (interp *interpreter) validateEnumValueFeatureSupport( - edition descriptorpb.Edition, - enum protoreflect.EnumDescriptor, - number protoreflect.EnumNumber, - path []int32, - element proto.Message, -) error { - enumVal := enum.Values().ByNumber(number) - if enumVal == nil { - return nil - } - enumValOpts, _ := enumVal.Options().(*descriptorpb.EnumValueOptions) - if enumValOpts == nil || enumValOpts.FeatureSupport == nil { - return nil - } - return interp.validateFeatureSupport(edition, enumValOpts.FeatureSupport, "enum value", string(enumVal.Name()), path, element) -} - -func (interp *interpreter) validateFeatureSupport( - edition descriptorpb.Edition, - featureSupport *descriptorpb.FieldOptions_FeatureSupport, - what string, - name string, - path []int32, - element proto.Message, -) error { - if featureSupport.EditionIntroduced != nil && edition < featureSupport.GetEditionIntroduced() { - node := interp.findOptionNode(path, element) - err := interp.handleErrorf(interp.nodeInfo(node), "%s %q was not introduced until edition %s", what, name, editionString(featureSupport.GetEditionIntroduced())) - if err != nil { - return err - } - } - if featureSupport.EditionRemoved != nil && edition >= featureSupport.GetEditionRemoved() { - node := interp.findOptionNode(path, element) - err := interp.handleErrorf(interp.nodeInfo(node), "%s %q was removed in edition %s", what, name, editionString(featureSupport.GetEditionRemoved())) - if err != nil { - return err - } - } - if featureSupport.EditionDeprecated != nil && edition >= featureSupport.GetEditionDeprecated() { - node := interp.findOptionNode(path, element) - var suffix string - if featureSupport.GetDeprecationWarning() != "" { - suffix = ": " + featureSupport.GetDeprecationWarning() - } - interp.reporter.HandleWarningf(interp.nodeInfo(node), "%s %q is deprecated as of edition %s%s", what, name, editionString(featureSupport.GetEditionDeprecated()), suffix) - } - return nil -} - -func (interp *interpreter) findOptionNode( - path []int32, - element proto.Message, -) ast.Node { - elementNode := interp.file.Node(element) - nodeWithOpts, _ := elementNode.(ast.NodeWithOptions) - if nodeWithOpts == nil { - return elementNode - } - node, _ := findOptionNode[*ast.OptionNode]( - path, - optionsRanger{nodeWithOpts}, - func(n *ast.OptionNode) *sourceinfo.OptionSourceInfo { - return interp.index[n] - }, - ) - if node != nil { - return node - } - return 
elementNode -} - -func findOptionNode[N ast.Node]( - path []int32, - nodes interface { - Range(func(N, ast.ValueNode) bool) - }, - srcInfoAccessor func(N) *sourceinfo.OptionSourceInfo, -) (ast.Node, int) { - var bestMatch ast.Node - var bestMatchLen int - nodes.Range(func(node N, val ast.ValueNode) bool { - srcInfo := srcInfoAccessor(node) - if srcInfo == nil { - // can happen if we are lenient when interpreting -- this node - // could not be interpreted and thus has no source info; skip - return true - } - if srcInfo.Path[0] < 0 { - // negative first value means it's a field pseudo-option; skip - return true - } - match, matchLen := findOptionValueNode(path, node, val, srcInfo) - if matchLen > bestMatchLen { - bestMatch = match - bestMatchLen = matchLen - if matchLen >= len(path) { - // not going to find a better one - return false - } - } - return true - }) - return bestMatch, bestMatchLen -} - -type optionsRanger struct { - node ast.NodeWithOptions -} - -func (r optionsRanger) Range(f func(*ast.OptionNode, ast.ValueNode) bool) { - r.node.RangeOptions(func(optNode *ast.OptionNode) bool { - return f(optNode, optNode.Val) - }) -} - -type valueRanger []ast.ValueNode - -func (r valueRanger) Range(f func(ast.ValueNode, ast.ValueNode) bool) { - for _, elem := range r { - if !f(elem, elem) { - return - } - } -} - -type fieldRanger map[*ast.MessageFieldNode]*sourceinfo.OptionSourceInfo - -func (r fieldRanger) Range(f func(*ast.MessageFieldNode, ast.ValueNode) bool) { - for elem := range r { - if !f(elem, elem.Val) { - return - } - } -} - -func isPathMatch(a, b []int32) bool { - length := len(a) - if len(b) < length { - length = len(b) - } - for i := 0; i < length; i++ { - if a[i] != b[i] { - return false + } } - } - return true -} - -func findOptionValueNode( - path []int32, - node ast.Node, - value ast.ValueNode, - srcInfo *sourceinfo.OptionSourceInfo, -) (ast.Node, int) { - srcInfoPath := srcInfo.Path - if _, ok := srcInfo.Children.(*sourceinfo.ArrayLiteralSourceInfo); ok { - // Last path element for array source info is the index of the - // first element. So exclude in the comparison, since path could - // indicate a later index, which is present in the array. - srcInfoPath = srcInfo.Path[:len(srcInfo.Path)-1] - } - - if !isPathMatch(path, srcInfoPath) { - return nil, 0 - } - if len(srcInfoPath) >= len(path) { - return node, len(path) - } - - switch children := srcInfo.Children.(type) { - case *sourceinfo.ArrayLiteralSourceInfo: - array, ok := value.(*ast.ArrayLiteralNode) - if !ok { - break // should never happen - } - var i int - match, matchLen := findOptionNode[ast.ValueNode]( - path, - valueRanger(array.Elements), - func(_ ast.ValueNode) *sourceinfo.OptionSourceInfo { - val := &children.Elements[i] - i++ - return val - }, - ) - if match != nil { - return match, matchLen - } - - case *sourceinfo.MessageLiteralSourceInfo: - match, matchLen := findOptionNode[*ast.MessageFieldNode]( - path, - fieldRanger(children.Fields), - func(n *ast.MessageFieldNode) *sourceinfo.OptionSourceInfo { - return children.Fields[n] - }, - ) - if match != nil { - return match, matchLen - } - } - - return node, len(srcInfoPath) + return true + }) + return err } // interpretField interprets the option described by opt, as a field inside the given msg. This @@ -1153,18 +954,7 @@ func findOptionValueNode( // msg must be an options message. For nameIndex > 0, msg is a nested message inside of the // options message. 
The given pathPrefix is the path (sequence of field numbers and indices // with a FileDescriptorProto as the start) up to but not including the given nameIndex. -// -// Any errors encountered will be handled, so the returned error will only be non-nil if -// the handler returned non-nil. Callers must check that the source info is non-nil before -// using it since it can be nil (in the event of a problem) even if the error is nil. -func (interp *interpreter) interpretField( - targetType descriptorpb.FieldOptions_OptionTargetType, - mc *internal.MessageContext, - msg protoreflect.Message, - opt *descriptorpb.UninterpretedOption, - nameIndex int, - pathPrefix []int32, -) (*sourceinfo.OptionSourceInfo, error) { +func (interp *interpreter) interpretField(mc *internal.MessageContext, msg protoreflect.Message, opt *descriptorpb.UninterpretedOption, nameIndex int, pathPrefix []int32) (*interpretedOption, error) { var fld protoreflect.FieldDescriptor nm := opt.GetName()[nameIndex] node := interp.file.OptionNamePartNode(nm) @@ -1173,45 +963,37 @@ func (interp *interpreter) interpretField( if extName[0] == '.' { extName = extName[1:] /* skip leading dot */ } - var err error - fld, err = interp.resolveExtensionType(extName) - if errors.Is(err, protoregistry.NotFound) { - return nil, interp.handleErrorf(interp.nodeInfo(node), + fld = interp.file.ResolveExtension(protoreflect.FullName(extName)) + if fld == nil { + return nil, interp.reporter.HandleErrorf(interp.nodeInfo(node).Start(), "%vunrecognized extension %s of %s", mc, extName, msg.Descriptor().FullName()) - } else if err != nil { - return nil, interp.handleErrorWithPos(interp.nodeInfo(node), err) } if fld.ContainingMessage().FullName() != msg.Descriptor().FullName() { - return nil, interp.handleErrorf(interp.nodeInfo(node), + return nil, interp.reporter.HandleErrorf(interp.nodeInfo(node).Start(), "%vextension %s should extend %s but instead extends %s", mc, extName, msg.Descriptor().FullName(), fld.ContainingMessage().FullName()) } } else { fld = msg.Descriptor().Fields().ByName(protoreflect.Name(nm.GetNamePart())) if fld == nil { - return nil, interp.handleErrorf(interp.nodeInfo(node), + return nil, interp.reporter.HandleErrorf(interp.nodeInfo(node).Start(), "%vfield %s of %s does not exist", mc, nm.GetNamePart(), msg.Descriptor().FullName()) } } - pathPrefix = append(pathPrefix, int32(fld.Number())) - - if err := interp.checkFieldUsage(targetType, fld, node); err != nil { - return nil, err - } if len(opt.GetName()) > nameIndex+1 { nextnm := opt.GetName()[nameIndex+1] nextnode := interp.file.OptionNamePartNode(nextnm) k := fld.Kind() if k != protoreflect.MessageKind && k != protoreflect.GroupKind { - return nil, interp.handleErrorf(interp.nodeInfo(nextnode), + return nil, interp.reporter.HandleErrorf(interp.nodeInfo(nextnode).Start(), "%vcannot set field %s because %s is not a message", mc, nextnm.GetNamePart(), nm.GetNamePart()) } if fld.Cardinality() == protoreflect.Repeated { - return nil, interp.handleErrorf(interp.nodeInfo(nextnode), + return nil, interp.reporter.HandleErrorf(interp.nodeInfo(nextnode).Start(), "%vcannot set field %s because %s is repeated (must use an aggregate)", mc, nextnm.GetNamePart(), nm.GetNamePart()) } @@ -1223,341 +1005,208 @@ func (interp *interpreter) interpretField( if ood := fld.ContainingOneof(); ood != nil { existingFld := msg.WhichOneof(ood) if existingFld != nil && existingFld.Number() != fld.Number() { - return nil, interp.handleErrorf(interp.nodeInfo(node), + return nil, 
interp.reporter.HandleErrorf(interp.nodeInfo(node).Start(), "%voneof %q already has field %q set", mc, ood.Name(), fieldName(existingFld)) } } - fldVal := msg.NewField(fld) - fdm = fldVal.Message() - msg.Set(fld, fldVal) + fdm = dynamicpb.NewMessage(fld.Message()) + msg.Set(fld, protoreflect.ValueOfMessage(fdm)) } // recurse to set next part of name - return interp.interpretField(targetType, mc, fdm, opt, nameIndex+1, pathPrefix) + return interp.interpretField(mc, fdm, opt, nameIndex+1, append(pathPrefix, int32(fld.Number()))) } optNode := interp.file.OptionNode(opt) - optValNode := optNode.GetValue() - var srcInfo *sourceinfo.OptionSourceInfo - var err error - if optValNode.Value() == nil { - err = interp.setOptionFieldFromProto(targetType, mc, msg, fld, node, opt, optValNode) - srcInfoVal := newSrcInfo(pathPrefix, nil) - srcInfo = &srcInfoVal - } else { - srcInfo, err = interp.setOptionField(targetType, mc, msg, fld, node, optValNode, false, pathPrefix) - } + val, err := interp.setOptionField(mc, msg, fld, node, optNode.GetValue(), false) if err != nil { - return nil, err + return nil, interp.reporter.HandleError(err) } - - return srcInfo, nil + var index int32 + if fld.IsMap() { + index = int32(msg.Get(fld).Map().Len()) - 1 + } else if fld.IsList() { + index = int32(msg.Get(fld).List().Len()) - 1 + } + return &interpretedOption{ + pathPrefix: pathPrefix, + interpretedField: interpretedField{ + number: int32(fld.Number()), + index: index, + kind: fld.Kind(), + repeated: fld.Cardinality() == protoreflect.Repeated, + value: val, + // NB: don't set packed here in a top-level option + // (only values in message literals will be serialized + // in packed format) + }, + }, nil } // setOptionField sets the value for field fld in the given message msg to the value represented -// by AST node val. The given name is the AST node that corresponds to the name of fld. On success, -// it returns additional metadata about the field that was set. -func (interp *interpreter) setOptionField( - targetType descriptorpb.FieldOptions_OptionTargetType, - mc *internal.MessageContext, - msg protoreflect.Message, - fld protoreflect.FieldDescriptor, - name ast.Node, - val ast.ValueNode, - insideMsgLiteral bool, - pathPrefix []int32, -) (*sourceinfo.OptionSourceInfo, error) { +// by val. The given name is the AST node that corresponds to the name of fld. On success, it +// returns additional metadata about the field that was set. 
+func (interp *interpreter) setOptionField(mc *internal.MessageContext, msg protoreflect.Message, fld protoreflect.FieldDescriptor, name ast.Node, val ast.ValueNode, insideMsgLiteral bool) (interpretedFieldValue, error) { v := val.Value() if sl, ok := v.([]ast.ValueNode); ok { // handle slices a little differently than the others if fld.Cardinality() != protoreflect.Repeated { - return nil, interp.handleErrorf(interp.nodeInfo(val), "%vvalue is an array but field is not repeated", mc) + return interpretedFieldValue{}, reporter.Errorf(interp.nodeInfo(val).Start(), "%vvalue is an array but field is not repeated", mc) } origPath := mc.OptAggPath defer func() { mc.OptAggPath = origPath }() - childVals := make([]sourceinfo.OptionSourceInfo, len(sl)) - var firstIndex int - if fld.IsMap() { - firstIndex = msg.Get(fld).Map().Len() - } else { - firstIndex = msg.Get(fld).List().Len() - } + var resVal listValue + var resMsgVals [][]*interpretedField for index, item := range sl { mc.OptAggPath = fmt.Sprintf("%s[%d]", origPath, index) - value, srcInfo, err := interp.fieldValue(targetType, mc, msg, fld, item, insideMsgLiteral, append(pathPrefix, int32(firstIndex+index))) - if err != nil || !value.IsValid() { - return nil, err + value, err := interp.fieldValue(mc, fld, item, insideMsgLiteral) + if err != nil { + return interpretedFieldValue{}, err } if fld.IsMap() { - mv := msg.Mutable(fld).Map() - setMapEntry(fld, msg, mv, value.Message()) + setMapEntry(msg, fld, &value) } else { - lv := msg.Mutable(fld).List() - lv.Append(value) + msg.Mutable(fld).List().Append(value.val) + } + resVal = append(resVal, value.val) + if value.msgVal != nil { + resMsgVals = append(resMsgVals, value.msgVal) } - childVals[index] = srcInfo } - srcInfo := newSrcInfo(append(pathPrefix, int32(firstIndex)), &sourceinfo.ArrayLiteralSourceInfo{Elements: childVals}) - return &srcInfo, nil - } - - if fld.IsMap() { - pathPrefix = append(pathPrefix, int32(msg.Get(fld).Map().Len())) - } else if fld.IsList() { - pathPrefix = append(pathPrefix, int32(msg.Get(fld).List().Len())) + return interpretedFieldValue{ + isList: true, + val: protoreflect.ValueOfList(&resVal), + msgListVal: resMsgVals, + }, nil } - value, srcInfo, err := interp.fieldValue(targetType, mc, msg, fld, val, insideMsgLiteral, pathPrefix) - if err != nil || !value.IsValid() { - return nil, err + value, err := interp.fieldValue(mc, fld, val, insideMsgLiteral) + if err != nil { + return interpretedFieldValue{}, err } if ood := fld.ContainingOneof(); ood != nil { existingFld := msg.WhichOneof(ood) if existingFld != nil && existingFld.Number() != fld.Number() { - return nil, interp.handleErrorf(interp.nodeInfo(name), "%voneof %q already has field %q set", mc, ood.Name(), fieldName(existingFld)) + return interpretedFieldValue{}, reporter.Errorf(interp.nodeInfo(name).Start(), "%voneof %q already has field %q set", mc, ood.Name(), fieldName(existingFld)) } } switch { case fld.IsMap(): - mv := msg.Mutable(fld).Map() - setMapEntry(fld, msg, mv, value.Message()) + setMapEntry(msg, fld, &value) case fld.IsList(): - lv := msg.Mutable(fld).List() - lv.Append(value) + msg.Mutable(fld).List().Append(value.val) default: if msg.Has(fld) { - return nil, interp.handleErrorf(interp.nodeInfo(name), "%vnon-repeated option field %s already set", mc, fieldName(fld)) + return interpretedFieldValue{}, reporter.Errorf(interp.nodeInfo(name).Start(), "%vnon-repeated option field %s already set", mc, fieldName(fld)) } - msg.Set(fld, value) + msg.Set(fld, value.val) } - return &srcInfo, nil + return 
value, nil } -// setOptionFieldFromProto sets the value for field fld in the given message msg to the value -// represented by the given uninterpreted option. The given ast.Node, if non-nil, will be used -// to report source positions in error messages. On success, it returns additional metadata -// about the field that was set. -func (interp *interpreter) setOptionFieldFromProto( - targetType descriptorpb.FieldOptions_OptionTargetType, - mc *internal.MessageContext, - msg protoreflect.Message, - fld protoreflect.FieldDescriptor, - name ast.Node, - opt *descriptorpb.UninterpretedOption, - node ast.Node, -) error { - k := fld.Kind() - var value protoreflect.Value - switch k { - case protoreflect.EnumKind: - num, _, err := interp.enumFieldValueFromProto(mc, fld.Enum(), opt, node) - if err != nil { - return interp.handleError(err) - } - value = protoreflect.ValueOfEnum(num) - - case protoreflect.MessageKind, protoreflect.GroupKind: - if opt.AggregateValue == nil { - return interp.handleErrorf(interp.nodeInfo(node), "%vexpecting message, got %s", mc, optionValueKind(opt)) - } - // We must parse the text format from the aggregate value string - var elem protoreflect.Message - switch { - case fld.IsMap(): - elem = dynamicpb.NewMessage(fld.Message()) - case fld.IsList(): - elem = msg.Get(fld).List().NewElement().Message() - default: - elem = msg.NewField(fld).Message() - } - err := prototext.UnmarshalOptions{ - Resolver: &msgLiteralResolver{interp: interp, pkg: fld.ParentFile().Package()}, - AllowPartial: true, - }.Unmarshal([]byte(opt.GetAggregateValue()), elem.Interface()) - if err != nil { - return interp.handleErrorf(interp.nodeInfo(node), "%vfailed to parse message literal %w", mc, err) - } - if err := interp.checkFieldUsagesInMessage(targetType, elem, node); err != nil { - return err - } - value = protoreflect.ValueOfMessage(elem) - - default: - v, err := interp.scalarFieldValueFromProto(mc, descriptorpb.FieldDescriptorProto_Type(k), opt, node) - if err != nil { - return interp.handleError(err) - } - value = protoreflect.ValueOf(v) +func setMapEntry(msg protoreflect.Message, fld protoreflect.FieldDescriptor, value *interpretedFieldValue) { + entry := value.val.Message() + keyFld, valFld := fld.MapKey(), fld.MapValue() + // if an entry is missing a key or value, we add in an explicit + // zero value to msgVals to match protoc (which also odds these + // in even if not present in source) + if !entry.Has(keyFld) { + // put key before value + value.msgVal = append(append(([]*interpretedField)(nil), zeroValue(keyFld)), value.msgVal...) 
} - - if ood := fld.ContainingOneof(); ood != nil { - existingFld := msg.WhichOneof(ood) - if existingFld != nil && existingFld.Number() != fld.Number() { - return interp.handleErrorf(interp.nodeInfo(name), "%voneof %q already has field %q set", mc, ood.Name(), fieldName(existingFld)) - } + if !entry.Has(valFld) { + value.msgVal = append(value.msgVal, zeroValue(valFld)) + } + key := entry.Get(keyFld) + val := entry.Get(valFld) + if dm, ok := val.Interface().(*dynamicpb.Message); ok && (dm == nil || !dm.IsValid()) { + val = protoreflect.ValueOfMessage(dynamicpb.NewMessage(valFld.Message())) } + m := msg.Mutable(fld).Map() + // TODO: error if key is already present + m.Set(key.MapKey(), val) +} - switch { - case fld.IsMap(): - mv := msg.Mutable(fld).Map() - setMapEntry(fld, msg, mv, value.Message()) - case fld.IsList(): - msg.Mutable(fld).List().Append(value) - default: - if msg.Has(fld) { - return interp.handleErrorf(interp.nodeInfo(name), "%vnon-repeated option field %s already set", mc, fieldName(fld)) - } - msg.Set(fld, value) +// zeroValue returns the zero value for the field types as a *interpretedField. +// The given fld must NOT be a repeated field. +func zeroValue(fld protoreflect.FieldDescriptor) *interpretedField { + var val protoreflect.Value + var msgVal []*interpretedField + switch fld.Kind() { + case protoreflect.MessageKind, protoreflect.GroupKind: + // needs to be non-nil, but empty + msgVal = []*interpretedField{} + msg := dynamicpb.NewMessage(fld.Message()) + val = protoreflect.ValueOfMessage(msg) + case protoreflect.EnumKind: + val = protoreflect.ValueOfEnum(0) + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + val = protoreflect.ValueOfInt32(0) + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + val = protoreflect.ValueOfUint32(0) + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + val = protoreflect.ValueOfInt64(0) + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + val = protoreflect.ValueOfUint64(0) + case protoreflect.BoolKind: + val = protoreflect.ValueOfBool(false) + case protoreflect.FloatKind: + val = protoreflect.ValueOfFloat32(0) + case protoreflect.DoubleKind: + val = protoreflect.ValueOfFloat64(0) + case protoreflect.BytesKind: + val = protoreflect.ValueOfBytes(nil) + case protoreflect.StringKind: + val = protoreflect.ValueOfString("") + } + return &interpretedField{ + number: int32(fld.Number()), + kind: fld.Kind(), + value: interpretedFieldValue{ + val: val, + msgVal: msgVal, + }, } - return nil } -// checkFieldUsagesInMessage verifies that all fields present in the given -// message can be used for the given target type. When an AST is -// present, we validate each field as it is processed. But without -// an AST, we unmarshal a message from an uninterpreted option's -// aggregate value string, and then must make sure that all fields -// set in that message are valid. This reports an error for each -// invalid field it encounters and returns a non-nil error if/when -// the handler returns a non-nil error. 
-func (interp *interpreter) checkFieldUsagesInMessage( - targetType descriptorpb.FieldOptions_OptionTargetType, - msg protoreflect.Message, - node ast.Node, -) error { - var err error - msg.Range(func(fld protoreflect.FieldDescriptor, val protoreflect.Value) bool { - err = interp.checkFieldUsage(targetType, fld, node) - if err != nil { - return false - } - switch { - case fld.IsList() && fld.Message() != nil: - listVal := val.List() - for i, length := 0, listVal.Len(); i < length; i++ { - err = interp.checkFieldUsagesInMessage(targetType, listVal.Get(i).Message(), node) - if err != nil { - return false - } - } - case fld.IsMap() && fld.MapValue().Message() != nil: - mapVal := val.Map() - mapVal.Range(func(_ protoreflect.MapKey, val protoreflect.Value) bool { - err = interp.checkFieldUsagesInMessage(targetType, val.Message(), node) - return err == nil - }) - case !fld.IsMap() && fld.Message() != nil: - err = interp.checkFieldUsagesInMessage(targetType, val.Message(), node) - } - return err == nil - }) - return err +type listValue []protoreflect.Value + +var _ protoreflect.List = (*listValue)(nil) + +func (l listValue) Len() int { + return len(l) } -func (interp *interpreter) enableLenience(enable bool) { - if !interp.lenient { - return // nothing to do - } - if enable { - // reset the flag that tracks if an error has been reported - interp.lenientErrReported = false - } - interp.lenienceEnabled = enable +func (l listValue) Get(i int) protoreflect.Value { + return l[i] } -func setMapEntry( - fld protoreflect.FieldDescriptor, - msg protoreflect.Message, - mapVal protoreflect.Map, - entry protoreflect.Message, -) { - keyFld, valFld := fld.MapKey(), fld.MapValue() - key := entry.Get(keyFld) - val := entry.Get(valFld) - if fld.MapValue().Kind() == protoreflect.MessageKind { - // Replace any nil/invalid values with an empty message - dm, valIsDynamic := val.Interface().(*dynamicpb.Message) - if (valIsDynamic && dm == nil) || !val.Message().IsValid() { - val = protoreflect.ValueOfMessage(dynamicpb.NewMessage(valFld.Message())) - } - _, containerIsDynamic := msg.Interface().(*dynamicpb.Message) - if valIsDynamic && !containerIsDynamic { - // This happens because we create dynamic messages to represent map entries, - // but the container of the map may expect a non-dynamic, generated type. - dest := mapVal.NewValue() - _, destIsDynamic := dest.Message().Interface().(*dynamicpb.Message) - if !destIsDynamic { - // reflection Set methods do not support cases where destination is - // generated but source is dynamic (or vice versa). But proto.Merge - // *DOES* support that, as long as dest and source use the same - // descriptor. 
- proto.Merge(dest.Message().Interface(), val.Message().Interface()) - val = dest - } - } - } - // TODO: error if key is already present - mapVal.Set(key.MapKey(), val) +func (l listValue) Set(i int, value protoreflect.Value) { + l[i] = value } -type msgLiteralResolver struct { - interp *interpreter - pkg protoreflect.FullName +func (l *listValue) Append(value protoreflect.Value) { + *l = append(*l, value) } -func (r *msgLiteralResolver) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) { - if r.interp.resolver == nil { - return nil, protoregistry.NotFound - } - return r.interp.resolver.FindMessageByName(message) +func (l listValue) AppendMutable() protoreflect.Value { + panic("AppendMutable not supported") } -func (r *msgLiteralResolver) FindMessageByURL(url string) (protoreflect.MessageType, error) { - // In a message literal, we don't allow arbitrary URL prefixes - pos := strings.LastIndexByte(url, '/') - var urlPrefix string - if pos > 0 { - urlPrefix = url[:pos] - } - if urlPrefix != "type.googleapis.com" && urlPrefix != "type.googleprod.com" { - return nil, fmt.Errorf("could not resolve type reference %s", url) - } - return r.FindMessageByName(protoreflect.FullName(url[pos+1:])) +func (l *listValue) Truncate(i int) { + *l = (*l)[:i] } -func (r *msgLiteralResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { - if r.interp.resolver == nil { - return nil, protoregistry.NotFound - } - // In a message literal, extension name may be partially qualified, relative to package. - // So we have to search through package scopes. - pkg := r.pkg - for { - // TODO: This does not *fully* implement the insane logic of protoc with regards - // to resolving relative references. - // https://protobuf.com/docs/language-spec#reference-resolution - name := pkg.Append(protoreflect.Name(field)) - ext, err := r.interp.resolver.FindExtensionByName(name) - if err == nil { - return ext, nil - } - if pkg == "" { - // no more namespaces to check - return nil, err - } - pkg = pkg.Parent() - } +func (l listValue) NewElement() protoreflect.Value { + panic("NewElement not supported") } -func (r *msgLiteralResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { - if r.interp.resolver == nil { - return nil, protoregistry.NotFound - } - return r.interp.resolver.FindExtensionByNumber(message, field) +func (l listValue) IsValid() bool { + return true } func fieldName(fld protoreflect.FieldDescriptor) string { @@ -1593,88 +1242,38 @@ func valueKind(val interface{}) string { } } -func optionValueKind(opt *descriptorpb.UninterpretedOption) string { - switch { - case opt.IdentifierValue != nil: - return "identifier" - case opt.PositiveIntValue != nil: - return "integer" - case opt.NegativeIntValue != nil: - return "negative integer" - case opt.DoubleValue != nil: - return "double" - case opt.StringValue != nil: - return "string" - case opt.AggregateValue != nil: - return "message" - default: - // should not be possible - return "" - } -} - // fieldValue computes a compile-time value (constant or list or message literal) for the given // AST node val. The value in val must be assignable to the field fld. -// -// If the returned value is not valid, then an error occurred during processing. -// The returned err may be nil, however, as any errors will already have been -// handled (so the resulting error could be nil if the handler returned nil). 
-func (interp *interpreter) fieldValue( - targetType descriptorpb.FieldOptions_OptionTargetType, - mc *internal.MessageContext, - msg protoreflect.Message, - fld protoreflect.FieldDescriptor, - val ast.ValueNode, - insideMsgLiteral bool, - pathPrefix []int32, -) (protoreflect.Value, sourceinfo.OptionSourceInfo, error) { +func (interp *interpreter) fieldValue(mc *internal.MessageContext, fld protoreflect.FieldDescriptor, val ast.ValueNode, insideMsgLiteral bool) (interpretedFieldValue, error) { k := fld.Kind() switch k { case protoreflect.EnumKind: num, _, err := interp.enumFieldValue(mc, fld.Enum(), val, insideMsgLiteral) if err != nil { - return protoreflect.Value{}, sourceinfo.OptionSourceInfo{}, interp.handleError(err) + return interpretedFieldValue{}, err } - return protoreflect.ValueOfEnum(num), newSrcInfo(pathPrefix, nil), nil + return interpretedFieldValue{val: protoreflect.ValueOfEnum(num)}, nil case protoreflect.MessageKind, protoreflect.GroupKind: v := val.Value() if aggs, ok := v.([]*ast.MessageFieldNode); ok { - var childMsg protoreflect.Message - switch { - case fld.IsList(): - // List of messages - val := msg.NewField(fld) - childMsg = val.List().NewElement().Message() - case fld.IsMap(): - // No generated type for map entries, so we use a dynamic type - childMsg = dynamicpb.NewMessage(fld.Message()) - default: - // Normal message field - childMsg = msg.NewField(fld).Message() - } - return interp.messageLiteralValue(targetType, mc, aggs, childMsg, pathPrefix) + fmd := fld.Message() + return interp.messageLiteralValue(mc, aggs, fmd) } - return protoreflect.Value{}, sourceinfo.OptionSourceInfo{}, - interp.handleErrorf(interp.nodeInfo(val), "%vexpecting message, got %s", mc, valueKind(v)) + return interpretedFieldValue{}, reporter.Errorf(interp.nodeInfo(val).Start(), "%vexpecting message, got %s", mc, valueKind(v)) default: v, err := interp.scalarFieldValue(mc, descriptorpb.FieldDescriptorProto_Type(k), val, insideMsgLiteral) if err != nil { - return protoreflect.Value{}, sourceinfo.OptionSourceInfo{}, interp.handleError(err) + return interpretedFieldValue{}, err } - return protoreflect.ValueOf(v), newSrcInfo(pathPrefix, nil), nil + return interpretedFieldValue{val: protoreflect.ValueOf(v)}, nil } } // enumFieldValue resolves the given AST node val as an enum value descriptor. If the given -// value is not a valid identifier (or number if allowed), an error is returned instead. -func (interp *interpreter) enumFieldValue( - mc *internal.MessageContext, - ed protoreflect.EnumDescriptor, - val ast.ValueNode, - allowNumber bool, -) (protoreflect.EnumNumber, protoreflect.Name, error) { +// value is not a valid identifier, an error is returned instead. 
+func (interp *interpreter) enumFieldValue(mc *internal.MessageContext, ed protoreflect.EnumDescriptor, val ast.ValueNode, allowNumber bool) (protoreflect.EnumNumber, protoreflect.Name, error) { v := val.Value() var num protoreflect.EnumNumber switch v := v.(type) { @@ -1682,71 +1281,42 @@ func (interp *interpreter) enumFieldValue( name := protoreflect.Name(v) ev := ed.Values().ByName(name) if ev == nil { - return 0, "", reporter.Errorf(interp.nodeInfo(val), "%venum %s has no value named %s", mc, ed.FullName(), v) + return 0, "", reporter.Errorf(interp.nodeInfo(val).Start(), "%venum %s has no value named %s", mc, ed.FullName(), v) } return ev.Number(), name, nil case int64: if !allowNumber { - return 0, "", reporter.Errorf(interp.nodeInfo(val), "%vexpecting enum name, got %s", mc, valueKind(v)) + return 0, "", reporter.Errorf(interp.nodeInfo(val).Start(), "%vexpecting enum name, got %s", mc, valueKind(v)) } if v > math.MaxInt32 || v < math.MinInt32 { - return 0, "", reporter.Errorf(interp.nodeInfo(val), "%vvalue %d is out of range for an enum", mc, v) + return 0, "", reporter.Errorf(interp.nodeInfo(val).Start(), "%vvalue %d is out of range for an enum", mc, v) } num = protoreflect.EnumNumber(v) case uint64: if !allowNumber { - return 0, "", reporter.Errorf(interp.nodeInfo(val), "%vexpecting enum name, got %s", mc, valueKind(v)) + return 0, "", reporter.Errorf(interp.nodeInfo(val).Start(), "%vexpecting enum name, got %s", mc, valueKind(v)) } if v > math.MaxInt32 { - return 0, "", reporter.Errorf(interp.nodeInfo(val), "%vvalue %d is out of range for an enum", mc, v) + return 0, "", reporter.Errorf(interp.nodeInfo(val).Start(), "%vvalue %d is out of range for an enum", mc, v) } num = protoreflect.EnumNumber(v) default: - return 0, "", reporter.Errorf(interp.nodeInfo(val), "%vexpecting enum, got %s", mc, valueKind(v)) + return 0, "", reporter.Errorf(interp.nodeInfo(val).Start(), "%vexpecting enum, got %s", mc, valueKind(v)) } ev := ed.Values().ByNumber(num) if ev != nil { return num, ev.Name(), nil } - if ed.IsClosed() { - return num, "", reporter.Errorf(interp.nodeInfo(val), "%vclosed enum %s has no value with number %d", mc, ed.FullName(), num) + if ed.Syntax() != protoreflect.Proto3 { + return 0, "", reporter.Errorf(interp.nodeInfo(val).Start(), "%vclosed enum %s has no value with number %d", mc, ed.FullName(), num) } // unknown value, but enum is open, so we allow it and return blank name return num, "", nil } -// enumFieldValueFromProto resolves the given uninterpreted option value as an enum value descriptor. -// If the given value is not a valid identifier, an error is returned instead. -func (interp *interpreter) enumFieldValueFromProto( - mc *internal.MessageContext, - ed protoreflect.EnumDescriptor, - opt *descriptorpb.UninterpretedOption, - node ast.Node, -) (protoreflect.EnumNumber, protoreflect.Name, error) { - // We don't have to worry about allowing numbers because numbers are never allowed - // in uninterpreted values; they are only allowed inside aggregate values (i.e. - // message literals). 
- switch { - case opt.IdentifierValue != nil: - name := protoreflect.Name(opt.GetIdentifierValue()) - ev := ed.Values().ByName(name) - if ev == nil { - return 0, "", reporter.Errorf(interp.nodeInfo(node), "%venum %s has no value named %s", mc, ed.FullName(), name) - } - return ev.Number(), name, nil - default: - return 0, "", reporter.Errorf(interp.nodeInfo(node), "%vexpecting enum, got %s", mc, optionValueKind(opt)) - } -} - // scalarFieldValue resolves the given AST node val as a value whose type is assignable to a // field with the given fldType. -func (interp *interpreter) scalarFieldValue( - mc *internal.MessageContext, - fldType descriptorpb.FieldDescriptorProto_Type, - val ast.ValueNode, - insideMsgLiteral bool, -) (interface{}, error) { +func (interp *interpreter) scalarFieldValue(mc *internal.MessageContext, fldType descriptorpb.FieldDescriptorProto_Type, val ast.ValueNode, insideMsgLiteral bool) (interface{}, error) { v := val.Value() switch fldType { case descriptorpb.FieldDescriptorProto_TYPE_BOOL: @@ -1773,67 +1343,67 @@ func (interp *interpreter) scalarFieldValue( } } } - return nil, reporter.Errorf(interp.nodeInfo(val), "%vexpecting bool, got %s", mc, valueKind(v)) + return nil, reporter.Errorf(interp.nodeInfo(val).Start(), "%vexpecting bool, got %s", mc, valueKind(v)) case descriptorpb.FieldDescriptorProto_TYPE_BYTES: if str, ok := v.(string); ok { return []byte(str), nil } - return nil, reporter.Errorf(interp.nodeInfo(val), "%vexpecting bytes, got %s", mc, valueKind(v)) + return nil, reporter.Errorf(interp.nodeInfo(val).Start(), "%vexpecting bytes, got %s", mc, valueKind(v)) case descriptorpb.FieldDescriptorProto_TYPE_STRING: if str, ok := v.(string); ok { return str, nil } - return nil, reporter.Errorf(interp.nodeInfo(val), "%vexpecting string, got %s", mc, valueKind(v)) + return nil, reporter.Errorf(interp.nodeInfo(val).Start(), "%vexpecting string, got %s", mc, valueKind(v)) case descriptorpb.FieldDescriptorProto_TYPE_INT32, descriptorpb.FieldDescriptorProto_TYPE_SINT32, descriptorpb.FieldDescriptorProto_TYPE_SFIXED32: if i, ok := v.(int64); ok { if i > math.MaxInt32 || i < math.MinInt32 { - return nil, reporter.Errorf(interp.nodeInfo(val), "%vvalue %d is out of range for int32", mc, i) + return nil, reporter.Errorf(interp.nodeInfo(val).Start(), "%vvalue %d is out of range for int32", mc, i) } return int32(i), nil } if ui, ok := v.(uint64); ok { if ui > math.MaxInt32 { - return nil, reporter.Errorf(interp.nodeInfo(val), "%vvalue %d is out of range for int32", mc, ui) + return nil, reporter.Errorf(interp.nodeInfo(val).Start(), "%vvalue %d is out of range for int32", mc, ui) } return int32(ui), nil } - return nil, reporter.Errorf(interp.nodeInfo(val), "%vexpecting int32, got %s", mc, valueKind(v)) + return nil, reporter.Errorf(interp.nodeInfo(val).Start(), "%vexpecting int32, got %s", mc, valueKind(v)) case descriptorpb.FieldDescriptorProto_TYPE_UINT32, descriptorpb.FieldDescriptorProto_TYPE_FIXED32: if i, ok := v.(int64); ok { if i > math.MaxUint32 || i < 0 { - return nil, reporter.Errorf(interp.nodeInfo(val), "%vvalue %d is out of range for uint32", mc, i) + return nil, reporter.Errorf(interp.nodeInfo(val).Start(), "%vvalue %d is out of range for uint32", mc, i) } return uint32(i), nil } if ui, ok := v.(uint64); ok { if ui > math.MaxUint32 { - return nil, reporter.Errorf(interp.nodeInfo(val), "%vvalue %d is out of range for uint32", mc, ui) + return nil, reporter.Errorf(interp.nodeInfo(val).Start(), "%vvalue %d is out of range for uint32", mc, ui) } return uint32(ui), 
nil } - return nil, reporter.Errorf(interp.nodeInfo(val), "%vexpecting uint32, got %s", mc, valueKind(v)) + return nil, reporter.Errorf(interp.nodeInfo(val).Start(), "%vexpecting uint32, got %s", mc, valueKind(v)) case descriptorpb.FieldDescriptorProto_TYPE_INT64, descriptorpb.FieldDescriptorProto_TYPE_SINT64, descriptorpb.FieldDescriptorProto_TYPE_SFIXED64: if i, ok := v.(int64); ok { return i, nil } if ui, ok := v.(uint64); ok { if ui > math.MaxInt64 { - return nil, reporter.Errorf(interp.nodeInfo(val), "%vvalue %d is out of range for int64", mc, ui) + return nil, reporter.Errorf(interp.nodeInfo(val).Start(), "%vvalue %d is out of range for int64", mc, ui) } return int64(ui), nil } - return nil, reporter.Errorf(interp.nodeInfo(val), "%vexpecting int64, got %s", mc, valueKind(v)) + return nil, reporter.Errorf(interp.nodeInfo(val).Start(), "%vexpecting int64, got %s", mc, valueKind(v)) case descriptorpb.FieldDescriptorProto_TYPE_UINT64, descriptorpb.FieldDescriptorProto_TYPE_FIXED64: if i, ok := v.(int64); ok { if i < 0 { - return nil, reporter.Errorf(interp.nodeInfo(val), "%vvalue %d is out of range for uint64", mc, i) + return nil, reporter.Errorf(interp.nodeInfo(val).Start(), "%vvalue %d is out of range for uint64", mc, i) } return uint64(i), nil } if ui, ok := v.(uint64); ok { return ui, nil } - return nil, reporter.Errorf(interp.nodeInfo(val), "%vexpecting uint64, got %s", mc, valueKind(v)) + return nil, reporter.Errorf(interp.nodeInfo(val).Start(), "%vexpecting uint64, got %s", mc, valueKind(v)) case descriptorpb.FieldDescriptorProto_TYPE_DOUBLE: if id, ok := v.(ast.Identifier); ok { switch id { @@ -1852,7 +1422,7 @@ func (interp *interpreter) scalarFieldValue( if u, ok := v.(uint64); ok { return float64(u), nil } - return nil, reporter.Errorf(interp.nodeInfo(val), "%vexpecting double, got %s", mc, valueKind(v)) + return nil, reporter.Errorf(interp.nodeInfo(val).Start(), "%vexpecting double, got %s", mc, valueKind(v)) case descriptorpb.FieldDescriptorProto_TYPE_FLOAT: if id, ok := v.(ast.Identifier); ok { switch id { @@ -1871,138 +1441,9 @@ func (interp *interpreter) scalarFieldValue( if u, ok := v.(uint64); ok { return float32(u), nil } - return nil, reporter.Errorf(interp.nodeInfo(val), "%vexpecting float, got %s", mc, valueKind(v)) - default: - return nil, reporter.Errorf(interp.nodeInfo(val), "%vunrecognized field type: %s", mc, fldType) - } -} - -// scalarFieldValue resolves the given uninterpreted option value as a value whose type is -// assignable to a field with the given fldType. 
-func (interp *interpreter) scalarFieldValueFromProto( - mc *internal.MessageContext, - fldType descriptorpb.FieldDescriptorProto_Type, - opt *descriptorpb.UninterpretedOption, - node ast.Node, -) (interface{}, error) { - switch fldType { - case descriptorpb.FieldDescriptorProto_TYPE_BOOL: - if opt.IdentifierValue != nil { - switch opt.GetIdentifierValue() { - case "true": - return true, nil - case "false": - return false, nil - } - } - return nil, reporter.Errorf(interp.nodeInfo(node), "%vexpecting bool, got %s", mc, optionValueKind(opt)) - case descriptorpb.FieldDescriptorProto_TYPE_BYTES: - if opt.StringValue != nil { - return opt.GetStringValue(), nil - } - return nil, reporter.Errorf(interp.nodeInfo(node), "%vexpecting bytes, got %s", mc, optionValueKind(opt)) - case descriptorpb.FieldDescriptorProto_TYPE_STRING: - if opt.StringValue != nil { - return string(opt.GetStringValue()), nil - } - return nil, reporter.Errorf(interp.nodeInfo(node), "%vexpecting string, got %s", mc, optionValueKind(opt)) - case descriptorpb.FieldDescriptorProto_TYPE_INT32, descriptorpb.FieldDescriptorProto_TYPE_SINT32, descriptorpb.FieldDescriptorProto_TYPE_SFIXED32: - if opt.NegativeIntValue != nil { - i := opt.GetNegativeIntValue() - if i > math.MaxInt32 || i < math.MinInt32 { - return nil, reporter.Errorf(interp.nodeInfo(node), "%vvalue %d is out of range for int32", mc, i) - } - return int32(i), nil - } - if opt.PositiveIntValue != nil { - ui := opt.GetPositiveIntValue() - if ui > math.MaxInt32 { - return nil, reporter.Errorf(interp.nodeInfo(node), "%vvalue %d is out of range for int32", mc, ui) - } - return int32(ui), nil - } - return nil, reporter.Errorf(interp.nodeInfo(node), "%vexpecting int32, got %s", mc, optionValueKind(opt)) - case descriptorpb.FieldDescriptorProto_TYPE_UINT32, descriptorpb.FieldDescriptorProto_TYPE_FIXED32: - if opt.NegativeIntValue != nil { - i := opt.GetNegativeIntValue() - if i > math.MaxUint32 || i < 0 { - return nil, reporter.Errorf(interp.nodeInfo(node), "%vvalue %d is out of range for uint32", mc, i) - } - return uint32(i), nil - } - if opt.PositiveIntValue != nil { - ui := opt.GetPositiveIntValue() - if ui > math.MaxUint32 { - return nil, reporter.Errorf(interp.nodeInfo(node), "%vvalue %d is out of range for uint32", mc, ui) - } - return uint32(ui), nil - } - return nil, reporter.Errorf(interp.nodeInfo(node), "%vexpecting uint32, got %s", mc, optionValueKind(opt)) - case descriptorpb.FieldDescriptorProto_TYPE_INT64, descriptorpb.FieldDescriptorProto_TYPE_SINT64, descriptorpb.FieldDescriptorProto_TYPE_SFIXED64: - if opt.NegativeIntValue != nil { - return opt.GetNegativeIntValue(), nil - } - if opt.PositiveIntValue != nil { - ui := opt.GetPositiveIntValue() - if ui > math.MaxInt64 { - return nil, reporter.Errorf(interp.nodeInfo(node), "%vvalue %d is out of range for int64", mc, ui) - } - return int64(ui), nil - } - return nil, reporter.Errorf(interp.nodeInfo(node), "%vexpecting int64, got %s", mc, optionValueKind(opt)) - case descriptorpb.FieldDescriptorProto_TYPE_UINT64, descriptorpb.FieldDescriptorProto_TYPE_FIXED64: - if opt.NegativeIntValue != nil { - i := opt.GetNegativeIntValue() - if i < 0 { - return nil, reporter.Errorf(interp.nodeInfo(node), "%vvalue %d is out of range for uint64", mc, i) - } - // should not be possible since i should always be negative... 
- return uint64(i), nil - } - if opt.PositiveIntValue != nil { - return opt.GetPositiveIntValue(), nil - } - return nil, reporter.Errorf(interp.nodeInfo(node), "%vexpecting uint64, got %s", mc, optionValueKind(opt)) - case descriptorpb.FieldDescriptorProto_TYPE_DOUBLE: - if opt.IdentifierValue != nil { - switch opt.GetIdentifierValue() { - case "inf": - return math.Inf(1), nil - case "nan": - return math.NaN(), nil - } - } - if opt.DoubleValue != nil { - return opt.GetDoubleValue(), nil - } - if opt.NegativeIntValue != nil { - return float64(opt.GetNegativeIntValue()), nil - } - if opt.PositiveIntValue != nil { - return float64(opt.GetPositiveIntValue()), nil - } - return nil, reporter.Errorf(interp.nodeInfo(node), "%vexpecting double, got %s", mc, optionValueKind(opt)) - case descriptorpb.FieldDescriptorProto_TYPE_FLOAT: - if opt.IdentifierValue != nil { - switch opt.GetIdentifierValue() { - case "inf": - return float32(math.Inf(1)), nil - case "nan": - return float32(math.NaN()), nil - } - } - if opt.DoubleValue != nil { - return float32(opt.GetDoubleValue()), nil - } - if opt.NegativeIntValue != nil { - return float32(opt.GetNegativeIntValue()), nil - } - if opt.PositiveIntValue != nil { - return float32(opt.GetPositiveIntValue()), nil - } - return nil, reporter.Errorf(interp.nodeInfo(node), "%vexpecting float, got %s", mc, optionValueKind(opt)) + return nil, reporter.Errorf(interp.nodeInfo(val).Start(), "%vexpecting float, got %s", mc, valueKind(v)) default: - return nil, reporter.Errorf(interp.nodeInfo(node), "%vunrecognized field type: %s", mc, fldType) + return nil, reporter.Errorf(interp.nodeInfo(val).Start(), "%vunrecognized field type: %s", mc, fldType) } } @@ -2033,25 +1474,17 @@ func descriptorType(m proto.Message) string { } } -// messageLiteralValue processes a message literal value. -// -// If the returned value is not valid, then an error occurred during processing. -// The returned err may be nil, however, as any errors will already have been -// handled (so the resulting error could be nil if the handler returned nil). -func (interp *interpreter) messageLiteralValue( - targetType descriptorpb.FieldOptions_OptionTargetType, - mc *internal.MessageContext, - fieldNodes []*ast.MessageFieldNode, - msg protoreflect.Message, - pathPrefix []int32, -) (protoreflect.Value, sourceinfo.OptionSourceInfo, error) { - fmd := msg.Descriptor() +func (interp *interpreter) messageLiteralValue(mc *internal.MessageContext, fieldNodes []*ast.MessageFieldNode, fmd protoreflect.MessageDescriptor) (interpretedFieldValue, error) { + fdm := dynamicpb.NewMessage(fmd) origPath := mc.OptAggPath defer func() { mc.OptAggPath = origPath }() - flds := make(map[*ast.MessageFieldNode]*sourceinfo.OptionSourceInfo, len(fieldNodes)) - var hadError bool + // NB: we don't want to leave this nil, even if the + // message is empty, because that indicates to + // caller that the result is not a message + flds := make([]*interpretedField, 0, len(fieldNodes)) + var foundAnyNode bool for _, fieldNode := range fieldNodes { if origPath == "" { mc.OptAggPath = fieldNode.Name.Value() @@ -2059,209 +1492,120 @@ func (interp *interpreter) messageLiteralValue( mc.OptAggPath = origPath + "." 
+ fieldNode.Name.Value() } if fieldNode.Name.IsAnyTypeReference() { - if len(fieldNodes) > 1 { - err := interp.handleErrorf(interp.nodeInfo(fieldNode.Name.URLPrefix), "%vany type references cannot be repeated or mixed with other fields", mc) - if err != nil { - return protoreflect.Value{}, sourceinfo.OptionSourceInfo{}, err - } - hadError = true - } - if fmd.FullName() != "google.protobuf.Any" { - err := interp.handleErrorf(interp.nodeInfo(fieldNode.Name.URLPrefix), "%vtype references are only allowed for google.protobuf.Any, but this type is %s", mc, fmd.FullName()) - if err != nil { - return protoreflect.Value{}, sourceinfo.OptionSourceInfo{}, err - } - hadError = true - continue - } - typeURLDescriptor := fmd.Fields().ByNumber(internal.AnyTypeURLTag) - var err error - switch { - case typeURLDescriptor == nil: - err = fmt.Errorf("message schema is missing type_url field (number %d)", internal.AnyTypeURLTag) - case typeURLDescriptor.IsList(): - err = fmt.Errorf("message schema has type_url field (number %d) that is a list but should be singular", internal.AnyTypeURLTag) - case typeURLDescriptor.Kind() != protoreflect.StringKind: - err = fmt.Errorf("message schema has type_url field (number %d) that is %s but should be string", internal.AnyTypeURLTag, typeURLDescriptor.Kind()) - } - if err != nil { - err := interp.handleErrorf(interp.nodeInfo(fieldNode.Name), "%v%w", mc, err) - if err != nil { - return protoreflect.Value{}, sourceinfo.OptionSourceInfo{}, err - } - hadError = true - continue - } - valueDescriptor := fmd.Fields().ByNumber(internal.AnyValueTag) - switch { - case valueDescriptor == nil: - err = fmt.Errorf("message schema is missing value field (number %d)", internal.AnyValueTag) - case valueDescriptor.IsList(): - err = fmt.Errorf("message schema has value field (number %d) that is a list but should be singular", internal.AnyValueTag) - case valueDescriptor.Kind() != protoreflect.BytesKind: - err = fmt.Errorf("message schema has value field (number %d) that is %s but should be bytes", internal.AnyValueTag, valueDescriptor.Kind()) + return interpretedFieldValue{}, reporter.Errorf(interp.nodeInfo(fieldNode.Name.URLPrefix).Start(), "%vtype references are only allowed for google.protobuf.Any, but this type is %s", mc, fmd.FullName()) } - if err != nil { - err := interp.handleErrorf(interp.nodeInfo(fieldNode.Name), "%v%w", mc, err) - if err != nil { - return protoreflect.Value{}, sourceinfo.OptionSourceInfo{}, err - } - hadError = true - continue + if foundAnyNode { + return interpretedFieldValue{}, reporter.Errorf(interp.nodeInfo(fieldNode.Name.URLPrefix).Start(), "%vmultiple any type references are not allowed", mc) } - + foundAnyNode = true urlPrefix := fieldNode.Name.URLPrefix.AsIdentifier() msgName := fieldNode.Name.Name.AsIdentifier() fullURL := fmt.Sprintf("%s/%s", urlPrefix, msgName) - // TODO: Support other URLs dynamically -- the caller of protocompile - // should be able to provide a custom resolver that can resolve type + // TODO: Support other URLs dynamically -- the caller of protoparse + // should be able to provide a fldNode custom resolver that can resolve type // URLs into message descriptors. The default resolver would be // implemented as below, only accepting "type.googleapis.com" and // "type.googleprod.com" as hosts/prefixes and using the compiled - // file's transitive closure to find the named message, since that - // is what protoc does. + // file's transitive closure to find the named message. 
if urlPrefix != "type.googleapis.com" && urlPrefix != "type.googleprod.com" { - err := interp.handleErrorf(interp.nodeInfo(fieldNode.Name.URLPrefix), "%vcould not resolve type reference %s", mc, fullURL) - if err != nil { - return protoreflect.Value{}, sourceinfo.OptionSourceInfo{}, err - } - hadError = true - continue + return interpretedFieldValue{}, reporter.Errorf(interp.nodeInfo(fieldNode.Name.URLPrefix).Start(), "%vcould not resolve type reference %s", mc, fullURL) } anyFields, ok := fieldNode.Val.Value().([]*ast.MessageFieldNode) if !ok { - err := interp.handleErrorf(interp.nodeInfo(fieldNode.Val), "%vtype references for google.protobuf.Any must have message literal value", mc) - if err != nil { - return protoreflect.Value{}, sourceinfo.OptionSourceInfo{}, err - } - hadError = true - continue + return interpretedFieldValue{}, reporter.Errorf(interp.nodeInfo(fieldNode.Val).Start(), "%vtype references for google.protobuf.Any must have message literal value", mc) } - anyMd := resolveDescriptor[protoreflect.MessageDescriptor](interp.resolver, string(msgName)) + anyMd := interp.file.ResolveMessageType(protoreflect.FullName(msgName)) if anyMd == nil { - err := interp.handleErrorf(interp.nodeInfo(fieldNode.Name.URLPrefix), "%vcould not resolve type reference %s", mc, fullURL) - if err != nil { - return protoreflect.Value{}, sourceinfo.OptionSourceInfo{}, err - } - hadError = true - continue + return interpretedFieldValue{}, reporter.Errorf(interp.nodeInfo(fieldNode.Name.URLPrefix).Start(), "%vcould not resolve type reference %s", mc, fullURL) } // parse the message value - msgVal, valueSrcInfo, err := interp.messageLiteralValue(targetType, mc, anyFields, dynamicpb.NewMessage(anyMd), append(pathPrefix, internal.AnyValueTag)) - if err != nil { - return protoreflect.Value{}, sourceinfo.OptionSourceInfo{}, err - } else if !msgVal.IsValid() { - hadError = true - continue - } - - b, err := (proto.MarshalOptions{Deterministic: true}).Marshal(msgVal.Message().Interface()) + msgVal, err := interp.messageLiteralValue(mc, anyFields, anyMd) if err != nil { - err := interp.handleErrorf(interp.nodeInfo(fieldNode.Val), "%vfailed to serialize message value: %w", mc, err) - if err != nil { - return protoreflect.Value{}, sourceinfo.OptionSourceInfo{}, err - } - hadError = true - continue + return interpretedFieldValue{}, err } - // Success! - if !hadError { - msg.Set(typeURLDescriptor, protoreflect.ValueOfString(fullURL)) - msg.Set(valueDescriptor, protoreflect.ValueOfBytes(b)) - flds[fieldNode] = &valueSrcInfo + // Any is defined with two fields: + // string type_url = 1 + // bytes value = 2 + typeURLDescriptor := fmd.Fields().ByNumber(1) + if typeURLDescriptor == nil || typeURLDescriptor.Kind() != protoreflect.StringKind { + return interpretedFieldValue{}, reporter.Errorf(interp.nodeInfo(fieldNode.Name).Start(), "%vfailed to set type_url string field on Any: %w", mc, err) } - continue - } - - // Not expanded Any syntax; handle normal field. - var ffld protoreflect.FieldDescriptor - var err error - if fieldNode.Name.IsExtension() { - n := interp.file.ResolveMessageLiteralExtensionName(fieldNode.Name.Name) - if n == "" { - // this should not be possible! 
- n = string(fieldNode.Name.Name.AsIdentifier()) + fdm.Set(typeURLDescriptor, protoreflect.ValueOfString(fullURL)) + valueDescriptor := fmd.Fields().ByNumber(2) + if valueDescriptor == nil || valueDescriptor.Kind() != protoreflect.BytesKind { + return interpretedFieldValue{}, reporter.Errorf(interp.nodeInfo(fieldNode.Name).Start(), "%vfailed to set value bytes field on Any: %w", mc, err) } - ffld, err = interp.resolveExtensionType(n) - if errors.Is(err, protoregistry.NotFound) { - // may need to qualify with package name - // (this should not be necessary!) - pkg := mc.File.FileDescriptorProto().GetPackage() - if pkg != "" { - ffld, err = interp.resolveExtensionType(pkg + "." + n) - } + b, err := proto.MarshalOptions{Deterministic: true}.Marshal(msgVal.val.Message().Interface()) + if err != nil { + return interpretedFieldValue{}, reporter.Errorf(interp.nodeInfo(fieldNode.Val).Start(), "%vfailed to serialize message value: %w", mc, err) } + fdm.Set(valueDescriptor, protoreflect.ValueOfBytes(b)) } else { - ffld = fmd.Fields().ByName(protoreflect.Name(fieldNode.Name.Value())) - if ffld == nil { - err = protoregistry.NotFound - // It could be a proto2 group, where the text format refers to the group type - // name, and the field name is the lower-cased form of that. - ffld = fmd.Fields().ByName(protoreflect.Name(strings.ToLower(fieldNode.Name.Value()))) - if ffld != nil { - // In editions, we support using the group type name only for fields that - // "look like" proto2 groups. - if protoreflect.Name(fieldNode.Name.Value()) == ffld.Message().Name() && // text format uses type name - ffld.Message().FullName().Parent() == ffld.FullName().Parent() && // message and field declared in same scope - ffld.Kind() == protoreflect.GroupKind /* uses delimited encoding */ { - // This one looks like a proto2 group, so it's a keeper. - err = nil - } else { - // It doesn't look like a proto2 group, so this is not a match. - ffld = nil + var ffld protoreflect.FieldDescriptor + if fieldNode.Name.IsExtension() { + n := interp.file.ResolveMessageLiteralExtensionName(fieldNode.Name.Name) + if n == "" { + // this should not be possible! + n = string(fieldNode.Name.Name.AsIdentifier()) + } + ffld = interp.file.ResolveExtension(protoreflect.FullName(n)) + if ffld == nil { + // may need to qualify with package name + // (this should not be necessary!) + pkg := mc.File.FileDescriptorProto().GetPackage() + if pkg != "" { + ffld = interp.file.ResolveExtension(protoreflect.FullName(pkg + "." + n)) + } + } + } else { + ffld = fmd.Fields().ByName(protoreflect.Name(fieldNode.Name.Value())) + // Groups are indicated in the text format by the group name (which is + // camel-case), NOT the field name (which is lower-case). + // ...but only regular fields, not extensions that are groups... + if ffld != nil && ffld.Kind() == protoreflect.GroupKind && ffld.Message().Name() != protoreflect.Name(fieldNode.Name.Value()) { + // this is kind of silly to fail here, but this mimics protoc behavior + return interpretedFieldValue{}, reporter.Errorf(interp.nodeInfo(fieldNode.Name).Start(), "%vfield %s not found (did you mean the group named %s?)", mc, fieldNode.Name.Value(), ffld.Message().Name()) + } + if ffld == nil { + // could be a group name + for i := 0; i < fmd.Fields().Len(); i++ { + fd := fmd.Fields().Get(i) + if fd.Kind() == protoreflect.GroupKind && fd.Message().Name() == protoreflect.Name(fieldNode.Name.Value()) { + // found it! 
+ ffld = fd + break + } } } } - } - if errors.Is(err, protoregistry.NotFound) { - err := interp.handleErrorf(interp.nodeInfo(fieldNode.Name), "%vfield %s not found", mc, string(fieldNode.Name.Name.AsIdentifier())) - if err != nil { - return protoreflect.Value{}, sourceinfo.OptionSourceInfo{}, err + if ffld == nil { + return interpretedFieldValue{}, reporter.Errorf(interp.nodeInfo(fieldNode.Name).Start(), "%vfield %s not found", mc, string(fieldNode.Name.Name.AsIdentifier())) } - hadError = true - continue - } else if err != nil { - err := interp.handleErrorWithPos(interp.nodeInfo(fieldNode.Name), err) - if err != nil { - return protoreflect.Value{}, sourceinfo.OptionSourceInfo{}, err + if fieldNode.Sep == nil && ffld.Message() == nil { + // If there is no separator, the field type should be a message. + // Otherwise it is an error in the text format. + return interpretedFieldValue{}, reporter.Errorf(interp.nodeInfo(fieldNode.Val).Start(), "syntax error: unexpected value, expecting ':'") } - hadError = true - continue - } - if err := interp.checkFieldUsage(targetType, ffld, fieldNode.Name); err != nil { - return protoreflect.Value{}, sourceinfo.OptionSourceInfo{}, err - } - if fieldNode.Sep == nil && ffld.Message() == nil { - // If there is no separator, the field type should be a message. - // Otherwise, it is an error in the text format. - err := interp.handleErrorf(interp.nodeInfo(fieldNode.Val), "syntax error: unexpected value, expecting ':'") + res, err := interp.setOptionField(mc, fdm, ffld, fieldNode.Name, fieldNode.Val, true) if err != nil { - return protoreflect.Value{}, sourceinfo.OptionSourceInfo{}, err + return interpretedFieldValue{}, err } - hadError = true - continue - } - srcInfo, err := interp.setOptionField(targetType, mc, msg, ffld, fieldNode.Name, fieldNode.Val, true, append(pathPrefix, int32(ffld.Number()))) - if err != nil { - return protoreflect.Value{}, sourceinfo.OptionSourceInfo{}, err - } - if srcInfo != nil { - flds[fieldNode] = srcInfo + flds = append(flds, &interpretedField{ + number: int32(ffld.Number()), + kind: ffld.Kind(), + repeated: ffld.Cardinality() == protoreflect.Repeated, + packed: ffld.IsPacked(), + value: res, + // NB: no need to set index here, inside message literal + // (it is only used for top-level options, for emitting + // source code info) + }) } } - if hadError { - return protoreflect.Value{}, sourceinfo.OptionSourceInfo{}, nil - } - return protoreflect.ValueOfMessage(msg), - newSrcInfo(pathPrefix, &sourceinfo.MessageLiteralSourceInfo{Fields: flds}), - nil -} - -func newSrcInfo(path []int32, children sourceinfo.OptionChildrenSourceInfo) sourceinfo.OptionSourceInfo { - return sourceinfo.OptionSourceInfo{ - Path: internal.ClonePath(path), - Children: children, - } + return interpretedFieldValue{ + val: protoreflect.ValueOfMessage(fdm), + msgVal: flds, + }, nil } diff --git a/vendor/github.com/bufbuild/protocompile/options/source_retention_options.go b/vendor/github.com/bufbuild/protocompile/options/source_retention_options.go deleted file mode 100644 index 05c3e292..00000000 --- a/vendor/github.com/bufbuild/protocompile/options/source_retention_options.go +++ /dev/null @@ -1,539 +0,0 @@ -// Copyright 2020-2024 Buf Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package options - -import ( - "fmt" - - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/types/descriptorpb" - - "github.com/bufbuild/protocompile/internal" -) - -// StripSourceRetentionOptionsFromFile returns a file descriptor proto that omits any -// options in file that are defined to be retained only in source. If file has no -// such options, then it is returned as is. If it does have such options, a copy is -// made; the given file will not be mutated. -// -// Even when a copy is returned, it is not a deep copy: it may share data with the -// original file. So callers should not mutate the returned file unless mutating the -// input file is also safe. -func StripSourceRetentionOptionsFromFile(file *descriptorpb.FileDescriptorProto) (*descriptorpb.FileDescriptorProto, error) { - var path sourcePath - var removedPaths *sourcePathTrie - if file.SourceCodeInfo != nil && len(file.SourceCodeInfo.Location) > 0 { - path = make(sourcePath, 0, 16) - removedPaths = &sourcePathTrie{} - } - var dirty bool - optionsPath := path.push(internal.FileOptionsTag) - newOpts, err := stripSourceRetentionOptions(file.GetOptions(), optionsPath, removedPaths) - if err != nil { - return nil, err - } - if newOpts != file.GetOptions() { - dirty = true - } - msgsPath := path.push(internal.FileMessagesTag) - newMsgs, changed, err := stripOptionsFromAll(file.GetMessageType(), stripSourceRetentionOptionsFromMessage, msgsPath, removedPaths) - if err != nil { - return nil, err - } - if changed { - dirty = true - } - enumsPath := path.push(internal.FileEnumsTag) - newEnums, changed, err := stripOptionsFromAll(file.GetEnumType(), stripSourceRetentionOptionsFromEnum, enumsPath, removedPaths) - if err != nil { - return nil, err - } - if changed { - dirty = true - } - extsPath := path.push(internal.FileExtensionsTag) - newExts, changed, err := stripOptionsFromAll(file.GetExtension(), stripSourceRetentionOptionsFromField, extsPath, removedPaths) - if err != nil { - return nil, err - } - if changed { - dirty = true - } - svcsPath := path.push(internal.FileServicesTag) - newSvcs, changed, err := stripOptionsFromAll(file.GetService(), stripSourceRetentionOptionsFromService, svcsPath, removedPaths) - if err != nil { - return nil, err - } - if changed { - dirty = true - } - - if !dirty { - return file, nil - } - - newFile, err := shallowCopy(file) - if err != nil { - return nil, err - } - newFile.Options = newOpts - newFile.MessageType = newMsgs - newFile.EnumType = newEnums - newFile.Extension = newExts - newFile.Service = newSvcs - newFile.SourceCodeInfo = stripSourcePathsForSourceRetentionOptions(newFile.SourceCodeInfo, removedPaths) - return newFile, nil -} - -type sourcePath protoreflect.SourcePath - -func (p sourcePath) push(element int32) sourcePath { - if p == nil { - return nil - } - return append(p, element) -} - -type sourcePathTrie struct { - removed bool - children map[int32]*sourcePathTrie -} - -func (t *sourcePathTrie) addPath(p sourcePath) { - if t == nil { - return - } - if len(p) == 0 { - t.removed = true - return - } - 
child := t.children[p[0]] - if child == nil { - if t.children == nil { - t.children = map[int32]*sourcePathTrie{} - } - child = &sourcePathTrie{} - t.children[p[0]] = child - } - child.addPath(p[1:]) -} - -func (t *sourcePathTrie) isRemoved(p []int32) bool { - if t == nil { - return false - } - if t.removed { - return true - } - if len(p) == 0 { - return false - } - child := t.children[p[0]] - if child == nil { - return false - } - return child.isRemoved(p[1:]) -} - -func stripSourceRetentionOptions[M proto.Message]( - options M, - path sourcePath, - removedPaths *sourcePathTrie, -) (M, error) { - optionsRef := options.ProtoReflect() - // See if there are any options to strip. - var hasFieldToStrip bool - var numFieldsToKeep int - var err error - optionsRef.Range(func(field protoreflect.FieldDescriptor, _ protoreflect.Value) bool { - fieldOpts, ok := field.Options().(*descriptorpb.FieldOptions) - if !ok { - err = fmt.Errorf("field options is unexpected type: got %T, want %T", field.Options(), fieldOpts) - return false - } - if fieldOpts.GetRetention() == descriptorpb.FieldOptions_RETENTION_SOURCE { - hasFieldToStrip = true - } else { - numFieldsToKeep++ - } - return true - }) - var zero M - if err != nil { - return zero, err - } - if !hasFieldToStrip { - return options, nil - } - - if numFieldsToKeep == 0 { - // Stripping the message would remove *all* options. In that case, - // we'll clear out the options by returning the zero value (i.e. nil). - removedPaths.addPath(path) // clear out all source locations, too - return zero, nil - } - - // There is at least one option to remove. So we need to make a copy that does not have those options. - newOptions := optionsRef.New() - ret, ok := newOptions.Interface().(M) - if !ok { - return zero, fmt.Errorf("creating new message of same type resulted in unexpected type; got %T, want %T", newOptions.Interface(), zero) - } - optionsRef.Range(func(field protoreflect.FieldDescriptor, val protoreflect.Value) bool { - fieldOpts, ok := field.Options().(*descriptorpb.FieldOptions) - if !ok { - err = fmt.Errorf("field options is unexpected type: got %T, want %T", field.Options(), fieldOpts) - return false - } - if fieldOpts.GetRetention() != descriptorpb.FieldOptions_RETENTION_SOURCE { - newOptions.Set(field, val) - } else { - removedPaths.addPath(path.push(int32(field.Number()))) - } - return true - }) - if err != nil { - return zero, err - } - return ret, nil -} - -func stripSourceRetentionOptionsFromMessage( - msg *descriptorpb.DescriptorProto, - path sourcePath, - removedPaths *sourcePathTrie, -) (*descriptorpb.DescriptorProto, error) { - var dirty bool - optionsPath := path.push(internal.MessageOptionsTag) - newOpts, err := stripSourceRetentionOptions(msg.Options, optionsPath, removedPaths) - if err != nil { - return nil, err - } - if newOpts != msg.Options { - dirty = true - } - fieldsPath := path.push(internal.MessageFieldsTag) - newFields, changed, err := stripOptionsFromAll(msg.Field, stripSourceRetentionOptionsFromField, fieldsPath, removedPaths) - if err != nil { - return nil, err - } - if changed { - dirty = true - } - oneofsPath := path.push(internal.MessageOneofsTag) - newOneofs, changed, err := stripOptionsFromAll(msg.OneofDecl, stripSourceRetentionOptionsFromOneof, oneofsPath, removedPaths) - if err != nil { - return nil, err - } - if changed { - dirty = true - } - extRangesPath := path.push(internal.MessageExtensionRangesTag) - newExtRanges, changed, err := stripOptionsFromAll(msg.ExtensionRange, 
stripSourceRetentionOptionsFromExtensionRange, extRangesPath, removedPaths) - if err != nil { - return nil, err - } - if changed { - dirty = true - } - msgsPath := path.push(internal.MessageNestedMessagesTag) - newMsgs, changed, err := stripOptionsFromAll(msg.NestedType, stripSourceRetentionOptionsFromMessage, msgsPath, removedPaths) - if err != nil { - return nil, err - } - if changed { - dirty = true - } - enumsPath := path.push(internal.MessageEnumsTag) - newEnums, changed, err := stripOptionsFromAll(msg.EnumType, stripSourceRetentionOptionsFromEnum, enumsPath, removedPaths) - if err != nil { - return nil, err - } - if changed { - dirty = true - } - extsPath := path.push(internal.MessageExtensionsTag) - newExts, changed, err := stripOptionsFromAll(msg.Extension, stripSourceRetentionOptionsFromField, extsPath, removedPaths) - if err != nil { - return nil, err - } - if changed { - dirty = true - } - - if !dirty { - return msg, nil - } - - newMsg, err := shallowCopy(msg) - if err != nil { - return nil, err - } - newMsg.Options = newOpts - newMsg.Field = newFields - newMsg.OneofDecl = newOneofs - newMsg.ExtensionRange = newExtRanges - newMsg.NestedType = newMsgs - newMsg.EnumType = newEnums - newMsg.Extension = newExts - return newMsg, nil -} - -func stripSourceRetentionOptionsFromField( - field *descriptorpb.FieldDescriptorProto, - path sourcePath, - removedPaths *sourcePathTrie, -) (*descriptorpb.FieldDescriptorProto, error) { - optionsPath := path.push(internal.FieldOptionsTag) - newOpts, err := stripSourceRetentionOptions(field.Options, optionsPath, removedPaths) - if err != nil { - return nil, err - } - if newOpts == field.Options { - return field, nil - } - newField, err := shallowCopy(field) - if err != nil { - return nil, err - } - newField.Options = newOpts - return newField, nil -} - -func stripSourceRetentionOptionsFromOneof( - oneof *descriptorpb.OneofDescriptorProto, - path sourcePath, - removedPaths *sourcePathTrie, -) (*descriptorpb.OneofDescriptorProto, error) { - optionsPath := path.push(internal.OneofOptionsTag) - newOpts, err := stripSourceRetentionOptions(oneof.Options, optionsPath, removedPaths) - if err != nil { - return nil, err - } - if newOpts == oneof.Options { - return oneof, nil - } - newOneof, err := shallowCopy(oneof) - if err != nil { - return nil, err - } - newOneof.Options = newOpts - return newOneof, nil -} - -func stripSourceRetentionOptionsFromExtensionRange( - extRange *descriptorpb.DescriptorProto_ExtensionRange, - path sourcePath, - removedPaths *sourcePathTrie, -) (*descriptorpb.DescriptorProto_ExtensionRange, error) { - optionsPath := path.push(internal.ExtensionRangeOptionsTag) - newOpts, err := stripSourceRetentionOptions(extRange.Options, optionsPath, removedPaths) - if err != nil { - return nil, err - } - if newOpts == extRange.Options { - return extRange, nil - } - newExtRange, err := shallowCopy(extRange) - if err != nil { - return nil, err - } - newExtRange.Options = newOpts - return newExtRange, nil -} - -func stripSourceRetentionOptionsFromEnum( - enum *descriptorpb.EnumDescriptorProto, - path sourcePath, - removedPaths *sourcePathTrie, -) (*descriptorpb.EnumDescriptorProto, error) { - var dirty bool - optionsPath := path.push(internal.EnumOptionsTag) - newOpts, err := stripSourceRetentionOptions(enum.Options, optionsPath, removedPaths) - if err != nil { - return nil, err - } - if newOpts != enum.Options { - dirty = true - } - valsPath := path.push(internal.EnumValuesTag) - newVals, changed, err := stripOptionsFromAll(enum.Value, 
stripSourceRetentionOptionsFromEnumValue, valsPath, removedPaths) - if err != nil { - return nil, err - } - if changed { - dirty = true - } - - if !dirty { - return enum, nil - } - - newEnum, err := shallowCopy(enum) - if err != nil { - return nil, err - } - newEnum.Options = newOpts - newEnum.Value = newVals - return newEnum, nil -} - -func stripSourceRetentionOptionsFromEnumValue( - enumVal *descriptorpb.EnumValueDescriptorProto, - path sourcePath, - removedPaths *sourcePathTrie, -) (*descriptorpb.EnumValueDescriptorProto, error) { - optionsPath := path.push(internal.EnumValOptionsTag) - newOpts, err := stripSourceRetentionOptions(enumVal.Options, optionsPath, removedPaths) - if err != nil { - return nil, err - } - if newOpts == enumVal.Options { - return enumVal, nil - } - newEnumVal, err := shallowCopy(enumVal) - if err != nil { - return nil, err - } - newEnumVal.Options = newOpts - return newEnumVal, nil -} - -func stripSourceRetentionOptionsFromService( - svc *descriptorpb.ServiceDescriptorProto, - path sourcePath, - removedPaths *sourcePathTrie, -) (*descriptorpb.ServiceDescriptorProto, error) { - var dirty bool - optionsPath := path.push(internal.ServiceOptionsTag) - newOpts, err := stripSourceRetentionOptions(svc.Options, optionsPath, removedPaths) - if err != nil { - return nil, err - } - if newOpts != svc.Options { - dirty = true - } - methodsPath := path.push(internal.ServiceMethodsTag) - newMethods, changed, err := stripOptionsFromAll(svc.Method, stripSourceRetentionOptionsFromMethod, methodsPath, removedPaths) - if err != nil { - return nil, err - } - if changed { - dirty = true - } - - if !dirty { - return svc, nil - } - - newSvc, err := shallowCopy(svc) - if err != nil { - return nil, err - } - newSvc.Options = newOpts - newSvc.Method = newMethods - return newSvc, nil -} - -func stripSourceRetentionOptionsFromMethod( - method *descriptorpb.MethodDescriptorProto, - path sourcePath, - removedPaths *sourcePathTrie, -) (*descriptorpb.MethodDescriptorProto, error) { - optionsPath := path.push(internal.MethodOptionsTag) - newOpts, err := stripSourceRetentionOptions(method.Options, optionsPath, removedPaths) - if err != nil { - return nil, err - } - if newOpts == method.Options { - return method, nil - } - newMethod, err := shallowCopy(method) - if err != nil { - return nil, err - } - newMethod.Options = newOpts - return newMethod, nil -} - -func stripSourcePathsForSourceRetentionOptions( - sourceInfo *descriptorpb.SourceCodeInfo, - removedPaths *sourcePathTrie, -) *descriptorpb.SourceCodeInfo { - if sourceInfo == nil || len(sourceInfo.Location) == 0 || removedPaths == nil { - // nothing to do - return sourceInfo - } - newLocations := make([]*descriptorpb.SourceCodeInfo_Location, len(sourceInfo.Location)) - var i int - for _, loc := range sourceInfo.Location { - if removedPaths.isRemoved(loc.Path) { - continue - } - newLocations[i] = loc - i++ - } - newLocations = newLocations[:i] - return &descriptorpb.SourceCodeInfo{Location: newLocations} -} - -func shallowCopy[M proto.Message](msg M) (M, error) { - msgRef := msg.ProtoReflect() - other := msgRef.New() - ret, ok := other.Interface().(M) - if !ok { - return ret, fmt.Errorf("creating new message of same type resulted in unexpected type; got %T, want %T", other.Interface(), ret) - } - msgRef.Range(func(field protoreflect.FieldDescriptor, val protoreflect.Value) bool { - other.Set(field, val) - return true - }) - return ret, nil -} - -// stripOptionsFromAll applies the given function to each element in the given -// slice in order 
to remove source-retention options from it. It returns the new -// slice and a bool indicating whether anything was actually changed. If the -// second value is false, then the returned slice is the same slice as the input -// slice. Usually, T is a pointer type, in which case the given updateFunc should -// NOT mutate the input value. Instead, it should return the input value if only -// if there is no update needed. If a mutation is needed, it should return a new -// value. -func stripOptionsFromAll[T comparable]( - slice []T, - updateFunc func(T, sourcePath, *sourcePathTrie) (T, error), - path sourcePath, - removedPaths *sourcePathTrie, -) ([]T, bool, error) { - var updated []T // initialized lazily, only when/if a copy is needed - for i, item := range slice { - newItem, err := updateFunc(item, path.push(int32(i)), removedPaths) - if err != nil { - return nil, false, err - } - if updated != nil { - updated[i] = newItem - } else if newItem != item { - updated = make([]T, len(slice)) - copy(updated[:i], slice) - updated[i] = newItem - } - } - if updated != nil { - return updated, true, nil - } - return slice, false, nil -} diff --git a/vendor/github.com/bufbuild/protocompile/options/target_types.go b/vendor/github.com/bufbuild/protocompile/options/target_types.go deleted file mode 100644 index 0d780754..00000000 --- a/vendor/github.com/bufbuild/protocompile/options/target_types.go +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright 2020-2024 Buf Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package options - -import ( - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/types/descriptorpb" -) - -type optionsType[T any] interface { - *T - proto.Message - GetFeatures() *descriptorpb.FeatureSet - GetUninterpretedOption() []*descriptorpb.UninterpretedOption -} - -type elementType[OptsStruct any, Opts optionsType[OptsStruct]] interface { - proto.Message - GetOptions() Opts -} - -type targetType[Elem elementType[OptsStruct, Opts], OptsStruct any, Opts optionsType[OptsStruct]] struct { - t descriptorpb.FieldOptions_OptionTargetType - setUninterpretedOptions func(opts Opts, uninterpreted []*descriptorpb.UninterpretedOption) - setOptions func(elem Elem, opts Opts) -} - -var ( - targetTypeFile = newTargetType[*descriptorpb.FileDescriptorProto]( - descriptorpb.FieldOptions_TARGET_TYPE_FILE, setUninterpretedFileOptions, setFileOptions, - ) - targetTypeMessage = newTargetType[*descriptorpb.DescriptorProto]( - descriptorpb.FieldOptions_TARGET_TYPE_MESSAGE, setUninterpretedMessageOptions, setMessageOptions, - ) - targetTypeField = newTargetType[*descriptorpb.FieldDescriptorProto]( - descriptorpb.FieldOptions_TARGET_TYPE_FIELD, setUninterpretedFieldOptions, setFieldOptions, - ) - targetTypeOneof = newTargetType[*descriptorpb.OneofDescriptorProto]( - descriptorpb.FieldOptions_TARGET_TYPE_ONEOF, setUninterpretedOneofOptions, setOneofOptions, - ) - targetTypeExtensionRange = newTargetType[*descriptorpb.DescriptorProto_ExtensionRange]( - descriptorpb.FieldOptions_TARGET_TYPE_EXTENSION_RANGE, setUninterpretedExtensionRangeOptions, setExtensionRangeOptions, - ) - targetTypeEnum = newTargetType[*descriptorpb.EnumDescriptorProto]( - descriptorpb.FieldOptions_TARGET_TYPE_ENUM, setUninterpretedEnumOptions, setEnumOptions, - ) - targetTypeEnumValue = newTargetType[*descriptorpb.EnumValueDescriptorProto]( - descriptorpb.FieldOptions_TARGET_TYPE_ENUM_ENTRY, setUninterpretedEnumValueOptions, setEnumValueOptions, - ) - targetTypeService = newTargetType[*descriptorpb.ServiceDescriptorProto]( - descriptorpb.FieldOptions_TARGET_TYPE_SERVICE, setUninterpretedServiceOptions, setServiceOptions, - ) - targetTypeMethod = newTargetType[*descriptorpb.MethodDescriptorProto]( - descriptorpb.FieldOptions_TARGET_TYPE_METHOD, setUninterpretedMethodOptions, setMethodOptions, - ) -) - -func newTargetType[Elem elementType[OptsStruct, Opts], OptsStruct any, Opts optionsType[OptsStruct]]( - t descriptorpb.FieldOptions_OptionTargetType, - setUninterpretedOptions func(opts Opts, uninterpreted []*descriptorpb.UninterpretedOption), - setOptions func(elem Elem, opts Opts), -) *targetType[Elem, OptsStruct, Opts] { - return &targetType[Elem, OptsStruct, Opts]{ - t: t, - setUninterpretedOptions: setUninterpretedOptions, - setOptions: setOptions, - } -} - -func setUninterpretedFileOptions(opts *descriptorpb.FileOptions, uninterpreted []*descriptorpb.UninterpretedOption) { - opts.UninterpretedOption = uninterpreted -} - -func setUninterpretedMessageOptions(opts *descriptorpb.MessageOptions, uninterpreted []*descriptorpb.UninterpretedOption) { - opts.UninterpretedOption = uninterpreted -} - -func setUninterpretedFieldOptions(opts *descriptorpb.FieldOptions, uninterpreted []*descriptorpb.UninterpretedOption) { - opts.UninterpretedOption = uninterpreted -} - -func setUninterpretedOneofOptions(opts *descriptorpb.OneofOptions, uninterpreted []*descriptorpb.UninterpretedOption) { - opts.UninterpretedOption = uninterpreted -} - -func setUninterpretedExtensionRangeOptions(opts *descriptorpb.ExtensionRangeOptions, uninterpreted 
[]*descriptorpb.UninterpretedOption) { - opts.UninterpretedOption = uninterpreted -} - -func setUninterpretedEnumOptions(opts *descriptorpb.EnumOptions, uninterpreted []*descriptorpb.UninterpretedOption) { - opts.UninterpretedOption = uninterpreted -} - -func setUninterpretedEnumValueOptions(opts *descriptorpb.EnumValueOptions, uninterpreted []*descriptorpb.UninterpretedOption) { - opts.UninterpretedOption = uninterpreted -} - -func setUninterpretedServiceOptions(opts *descriptorpb.ServiceOptions, uninterpreted []*descriptorpb.UninterpretedOption) { - opts.UninterpretedOption = uninterpreted -} - -func setUninterpretedMethodOptions(opts *descriptorpb.MethodOptions, uninterpreted []*descriptorpb.UninterpretedOption) { - opts.UninterpretedOption = uninterpreted -} - -func setFileOptions(desc *descriptorpb.FileDescriptorProto, opts *descriptorpb.FileOptions) { - desc.Options = opts -} - -func setMessageOptions(desc *descriptorpb.DescriptorProto, opts *descriptorpb.MessageOptions) { - desc.Options = opts -} - -func setFieldOptions(desc *descriptorpb.FieldDescriptorProto, opts *descriptorpb.FieldOptions) { - desc.Options = opts -} - -func setOneofOptions(desc *descriptorpb.OneofDescriptorProto, opts *descriptorpb.OneofOptions) { - desc.Options = opts -} - -func setExtensionRangeOptions(desc *descriptorpb.DescriptorProto_ExtensionRange, opts *descriptorpb.ExtensionRangeOptions) { - desc.Options = opts -} - -func setEnumOptions(desc *descriptorpb.EnumDescriptorProto, opts *descriptorpb.EnumOptions) { - desc.Options = opts -} - -func setEnumValueOptions(desc *descriptorpb.EnumValueDescriptorProto, opts *descriptorpb.EnumValueOptions) { - desc.Options = opts -} - -func setServiceOptions(desc *descriptorpb.ServiceDescriptorProto, opts *descriptorpb.ServiceOptions) { - desc.Options = opts -} - -func setMethodOptions(desc *descriptorpb.MethodDescriptorProto, opts *descriptorpb.MethodOptions) { - desc.Options = opts -} diff --git a/vendor/github.com/bufbuild/protocompile/parser/ast.go b/vendor/github.com/bufbuild/protocompile/parser/ast.go index f58f7ae4..105502d4 100644 --- a/vendor/github.com/bufbuild/protocompile/parser/ast.go +++ b/vendor/github.com/bufbuild/protocompile/parser/ast.go @@ -1,4 +1,4 @@ -// Copyright 2020-2024 Buf Technologies, Inc. +// Copyright 2020-2022 Buf Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,67 +14,108 @@ package parser -import ( - "github.com/bufbuild/protocompile/ast" -) +import "github.com/bufbuild/protocompile/ast" -// the types below are accumulator types, just used in intermediate productions -// to accumulate slices that will get stored in AST nodes +// the types below are accumulator types: linked lists that are +// constructed during parsing and then converted to slices of AST nodes +// once the whole list has been parsed +// TODO: change grammar to use slices of nodes instead of these constructions -type compactOptionSlices struct { - options []*ast.OptionNode - commas []*ast.RuneNode +type compactOptionList struct { + option *ast.OptionNode + comma *ast.RuneNode + next *compactOptionList } -func toStringValueNode(strs []*ast.StringLiteralNode) ast.StringValueNode { - if len(strs) == 1 { - return strs[0] +func (list *compactOptionList) toNodes() ([]*ast.OptionNode, []*ast.RuneNode) { + l := 0 + for cur := list; cur != nil; cur = cur.next { + l++ } - return ast.NewCompoundLiteralStringNode(strs...) 
+ opts := make([]*ast.OptionNode, l) + commas := make([]*ast.RuneNode, l-1) + for cur, i := list, 0; cur != nil; cur, i = cur.next, i+1 { + opts[i] = cur.option + if cur.comma != nil { + commas[i] = cur.comma + } + } + return opts, commas } -type nameSlices struct { - // only names or idents will be set, never both - names []ast.StringValueNode - idents []*ast.IdentNode - commas []*ast.RuneNode +type stringList struct { + str *ast.StringLiteralNode + next *stringList } -type rangeSlices struct { - ranges []*ast.RangeNode - commas []*ast.RuneNode +func (list *stringList) toStringValueNode() ast.StringValueNode { + if list.next == nil { + // single name + return list.str + } + + l := 0 + for cur := list; cur != nil; cur = cur.next { + l++ + } + strs := make([]*ast.StringLiteralNode, l) + for cur, i := list, 0; cur != nil; cur, i = cur.next, i+1 { + strs[i] = cur.str + } + return ast.NewCompoundLiteralStringNode(strs...) } -type valueSlices struct { - vals []ast.ValueNode - commas []*ast.RuneNode +type nameList struct { + name ast.StringValueNode + comma *ast.RuneNode + next *nameList } -type fieldRefSlices struct { - refs []*ast.FieldReferenceNode - dots []*ast.RuneNode +func (list *nameList) toNodes() ([]ast.StringValueNode, []*ast.RuneNode) { + l := 0 + for cur := list; cur != nil; cur = cur.next { + l++ + } + names := make([]ast.StringValueNode, l) + commas := make([]*ast.RuneNode, l-1) + for cur, i := list, 0; cur != nil; cur, i = cur.next, i+1 { + names[i] = cur.name + if cur.comma != nil { + commas[i] = cur.comma + } + } + return names, commas } -type identSlices struct { - idents []*ast.IdentNode - dots []*ast.RuneNode +type rangeList struct { + rng *ast.RangeNode + comma *ast.RuneNode + next *rangeList } -func (s *identSlices) toIdentValueNode(leadingDot *ast.RuneNode) ast.IdentValueNode { - if len(s.idents) == 1 && leadingDot == nil { - // single simple name - return s.idents[0] +func (list *rangeList) toNodes() ([]*ast.RangeNode, []*ast.RuneNode) { + l := 0 + for cur := list; cur != nil; cur = cur.next { + l++ + } + ranges := make([]*ast.RangeNode, l) + commas := make([]*ast.RuneNode, l-1) + for cur, i := list, 0; cur != nil; cur, i = cur.next, i+1 { + ranges[i] = cur.rng + if cur.comma != nil { + commas[i] = cur.comma + } } - return ast.NewCompoundIdentNode(leadingDot, s.idents, s.dots) + return ranges, commas } -type messageFieldList struct { - field *ast.MessageFieldNode - delimiter *ast.RuneNode - next *messageFieldList +type valueList struct { + val ast.ValueNode + comma *ast.RuneNode + next *valueList } -func (list *messageFieldList) toNodes() ([]*ast.MessageFieldNode, []*ast.RuneNode) { +func (list *valueList) toNodes() ([]ast.ValueNode, []*ast.RuneNode) { if list == nil { return nil, nil } @@ -82,63 +123,94 @@ func (list *messageFieldList) toNodes() ([]*ast.MessageFieldNode, []*ast.RuneNod for cur := list; cur != nil; cur = cur.next { l++ } - fields := make([]*ast.MessageFieldNode, l) - delimiters := make([]*ast.RuneNode, l) + vals := make([]ast.ValueNode, l) + commas := make([]*ast.RuneNode, l-1) for cur, i := list, 0; cur != nil; cur, i = cur.next, i+1 { - fields[i] = cur.field - if cur.delimiter != nil { - delimiters[i] = cur.delimiter + vals[i] = cur.val + if cur.comma != nil { + commas[i] = cur.comma } } - return fields, delimiters + return vals, commas } -func prependRunes[T ast.Node](convert func(*ast.RuneNode) T, runes []*ast.RuneNode, elements []T) []T { - elems := make([]T, 0, len(runes)+len(elements)) - for _, rune := range runes { - elems = append(elems, 
convert(rune)) - } - elems = append(elems, elements...) - return elems +type fieldRefList struct { + ref *ast.FieldReferenceNode + dot *ast.RuneNode + next *fieldRefList } -func toServiceElement(semi *ast.RuneNode) ast.ServiceElement { - return ast.NewEmptyDeclNode(semi) -} +func (list *fieldRefList) toNodes() ([]*ast.FieldReferenceNode, []*ast.RuneNode) { + l := 0 + for cur := list; cur != nil; cur = cur.next { + l++ + } + refs := make([]*ast.FieldReferenceNode, l) + dots := make([]*ast.RuneNode, l-1) + for cur, i := list, 0; cur != nil; cur, i = cur.next, i+1 { + refs[i] = cur.ref + if cur.dot != nil { + dots[i] = cur.dot + } + } -func toMethodElement(semi *ast.RuneNode) ast.RPCElement { - return ast.NewEmptyDeclNode(semi) + return refs, dots } -func toFileElement(semi *ast.RuneNode) ast.FileElement { - return ast.NewEmptyDeclNode(semi) +type identList struct { + ident *ast.IdentNode + dot *ast.RuneNode + next *identList } -func toEnumElement(semi *ast.RuneNode) ast.EnumElement { - return ast.NewEmptyDeclNode(semi) -} +func (list *identList) toIdentValueNode(leadingDot *ast.RuneNode) ast.IdentValueNode { + if list.next == nil && leadingDot == nil { + // single name + return list.ident + } + + l := 0 + for cur := list; cur != nil; cur = cur.next { + l++ + } + idents := make([]*ast.IdentNode, l) + dots := make([]*ast.RuneNode, l-1) + for cur, i := list, 0; cur != nil; cur, i = cur.next, i+1 { + idents[i] = cur.ident + if cur.dot != nil { + dots[i] = cur.dot + } + } -func toMessageElement(semi *ast.RuneNode) ast.MessageElement { - return ast.NewEmptyDeclNode(semi) + return ast.NewCompoundIdentNode(leadingDot, idents, dots) } -type nodeWithRunes[T ast.Node] struct { - Node T - Runes []*ast.RuneNode +type messageFieldEntry struct { + field *ast.MessageFieldNode + delimiter *ast.RuneNode } -func newNodeWithRunes[T ast.Node](node T, trailingRunes ...*ast.RuneNode) nodeWithRunes[T] { - return nodeWithRunes[T]{ - Node: node, - Runes: trailingRunes, - } +type messageFieldList struct { + field *messageFieldEntry + next *messageFieldList } -func toElements[T ast.Node](convert func(*ast.RuneNode) T, node T, runes []*ast.RuneNode) []T { - elements := make([]T, 1+len(runes)) - elements[0] = node - for i, rune := range runes { - elements[i+1] = convert(rune) +func (list *messageFieldList) toNodes() ([]*ast.MessageFieldNode, []*ast.RuneNode) { + if list == nil { + return nil, nil + } + l := 0 + for cur := list; cur != nil; cur = cur.next { + l++ } - return elements + fields := make([]*ast.MessageFieldNode, l) + delimiters := make([]*ast.RuneNode, l) + for cur, i := list, 0; cur != nil; cur, i = cur.next, i+1 { + fields[i] = cur.field.field + if cur.field.delimiter != nil { + delimiters[i] = cur.field.delimiter + } + } + + return fields, delimiters } diff --git a/vendor/github.com/bufbuild/protocompile/parser/clone.go b/vendor/github.com/bufbuild/protocompile/parser/clone.go index 04322486..7c5505f2 100644 --- a/vendor/github.com/bufbuild/protocompile/parser/clone.go +++ b/vendor/github.com/bufbuild/protocompile/parser/clone.go @@ -1,4 +1,4 @@ -// Copyright 2020-2024 Buf Technologies, Inc. +// Copyright 2020-2022 Buf Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -106,7 +106,6 @@ func recreateNodeIndexForMessage(orig, clone *result, origProto, cloneProto *des } for i, origExtr := range origProto.ExtensionRange { cloneExtr := cloneProto.ExtensionRange[i] - updateNodeIndex(orig, clone, asExtsNode(origExtr), asExtsNode(cloneExtr)) updateNodeIndexWithOptions[*descriptorpb.ExtensionRangeOptions](orig, clone, origExtr, cloneExtr) } for i, origRr := range origProto.ReservedRange { diff --git a/vendor/github.com/bufbuild/protocompile/parser/doc.go b/vendor/github.com/bufbuild/protocompile/parser/doc.go index 40555543..d4e2c75a 100644 --- a/vendor/github.com/bufbuild/protocompile/parser/doc.go +++ b/vendor/github.com/bufbuild/protocompile/parser/doc.go @@ -1,4 +1,4 @@ -// Copyright 2020-2024 Buf Technologies, Inc. +// Copyright 2020-2022 Buf Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/bufbuild/protocompile/parser/errors.go b/vendor/github.com/bufbuild/protocompile/parser/errors.go index e78bddad..6e34bb8c 100644 --- a/vendor/github.com/bufbuild/protocompile/parser/errors.go +++ b/vendor/github.com/bufbuild/protocompile/parser/errors.go @@ -1,4 +1,4 @@ -// Copyright 2020-2024 Buf Technologies, Inc. +// Copyright 2020-2022 Buf Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/bufbuild/protocompile/parser/lexer.go b/vendor/github.com/bufbuild/protocompile/parser/lexer.go index 71cbc7ac..e4701faa 100644 --- a/vendor/github.com/bufbuild/protocompile/parser/lexer.go +++ b/vendor/github.com/bufbuild/protocompile/parser/lexer.go @@ -1,4 +1,4 @@ -// Copyright 2020-2024 Buf Technologies, Inc. +// Copyright 2020-2022 Buf Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -113,7 +113,6 @@ func newLexer(in io.Reader, filename string, handler *reporter.Handler) (*protoL var keywords = map[string]int{ "syntax": _SYNTAX, - "edition": _EDITION, "import": _IMPORT, "weak": _WEAK, "public": _PUBLIC, @@ -742,8 +741,7 @@ func (l *protoLex) skipToEndOfBlockComment(lval *protoSymType) (ok, hasErr bool) func (l *protoLex) addSourceError(err error) (reporter.ErrorWithPos, bool) { ewp, ok := err.(reporter.ErrorWithPos) if !ok { - // TODO: Store the previous span instead of just the position. - ewp = reporter.Error(ast.NewSourceSpan(l.prev(), l.prev()), err) + ewp = reporter.Error(l.prev(), err) } handlerErr := l.handler.HandleError(ewp) return ewp, handlerErr == nil @@ -753,19 +751,10 @@ func (l *protoLex) Error(s string) { _, _ = l.addSourceError(errors.New(s)) } -// TODO: Accept both a start and end offset, and use that to create a span. 
func (l *protoLex) errWithCurrentPos(err error, offset int) reporter.ErrorWithPos { if ewp, ok := err.(reporter.ErrorWithPos); ok { return ewp } pos := l.info.SourcePos(l.input.offset() + offset) - return reporter.Error(ast.NewSourceSpan(pos, pos), err) -} - -func (l *protoLex) requireSemicolon(semicolons []*ast.RuneNode) (*ast.RuneNode, []*ast.RuneNode) { - if len(semicolons) == 0 { - l.Error("syntax error: expecting ';'") - return nil, nil - } - return semicolons[0], semicolons[1:] + return reporter.Error(pos, err) } diff --git a/vendor/github.com/bufbuild/protocompile/parser/parser.go b/vendor/github.com/bufbuild/protocompile/parser/parser.go index 21314d5a..c6818637 100644 --- a/vendor/github.com/bufbuild/protocompile/parser/parser.go +++ b/vendor/github.com/bufbuild/protocompile/parser/parser.go @@ -1,4 +1,4 @@ -// Copyright 2020-2024 Buf Technologies, Inc. +// Copyright 2020-2022 Buf Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -25,8 +25,7 @@ import ( "github.com/bufbuild/protocompile/reporter" ) -// The path ../.tmp/bin/goyacc is built when using `make generate` from repo root. -//go:generate ../.tmp/bin/goyacc -o proto.y.go -l -p proto proto.y +//go:generate goyacc -o proto.y.go -l -p proto proto.y func init() { protoErrorVerbose = true @@ -151,23 +150,16 @@ type Result interface { // FileDescriptorProto hierarchy. If this result has no AST, this returns a // placeholder node. FieldNode(*descriptorpb.FieldDescriptorProto) ast.FieldDeclNode - // OneofNode returns the AST node corresponding to the given oneof. This can + // OneOfNode returns the AST node corresponding to the given oneof. This can // return nil, such as if the given oneof is not part of the // FileDescriptorProto hierarchy. If this result has no AST, this returns a // placeholder node. - OneofNode(*descriptorpb.OneofDescriptorProto) ast.OneofDeclNode + OneOfNode(*descriptorpb.OneofDescriptorProto) ast.Node // ExtensionRangeNode returns the AST node corresponding to the given // extension range. This can return nil, such as if the given range is not // part of the FileDescriptorProto hierarchy. If this result has no AST, // this returns a placeholder node. ExtensionRangeNode(*descriptorpb.DescriptorProto_ExtensionRange) ast.RangeDeclNode - - // ExtensionsNode returns the AST node corresponding to the "extensions" - // statement in a message that corresponds to the given range. This will be - // the parent of the node returned by ExtensionRangeNode, which contains the - // options that apply to all child ranges. - ExtensionsNode(*descriptorpb.DescriptorProto_ExtensionRange) ast.NodeWithOptions - // MessageReservedRangeNode returns the AST node corresponding to the given // reserved range. This can return nil, such as if the given range is not // part of the FileDescriptorProto hierarchy. If this result has no AST, @@ -177,7 +169,7 @@ type Result interface { // return nil, such as if the given enum is not part of the // FileDescriptorProto hierarchy. If this result has no AST, this returns a // placeholder node. - EnumNode(*descriptorpb.EnumDescriptorProto) ast.NodeWithOptions + EnumNode(*descriptorpb.EnumDescriptorProto) ast.Node // EnumValueNode returns the AST node corresponding to the given enum. This // can return nil, such as if the given enum value is not part of the // FileDescriptorProto hierarchy. 
If this result has no AST, this returns a @@ -192,7 +184,7 @@ type Result interface { // can return nil, such as if the given service is not part of the // FileDescriptorProto hierarchy. If this result has no AST, this returns a // placeholder node. - ServiceNode(*descriptorpb.ServiceDescriptorProto) ast.NodeWithOptions + ServiceNode(*descriptorpb.ServiceDescriptorProto) ast.Node // MethodNode returns the AST node corresponding to the given method. This // can return nil, such as if the given method is not part of the // FileDescriptorProto hierarchy. If this result has no AST, this returns a diff --git a/vendor/github.com/bufbuild/protocompile/parser/proto.y b/vendor/github.com/bufbuild/protocompile/parser/proto.y index e66cabda..78a6c806 100644 --- a/vendor/github.com/bufbuild/protocompile/parser/proto.y +++ b/vendor/github.com/bufbuild/protocompile/parser/proto.y @@ -5,7 +5,6 @@ package parser import ( "math" - "strings" "github.com/bufbuild/protocompile/ast" ) @@ -15,125 +14,122 @@ import ( // fields inside this union end up as the fields in a structure known // as ${PREFIX}SymType, of which a reference is passed to the lexer. %union{ - file *ast.FileNode - syn *ast.SyntaxNode - ed *ast.EditionNode - fileElements []ast.FileElement - pkg nodeWithRunes[*ast.PackageNode] - imprt nodeWithRunes[*ast.ImportNode] - msg nodeWithRunes[*ast.MessageNode] - msgElements []ast.MessageElement - fld *ast.FieldNode - msgFld nodeWithRunes[*ast.FieldNode] - mapFld nodeWithRunes[*ast.MapFieldNode] - mapType *ast.MapTypeNode - grp *ast.GroupNode - msgGrp nodeWithRunes[*ast.GroupNode] - oo nodeWithRunes[*ast.OneofNode] - ooElement ast.OneofElement - ooElements []ast.OneofElement - ext nodeWithRunes[*ast.ExtensionRangeNode] - resvd nodeWithRunes[*ast.ReservedNode] - en nodeWithRunes[*ast.EnumNode] - enElements []ast.EnumElement - env nodeWithRunes[*ast.EnumValueNode] - extend nodeWithRunes[*ast.ExtendNode] - extElement ast.ExtendElement - extElements []ast.ExtendElement - svc nodeWithRunes[*ast.ServiceNode] - svcElements []ast.ServiceElement - mtd nodeWithRunes[*ast.RPCNode] - mtdMsgType *ast.RPCTypeNode - mtdElements []ast.RPCElement - optRaw *ast.OptionNode - opt nodeWithRunes[*ast.OptionNode] - opts *compactOptionSlices - refRaw *ast.FieldReferenceNode - ref nodeWithRunes[*ast.FieldReferenceNode] - optNms *fieldRefSlices - cmpctOpts *ast.CompactOptionsNode - rng *ast.RangeNode - rngs *rangeSlices - names *nameSlices - cidPart nodeWithRunes[*ast.IdentNode] - cid *identSlices - tid ast.IdentValueNode - sl *valueSlices - msgLitFlds *messageFieldList - msgLitFld *ast.MessageFieldNode - v ast.ValueNode - il ast.IntValueNode - str []*ast.StringLiteralNode - s *ast.StringLiteralNode - i *ast.UintLiteralNode - f *ast.FloatLiteralNode - id *ast.IdentNode - b *ast.RuneNode - bs []*ast.RuneNode - err error + file *ast.FileNode + syn *ast.SyntaxNode + fileDecl ast.FileElement + fileDecls []ast.FileElement + pkg *ast.PackageNode + imprt *ast.ImportNode + msg *ast.MessageNode + msgDecl ast.MessageElement + msgDecls []ast.MessageElement + fld *ast.FieldNode + mapFld *ast.MapFieldNode + mapType *ast.MapTypeNode + grp *ast.GroupNode + oo *ast.OneOfNode + ooDecl ast.OneOfElement + ooDecls []ast.OneOfElement + ext *ast.ExtensionRangeNode + resvd *ast.ReservedNode + en *ast.EnumNode + enDecl ast.EnumElement + enDecls []ast.EnumElement + env *ast.EnumValueNode + extend *ast.ExtendNode + extDecl ast.ExtendElement + extDecls []ast.ExtendElement + svc *ast.ServiceNode + svcDecl ast.ServiceElement + svcDecls []ast.ServiceElement 
+ mtd *ast.RPCNode
+ rpcType *ast.RPCTypeNode
+ rpcDecl ast.RPCElement
+ rpcDecls []ast.RPCElement
+ opt *ast.OptionNode
+ opts *compactOptionList
+ ref *ast.FieldReferenceNode
+ optNms *fieldRefList
+ cmpctOpts *ast.CompactOptionsNode
+ rng *ast.RangeNode
+ rngs *rangeList
+ names *nameList
+ cid *identList
+ tid ast.IdentValueNode
+ sl *valueList
+ msgField *ast.MessageFieldNode
+ msgEntry *messageFieldEntry
+ msgLit *messageFieldList
+ v ast.ValueNode
+ il ast.IntValueNode
+ str *stringList
+ s *ast.StringLiteralNode
+ i *ast.UintLiteralNode
+ f *ast.FloatLiteralNode
+ id *ast.IdentNode
+ b *ast.RuneNode
+ err error
 }
 
 // any non-terminal which returns a value needs a type, which is
 // really a field name in the above union struct
-%type <file> file
-%type <syn> syntaxDecl
-%type <ed> editionDecl
-%type <fileElements> fileBody fileElement fileElements
-%type <imprt> importDecl
-%type <pkg> packageDecl
-%type <optRaw> compactOption oneofOptionDecl
-%type <opt> optionDecl compactOptionEntry compactOptionFinal
-%type <opts> compactOptionDecls compactOptionLeadingDecls
-%type <refRaw> extensionName messageLiteralFieldName optionNamePart
-%type <ref> optionNameEntry optionNameFinal
-%type <optNms> optionName optionNameLeading
-%type <cmpctOpts> compactOptions
-%type <v> fieldValue optionValue scalarValue fieldScalarValue messageLiteralWithBraces messageLiteral numLit specialFloatLit listLiteral listElement listOfMessagesLiteral messageValue
-%type <il> enumValueNumber
-%type <id> identifier mapKeyType msgElementName extElementName oneofElementName notGroupElementName mtdElementName enumValueName fieldCardinality
-%type <cidPart> qualifiedIdentifierEntry qualifiedIdentifierFinal mtdElementIdentEntry mtdElementIdentFinal
-%type <cid> qualifiedIdentifier msgElementIdent extElementIdent oneofElementIdent notGroupElementIdent mtdElementIdent qualifiedIdentifierDot qualifiedIdentifierLeading mtdElementIdentLeading
-%type <tid> typeName msgElementTypeIdent extElementTypeIdent oneofElementTypeIdent notGroupElementTypeIdent mtdElementTypeIdent
-%type <sl> listElements messageLiterals
-%type <msgLitFlds> messageLiteralFieldEntry messageLiteralFields messageTextFormat
-%type <msgLitFld> messageLiteralField
-%type <msgFld> messageFieldDecl
-%type <fld> oneofFieldDecl extensionFieldDecl
-%type <oo> oneofDecl
-%type <grp> groupDecl oneofGroupDecl
-%type <msgGrp> messageGroupDecl
-%type <mapFld> mapFieldDecl
-%type <mapType> mapType
-%type <msg> messageDecl
-%type <msgElements> messageElement messageElements messageBody
-%type <ooElement> oneofElement
-%type <ooElements> oneofElements oneofBody
-%type <names> fieldNameStrings fieldNameIdents
-%type <resvd> msgReserved enumReserved reservedNames
-%type <rng> tagRange enumValueRange
-%type <rngs> tagRanges enumValueRanges
-%type <ext> extensionRangeDecl
-%type <en> enumDecl
-%type <enElements> enumElement enumElements enumBody
-%type <env> enumValueDecl
-%type <extend> extensionDecl
-%type <extElement> extensionElement
-%type <extElements> extensionElements extensionBody
-%type <str> stringLit
-%type <svc> serviceDecl
-%type <svcElements> serviceElement serviceElements serviceBody
-%type <mtd> methodDecl
-%type <mtdElements> methodElement methodElements methodBody
-%type <mtdMsgType> methodMessageType
-%type <b> semicolon
-%type <bs> semicolons semicolonList
+%type <file> file
+%type <syn> syntax
+%type <fileDecl> fileDecl
+%type <fileDecls> fileDecls
+%type <imprt> import
+%type <pkg> package
+%type <opt> option compactOption
+%type <opts> compactOptionDecls
+%type <rpcDecl> rpcDecl
+%type <rpcDecls> rpcDecls
+%type <ref> optionNameComponent aggName
+%type <optNms> optionName
+%type <cmpctOpts> compactOptions
+%type <v> constant scalarConstant aggregate msgLit numLit
+%type <il> intLit
+%type <id> name keyType msgElementName extElementName oneofElementName enumElementName
+%type <cid> ident msgElementIdent extElementIdent oneofElementIdent
+%type <tid> typeIdent msgElementTypeIdent extElementTypeIdent oneofElementTypeIdent
+%type <sl> constantList msgList
+%type <msgField> aggFieldEntry
+%type <msgEntry> aggField
+%type <msgLit> aggFields
+%type <fld> msgField oneofField extField
+%type <oo> oneof
+%type <grp> group oneofGroup
+%type <mapFld> mapField
+%type <mapType> mapType
+%type <msg> message
+%type <msgDecl> messageDecl
+%type <msgDecls> messageDecls
+%type <ooDecl> ooDecl
+%type <ooDecls> ooDecls
+%type <names> fieldNames
+%type <resvd> msgReserved enumReserved reservedNames
+%type <rng> tagRange enumRange
+%type <rngs> tagRanges enumRanges
+%type <ext> extensions
+%type <en> enum
+%type <enDecl> enumDecl
+%type <enDecls> enumDecls
+%type <env> enumValue
+%type <extend> extend
+%type <extDecl> extendDecl
+%type <extDecls> extendDecls
+%type <str> stringLit
+%type <svc> service
+%type <svcDecl> serviceDecl
+%type <svcDecls> serviceDecls
+%type <mtd> rpc
+%type <rpcType> rpcType
 
 // same for terminals
 %token <s> _STRING_LIT
 %token <i> _INT_LIT
 %token <f> _FLOAT_LIT
 %token <id> _NAME
-%token <id> _SYNTAX _EDITION _IMPORT _WEAK _PUBLIC _PACKAGE _OPTION _TRUE _FALSE _INF _NAN _REPEATED _OPTIONAL _REQUIRED
+%token <id> _SYNTAX _IMPORT _WEAK _PUBLIC _PACKAGE _OPTION _TRUE _FALSE _INF _NAN _REPEATED _OPTIONAL _REQUIRED
 %token <id> _DOUBLE _FLOAT _INT32 _INT64 _UINT32 _UINT64 _SINT32 _SINT64 _FIXED32 _FIXED64 _SFIXED32 _SFIXED64
 %token <id> _BOOL _STRING _BYTES _GROUP _ONEOF _MAP _EXTENSIONS _TO _MAX _RESERVED _ENUM _MESSAGE _EXTEND
 %token <id> _SERVICE _RPC _STREAM _RETURNS
@@ -144,158 +140,96 @@
 
 %%
 
-file : syntaxDecl {
+file : syntax {
 lex := protolex.(*protoLex)
 $$ = ast.NewFileNode(lex.info, $1, nil, lex.eof)
 lex.res = $$
 }
- | editionDecl {
- lex := protolex.(*protoLex)
- $$ = ast.NewFileNodeWithEdition(lex.info, $1, nil, lex.eof)
- lex.res = $$
- }
- | fileBody {
+ | fileDecls {
 lex := protolex.(*protoLex)
 $$ = ast.NewFileNode(lex.info, nil, $1, lex.eof)
 lex.res = $$
 }
- | syntaxDecl fileBody {
+ | syntax fileDecls {
 lex := protolex.(*protoLex)
 $$ = ast.NewFileNode(lex.info, $1, $2, lex.eof)
 lex.res = $$
 }
- | editionDecl fileBody {
- lex := protolex.(*protoLex)
- $$ = ast.NewFileNodeWithEdition(lex.info, $1, $2, lex.eof)
- lex.res = $$
- }
 | {
 lex := protolex.(*protoLex)
 $$ = ast.NewFileNode(lex.info, nil, nil, lex.eof)
 lex.res = $$
 }
 
-fileBody : semicolons fileElements {
- $$ = prependRunes(toFileElement, $1, $2)
- }
-
-fileElements : fileElements fileElement {
- $$ = append($1, $2...)
- } - | fileElement { - $$ = $1 - } - -fileElement : importDecl { - $$ = toElements[ast.FileElement](toFileElement, $1.Node, $1.Runes) - } - | packageDecl { - $$ = toElements[ast.FileElement](toFileElement, $1.Node, $1.Runes) - } - | optionDecl { - $$ = toElements[ast.FileElement](toFileElement, $1.Node, $1.Runes) - } - | messageDecl { - $$ = toElements[ast.FileElement](toFileElement, $1.Node, $1.Runes) - } - | enumDecl { - $$ = toElements[ast.FileElement](toFileElement, $1.Node, $1.Runes) - } - | extensionDecl { - $$ = toElements[ast.FileElement](toFileElement, $1.Node, $1.Runes) - } - | serviceDecl { - $$ = toElements[ast.FileElement](toFileElement, $1.Node, $1.Runes) +fileDecls : fileDecls fileDecl { + if $2 != nil { + $$ = append($1, $2) + } else { + $$ = $1 + } } - | error { - $$ = nil + | fileDecl { + if $1 != nil { + $$ = []ast.FileElement{$1} + } else { + $$ = nil + } } -semicolonList : ';' { - $$ = []*ast.RuneNode{$1} +fileDecl : import { + $$ = $1 } - | semicolonList ';' { - $$ = append($1, $2) + | package { + $$ = $1 } - -semicolons : semicolonList { + | option { $$ = $1 } - | { - $$ = nil + | message { + $$ = $1 } - -semicolon : ';' { + | enum { $$ = $1 - } | - { - protolex.(*protoLex).Error("syntax error: expecting ';'") - $$ = nil } - -syntaxDecl : _SYNTAX '=' stringLit ';' { - $$ = ast.NewSyntaxNode($1.ToKeyword(), $2, toStringValueNode($3), $4) + | extend { + $$ = $1 } - -editionDecl : _EDITION '=' stringLit ';' { - $$ = ast.NewEditionNode($1.ToKeyword(), $2, toStringValueNode($3), $4) + | service { + $$ = $1 } - -importDecl : _IMPORT stringLit semicolons { - semi, extra := protolex.(*protoLex).requireSemicolon($3) - $$ = newNodeWithRunes(ast.NewImportNode($1.ToKeyword(), nil, nil, toStringValueNode($2), semi), extra...) + | ';' { + $$ = ast.NewEmptyDeclNode($1) } - | _IMPORT _WEAK stringLit semicolons { - semi, extra := protolex.(*protoLex).requireSemicolon($4) - $$ = newNodeWithRunes(ast.NewImportNode($1.ToKeyword(), nil, $2.ToKeyword(), toStringValueNode($3), semi), extra...) + | error ';' { + $$ = nil } - | _IMPORT _PUBLIC stringLit semicolons { - semi, extra := protolex.(*protoLex).requireSemicolon($4) - $$ = newNodeWithRunes(ast.NewImportNode($1.ToKeyword(), $2.ToKeyword(), nil, toStringValueNode($3), semi), extra...) + | error { + $$ = nil } -packageDecl : _PACKAGE qualifiedIdentifier semicolons { - semi, extra := protolex.(*protoLex).requireSemicolon($3) - $$ = newNodeWithRunes(ast.NewPackageNode($1.ToKeyword(), $2.toIdentValueNode(nil), semi), extra...) +syntax : _SYNTAX '=' stringLit ';' { + $$ = ast.NewSyntaxNode($1.ToKeyword(), $2, $3.toStringValueNode(), $4) } -qualifiedIdentifier : identifier { - $$ = &identSlices{idents: []*ast.IdentNode{$1}} +import : _IMPORT stringLit ';' { + $$ = ast.NewImportNode($1.ToKeyword(), nil, nil, $2.toStringValueNode(), $3) } - | qualifiedIdentifier '.' identifier { - $1.idents = append($1.idents, $3) - $1.dots = append($1.dots, $2) - $$ = $1 - } - -qualifiedIdentifierDot : qualifiedIdentifierFinal { - $$ = &identSlices{idents: []*ast.IdentNode{$1.Node}, dots: $1.Runes} + | _IMPORT _WEAK stringLit ';' { + $$ = ast.NewImportNode($1.ToKeyword(), nil, $2.ToKeyword(), $3.toStringValueNode(), $4) } - | qualifiedIdentifierLeading qualifiedIdentifierFinal { - $1.idents = append($1.idents, $2.Node) - $1.dots = append($1.dots, $2.Runes...) 
- $$ = $1 + | _IMPORT _PUBLIC stringLit ';' { + $$ = ast.NewImportNode($1.ToKeyword(), $2.ToKeyword(), nil, $3.toStringValueNode(), $4) } -qualifiedIdentifierLeading : qualifiedIdentifierEntry { - $$ = &identSlices{idents: []*ast.IdentNode{$1.Node}, dots: $1.Runes} - } - | qualifiedIdentifierLeading qualifiedIdentifierEntry { - $1.idents = append($1.idents, $2.Node) - $1.dots = append($1.dots, $2.Runes...) - $$ = $1 +package : _PACKAGE ident ';' { + $$ = ast.NewPackageNode($1.ToKeyword(), $2.toIdentValueNode(nil), $3) } -qualifiedIdentifierFinal : identifier { - $$ = newNodeWithRunes($1) +ident : name { + $$ = &identList{$1, nil, nil} } - | qualifiedIdentifierEntry { - protolex.(*protoLex).Error("syntax error: unexpected '.'") - $$ = $1 - } - -qualifiedIdentifierEntry : identifier '.' { - $$ = newNodeWithRunes($1, $2) + | name '.' ident { + $$ = &identList{$1, $2, $3} } // to mimic limitations of protoc recursive-descent parser, @@ -303,132 +237,54 @@ qualifiedIdentifierEntry : identifier '.' { // (or oneof statement keywords [e.g. "option"] below) msgElementIdent : msgElementName { - $$ = &identSlices{idents: []*ast.IdentNode{$1}} + $$ = &identList{$1, nil, nil} } - | msgElementIdent '.' identifier { - $1.idents = append($1.idents, $3) - $1.dots = append($1.dots, $2) - $$ = $1 + | msgElementName '.' ident { + $$ = &identList{$1, $2, $3} } extElementIdent : extElementName { - $$ = &identSlices{idents: []*ast.IdentNode{$1}} + $$ = &identList{$1, nil, nil} } - | extElementIdent '.' identifier { - $1.idents = append($1.idents, $3) - $1.dots = append($1.dots, $2) - $$ = $1 + | extElementName '.' ident { + $$ = &identList{$1, $2, $3} } oneofElementIdent : oneofElementName { - $$ = &identSlices{idents: []*ast.IdentNode{$1}} - } - | oneofElementIdent '.' identifier { - $1.idents = append($1.idents, $3) - $1.dots = append($1.dots, $2) - $$ = $1 - } - -notGroupElementIdent : notGroupElementName { - $$ = &identSlices{idents: []*ast.IdentNode{$1}} - } - | notGroupElementIdent '.' identifier { - $1.idents = append($1.idents, $3) - $1.dots = append($1.dots, $2) - $$ = $1 - } - -mtdElementIdent : mtdElementIdentFinal { - $$ = &identSlices{idents: []*ast.IdentNode{$1.Node}, dots: $1.Runes} - } - | mtdElementIdentLeading mtdElementIdentFinal { - $1.idents = append($1.idents, $2.Node) - $1.dots = append($1.dots, $2.Runes...) - $$ = $1 + $$ = &identList{$1, nil, nil} } - -mtdElementIdentLeading : mtdElementIdentEntry { - $$ = &identSlices{idents: []*ast.IdentNode{$1.Node}, dots: $1.Runes} - } - | mtdElementIdentLeading mtdElementIdentEntry { - $1.idents = append($1.idents, $2.Node) - $1.dots = append($1.dots, $2.Runes...) - $$ = $1 - } - -mtdElementIdentFinal : mtdElementName { - $$ = newNodeWithRunes($1) - } - | mtdElementIdentEntry { - protolex.(*protoLex).Error("syntax error: unexpected '.'") - $$ = $1 - } - -mtdElementIdentEntry : mtdElementName '.' { - $$ = newNodeWithRunes($1, $2) + | oneofElementName '.' 
ident { + $$ = &identList{$1, $2, $3} } -oneofOptionDecl : _OPTION optionName '=' optionValue semicolon { - optName := ast.NewOptionNameNode($2.refs, $2.dots) +option : _OPTION optionName '=' constant ';' { + refs, dots := $2.toNodes() + optName := ast.NewOptionNameNode(refs, dots) $$ = ast.NewOptionNode($1.ToKeyword(), optName, $3, $4, $5) } -optionDecl : _OPTION optionName '=' optionValue semicolons { - optName := ast.NewOptionNameNode($2.refs, $2.dots) - semi, extra := protolex.(*protoLex).requireSemicolon($5) - $$ = newNodeWithRunes(ast.NewOptionNode($1.ToKeyword(), optName, $3, $4, semi), extra...) - } - -optionNamePart : identifier { - $$ = ast.NewFieldReferenceNode($1) +optionName : optionNameComponent { + $$ = &fieldRefList{$1, nil, nil} } - | extensionName { - $$ = $1 - } - -optionNameEntry : optionNamePart '.' { - $$ = newNodeWithRunes($1, $2) + | optionNameComponent '.' optionName { + $$ = &fieldRefList{$1, $2, $3} } -optionNameFinal : optionNamePart { - $$ = newNodeWithRunes($1) - } - | optionNameEntry { - protolex.(*protoLex).Error("syntax error: unexpected '.'") - $$ = $1 - } - -optionNameLeading : optionNameEntry { - $$ = &fieldRefSlices{refs: []*ast.FieldReferenceNode{$1.Node}, dots: $1.Runes} - } - | optionNameLeading optionNameEntry { - $1.refs = append($1.refs, $2.Node) - $1.dots = append($1.dots, $2.Runes...) - $$ = $1 - } - -optionName : optionNameFinal { - $$ = &fieldRefSlices{refs: []*ast.FieldReferenceNode{$1.Node}, dots: $1.Runes} - } - | optionNameLeading optionNameFinal { - $1.refs = append($1.refs, $2.Node) - $1.dots = append($1.dots, $2.Runes...) - $$ = $1 +optionNameComponent : name { + $$ = ast.NewFieldReferenceNode($1) } - -extensionName : '(' typeName ')' { + | '(' typeIdent ')' { $$ = ast.NewExtensionFieldReferenceNode($1, $2, $3) } -optionValue : scalarValue - | messageLiteralWithBraces +constant : scalarConstant + | aggregate -scalarValue : stringLit { - $$ = toStringValueNode($1) +scalarConstant : stringLit { + $$ = $1.toStringValueNode() } | numLit - | specialFloatLit - | identifier { + | name { $$ = $1 } @@ -438,9 +294,23 @@ numLit : _FLOAT_LIT { | '-' _FLOAT_LIT { $$ = ast.NewSignedFloatLiteralNode($1, $2) } + | '+' _FLOAT_LIT { + $$ = ast.NewSignedFloatLiteralNode($1, $2) + } + | '+' _INF { + f := ast.NewSpecialFloatLiteralNode($2.ToKeyword()) + $$ = ast.NewSignedFloatLiteralNode($1, f) + } + | '-' _INF { + f := ast.NewSpecialFloatLiteralNode($2.ToKeyword()) + $$ = ast.NewSignedFloatLiteralNode($1, f) + } | _INT_LIT { $$ = $1 } + | '+' _INT_LIT { + $$ = ast.NewPositiveUintLiteralNode($1, $2) + } | '-' _INT_LIT { if $2.Val > math.MaxInt64 + 1 { // can't represent as int so treat as float literal @@ -450,63 +320,56 @@ numLit : _FLOAT_LIT { } } -specialFloatLit : '-' _INF { - f := ast.NewSpecialFloatLiteralNode($2.ToKeyword()) - $$ = ast.NewSignedFloatLiteralNode($1, f) +stringLit : _STRING_LIT { + $$ = &stringList{$1, nil} } - | '-' _NAN { - f := ast.NewSpecialFloatLiteralNode($2.ToKeyword()) - $$ = ast.NewSignedFloatLiteralNode($1, f) + | _STRING_LIT stringLit { + $$ = &stringList{$1, $2} } -stringLit : _STRING_LIT { - $$ = []*ast.StringLiteralNode{$1} +aggregate : '{' aggFields '}' { + fields, delims := $2.toNodes() + $$ = ast.NewMessageLiteralNode($1, fields, delims, $3) } - | stringLit _STRING_LIT { - $$ = append($1, $2) + | '{' error '}' { + $$ = nil } -messageLiteralWithBraces : '{' messageTextFormat '}' { - if $2 == nil { - $$ = ast.NewMessageLiteralNode($1, nil, nil, $3) +aggFields : aggField { + if $1 != nil { + $$ = &messageFieldList{$1, 
nil} } else { - fields, delimiters := $2.toNodes() - $$ = ast.NewMessageLiteralNode($1, fields, delimiters, $3) + $$ = nil } } - | '{' '}' { - $$ = ast.NewMessageLiteralNode($1, nil, nil, $2) - } - -messageTextFormat : messageLiteralFields - -messageLiteralFields : messageLiteralFieldEntry - | messageLiteralFieldEntry messageLiteralFields { + | aggField aggFields { if $1 != nil { - $1.next = $2 - $$ = $1 + $$ = &messageFieldList{$1, $2} } else { $$ = $2 } } + | { + $$ = nil + } -messageLiteralFieldEntry : messageLiteralField { +aggField : aggFieldEntry { if $1 != nil { - $$ = &messageFieldList{field: $1} + $$ = &messageFieldEntry{$1, nil} } else { $$ = nil } } - | messageLiteralField ',' { + | aggFieldEntry ',' { if $1 != nil { - $$ = &messageFieldList{field: $1, delimiter: $2} + $$ = &messageFieldEntry{$1, $2} } else { $$ = nil } } - | messageLiteralField ';' { + | aggFieldEntry ';' { if $1 != nil { - $$ = &messageFieldList{field: $1, delimiter: $2} + $$ = &messageFieldEntry{$1, $2} } else { $$ = nil } @@ -521,273 +384,280 @@ messageLiteralFieldEntry : messageLiteralField { $$ = nil } -messageLiteralField : messageLiteralFieldName ':' fieldValue { - if $1 != nil && $2 != nil { +aggFieldEntry : aggName ':' scalarConstant { + if $1 != nil { $$ = ast.NewMessageFieldNode($1, $2, $3) } else { $$ = nil } } - | messageLiteralFieldName messageValue { + | aggName '[' ']' { + if $1 != nil { + val := ast.NewArrayLiteralNode($2, nil, nil, $3) + $$ = ast.NewMessageFieldNode($1, nil, val) + } else { + $$ = nil + } + } + | aggName ':' '[' ']' { + if $1 != nil { + val := ast.NewArrayLiteralNode($3, nil, nil, $4) + $$ = ast.NewMessageFieldNode($1, $2, val) + } else { + $$ = nil + } + } + | aggName '[' msgList ']' { + if $1 != nil { + vals, commas := $3.toNodes() + val := ast.NewArrayLiteralNode($2, vals, commas, $4) + $$ = ast.NewMessageFieldNode($1, nil, val) + } else { + $$ = nil + } + } + | aggName ':' '[' constantList ']' { + if $1 != nil { + vals, commas := $4.toNodes() + val := ast.NewArrayLiteralNode($3, vals, commas, $5) + $$ = ast.NewMessageFieldNode($1, $2, val) + } else { + $$ = nil + } + } + | aggName ':' '[' error ']' { + $$ = nil + } + | aggName '[' error ']' { + $$ = nil + } + | aggName ':' msgLit { + if $1 != nil && $3 != nil { + $$ = ast.NewMessageFieldNode($1, $2, $3) + } else { + $$ = nil + } + } + | aggName msgLit { if $1 != nil && $2 != nil { $$ = ast.NewMessageFieldNode($1, nil, $2) } else { $$ = nil } } - | error ':' fieldValue { + | aggName ':' '<' error '>' { + $$ = nil + } + | aggName '<' error '>' { $$ = nil } -messageLiteralFieldName : identifier { +aggName : name { $$ = ast.NewFieldReferenceNode($1) } - | '[' qualifiedIdentifierDot ']' { + | '[' ident ']' { $$ = ast.NewExtensionFieldReferenceNode($1, $2.toIdentValueNode(nil), $3) } - | '[' qualifiedIdentifierDot '/' qualifiedIdentifierDot ']' { + | '[' ident '/' ident ']' { $$ = ast.NewAnyTypeReferenceNode($1, $2.toIdentValueNode(nil), $3, $4.toIdentValueNode(nil), $5) } | '[' error ']' { $$ = nil } -fieldValue : fieldScalarValue - | messageLiteral - | listLiteral - -fieldScalarValue : stringLit { - $$ = toStringValueNode($1) - } - | numLit - | '-' identifier { - kw := $2.ToKeyword() - switch strings.ToLower(kw.Val) { - case "inf", "infinity", "nan": - // these are acceptable - default: - // anything else is not - protolex.(*protoLex).Error(`only identifiers "inf", "infinity", or "nan" may appear after negative sign`) +msgList : msgLit { + if $1 == nil { + $$ = nil + } else { + $$ = &valueList{$1, nil, nil} } - // we'll 
validate the identifier later - f := ast.NewSpecialFloatLiteralNode(kw) - $$ = ast.NewSignedFloatLiteralNode($1, f) - } - | identifier { - $$ = $1 } - -messageValue : messageLiteral - | listOfMessagesLiteral - -messageLiteral : messageLiteralWithBraces - | '<' messageTextFormat '>' { - if $2 == nil { - $$ = ast.NewMessageLiteralNode($1, nil, nil, $3) + | msgLit ',' msgList { + if $1 == nil { + $$ = nil } else { - fields, delimiters := $2.toNodes() - $$ = ast.NewMessageLiteralNode($1, fields, delimiters, $3) + $$ = &valueList{$1, $2, $3} } } - | '<' '>' { - $$ = ast.NewMessageLiteralNode($1, nil, nil, $2) - } -listLiteral : '[' listElements ']' { - if $2 == nil { - $$ = ast.NewArrayLiteralNode($1, nil, nil, $3) - } else { - $$ = ast.NewArrayLiteralNode($1, $2.vals, $2.commas, $3) - } +msgLit : aggregate { + $$ = $1 } - | '[' ']' { - $$ = ast.NewArrayLiteralNode($1, nil, nil, $2) + | '<' aggFields '>' { + fields, delims := $2.toNodes() + $$ = ast.NewMessageLiteralNode($1, fields, delims, $3) } - | '[' error ']' { - $$ = ast.NewArrayLiteralNode($1, nil, nil, $3) + | '<' error '>' { + $$ = nil } -listElements : listElement { - $$ = &valueSlices{vals: []ast.ValueNode{$1}} +constantList : constant { + $$ = &valueList{$1, nil, nil} } - | listElements ',' listElement { - $1.vals = append($1.vals, $3) - $1.commas = append($1.commas, $2) - $$ = $1 - } - -listElement : fieldScalarValue - | messageLiteral - -listOfMessagesLiteral : '[' messageLiterals ']' { - if $2 == nil { - $$ = ast.NewArrayLiteralNode($1, nil, nil, $3) - } else { - $$ = ast.NewArrayLiteralNode($1, $2.vals, $2.commas, $3) - } + | constant ',' constantList { + $$ = &valueList{$1, $2, $3} } - | '[' ']' { - $$ = ast.NewArrayLiteralNode($1, nil, nil, $2) + | '<' aggFields '>' { + fields, delims := $2.toNodes() + msg := ast.NewMessageLiteralNode($1, fields, delims, $3) + $$ = &valueList{msg, nil, nil} } - | '[' error ']' { - $$ = ast.NewArrayLiteralNode($1, nil, nil, $3) + | '<' aggFields '>' ',' constantList { + fields, delims := $2.toNodes() + msg := ast.NewMessageLiteralNode($1, fields, delims, $3) + $$ = &valueList{msg, $4, $5} } - -messageLiterals : messageLiteral { - $$ = &valueSlices{vals: []ast.ValueNode{$1}} + | '<' error '>' { + $$ = nil } - | messageLiterals ',' messageLiteral { - $1.vals = append($1.vals, $3) - $1.commas = append($1.commas, $2) - $$ = $1 + | '<' error '>' ',' constantList { + $$ = $5 } -typeName : qualifiedIdentifierDot { +typeIdent : ident { $$ = $1.toIdentValueNode(nil) } - | '.' qualifiedIdentifierDot { + | '.' ident { $$ = $2.toIdentValueNode($1) } msgElementTypeIdent : msgElementIdent { $$ = $1.toIdentValueNode(nil) } - | '.' qualifiedIdentifier { + | '.' ident { $$ = $2.toIdentValueNode($1) } extElementTypeIdent : extElementIdent { $$ = $1.toIdentValueNode(nil) } - | '.' qualifiedIdentifier { + | '.' ident { $$ = $2.toIdentValueNode($1) } oneofElementTypeIdent : oneofElementIdent { $$ = $1.toIdentValueNode(nil) } - | '.' qualifiedIdentifier { + | '.' ident { $$ = $2.toIdentValueNode($1) } -notGroupElementTypeIdent : notGroupElementIdent { - $$ = $1.toIdentValueNode(nil) +msgField : _REQUIRED typeIdent name '=' _INT_LIT ';' { + $$ = ast.NewFieldNode($1.ToKeyword(), $2, $3, $4, $5, nil, $6) } - | '.' 
qualifiedIdentifier { - $$ = $2.toIdentValueNode($1) + | _OPTIONAL typeIdent name '=' _INT_LIT ';' { + $$ = ast.NewFieldNode($1.ToKeyword(), $2, $3, $4, $5, nil, $6) } - -mtdElementTypeIdent : mtdElementIdent { - $$ = $1.toIdentValueNode(nil) + | _REPEATED typeIdent name '=' _INT_LIT ';' { + $$ = ast.NewFieldNode($1.ToKeyword(), $2, $3, $4, $5, nil, $6) } - | '.' qualifiedIdentifierDot { - $$ = $2.toIdentValueNode($1) + | _REQUIRED typeIdent name '=' _INT_LIT compactOptions ';' { + $$ = ast.NewFieldNode($1.ToKeyword(), $2, $3, $4, $5, $6, $7) } - -fieldCardinality : _REQUIRED - | _OPTIONAL - | _REPEATED - -compactOptions : '[' compactOptionDecls ']' { - $$ = ast.NewCompactOptionsNode($1, $2.options, $2.commas, $3) + | _OPTIONAL typeIdent name '=' _INT_LIT compactOptions ';' { + $$ = ast.NewFieldNode($1.ToKeyword(), $2, $3, $4, $5, $6, $7) } - | '[' ']' { - protolex.(*protoLex).Error("compact options must have at least one option") - $$ = ast.NewCompactOptionsNode($1, nil, nil, $2) + | _REPEATED typeIdent name '=' _INT_LIT compactOptions ';' { + $$ = ast.NewFieldNode($1.ToKeyword(), $2, $3, $4, $5, $6, $7) } - -compactOptionDecls : compactOptionFinal { - $$ = &compactOptionSlices{options: []*ast.OptionNode{$1.Node}, commas: $1.Runes} + | msgElementTypeIdent name '=' _INT_LIT ';' { + $$ = ast.NewFieldNode(nil, $1, $2, $3, $4, nil, $5) } - | compactOptionLeadingDecls compactOptionFinal { - $1.options = append($1.options, $2.Node) - $1.commas = append($1.commas, $2.Runes...) - $$ = $1 + | msgElementTypeIdent name '=' _INT_LIT compactOptions ';' { + $$ = ast.NewFieldNode(nil, $1, $2, $3, $4, $5, $6) } -compactOptionLeadingDecls : compactOptionEntry { - $$ = &compactOptionSlices{options: []*ast.OptionNode{$1.Node}, commas: $1.Runes} +extField : _REQUIRED typeIdent name '=' _INT_LIT ';' { + $$ = ast.NewFieldNode($1.ToKeyword(), $2, $3, $4, $5, nil, $6) } - | compactOptionLeadingDecls compactOptionEntry { - $1.options = append($1.options, $2.Node) - $1.commas = append($1.commas, $2.Runes...) 
- $$ = $1 + | _OPTIONAL typeIdent name '=' _INT_LIT ';' { + $$ = ast.NewFieldNode($1.ToKeyword(), $2, $3, $4, $5, nil, $6) } - -compactOptionFinal : compactOption { - $$ = newNodeWithRunes($1) + | _REPEATED typeIdent name '=' _INT_LIT ';' { + $$ = ast.NewFieldNode($1.ToKeyword(), $2, $3, $4, $5, nil, $6) } - | compactOptionEntry { - protolex.(*protoLex).Error("syntax error: unexpected ','") - $$ = $1 + | _REQUIRED typeIdent name '=' _INT_LIT compactOptions ';' { + $$ = ast.NewFieldNode($1.ToKeyword(), $2, $3, $4, $5, $6, $7) + } + | _OPTIONAL typeIdent name '=' _INT_LIT compactOptions ';' { + $$ = ast.NewFieldNode($1.ToKeyword(), $2, $3, $4, $5, $6, $7) + } + | _REPEATED typeIdent name '=' _INT_LIT compactOptions ';' { + $$ = ast.NewFieldNode($1.ToKeyword(), $2, $3, $4, $5, $6, $7) + } + | extElementTypeIdent name '=' _INT_LIT ';' { + $$ = ast.NewFieldNode(nil, $1, $2, $3, $4, nil, $5) + } + | extElementTypeIdent name '=' _INT_LIT compactOptions ';' { + $$ = ast.NewFieldNode(nil, $1, $2, $3, $4, $5, $6) } -compactOptionEntry : compactOption ',' { - $$ = newNodeWithRunes($1, $2) - } +compactOptions: '[' compactOptionDecls ']' { + opts, commas := $2.toNodes() + $$ = ast.NewCompactOptionsNode($1, opts, commas, $3) + } -compactOption : optionName '=' optionValue { - optName := ast.NewOptionNameNode($1.refs, $1.dots) - $$ = ast.NewCompactOptionNode(optName, $2, $3) +compactOptionDecls : compactOption { + $$ = &compactOptionList{$1, nil, nil} } - | optionName { - optName := ast.NewOptionNameNode($1.refs, $1.dots) - protolex.(*protoLex).Error("compact option must have a value") - $$ = ast.NewCompactOptionNode(optName, nil, nil) + | compactOption ',' compactOptionDecls { + $$ = &compactOptionList{$1, $2, $3} } +compactOption: optionName '=' constant { + refs, dots := $1.toNodes() + optName := ast.NewOptionNameNode(refs, dots) + $$ = ast.NewCompactOptionNode(optName, $2, $3) + } -groupDecl : fieldCardinality _GROUP identifier '=' _INT_LIT '{' messageBody '}' { +group : _REQUIRED _GROUP name '=' _INT_LIT '{' messageDecls '}' { $$ = ast.NewGroupNode($1.ToKeyword(), $2.ToKeyword(), $3, $4, $5, nil, $6, $7, $8) } - | fieldCardinality _GROUP identifier '=' _INT_LIT compactOptions '{' messageBody '}' { - $$ = ast.NewGroupNode($1.ToKeyword(), $2.ToKeyword(), $3, $4, $5, $6, $7, $8, $9) - } - -messageGroupDecl : fieldCardinality _GROUP identifier '=' _INT_LIT '{' messageBody '}' semicolons { - $$ = newNodeWithRunes(ast.NewGroupNode($1.ToKeyword(), $2.ToKeyword(), $3, $4, $5, nil, $6, $7, $8), $9...) + | _OPTIONAL _GROUP name '=' _INT_LIT '{' messageDecls '}' { + $$ = ast.NewGroupNode($1.ToKeyword(), $2.ToKeyword(), $3, $4, $5, nil, $6, $7, $8) } - | fieldCardinality _GROUP identifier '=' _INT_LIT compactOptions '{' messageBody '}' semicolons { - $$ = newNodeWithRunes(ast.NewGroupNode($1.ToKeyword(), $2.ToKeyword(), $3, $4, $5, $6, $7, $8, $9), $10...) + | _REPEATED _GROUP name '=' _INT_LIT '{' messageDecls '}' { + $$ = ast.NewGroupNode($1.ToKeyword(), $2.ToKeyword(), $3, $4, $5, nil, $6, $7, $8) } - | fieldCardinality _GROUP identifier '{' messageBody '}' semicolons { - $$ = newNodeWithRunes(ast.NewGroupNode($1.ToKeyword(), $2.ToKeyword(), $3, nil, nil, nil, $4, $5, $6), $7...) 
+ | _REQUIRED _GROUP name '=' _INT_LIT compactOptions '{' messageDecls '}' { + $$ = ast.NewGroupNode($1.ToKeyword(), $2.ToKeyword(), $3, $4, $5, $6, $7, $8, $9) } - | fieldCardinality _GROUP identifier compactOptions '{' messageBody '}' semicolons { - $$ = newNodeWithRunes(ast.NewGroupNode($1.ToKeyword(), $2.ToKeyword(), $3, nil, nil, $4, $5, $6, $7), $8...) + | _OPTIONAL _GROUP name '=' _INT_LIT compactOptions '{' messageDecls '}' { + $$ = ast.NewGroupNode($1.ToKeyword(), $2.ToKeyword(), $3, $4, $5, $6, $7, $8, $9) } - -oneofDecl : _ONEOF identifier '{' oneofBody '}' semicolons { - $$ = newNodeWithRunes(ast.NewOneofNode($1.ToKeyword(), $2, $3, $4, $5), $6...) + | _REPEATED _GROUP name '=' _INT_LIT compactOptions '{' messageDecls '}' { + $$ = ast.NewGroupNode($1.ToKeyword(), $2.ToKeyword(), $3, $4, $5, $6, $7, $8, $9) } -oneofBody : { - $$ = nil +oneof : _ONEOF name '{' ooDecls '}' { + $$ = ast.NewOneOfNode($1.ToKeyword(), $2, $3, $4, $5) } - | oneofElements -oneofElements : oneofElements oneofElement { +ooDecls : ooDecls ooDecl { if $2 != nil { $$ = append($1, $2) } else { $$ = $1 } } - | oneofElement { + | ooDecl { if $1 != nil { - $$ = []ast.OneofElement{$1} + $$ = []ast.OneOfElement{$1} } else { $$ = nil } } + | { + $$ = nil + } -oneofElement : oneofOptionDecl { +ooDecl : option { $$ = $1 } - | oneofFieldDecl { + | oneofField { $$ = $1 } - | oneofGroupDecl { + | oneofGroup { $$ = $1 } | error ';' { @@ -797,55 +667,32 @@ oneofElement : oneofOptionDecl { $$ = nil } -oneofFieldDecl : oneofElementTypeIdent identifier '=' _INT_LIT semicolon { +oneofField : oneofElementTypeIdent name '=' _INT_LIT ';' { $$ = ast.NewFieldNode(nil, $1, $2, $3, $4, nil, $5) } - | oneofElementTypeIdent identifier '=' _INT_LIT compactOptions semicolon { + | oneofElementTypeIdent name '=' _INT_LIT compactOptions ';' { $$ = ast.NewFieldNode(nil, $1, $2, $3, $4, $5, $6) } - | oneofElementTypeIdent identifier semicolon { - $$ = ast.NewFieldNode(nil, $1, $2, nil, nil, nil, $3) - } - | oneofElementTypeIdent identifier compactOptions semicolon { - $$ = ast.NewFieldNode(nil, $1, $2, nil, nil, $3, $4) - } -oneofGroupDecl : _GROUP identifier '=' _INT_LIT '{' messageBody '}' { +oneofGroup : _GROUP name '=' _INT_LIT '{' messageDecls '}' { $$ = ast.NewGroupNode(nil, $1.ToKeyword(), $2, $3, $4, nil, $5, $6, $7) } - | _GROUP identifier '=' _INT_LIT compactOptions '{' messageBody '}' { + | _GROUP name '=' _INT_LIT compactOptions '{' messageDecls '}' { $$ = ast.NewGroupNode(nil, $1.ToKeyword(), $2, $3, $4, $5, $6, $7, $8) } - | _GROUP identifier '{' messageBody '}' { - $$ = ast.NewGroupNode(nil, $1.ToKeyword(), $2, nil, nil, nil, $3, $4, $5) - } - | _GROUP identifier compactOptions '{' messageBody '}' { - $$ = ast.NewGroupNode(nil, $1.ToKeyword(), $2, nil, nil, $3, $4, $5, $6) - } - -mapFieldDecl : mapType identifier '=' _INT_LIT semicolons { - semi, extra := protolex.(*protoLex).requireSemicolon($5) - $$ = newNodeWithRunes(ast.NewMapFieldNode($1, $2, $3, $4, nil, semi), extra...) +mapField : mapType name '=' _INT_LIT ';' { + $$ = ast.NewMapFieldNode($1, $2, $3, $4, nil, $5) } - | mapType identifier '=' _INT_LIT compactOptions semicolons { - semi, extra := protolex.(*protoLex).requireSemicolon($6) - $$ = newNodeWithRunes(ast.NewMapFieldNode($1, $2, $3, $4, $5, semi), extra...) - } - | mapType identifier semicolons { - semi, extra := protolex.(*protoLex).requireSemicolon($3) - $$ = newNodeWithRunes(ast.NewMapFieldNode($1, $2, nil, nil, nil, semi), extra...) 
- } - | mapType identifier compactOptions semicolons { - semi, extra := protolex.(*protoLex).requireSemicolon($4) - $$ = newNodeWithRunes(ast.NewMapFieldNode($1, $2, nil, nil, $3, semi), extra...) + | mapType name '=' _INT_LIT compactOptions ';' { + $$ = ast.NewMapFieldNode($1, $2, $3, $4, $5, $6) } -mapType : _MAP '<' mapKeyType ',' typeName '>' { +mapType : _MAP '<' keyType ',' typeIdent '>' { $$ = ast.NewMapTypeNode($1.ToKeyword(), $2, $3, $4, $5, $6) } -mapKeyType : _INT32 +keyType : _INT32 | _INT64 | _UINT32 | _UINT64 @@ -858,23 +705,20 @@ mapKeyType : _INT32 | _BOOL | _STRING -extensionRangeDecl : _EXTENSIONS tagRanges ';' semicolons { - // TODO: Tolerate a missing semicolon here. This currnelty creates a shift/reduce conflict - // between `extensions 1 to 10` and `extensions 1` followed by `to = 10`. - $$ = newNodeWithRunes(ast.NewExtensionRangeNode($1.ToKeyword(), $2.ranges, $2.commas, nil, $3), $4...) +extensions : _EXTENSIONS tagRanges ';' { + ranges, commas := $2.toNodes() + $$ = ast.NewExtensionRangeNode($1.ToKeyword(), ranges, commas, nil, $3) } - | _EXTENSIONS tagRanges compactOptions semicolons { - semi, extra := protolex.(*protoLex).requireSemicolon($4) - $$ = newNodeWithRunes(ast.NewExtensionRangeNode($1.ToKeyword(), $2.ranges, $2.commas, $3, semi), extra...) + | _EXTENSIONS tagRanges compactOptions ';' { + ranges, commas := $2.toNodes() + $$ = ast.NewExtensionRangeNode($1.ToKeyword(), ranges, commas, $3, $4) } tagRanges : tagRange { - $$ = &rangeSlices{ranges: []*ast.RangeNode{$1}} + $$ = &rangeList{$1, nil, nil} } - | tagRanges ',' tagRange { - $1.ranges = append($1.ranges, $3) - $1.commas = append($1.commas, $2) - $$ = $1 + | tagRange ',' tagRanges { + $$ = &rangeList{$1, $2, $3} } tagRange : _INT_LIT { @@ -887,227 +731,190 @@ tagRange : _INT_LIT { $$ = ast.NewRangeNode($1, $2.ToKeyword(), nil, $3.ToKeyword()) } -enumValueRanges : enumValueRange { - $$ = &rangeSlices{ranges: []*ast.RangeNode{$1}} +enumRanges : enumRange { + $$ = &rangeList{$1, nil, nil} } - | enumValueRanges ',' enumValueRange { - $1.ranges = append($1.ranges, $3) - $1.commas = append($1.commas, $2) - $$ = $1 + | enumRange ',' enumRanges { + $$ = &rangeList{$1, $2, $3} } -enumValueRange : enumValueNumber { +enumRange : intLit { $$ = ast.NewRangeNode($1, nil, nil, nil) } - | enumValueNumber _TO enumValueNumber { + | intLit _TO intLit { $$ = ast.NewRangeNode($1, $2.ToKeyword(), $3, nil) } - | enumValueNumber _TO _MAX { + | intLit _TO _MAX { $$ = ast.NewRangeNode($1, $2.ToKeyword(), nil, $3.ToKeyword()) } -enumValueNumber : _INT_LIT { +intLit : _INT_LIT { $$ = $1 } | '-' _INT_LIT { $$ = ast.NewNegativeIntLiteralNode($1, $2) } -msgReserved : _RESERVED tagRanges ';' semicolons { - // TODO: Tolerate a missing semicolon here. This currnelty creates a shift/reduce conflict - // between `reserved 1 to 10` and `reserved 1` followed by `to = 10`. - $$ = newNodeWithRunes(ast.NewReservedRangesNode($1.ToKeyword(), $2.ranges, $2.commas, $3), $4...) +msgReserved : _RESERVED tagRanges ';' { + ranges, commas := $2.toNodes() + $$ = ast.NewReservedRangesNode($1.ToKeyword(), ranges, commas, $3) } | reservedNames -enumReserved : _RESERVED enumValueRanges ';' semicolons { - // TODO: Tolerate a missing semicolon here. This currnelty creates a shift/reduce conflict - // between `reserved 1 to 10` and `reserved 1` followed by `to = 10`. - $$ = newNodeWithRunes(ast.NewReservedRangesNode($1.ToKeyword(), $2.ranges, $2.commas, $3), $4...) 
+enumReserved : _RESERVED enumRanges ';' { + ranges, commas := $2.toNodes() + $$ = ast.NewReservedRangesNode($1.ToKeyword(), ranges, commas, $3) } | reservedNames -reservedNames : _RESERVED fieldNameStrings semicolons { - semi, extra := protolex.(*protoLex).requireSemicolon($3) - $$ = newNodeWithRunes(ast.NewReservedNamesNode($1.ToKeyword(), $2.names, $2.commas, semi), extra...) - } - | _RESERVED fieldNameIdents semicolons { - semi, extra := protolex.(*protoLex).requireSemicolon($3) - $$ = newNodeWithRunes(ast.NewReservedIdentifiersNode($1.ToKeyword(), $2.idents, $2.commas, semi), extra...) +reservedNames : _RESERVED fieldNames ';' { + names, commas := $2.toNodes() + $$ = ast.NewReservedNamesNode($1.ToKeyword(), names, commas, $3) } -fieldNameStrings : stringLit { - $$ = &nameSlices{names: []ast.StringValueNode{toStringValueNode($1)}} +fieldNames : stringLit { + $$ = &nameList{$1.toStringValueNode(), nil, nil} } - | fieldNameStrings ',' stringLit { - $1.names = append($1.names, toStringValueNode($3)) - $1.commas = append($1.commas, $2) - $$ = $1 + | stringLit ',' fieldNames { + $$ = &nameList{$1.toStringValueNode(), $2, $3} } -fieldNameIdents : identifier { - $$ = &nameSlices{idents: []*ast.IdentNode{$1}} - } - | fieldNameIdents ',' identifier { - $1.idents = append($1.idents, $3) - $1.commas = append($1.commas, $2) - $$ = $1 +enum : _ENUM name '{' enumDecls '}' { + $$ = ast.NewEnumNode($1.ToKeyword(), $2, $3, $4, $5) } -enumDecl : _ENUM identifier '{' enumBody '}' semicolons { - $$ = newNodeWithRunes(ast.NewEnumNode($1.ToKeyword(), $2, $3, $4, $5), $6...) +enumDecls : enumDecls enumDecl { + if $2 != nil { + $$ = append($1, $2) + } else { + $$ = $1 + } } - -enumBody : semicolons { - $$ = prependRunes(toEnumElement, $1, nil) + | enumDecl { + if $1 != nil { + $$ = []ast.EnumElement{$1} + } else { + $$ = nil + } } - | semicolons enumElements { - $$ = prependRunes(toEnumElement, $1, $2) + | { + $$ = nil } -enumElements : enumElements enumElement { - $$ = append($1, $2...) +enumDecl : option { + $$ = $1 } - | enumElement { + | enumValue { $$ = $1 } - -enumElement : optionDecl { - $$ = toElements[ast.EnumElement](toEnumElement, $1.Node, $1.Runes) + | enumReserved { + $$ = $1 } - | enumValueDecl { - $$ = toElements[ast.EnumElement](toEnumElement, $1.Node, $1.Runes) + | ';' { + $$ = ast.NewEmptyDeclNode($1) } - | enumReserved { - $$ = toElements[ast.EnumElement](toEnumElement, $1.Node, $1.Runes) + | error ';' { + $$ = nil } | error { $$ = nil } -enumValueDecl : enumValueName '=' enumValueNumber semicolons { - semi, extra := protolex.(*protoLex).requireSemicolon($4) - $$ = newNodeWithRunes(ast.NewEnumValueNode($1, $2, $3, nil, semi), extra...) +enumValue : enumElementName '=' intLit ';' { + $$ = ast.NewEnumValueNode($1, $2, $3, nil, $4) } - | enumValueName '=' enumValueNumber compactOptions semicolons { - semi, extra := protolex.(*protoLex).requireSemicolon($5) - $$ = newNodeWithRunes(ast.NewEnumValueNode($1, $2, $3, $4, semi), extra...) + | enumElementName '=' intLit compactOptions ';' { + $$ = ast.NewEnumValueNode($1, $2, $3, $4, $5) } -messageDecl : _MESSAGE identifier '{' messageBody '}' semicolons { - $$ = newNodeWithRunes(ast.NewMessageNode($1.ToKeyword(), $2, $3, $4, $5), $6...) 
+message : _MESSAGE name '{' messageDecls '}' { + $$ = ast.NewMessageNode($1.ToKeyword(), $2, $3, $4, $5) } -messageBody : semicolons { - $$ = prependRunes(toMessageElement, $1, nil) +messageDecls : messageDecls messageDecl { + if $2 != nil { + $$ = append($1, $2) + } else { + $$ = $1 + } + } + | messageDecl { + if $1 != nil { + $$ = []ast.MessageElement{$1} + } else { + $$ = nil + } } - | semicolons messageElements { - $$ = prependRunes(toMessageElement, $1, $2) + | { + $$ = nil } -messageElements : messageElements messageElement { - $$ = append($1, $2...) +messageDecl : msgField { + $$ = $1 } - | messageElement { + | enum { $$ = $1 } - -messageElement : messageFieldDecl { - $$ = toElements[ast.MessageElement](toMessageElement, $1.Node, $1.Runes) + | message { + $$ = $1 } - | enumDecl { - $$ = toElements[ast.MessageElement](toMessageElement, $1.Node, $1.Runes) + | extend { + $$ = $1 } - | messageDecl { - $$ = toElements[ast.MessageElement](toMessageElement, $1.Node, $1.Runes) + | extensions { + $$ = $1 } - | extensionDecl { - $$ = toElements[ast.MessageElement](toMessageElement, $1.Node, $1.Runes) + | group { + $$ = $1 } - | extensionRangeDecl { - $$ = toElements[ast.MessageElement](toMessageElement, $1.Node, $1.Runes) + | option { + $$ = $1 } - | messageGroupDecl { - $$ = toElements[ast.MessageElement](toMessageElement, $1.Node, $1.Runes) + | oneof { + $$ = $1 } - | optionDecl { - $$ = toElements[ast.MessageElement](toMessageElement, $1.Node, $1.Runes) + | mapField { + $$ = $1 } - | oneofDecl { - $$ = toElements[ast.MessageElement](toMessageElement, $1.Node, $1.Runes) + | msgReserved { + $$ = $1 } - | mapFieldDecl { - $$ = toElements[ast.MessageElement](toMessageElement, $1.Node, $1.Runes) + | ';' { + $$ = ast.NewEmptyDeclNode($1) } - | msgReserved { - $$ = toElements[ast.MessageElement](toMessageElement, $1.Node, $1.Runes) + | error ';' { + $$ = nil } | error { $$ = nil } -messageFieldDecl : fieldCardinality notGroupElementTypeIdent identifier '=' _INT_LIT semicolons { - semis, extra := protolex.(*protoLex).requireSemicolon($6) - $$ = newNodeWithRunes(ast.NewFieldNode($1.ToKeyword(), $2, $3, $4, $5, nil, semis), extra...) - } - | fieldCardinality notGroupElementTypeIdent identifier '=' _INT_LIT compactOptions semicolons { - semis, extra := protolex.(*protoLex).requireSemicolon($7) - $$ = newNodeWithRunes(ast.NewFieldNode($1.ToKeyword(), $2, $3, $4, $5, $6, semis), extra...) - } - | msgElementTypeIdent identifier '=' _INT_LIT semicolons { - semis, extra := protolex.(*protoLex).requireSemicolon($5) - $$ = newNodeWithRunes(ast.NewFieldNode(nil, $1, $2, $3, $4, nil, semis), extra...) - } - | msgElementTypeIdent identifier '=' _INT_LIT compactOptions semicolons { - semis, extra := protolex.(*protoLex).requireSemicolon($6) - $$ = newNodeWithRunes(ast.NewFieldNode(nil, $1, $2, $3, $4, $5, semis), extra...) - } - | fieldCardinality notGroupElementTypeIdent identifier semicolons { - semis, extra := protolex.(*protoLex).requireSemicolon($4) - $$ = newNodeWithRunes(ast.NewFieldNode($1.ToKeyword(), $2, $3, nil, nil, nil, semis), extra...) - } - | fieldCardinality notGroupElementTypeIdent identifier compactOptions semicolons { - semis, extra := protolex.(*protoLex).requireSemicolon($5) - $$ = newNodeWithRunes(ast.NewFieldNode($1.ToKeyword(), $2, $3, nil, nil, $4, semis), extra...) - } - | msgElementTypeIdent identifier semicolons { - semis, extra := protolex.(*protoLex).requireSemicolon($3) - $$ = newNodeWithRunes(ast.NewFieldNode(nil, $1, $2, nil, nil, nil, semis), extra...) 
- } - | msgElementTypeIdent identifier compactOptions semicolons { - semis, extra := protolex.(*protoLex).requireSemicolon($4) - $$ = newNodeWithRunes(ast.NewFieldNode(nil, $1, $2, nil, nil, $3, semis), extra...) - } - - -extensionDecl : _EXTEND typeName '{' extensionBody '}' semicolons { - $$ = newNodeWithRunes(ast.NewExtendNode($1.ToKeyword(), $2, $3, $4, $5), $6...) - } - -extensionBody : { - $$ = nil +extend : _EXTEND typeIdent '{' extendDecls '}' { + $$ = ast.NewExtendNode($1.ToKeyword(), $2, $3, $4, $5) } - | extensionElements -extensionElements : extensionElements extensionElement { +extendDecls : extendDecls extendDecl { if $2 != nil { $$ = append($1, $2) } else { $$ = $1 } } - | extensionElement { + | extendDecl { if $1 != nil { $$ = []ast.ExtendElement{$1} } else { $$ = nil } } + | { + $$ = nil + } -extensionElement : extensionFieldDecl { +extendDecl : extField { $$ = $1 } - | groupDecl { + | group { $$ = $1 } | error ';' { @@ -1117,91 +924,96 @@ extensionElement : extensionFieldDecl { $$ = nil } -extensionFieldDecl : fieldCardinality notGroupElementTypeIdent identifier '=' _INT_LIT semicolon { - $$ = ast.NewFieldNode($1.ToKeyword(), $2, $3, $4, $5, nil, $6) - } - | fieldCardinality notGroupElementTypeIdent identifier '=' _INT_LIT compactOptions semicolon { - $$ = ast.NewFieldNode($1.ToKeyword(), $2, $3, $4, $5, $6, $7) - } - | extElementTypeIdent identifier '=' _INT_LIT semicolon { - $$ = ast.NewFieldNode(nil, $1, $2, $3, $4, nil, $5) - } - | extElementTypeIdent identifier '=' _INT_LIT compactOptions semicolon { - $$ = ast.NewFieldNode(nil, $1, $2, $3, $4, $5, $6) - } - -serviceDecl : _SERVICE identifier '{' serviceBody '}' semicolons { - $$ = newNodeWithRunes(ast.NewServiceNode($1.ToKeyword(), $2, $3, $4, $5), $6...) +service : _SERVICE name '{' serviceDecls '}' { + $$ = ast.NewServiceNode($1.ToKeyword(), $2, $3, $4, $5) } -serviceBody : semicolons { - $$ = prependRunes(toServiceElement, $1, nil) - } - | semicolons serviceElements { - $$ = prependRunes(toServiceElement, $1, $2) +serviceDecls : serviceDecls serviceDecl { + if $2 != nil { + $$ = append($1, $2) + } else { + $$ = $1 + } } - -serviceElements : serviceElements serviceElement { - $$ = append($1, $2...) + | serviceDecl { + if $1 != nil { + $$ = []ast.ServiceElement{$1} + } else { + $$ = nil + } } - | serviceElement { - $$ = $1 + | { + $$ = nil } // NB: doc suggests support for "stream" declaration, separate from "rpc", but // it does not appear to be supported in protoc (doc is likely from grammar for // Google-internal version of protoc, with support for streaming stubby) -serviceElement : optionDecl { - $$ = toElements[ast.ServiceElement](toServiceElement, $1.Node, $1.Runes) +serviceDecl : option { + $$ = $1 + } + | rpc { + $$ = $1 } - | methodDecl { - $$ = toElements[ast.ServiceElement](toServiceElement, $1.Node, $1.Runes) + | ';' { + $$ = ast.NewEmptyDeclNode($1) + } + | error ';' { + $$ = nil } | error { $$ = nil } -methodDecl : _RPC identifier methodMessageType _RETURNS methodMessageType semicolons { - semi, extra := protolex.(*protoLex).requireSemicolon($6) - $$ = newNodeWithRunes(ast.NewRPCNode($1.ToKeyword(), $2, $3, $4.ToKeyword(), $5, semi), extra...) +rpc : _RPC name rpcType _RETURNS rpcType ';' { + $$ = ast.NewRPCNode($1.ToKeyword(), $2, $3, $4.ToKeyword(), $5, $6) } - | _RPC identifier methodMessageType _RETURNS methodMessageType '{' methodBody '}' semicolons { - $$ = newNodeWithRunes(ast.NewRPCNodeWithBody($1.ToKeyword(), $2, $3, $4.ToKeyword(), $5, $6, $7, $8), $9...) 
+ | _RPC name rpcType _RETURNS rpcType '{' rpcDecls '}' { + $$ = ast.NewRPCNodeWithBody($1.ToKeyword(), $2, $3, $4.ToKeyword(), $5, $6, $7, $8) } -methodMessageType : '(' _STREAM typeName ')' { +rpcType : '(' _STREAM typeIdent ')' { $$ = ast.NewRPCTypeNode($1, $2.ToKeyword(), $3, $4) } - | '(' mtdElementTypeIdent ')' { + | '(' typeIdent ')' { $$ = ast.NewRPCTypeNode($1, nil, $2, $3) } -methodBody : semicolons { - $$ = prependRunes(toMethodElement, $1, nil) +rpcDecls : rpcDecls rpcDecl { + if $2 != nil { + $$ = append($1, $2) + } else { + $$ = $1 + } + } + | rpcDecl { + if $1 != nil { + $$ = []ast.RPCElement{$1} + } else { + $$ = nil + } } - | semicolons methodElements { - $$ = prependRunes(toMethodElement, $1, $2) + | { + $$ = nil } -methodElements : methodElements methodElement { - $$ = append($1, $2...) - } - | methodElement { +rpcDecl : option { $$ = $1 } - -methodElement : optionDecl { - $$ = toElements[ast.RPCElement](toMethodElement, $1.Node, $1.Runes) + | ';' { + $$ = ast.NewEmptyDeclNode($1) + } + | error ';' { + $$ = nil } | error { $$ = nil } // excludes message, enum, oneof, extensions, reserved, extend, -// option, group, optional, required, and repeated +// option, optional, required, and repeated msgElementName : _NAME | _SYNTAX - | _EDITION | _IMPORT | _WEAK | _PUBLIC @@ -1225,6 +1037,7 @@ msgElementName : _NAME | _BOOL | _STRING | _BYTES + | _GROUP | _MAP | _TO | _MAX @@ -1233,10 +1046,9 @@ msgElementName : _NAME | _STREAM | _RETURNS -// excludes group, optional, required, and repeated +// excludes optional, required, and repeated extElementName : _NAME | _SYNTAX - | _EDITION | _IMPORT | _WEAK | _PUBLIC @@ -1261,6 +1073,7 @@ extElementName : _NAME | _BOOL | _STRING | _BYTES + | _GROUP | _ONEOF | _MAP | _EXTENSIONS @@ -1276,9 +1089,8 @@ extElementName : _NAME | _RETURNS // excludes reserved, option -enumValueName : _NAME +enumElementName : _NAME | _SYNTAX - | _EDITION | _IMPORT | _WEAK | _PUBLIC @@ -1319,108 +1131,17 @@ enumValueName : _NAME | _STREAM | _RETURNS -// excludes group, option, optional, required, and repeated +// excludes option, optional, required, and repeated oneofElementName : _NAME | _SYNTAX - | _EDITION - | _IMPORT - | _WEAK - | _PUBLIC - | _PACKAGE - | _TRUE - | _FALSE - | _INF - | _NAN - | _DOUBLE - | _FLOAT - | _INT32 - | _INT64 - | _UINT32 - | _UINT64 - | _SINT32 - | _SINT64 - | _FIXED32 - | _FIXED64 - | _SFIXED32 - | _SFIXED64 - | _BOOL - | _STRING - | _BYTES - | _ONEOF - | _MAP - | _EXTENSIONS - | _TO - | _MAX - | _RESERVED - | _ENUM - | _MESSAGE - | _EXTEND - | _SERVICE - | _RPC - | _STREAM - | _RETURNS - -// excludes group -notGroupElementName : _NAME - | _SYNTAX - | _EDITION | _IMPORT | _WEAK | _PUBLIC | _PACKAGE - | _OPTION | _TRUE | _FALSE | _INF | _NAN - | _REPEATED - | _OPTIONAL - | _REQUIRED - | _DOUBLE - | _FLOAT - | _INT32 - | _INT64 - | _UINT32 - | _UINT64 - | _SINT32 - | _SINT64 - | _FIXED32 - | _FIXED64 - | _SFIXED32 - | _SFIXED64 - | _BOOL - | _STRING - | _BYTES - | _ONEOF - | _MAP - | _EXTENSIONS - | _TO - | _MAX - | _RESERVED - | _ENUM - | _MESSAGE - | _EXTEND - | _SERVICE - | _RPC - | _STREAM - | _RETURNS - -// excludes stream -mtdElementName : _NAME - | _SYNTAX - | _EDITION - | _IMPORT - | _WEAK - | _PUBLIC - | _PACKAGE - | _OPTION - | _TRUE - | _FALSE - | _INF - | _NAN - | _REPEATED - | _OPTIONAL - | _REQUIRED | _DOUBLE | _FLOAT | _INT32 @@ -1448,11 +1169,11 @@ mtdElementName : _NAME | _EXTEND | _SERVICE | _RPC + | _STREAM | _RETURNS -identifier : _NAME +name : _NAME | _SYNTAX - | _EDITION | _IMPORT | _WEAK | _PUBLIC diff --git 
a/vendor/github.com/bufbuild/protocompile/parser/proto.y.go b/vendor/github.com/bufbuild/protocompile/parser/proto.y.go index 048e5ccc..5a7e953f 100644 --- a/vendor/github.com/bufbuild/protocompile/parser/proto.y.go +++ b/vendor/github.com/bufbuild/protocompile/parser/proto.y.go @@ -1,4 +1,4 @@ -// Copyright 2020-2024 Buf Technologies, Inc. +// Copyright 2020-2022 Buf Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -21,69 +21,67 @@ import __yyfmt__ "fmt" import ( "math" - "strings" "github.com/bufbuild/protocompile/ast" ) type protoSymType struct { - yys int - file *ast.FileNode - syn *ast.SyntaxNode - ed *ast.EditionNode - fileElements []ast.FileElement - pkg nodeWithRunes[*ast.PackageNode] - imprt nodeWithRunes[*ast.ImportNode] - msg nodeWithRunes[*ast.MessageNode] - msgElements []ast.MessageElement - fld *ast.FieldNode - msgFld nodeWithRunes[*ast.FieldNode] - mapFld nodeWithRunes[*ast.MapFieldNode] - mapType *ast.MapTypeNode - grp *ast.GroupNode - msgGrp nodeWithRunes[*ast.GroupNode] - oo nodeWithRunes[*ast.OneofNode] - ooElement ast.OneofElement - ooElements []ast.OneofElement - ext nodeWithRunes[*ast.ExtensionRangeNode] - resvd nodeWithRunes[*ast.ReservedNode] - en nodeWithRunes[*ast.EnumNode] - enElements []ast.EnumElement - env nodeWithRunes[*ast.EnumValueNode] - extend nodeWithRunes[*ast.ExtendNode] - extElement ast.ExtendElement - extElements []ast.ExtendElement - svc nodeWithRunes[*ast.ServiceNode] - svcElements []ast.ServiceElement - mtd nodeWithRunes[*ast.RPCNode] - mtdMsgType *ast.RPCTypeNode - mtdElements []ast.RPCElement - optRaw *ast.OptionNode - opt nodeWithRunes[*ast.OptionNode] - opts *compactOptionSlices - refRaw *ast.FieldReferenceNode - ref nodeWithRunes[*ast.FieldReferenceNode] - optNms *fieldRefSlices - cmpctOpts *ast.CompactOptionsNode - rng *ast.RangeNode - rngs *rangeSlices - names *nameSlices - cidPart nodeWithRunes[*ast.IdentNode] - cid *identSlices - tid ast.IdentValueNode - sl *valueSlices - msgLitFlds *messageFieldList - msgLitFld *ast.MessageFieldNode - v ast.ValueNode - il ast.IntValueNode - str []*ast.StringLiteralNode - s *ast.StringLiteralNode - i *ast.UintLiteralNode - f *ast.FloatLiteralNode - id *ast.IdentNode - b *ast.RuneNode - bs []*ast.RuneNode - err error + yys int + file *ast.FileNode + syn *ast.SyntaxNode + fileDecl ast.FileElement + fileDecls []ast.FileElement + pkg *ast.PackageNode + imprt *ast.ImportNode + msg *ast.MessageNode + msgDecl ast.MessageElement + msgDecls []ast.MessageElement + fld *ast.FieldNode + mapFld *ast.MapFieldNode + mapType *ast.MapTypeNode + grp *ast.GroupNode + oo *ast.OneOfNode + ooDecl ast.OneOfElement + ooDecls []ast.OneOfElement + ext *ast.ExtensionRangeNode + resvd *ast.ReservedNode + en *ast.EnumNode + enDecl ast.EnumElement + enDecls []ast.EnumElement + env *ast.EnumValueNode + extend *ast.ExtendNode + extDecl ast.ExtendElement + extDecls []ast.ExtendElement + svc *ast.ServiceNode + svcDecl ast.ServiceElement + svcDecls []ast.ServiceElement + mtd *ast.RPCNode + rpcType *ast.RPCTypeNode + rpcDecl ast.RPCElement + rpcDecls []ast.RPCElement + opt *ast.OptionNode + opts *compactOptionList + ref *ast.FieldReferenceNode + optNms *fieldRefList + cmpctOpts *ast.CompactOptionsNode + rng *ast.RangeNode + rngs *rangeList + names *nameList + cid *identList + tid ast.IdentValueNode + sl *valueList + msgField *ast.MessageFieldNode + msgEntry *messageFieldEntry + msgLit *messageFieldList + v ast.ValueNode + il 
ast.IntValueNode + str *stringList + s *ast.StringLiteralNode + i *ast.UintLiteralNode + f *ast.FloatLiteralNode + id *ast.IdentNode + b *ast.RuneNode + err error } const _STRING_LIT = 57346 @@ -91,49 +89,48 @@ const _INT_LIT = 57347 const _FLOAT_LIT = 57348 const _NAME = 57349 const _SYNTAX = 57350 -const _EDITION = 57351 -const _IMPORT = 57352 -const _WEAK = 57353 -const _PUBLIC = 57354 -const _PACKAGE = 57355 -const _OPTION = 57356 -const _TRUE = 57357 -const _FALSE = 57358 -const _INF = 57359 -const _NAN = 57360 -const _REPEATED = 57361 -const _OPTIONAL = 57362 -const _REQUIRED = 57363 -const _DOUBLE = 57364 -const _FLOAT = 57365 -const _INT32 = 57366 -const _INT64 = 57367 -const _UINT32 = 57368 -const _UINT64 = 57369 -const _SINT32 = 57370 -const _SINT64 = 57371 -const _FIXED32 = 57372 -const _FIXED64 = 57373 -const _SFIXED32 = 57374 -const _SFIXED64 = 57375 -const _BOOL = 57376 -const _STRING = 57377 -const _BYTES = 57378 -const _GROUP = 57379 -const _ONEOF = 57380 -const _MAP = 57381 -const _EXTENSIONS = 57382 -const _TO = 57383 -const _MAX = 57384 -const _RESERVED = 57385 -const _ENUM = 57386 -const _MESSAGE = 57387 -const _EXTEND = 57388 -const _SERVICE = 57389 -const _RPC = 57390 -const _STREAM = 57391 -const _RETURNS = 57392 -const _ERROR = 57393 +const _IMPORT = 57351 +const _WEAK = 57352 +const _PUBLIC = 57353 +const _PACKAGE = 57354 +const _OPTION = 57355 +const _TRUE = 57356 +const _FALSE = 57357 +const _INF = 57358 +const _NAN = 57359 +const _REPEATED = 57360 +const _OPTIONAL = 57361 +const _REQUIRED = 57362 +const _DOUBLE = 57363 +const _FLOAT = 57364 +const _INT32 = 57365 +const _INT64 = 57366 +const _UINT32 = 57367 +const _UINT64 = 57368 +const _SINT32 = 57369 +const _SINT64 = 57370 +const _FIXED32 = 57371 +const _FIXED64 = 57372 +const _SFIXED32 = 57373 +const _SFIXED64 = 57374 +const _BOOL = 57375 +const _STRING = 57376 +const _BYTES = 57377 +const _GROUP = 57378 +const _ONEOF = 57379 +const _MAP = 57380 +const _EXTENSIONS = 57381 +const _TO = 57382 +const _MAX = 57383 +const _RESERVED = 57384 +const _ENUM = 57385 +const _MESSAGE = 57386 +const _EXTEND = 57387 +const _SERVICE = 57388 +const _RPC = 57389 +const _STREAM = 57390 +const _RETURNS = 57391 +const _ERROR = 57392 var protoToknames = [...]string{ "$end", @@ -144,7 +141,6 @@ var protoToknames = [...]string{ "_FLOAT_LIT", "_NAME", "_SYNTAX", - "_EDITION", "_IMPORT", "_WEAK", "_PUBLIC", @@ -225,504 +221,477 @@ const protoInitialStackSize = 16 var protoExca = [...]int16{ -1, 0, - 1, 6, - -2, 21, + 1, 4, + -2, 0, -1, 1, 1, -1, -2, 0, -1, 2, 1, 1, - -2, 21, + -2, 0, -1, 3, 1, 2, - -2, 21, - -1, 14, - 1, 7, -2, 0, - -1, 89, - 52, 60, - 61, 60, - 69, 60, - -2, 61, - -1, 101, - 55, 37, - 58, 37, - 62, 37, - 67, 37, - 69, 37, - -2, 34, - -1, 112, - 52, 60, - 61, 60, - 69, 60, - -2, 62, - -1, 118, - 56, 249, + -1, 22, + 1, 3, + -2, 0, + -1, 95, + 55, 185, + -2, 0, + -1, 96, + 55, 173, + -2, 0, + -1, 97, + 55, 202, + -2, 0, + -1, 99, + 55, 210, + -2, 0, + -1, 110, + 55, 54, + -2, 0, + -1, 289, + 55, 52, + 61, 52, -2, 0, - -1, 121, - 55, 37, - 58, 37, - 62, 37, - 67, 37, - 69, 37, - -2, 35, - -1, 140, - 56, 225, + -1, 354, + 61, 54, -2, 0, - -1, 142, - 56, 214, + -1, 370, + 55, 124, -2, 0, - -1, 144, - 56, 250, + -1, 404, + 61, 54, -2, 0, - -1, 198, - 56, 262, + -1, 409, + 61, 54, -2, 0, - -1, 203, - 56, 83, - 62, 83, + -1, 497, + 61, 54, -2, 0, - -1, 214, - 56, 226, + -1, 547, + 55, 185, -2, 0, - -1, 271, - 56, 215, + -1, 551, + 55, 185, -2, 0, - -1, 377, - 56, 263, + -1, 555, + 55, 185, -2, 0, - -1, 464, - 56, 155, + -1, 
573, + 55, 222, -2, 0, - -1, 523, - 69, 144, - -2, 141, - -1, 531, - 56, 156, + -1, 580, + 55, 185, + -2, 0, + -1, 583, + 55, 185, + -2, 0, + -1, 586, + 55, 185, -2, 0, -1, 607, - 67, 52, - -2, 49, - -1, 665, - 69, 144, - -2, 142, - -1, 690, - 67, 52, - -2, 50, - -1, 732, - 56, 273, + 55, 185, -2, 0, - -1, 745, - 56, 274, + -1, 619, + 55, 185, -2, 0, } const protoPrivate = 57344 -const protoLast = 2053 +const protoLast = 2396 var protoAct = [...]int16{ - 140, 7, 746, 7, 7, 100, 139, 18, 440, 394, - 604, 436, 607, 439, 502, 39, 524, 596, 95, 532, - 496, 127, 437, 422, 520, 200, 32, 34, 523, 233, - 421, 40, 90, 93, 94, 405, 102, 106, 36, 96, - 109, 435, 272, 85, 378, 458, 326, 404, 21, 20, - 19, 107, 108, 149, 215, 202, 145, 98, 101, 86, - 663, 89, 449, 390, 134, 706, 703, 598, 707, 513, - 9, 652, 395, 510, 465, 9, 511, 396, 717, 651, - 507, 459, 459, 460, 452, 459, 456, 9, 506, 459, - 459, 462, 739, 90, 693, 451, 655, 598, 459, 9, - 680, 653, 459, 687, 508, 459, 423, 459, 124, 125, - 453, 115, 459, 459, 459, 134, 126, 133, 142, 138, - 131, 129, 497, 395, 198, 130, 423, 134, 199, 448, - 416, 388, 389, 711, 489, 395, 505, 119, 9, 387, - 207, 666, 488, 593, 9, 468, 472, 113, 222, 112, - 273, 386, 470, 462, 587, 9, 373, 120, 121, 385, - 110, 40, 110, 691, 674, 428, 424, 414, 374, 122, - 114, 375, 279, 760, 758, 754, 750, 104, 744, 743, - 741, 733, 729, 721, 695, 9, 424, 716, 753, 219, - 217, 218, 668, 383, 227, 376, 322, 270, 213, 728, - 719, 323, 713, 658, 464, 123, 379, 118, 117, 207, - 116, 5, 6, 104, 399, 9, 598, 104, 670, 324, - 31, 702, 222, 667, 493, 490, 9, 492, 430, 392, - 419, 111, 13, 12, 403, 599, 407, 408, 413, 528, - 463, 40, 381, 748, 726, 8, 412, 724, 397, 659, - 33, 415, 15, 656, 26, 26, 9, 37, 38, 384, - 210, 209, 105, 219, 217, 218, 103, 35, 227, 400, - 595, 417, 211, 212, 402, 23, 529, 594, 104, 273, - 409, 582, 406, 24, 413, 516, 25, 26, 382, 495, - 491, 4, 412, 33, 10, 11, 731, 745, 380, 197, - 377, 279, 475, 476, 477, 478, 479, 480, 481, 482, - 483, 484, 485, 486, 418, 22, 143, 28, 27, 29, - 30, 144, 274, 425, 141, 271, 220, 420, 275, 225, - 411, 426, 427, 410, 40, 530, 531, 214, 231, 224, - 221, 535, 147, 223, 429, 146, 534, 216, 204, 203, - 447, 499, 601, 538, 150, 228, 605, 99, 602, 327, - 540, 154, 234, 277, 606, 329, 542, 156, 237, 474, - 391, 393, 438, 132, 128, 87, 88, 432, 206, 91, - 431, 521, 518, 533, 522, 379, 17, 16, 434, 14, - 3, 2, 1, 0, 0, 442, 442, 0, 0, 0, - 0, 207, 0, 0, 457, 0, 0, 454, 455, 466, - 0, 469, 471, 0, 0, 0, 0, 0, 0, 450, - 473, 445, 433, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 444, 0, 494, 0, 0, 0, 0, 0, - 0, 0, 0, 487, 0, 0, 0, 498, 0, 442, - 461, 0, 0, 0, 467, 503, 514, 0, 0, 517, - 0, 525, 526, 0, 0, 90, 504, 0, 583, 584, - 0, 0, 0, 0, 0, 0, 0, 0, 586, 0, - 0, 0, 0, 0, 585, 0, 0, 0, 588, 0, - 591, 0, 509, 0, 0, 0, 0, 0, 527, 0, - 512, 515, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 592, 0, 660, 661, 657, 590, - 0, 0, 0, 0, 0, 0, 0, 90, 0, 0, - 654, 0, 0, 589, 0, 0, 0, 0, 0, 0, - 0, 597, 0, 90, 672, 673, 664, 40, 0, 0, - 665, 669, 0, 0, 671, 0, 0, 675, 0, 0, - 0, 0, 662, 0, 0, 0, 0, 0, 0, 0, + 31, 118, 117, 125, 8, 106, 8, 8, 496, 494, + 593, 423, 366, 406, 399, 430, 81, 325, 77, 79, + 80, 82, 84, 327, 316, 107, 8, 310, 411, 408, + 105, 280, 124, 228, 139, 177, 407, 413, 30, 425, + 544, 26, 351, 110, 605, 607, 569, 567, 412, 565, + 85, 354, 555, 87, 88, 89, 352, 75, 367, 367, + 553, 367, 367, 551, 367, 367, 549, 547, 545, 537, + 536, 531, 524, 514, 512, 367, 367, 481, 500, 499, + 367, 367, 365, 367, 574, 414, 367, 367, 110, 367, + 535, 400, 367, 109, 77, 
94, 409, 367, 328, 116, + 178, 104, 405, 281, 110, 98, 328, 346, 319, 346, + 576, 292, 409, 103, 346, 345, 577, 345, 543, 301, + 346, 564, 345, 541, 346, 504, 598, 344, 345, 503, + 230, 185, 345, 115, 485, 309, 346, 313, 314, 287, + 597, 538, 515, 350, 345, 501, 480, 391, 303, 305, + 307, 349, 374, 29, 328, 368, 523, 329, 338, 320, + 343, 93, 434, 318, 317, 329, 315, 440, 441, 442, + 443, 444, 445, 17, 446, 447, 448, 449, 91, 619, + 178, 450, 451, 452, 453, 454, 455, 456, 457, 458, + 459, 460, 461, 462, 463, 464, 436, 465, 466, 467, + 468, 469, 470, 471, 472, 473, 474, 475, 476, 477, + 317, 185, 322, 329, 14, 517, 572, 586, 573, 438, + 4, 15, 284, 583, 16, 17, 580, 14, 596, 284, + 596, 370, 99, 17, 15, 97, 336, 16, 17, 17, + 17, 17, 96, 95, 617, 611, 591, 590, 589, 333, + 334, 335, 584, 581, 578, 19, 18, 20, 21, 337, + 230, 331, 571, 563, 13, 557, 527, 285, 19, 18, + 20, 21, 283, 519, 285, 339, 422, 13, 595, 283, + 595, 609, 390, 281, 373, 372, 342, 341, 332, 323, + 292, 302, 286, 102, 101, 100, 90, 355, 86, 25, + 561, 560, 516, 488, 358, 359, 360, 361, 362, 363, + 487, 340, 486, 420, 419, 356, 418, 417, 347, 416, + 415, 353, 397, 371, 364, 324, 92, 24, 490, 427, + 392, 369, 29, 5, 394, 395, 396, 23, 27, 28, + 122, 11, 388, 11, 11, 588, 120, 10, 389, 10, + 10, 587, 109, 299, 297, 292, 23, 355, 355, 530, + 398, 29, 312, 11, 298, 428, 296, 294, 77, 10, + 529, 528, 121, 9, 431, 9, 9, 295, 511, 510, + 509, 403, 401, 376, 377, 378, 379, 380, 381, 382, + 383, 384, 385, 386, 387, 9, 426, 508, 507, 479, + 317, 506, 482, 109, 489, 292, 478, 312, 421, 483, + 292, 393, 29, 282, 279, 3, 484, 492, 22, 12, + 227, 179, 176, 123, 326, 311, 180, 128, 429, 137, + 127, 433, 126, 431, 513, 505, 520, 521, 229, 432, + 119, 289, 434, 290, 435, 518, 235, 440, 441, 442, + 443, 444, 445, 17, 446, 447, 448, 449, 134, 437, + 522, 450, 451, 452, 453, 454, 455, 456, 457, 458, + 459, 460, 461, 462, 463, 464, 436, 465, 466, 467, + 468, 469, 470, 471, 472, 473, 474, 475, 476, 477, + 236, 525, 140, 183, 439, 238, 143, 526, 292, 438, + 375, 108, 532, 291, 76, 533, 592, 355, 534, 424, + 7, 6, 2, 1, 0, 542, 77, 109, 0, 546, + 548, 550, 552, 554, 556, 559, 539, 558, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 109, + 0, 566, 568, 570, 0, 562, 0, 0, 575, 0, + 579, 0, 0, 0, 582, 0, 0, 0, 585, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 676, 0, 0, 0, 0, 0, 0, - 679, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 678, 0, 0, 0, - 0, 0, 0, 682, 0, 684, 689, 0, 690, 686, - 685, 0, 0, 0, 0, 0, 0, 0, 677, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 681, 683, 0, 688, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 442, 0, - 697, 0, 0, 699, 503, 696, 0, 692, 0, 701, - 0, 0, 0, 133, 0, 504, 131, 129, 710, 0, - 709, 130, 0, 0, 0, 0, 715, 712, 0, 700, - 704, 0, 0, 0, 0, 0, 720, 0, 0, 722, - 718, 714, 694, 0, 0, 698, 0, 0, 133, 0, - 0, 131, 129, 0, 727, 0, 130, 732, 705, 708, - 730, 0, 735, 725, 723, 0, 734, 0, 0, 0, - 0, 0, 0, 0, 749, 742, 0, 0, 0, 0, - 747, 736, 737, 0, 0, 755, 752, 0, 756, 0, - 0, 757, 0, 747, 0, 0, 751, 0, 0, 0, - 759, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 738, 501, 740, 33, 137, 135, 41, 42, 43, 44, - 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, - 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, - 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, - 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, - 0, 0, 0, 0, 134, 0, 0, 0, 0, 0, - 0, 0, 395, 0, 441, 0, 0, 0, 500, 33, - 137, 135, 41, 42, 43, 44, 45, 46, 47, 48, - 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, - 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, - 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, - 79, 80, 81, 82, 83, 84, 0, 0, 0, 0, - 134, 0, 
0, 0, 0, 0, 0, 0, 395, 0, - 441, 0, 0, 443, 33, 137, 135, 41, 42, 43, + 0, 0, 0, 0, 0, 0, 0, 594, 0, 0, + 0, 301, 0, 600, 301, 0, 602, 301, 0, 604, + 0, 0, 0, 0, 0, 0, 594, 0, 109, 109, + 606, 608, 301, 610, 301, 0, 301, 612, 613, 0, + 618, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 301, 0, 621, 301, 495, 0, 29, 114, 111, 32, + 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, + 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, + 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, + 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, + 73, 74, 0, 0, 0, 0, 110, 0, 0, 0, + 0, 0, 0, 0, 497, 113, 112, 0, 0, 0, + 493, 29, 114, 111, 32, 33, 34, 35, 36, 37, + 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, + 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, + 68, 69, 70, 71, 72, 73, 74, 0, 0, 0, + 0, 110, 0, 0, 0, 0, 0, 0, 0, 404, + 113, 112, 0, 0, 402, 29, 114, 111, 32, 33, + 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, - 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, - 84, 0, 0, 0, 0, 134, 0, 0, 0, 0, - 0, 0, 0, 395, 0, 441, 41, 42, 43, 44, + 74, 0, 0, 0, 0, 110, 0, 0, 0, 0, + 0, 0, 0, 497, 113, 112, 29, 114, 111, 32, + 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, + 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, + 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, + 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, + 73, 74, 0, 0, 0, 0, 110, 0, 0, 0, + 0, 0, 540, 0, 0, 113, 112, 32, 33, 34, + 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, - 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 205, 92, 0, 0, 519, 41, + 0, 0, 0, 502, 0, 0, 0, 293, 32, 33, + 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, + 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, + 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, + 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, + 74, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 498, 0, 0, 0, 293, 32, + 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, + 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, + 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, + 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, + 73, 74, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 410, 0, 0, 0, 293, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, - 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, - 82, 83, 84, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 446, 0, 205, 0, 0, 0, - 208, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 72, 73, 74, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 348, 0, 0, 0, + 293, 32, 33, 34, 35, 36, 37, 38, 39, 40, + 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, + 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, + 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, + 71, 72, 73, 74, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 288, 0, 0, + 0, 293, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, - 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, - 80, 81, 82, 83, 84, 0, 0, 0, 0, 0, - 201, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 208, 33, 137, 135, 41, 42, 43, 44, - 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, - 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, - 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, - 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, - 0, 0, 0, 0, 134, 0, 0, 0, 0, 0, - 205, 0, 0, 0, 136, 41, 42, 43, 44, 45, - 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, - 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, - 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, - 
76, 77, 78, 79, 80, 81, 82, 83, 84, 0, + 70, 71, 72, 73, 74, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 33, 423, 208, 41, 42, 43, + 0, 0, 293, 32, 33, 34, 35, 36, 37, 38, + 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, + 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, + 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, + 69, 70, 71, 72, 73, 74, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 130, 0, 0, + 0, 78, 144, 145, 146, 147, 148, 149, 17, 150, + 151, 152, 153, 133, 132, 131, 154, 155, 156, 157, + 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, + 168, 169, 136, 142, 135, 170, 171, 138, 19, 18, + 20, 172, 173, 174, 175, 0, 0, 129, 0, 0, + 622, 130, 0, 0, 141, 0, 144, 145, 146, 147, + 148, 149, 17, 150, 151, 152, 153, 133, 132, 131, + 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, + 164, 165, 166, 167, 168, 169, 136, 142, 135, 170, + 171, 138, 19, 18, 20, 172, 173, 174, 175, 0, + 0, 129, 0, 0, 620, 130, 0, 0, 141, 0, + 144, 145, 146, 147, 148, 149, 17, 150, 151, 152, + 153, 133, 132, 131, 154, 155, 156, 157, 158, 159, + 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, + 136, 142, 135, 170, 171, 138, 19, 18, 20, 172, + 173, 174, 175, 0, 0, 129, 0, 0, 616, 130, + 0, 0, 141, 0, 144, 145, 146, 147, 148, 149, + 17, 150, 151, 152, 153, 133, 132, 131, 154, 155, + 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, + 166, 167, 168, 169, 136, 142, 135, 170, 171, 138, + 19, 18, 20, 172, 173, 174, 175, 0, 0, 129, + 0, 0, 615, 130, 0, 0, 141, 0, 144, 145, + 146, 147, 148, 149, 17, 150, 151, 152, 153, 133, + 132, 131, 154, 155, 156, 157, 158, 159, 160, 161, + 162, 163, 164, 165, 166, 167, 168, 169, 136, 142, + 135, 170, 171, 138, 19, 18, 20, 172, 173, 174, + 175, 0, 0, 129, 0, 0, 614, 130, 0, 0, + 141, 0, 144, 145, 146, 147, 148, 149, 17, 150, + 151, 152, 153, 133, 132, 131, 154, 155, 156, 157, + 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, + 168, 169, 136, 142, 135, 170, 171, 138, 19, 18, + 20, 172, 173, 174, 175, 0, 0, 129, 0, 0, + 603, 130, 0, 0, 141, 0, 144, 145, 146, 147, + 148, 149, 17, 150, 151, 152, 153, 133, 132, 131, + 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, + 164, 165, 166, 167, 168, 169, 136, 142, 135, 170, + 171, 138, 19, 18, 20, 172, 173, 174, 175, 0, + 0, 129, 0, 0, 601, 130, 0, 0, 141, 0, + 144, 145, 146, 147, 148, 149, 17, 150, 151, 152, + 153, 133, 132, 131, 154, 155, 156, 157, 158, 159, + 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, + 136, 142, 135, 170, 171, 138, 19, 18, 20, 172, + 173, 174, 175, 0, 0, 129, 0, 0, 599, 130, + 0, 0, 141, 0, 144, 145, 146, 147, 148, 149, + 17, 150, 151, 152, 153, 133, 132, 131, 154, 155, + 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, + 166, 167, 168, 169, 136, 142, 135, 170, 171, 138, + 19, 18, 20, 172, 173, 174, 175, 0, 0, 129, + 0, 0, 300, 130, 0, 0, 141, 0, 144, 145, + 146, 147, 148, 149, 17, 150, 151, 152, 153, 133, + 132, 131, 154, 155, 156, 157, 158, 159, 160, 161, + 162, 163, 164, 165, 166, 167, 168, 169, 136, 142, + 135, 170, 171, 138, 19, 18, 20, 172, 173, 174, + 175, 0, 0, 129, 0, 0, 231, 0, 0, 0, + 141, 239, 240, 241, 242, 243, 244, 245, 246, 247, + 248, 249, 234, 233, 232, 250, 251, 252, 253, 254, + 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, + 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, + 275, 276, 277, 278, 0, 0, 0, 0, 0, 330, + 231, 0, 0, 237, 0, 239, 240, 241, 242, 243, + 244, 245, 246, 247, 248, 249, 234, 233, 232, 250, + 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, + 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, + 271, 272, 273, 274, 275, 276, 277, 278, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 237, 32, 33, + 34, 
35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, - 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, - 84, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 424, 41, 42, 43, 44, - 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, - 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, - 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, - 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 92, 41, 42, 43, 44, - 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, - 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, - 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, - 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 97, - 608, 609, 610, 611, 612, 613, 614, 615, 616, 617, - 618, 619, 620, 621, 622, 623, 624, 625, 626, 627, - 628, 629, 630, 631, 632, 633, 634, 635, 636, 637, - 638, 639, 640, 641, 642, 643, 644, 645, 646, 647, - 648, 649, 600, 650, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 603, 330, 331, 332, 333, 334, 335, - 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, - 346, 347, 348, 349, 350, 351, 352, 353, 354, 355, - 356, 357, 358, 359, 401, 360, 361, 362, 363, 364, - 365, 366, 367, 368, 369, 370, 371, 372, 0, 0, - 0, 0, 0, 226, 0, 0, 0, 328, 238, 239, - 240, 241, 242, 243, 244, 26, 245, 246, 247, 248, - 153, 152, 151, 249, 250, 251, 252, 253, 254, 255, - 256, 257, 258, 259, 260, 261, 262, 263, 0, 230, - 236, 229, 264, 265, 232, 28, 27, 29, 266, 267, - 268, 269, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 235, 330, 331, 332, 333, 334, 335, 336, 337, - 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, - 348, 349, 350, 351, 352, 353, 354, 355, 356, 357, - 358, 359, 325, 360, 361, 362, 363, 364, 365, 366, - 367, 368, 369, 370, 371, 372, 0, 0, 0, 0, - 0, 148, 0, 0, 0, 328, 157, 158, 159, 160, - 161, 162, 163, 164, 165, 166, 167, 168, 153, 152, - 151, 169, 170, 171, 172, 173, 174, 175, 176, 177, - 178, 179, 180, 181, 182, 183, 0, 184, 185, 186, - 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, - 0, 0, 0, 0, 0, 536, 0, 0, 0, 155, - 543, 544, 545, 546, 547, 548, 549, 537, 550, 551, - 552, 553, 0, 0, 0, 554, 555, 556, 557, 558, - 559, 560, 561, 562, 563, 564, 565, 566, 567, 568, - 539, 569, 570, 571, 572, 573, 574, 575, 576, 577, - 578, 579, 580, 581, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 541, 210, 209, 41, 42, 43, 44, + 74, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 83, 32, 33, 34, 35, 36, 37, 38, 39, 40, + 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, + 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, + 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, + 71, 72, 491, 74, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 83, 32, 33, 34, 35, 36, 37, + 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, + 58, 59, 60, 308, 62, 63, 64, 65, 66, 67, + 68, 69, 70, 71, 72, 73, 74, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 83, 32, 33, 34, + 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, - 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, + 55, 56, 57, 58, 59, 60, 306, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, - 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, - 33, 406, 0, 41, 42, 43, 44, 45, 46, 47, - 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, - 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, - 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, - 78, 79, 80, 81, 82, 83, 84, 276, 0, 0, - 0, 0, 280, 281, 282, 283, 284, 285, 286, 26, - 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, - 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, - 307, 308, 309, 310, 311, 312, 313, 314, 278, 315, - 316, 317, 318, 319, 320, 321, 398, 0, 0, 0, - 0, 41, 42, 43, 44, 45, 46, 47, 48, 49, - 50, 51, 52, 53, 54, 
55, 56, 57, 58, 59, - 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, - 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, - 80, 81, 82, 83, 84, 41, 42, 43, 44, 45, - 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, - 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, - 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, - 76, 77, 78, 79, 80, 81, 82, 83, 84, 608, - 609, 610, 611, 612, 613, 614, 615, 616, 617, 618, - 619, 620, 621, 622, 623, 624, 625, 626, 627, 628, - 629, 630, 631, 632, 633, 634, 635, 636, 637, 638, - 639, 640, 641, 642, 643, 644, 645, 646, 647, 648, - 649, 0, 650, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 83, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, + 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, + 52, 53, 54, 55, 56, 57, 58, 59, 60, 304, + 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, + 72, 73, 74, 0, 0, 0, 0, 0, 182, 0, + 0, 0, 83, 186, 187, 188, 189, 190, 191, 17, + 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, + 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, + 212, 213, 214, 215, 216, 217, 218, 219, 184, 220, + 221, 222, 223, 224, 225, 226, 0, 182, 181, 0, + 0, 321, 186, 187, 188, 189, 190, 191, 17, 192, + 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, + 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, + 213, 214, 215, 216, 217, 218, 219, 184, 220, 221, + 222, 223, 224, 225, 226, 357, 0, 181, 0, 0, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, + 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, + 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, + 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, + 72, 73, 74, 32, 33, 34, 35, 36, 37, 38, + 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, + 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, + 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, + 69, 70, 71, 72, 73, 74, } var protoPact = [...]int16{ - 203, -1000, 162, 162, -1000, 181, 180, 273, 167, -1000, - -1000, -1000, 289, 289, 273, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, 246, 1958, 1329, 1958, 1958, 1389, - 1958, -1000, 213, -1000, 209, -1000, 173, 289, 289, 102, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, 179, -1000, 1329, 110, -1000, - -1000, -1000, 1389, 155, 153, 152, -1000, 1958, -1000, 1958, - 109, -1000, 150, -1000, -1000, -1000, -1000, 173, 173, -1000, - 1958, 1149, -1000, -1000, -1000, 52, 162, 162, 1659, -1000, - -1000, -1000, -1000, 162, -1000, -1000, -1000, 162, -1000, -1000, - 274, -1000, -1000, -1000, 1084, -1000, 255, -1000, -1000, 142, - 1551, 141, 1865, 140, 1659, -1000, -1000, -1000, 166, 1605, - 1958, -1000, -1000, -1000, 108, 1958, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, 139, 240, -1000, - 137, -1000, -1000, 1208, 98, 78, 9, -1000, 1914, -1000, - -1000, -1000, -1000, 162, 1551, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, 1497, 1958, 277, - 1958, 1958, 1816, -1000, 107, 1958, 67, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - 162, 1865, -1000, -1000, -1000, -1000, -1000, 178, 1270, -1000, + 
212, -1000, 225, 225, 276, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, 247, 328, 2346, 1236, 2346, 2346, + 1941, 2346, 225, -1000, 408, -1000, 246, 408, 408, 408, + 244, 119, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, 275, 102, -1000, 1941, 189, + 188, 181, -1000, 2346, 178, 243, -1000, 242, 241, -1000, + -1000, 2346, 812, 1236, 33, 1781, 2255, 1888, -1000, 227, + -1000, -1000, -1000, -1000, 240, -1000, -1000, -1000, -1000, -1000, + 1175, -1000, 361, 348, -1000, -1000, -1000, 1727, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, 162, -1000, -1000, 1958, 1958, 105, 1958, -1000, + 239, 2153, 2100, 2047, 2346, 402, 2346, 2346, 357, -1000, + -1000, 2346, 46, 100, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, 2206, -1000, -1000, -1000, + -1000, -1000, 237, 274, 149, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, 176, 1958, 100, 162, 240, -1000, -1000, - -1000, -1000, 1958, -1000, -1000, -1000, -1000, -1000, -1000, 835, - 835, -1000, -1000, -1000, -1000, 1022, 60, 26, 41, -1000, - -1000, 1958, 1958, 34, 30, -1000, 199, 149, 22, 92, - 91, 85, 274, -1000, 1958, 100, 278, -1000, -1000, 121, - 81, -1000, 184, -1000, 285, -1000, 175, 172, 1958, 100, - 284, -1000, -1000, -1000, 56, -1000, -1000, -1000, -1000, 274, - -1000, 1769, -1000, 769, -1000, 74, -1000, 19, -1000, 35, - -1000, -1000, 1958, -1000, 21, 17, 280, -1000, 162, 959, - 162, 162, 277, 234, 1713, 276, -1000, 162, 162, -1000, - 289, -1000, 1958, -1000, 93, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, 46, 121, 162, - 101, -1000, 272, 265, -1000, 44, 185, 1443, -1000, 10, - -1000, 32, -1000, -1000, -1000, -1000, -1000, 72, -1000, 27, - 248, 162, 148, 244, -1000, 162, 46, -1000, -9, -1000, - -1000, 1329, 80, -1000, 171, -1000, -1000, -1000, -1000, -1000, - 136, 1713, -1000, -1000, -1000, -1000, 165, 1329, 1958, 1958, - 104, 1958, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, 1834, -1000, -1000, + -1000, 236, 2153, 2100, 2047, 2346, -1000, 2346, 99, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, 46, -1000, -1000, 274, -1000, 1389, -1000, 162, - -1000, -1000, -1000, -1000, 45, 44, -1000, 163, -1000, 56, - 1389, 36, -1000, 1958, -1000, 2002, 103, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 220, + -1000, -1000, -1000, -1000, 235, 2346, -1000, 105, 72, 1114, + 91, -11, -1000, 2303, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, 2346, 2346, 2346, 2346, 2346, 2346, 273, + 30, 95, 291, 177, 272, 233, 232, 92, -1000, 360, + 2346, -1000, -1000, -1000, 101, 230, 87, 290, -1000, 406, + -1000, 
-1000, -1000, 2346, 2346, 2346, 271, -1000, 2346, -1000, + -1000, -1000, 26, -1000, -1000, -1000, -1000, -1000, 84, -1000, + -1000, 687, 34, -1000, 1053, -1000, -20, 17, 269, 268, + 266, 265, 263, 262, 403, -1000, 224, 1236, 402, 324, + 440, 401, -1000, -1000, 408, 86, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 25, + -1000, 101, 93, -1000, 261, 259, 252, 399, -1000, 279, + 1994, -1000, 622, -1000, 992, -1000, 11, 10, 85, 931, + 68, 64, -1000, 2346, -1000, 396, 393, 392, 375, 374, + 373, 22, -1000, 5, 82, 251, -1000, -1000, -1000, 160, + -1000, -1000, -1000, -1000, 221, 2346, 2346, -1000, 2346, 97, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, 900, -1000, -1000, -1000, 39, 128, 162, 46, - -1000, -1000, 162, -1000, -1000, -1000, -1000, 1149, 162, -1000, - -1000, 169, 14, 13, 1958, 100, -1000, 162, 71, -1000, - 162, 147, -1000, 163, -1000, 132, 11, -1000, -1000, -1000, - -1000, -1000, -1000, 162, 145, 162, 127, -1000, 162, -1000, - -1000, -1000, 1149, 242, -1000, 163, 239, 162, 144, -1000, - -1000, -1000, 126, 162, -1000, -1000, 162, -1000, 125, 162, - -1000, 162, -1000, 163, 44, -1000, 37, 124, 162, -1000, - 123, 122, 241, 162, 120, -1000, -1000, -1000, 163, 162, - 133, -1000, 119, -1000, 162, 241, -1000, -1000, -1000, -1000, - 162, -1000, 118, 162, -1000, -1000, -1000, -1000, -1000, 117, - -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 20, -1000, + 1941, -1000, 214, -1000, -1000, -1000, 366, 365, 354, 19, + 26, 1941, 24, -1000, 2, 1, 81, 870, 62, -1000, + -1000, 50, 57, -1000, -1000, -28, 16, 13, 14, 9, + 8, -2, -1000, 213, -1000, 1236, 812, -1000, -1000, -1000, + 250, 249, -1000, 2346, -1000, 211, 60, -1000, -3, -5, + -6, -1000, 210, 164, 18, -1000, -1000, -1000, 751, 49, + 55, -1000, -1000, -1000, -1000, -1000, 202, 1781, 172, -1000, + 201, 1781, 169, -1000, 200, 1781, 163, -1000, -1000, -1000, + 346, 340, -1000, -1000, -1000, -1000, 196, -1000, 195, -1000, + 194, -1000, -1000, 228, -1000, -1000, 80, 66, -1000, 1673, + 1781, -1000, 1619, 1781, -1000, 1565, 1781, -8, -9, -1000, + -1000, -1000, 226, -1000, -1000, -1000, 193, 751, 751, -1000, + 1511, -1000, 1457, -1000, 1403, -1000, 192, 1781, 125, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 1349, 1781, + -1000, 1295, -1000, } var protoPgo = [...]int16{ - 0, 392, 391, 390, 291, 252, 389, 387, 386, 384, - 383, 7, 28, 24, 382, 381, 379, 378, 376, 61, - 59, 16, 375, 45, 41, 21, 374, 11, 9, 22, - 8, 373, 372, 14, 371, 370, 23, 5, 369, 368, - 367, 366, 365, 364, 363, 53, 58, 57, 12, 10, - 15, 362, 361, 360, 359, 358, 39, 357, 356, 18, - 355, 354, 353, 46, 352, 351, 350, 349, 55, 25, - 348, 347, 346, 345, 343, 342, 341, 340, 339, 338, - 50, 54, 337, 6, 19, 336, 335, 333, 330, 329, - 328, 29, 35, 30, 47, 327, 326, 49, 42, 325, - 324, 322, 48, 56, 321, 316, 13, 315, 44, 300, - 299, 298, 2, 297, 296, 20, 17, 0, 245, + 0, 513, 512, 333, 415, 511, 510, 3, 509, 11, + 10, 506, 504, 503, 39, 12, 8, 30, 5, 29, + 501, 23, 0, 500, 496, 495, 494, 493, 21, 492, + 490, 459, 16, 458, 446, 444, 9, 13, 443, 441, + 28, 440, 439, 438, 432, 32, 431, 430, 429, 372, + 1, 2, 15, 428, 24, 427, 426, 34, 425, 424, + 27, 17, 423, 346, 35, 422, 421, 340, 33, 420, + 25, 419, 31, 414, 413, 14, } var protoR1 = [...]int8{ - 0, 1, 1, 1, 1, 1, 1, 4, 6, 6, - 5, 5, 5, 5, 5, 5, 5, 5, 118, 118, - 117, 117, 116, 116, 2, 
3, 7, 7, 7, 8, - 50, 50, 56, 56, 57, 57, 47, 47, 46, 51, - 51, 52, 52, 53, 53, 54, 54, 55, 55, 58, - 58, 49, 49, 48, 10, 11, 18, 18, 19, 20, - 20, 22, 22, 21, 21, 16, 25, 25, 26, 26, - 26, 26, 30, 30, 30, 30, 31, 31, 106, 106, - 28, 28, 69, 68, 68, 67, 67, 67, 67, 67, - 67, 70, 70, 70, 17, 17, 17, 17, 24, 24, - 24, 27, 27, 27, 27, 35, 35, 29, 29, 29, - 32, 32, 32, 65, 65, 33, 33, 34, 34, 34, - 66, 66, 59, 59, 60, 60, 61, 61, 62, 62, - 63, 63, 64, 64, 45, 45, 45, 23, 23, 14, - 14, 15, 15, 13, 13, 12, 9, 9, 75, 75, - 77, 77, 77, 77, 74, 86, 86, 85, 85, 84, - 84, 84, 84, 84, 72, 72, 72, 72, 76, 76, - 76, 76, 78, 78, 78, 78, 79, 38, 38, 38, - 38, 38, 38, 38, 38, 38, 38, 38, 38, 96, - 96, 94, 94, 92, 92, 92, 95, 95, 93, 93, - 93, 36, 36, 89, 89, 90, 90, 91, 91, 87, - 87, 88, 88, 97, 100, 100, 99, 99, 98, 98, - 98, 98, 101, 101, 80, 83, 83, 82, 82, 81, - 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, - 71, 71, 71, 71, 71, 71, 71, 71, 102, 105, - 105, 104, 104, 103, 103, 103, 103, 73, 73, 73, - 73, 107, 110, 110, 109, 109, 108, 108, 108, 111, - 111, 115, 115, 114, 114, 113, 113, 112, 112, 39, - 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, - 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, - 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, - 39, 39, 40, 40, 40, 40, 40, 40, 40, 40, - 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, - 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, - 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, - 40, 40, 44, 44, 44, 44, 44, 44, 44, 44, - 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, - 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, - 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, - 44, 44, 44, 44, 41, 41, 41, 41, 41, 41, - 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, - 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, - 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, - 41, 41, 41, 42, 42, 42, 42, 42, 42, 42, - 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, - 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, - 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, - 42, 42, 42, 42, 42, 42, 43, 43, 43, 43, - 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, - 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, - 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, - 43, 43, 43, 43, 43, 43, 43, 43, 43, 37, - 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, - 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, - 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, - 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, - 37, 37, 37, + 0, 1, 1, 1, 1, 4, 4, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 2, 5, 5, + 5, 6, 28, 28, 29, 29, 30, 30, 31, 31, + 7, 14, 14, 12, 12, 16, 16, 17, 17, 17, + 20, 20, 20, 20, 20, 20, 20, 20, 70, 70, + 18, 18, 40, 40, 40, 39, 39, 39, 39, 39, + 39, 38, 38, 38, 38, 38, 38, 38, 38, 38, + 38, 38, 13, 13, 13, 13, 37, 37, 19, 19, + 19, 36, 36, 36, 36, 36, 36, 32, 32, 33, + 33, 34, 34, 35, 35, 41, 41, 41, 41, 41, + 41, 41, 41, 43, 43, 43, 43, 43, 43, 43, + 43, 15, 9, 9, 8, 45, 45, 45, 45, 45, + 45, 44, 53, 53, 53, 52, 52, 52, 52, 52, + 42, 42, 46, 46, 47, 47, 48, 23, 23, 23, + 23, 23, 23, 23, 23, 23, 23, 23, 23, 62, + 62, 60, 60, 58, 58, 58, 61, 61, 59, 59, + 59, 21, 21, 55, 55, 56, 56, 57, 54, 54, + 63, 65, 65, 65, 64, 64, 64, 64, 64, 64, + 66, 66, 49, 51, 51, 51, 50, 50, 50, 50, + 50, 50, 50, 50, 50, 50, 50, 50, 50, 67, + 69, 69, 69, 68, 68, 68, 68, 71, 73, 73, + 73, 72, 72, 72, 72, 72, 74, 74, 75, 75, + 11, 11, 11, 10, 10, 10, 10, 24, 24, 24, + 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, + 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, + 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, + 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, + 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, + 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, + 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, + 27, 27, 27, 27, 27, 27, 27, 27, 
27, 27, + 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, + 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, + 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, + 27, 26, 26, 26, 26, 26, 26, 26, 26, 26, + 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, + 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, + 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, + 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, + 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, + 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, + 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, + 22, 22, 22, } var protoR2 = [...]int8{ - 0, 1, 1, 1, 2, 2, 0, 2, 2, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, - 1, 0, 1, 0, 4, 4, 3, 4, 4, 3, - 1, 3, 1, 2, 1, 2, 1, 1, 2, 1, - 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, - 2, 1, 1, 2, 5, 5, 1, 1, 2, 1, - 1, 1, 2, 1, 2, 3, 1, 1, 1, 1, - 1, 1, 1, 2, 1, 2, 2, 2, 1, 2, - 3, 2, 1, 1, 2, 1, 2, 2, 2, 2, - 1, 3, 2, 3, 1, 3, 5, 3, 1, 1, - 1, 1, 1, 2, 1, 1, 1, 1, 3, 2, - 3, 2, 3, 1, 3, 1, 1, 3, 2, 3, - 1, 3, 1, 2, 1, 2, 1, 2, 1, 2, - 1, 2, 1, 2, 1, 1, 1, 3, 2, 1, - 2, 1, 2, 1, 1, 2, 3, 1, 8, 9, - 9, 10, 7, 8, 6, 0, 1, 2, 1, 1, - 1, 1, 2, 1, 5, 6, 3, 4, 7, 8, - 5, 6, 5, 6, 3, 4, 6, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 4, + 0, 1, 1, 2, 0, 2, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 2, 1, 4, 3, 4, + 4, 3, 1, 3, 1, 3, 1, 3, 1, 3, + 5, 1, 3, 1, 3, 1, 1, 1, 1, 1, + 1, 2, 2, 2, 2, 1, 2, 2, 1, 2, + 3, 3, 1, 2, 0, 1, 2, 2, 2, 2, + 1, 3, 3, 4, 4, 5, 5, 4, 3, 2, + 5, 4, 1, 3, 5, 3, 1, 3, 1, 3, + 3, 1, 3, 3, 5, 3, 5, 1, 2, 1, + 2, 1, 2, 1, 2, 6, 6, 6, 7, 7, + 7, 5, 6, 6, 6, 6, 7, 7, 7, 5, + 6, 3, 1, 3, 3, 8, 8, 8, 9, 9, + 9, 5, 2, 1, 0, 1, 1, 1, 2, 1, + 5, 6, 7, 8, 5, 6, 6, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 4, 1, 3, 1, 3, 3, 1, 3, 1, 3, - 3, 1, 2, 4, 1, 4, 1, 3, 3, 1, - 3, 1, 3, 6, 1, 2, 2, 1, 1, 1, - 1, 1, 4, 5, 6, 1, 2, 2, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 6, 7, 5, 6, 4, 5, 3, 4, 6, 0, - 1, 2, 1, 1, 1, 2, 1, 6, 7, 5, - 6, 6, 1, 2, 2, 1, 1, 1, 1, 6, - 9, 4, 3, 1, 2, 2, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 3, 1, 2, 3, 1, 3, 1, 3, 1, 3, + 5, 2, 1, 0, 1, 1, 1, 1, 2, 1, + 4, 5, 5, 2, 1, 0, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 2, 1, 5, + 2, 1, 0, 1, 1, 2, 1, 5, 2, 1, + 0, 1, 1, 1, 2, 1, 6, 8, 4, 3, + 2, 1, 0, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, @@ -746,179 +715,151 @@ var protoR2 = [...]int8{ } var protoChk = [...]int16{ - -1000, -1, -2, -3, -4, 8, 9, -117, -118, 53, - -4, -4, 52, 52, -6, -5, -7, -8, -11, -80, - -97, -102, -107, 2, 10, 13, 14, 45, 44, 46, - 47, 53, -106, 4, -106, -5, -106, 11, 12, -50, - -37, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, - 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, - 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, - 46, 47, 48, 49, 50, -21, -20, -22, -18, -19, - -37, -16, 66, -37, -37, -59, -56, 60, -47, -57, - -37, -46, -37, 53, 4, 53, -117, -106, -106, -117, - 60, 52, -19, -20, 60, -59, 55, 55, 55, -56, - -47, -46, 60, 55, -117, -117, -37, -25, -26, -28, - -106, -30, -31, -37, 55, 6, 65, 5, 67, -83, - -117, -100, -117, -105, -104, -103, -73, -75, 2, -45, - -61, 21, 20, 19, -52, 60, -40, 7, 8, 9, - 10, 11, 12, 13, 14, 15, 16, 17, 18, 22, - 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, - 33, 34, 35, 36, 38, 39, 40, 41, 42, 43, - 44, 45, 46, 47, 48, 49, 50, -110, -117, -117, - -69, 56, -68, -67, -70, 2, -17, -37, 68, 6, - 5, 17, 18, 56, -82, -81, 
-71, -97, -80, -102, - -96, -77, -11, -74, -78, -89, 2, -45, -60, 40, - 38, -79, 43, -91, -51, 60, 39, -39, 7, 8, - 9, 10, 11, 12, 13, 15, 16, 17, 18, 22, - 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, - 33, 34, 35, 36, 41, 42, 47, 48, 49, 50, - 56, -99, -98, -11, -101, -90, 2, -44, 43, -91, - 7, 8, 9, 10, 11, 12, 13, 15, 16, 17, - 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, - 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, - 38, 39, 40, 41, 42, 44, 45, 46, 47, 48, - 49, 50, 56, -103, 53, 37, -63, -54, 60, -42, - 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, - 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, + -1000, -1, -2, -4, 8, -3, -5, -6, -7, -49, + -63, -67, -71, 52, 2, 9, 12, 13, 44, 43, + 45, 46, -4, -3, 51, 52, -70, 10, 11, 4, + -28, -22, 7, 8, 9, 10, 11, 12, 13, 14, + 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, + 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, + 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, + 45, 46, 47, 48, 49, -14, -12, -22, 65, -22, + -22, -32, -28, 59, -22, -70, 52, -70, -70, -70, + 52, 59, 51, 59, -32, 54, 54, 54, -28, 54, + 52, 52, 52, -28, -16, -17, -18, -70, -20, -22, + 54, 6, 64, 63, 5, -14, 66, -51, -50, -41, + -63, -49, -67, -62, -45, -7, -44, -47, -55, 52, + 2, 20, 19, 18, -33, 39, 37, -48, 42, -57, + -29, 59, 38, -24, 7, 8, 9, 10, 11, 12, + 14, 15, 16, 17, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, - 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, - 48, 49, 50, -37, 60, -50, 56, -109, -108, -11, - -111, 2, 48, 56, -68, 61, 53, 61, 53, 54, - 54, -35, -29, -34, -28, 63, 68, -56, 2, -117, - -81, 37, -63, -37, -94, -92, 5, -37, -37, -94, - -87, -88, -106, -37, 60, -50, 63, -117, -98, 52, - -95, -93, -36, 5, 65, -117, -37, -37, 60, -50, - 52, -37, -117, -108, -37, -24, -27, -29, -32, -106, - -30, 65, -37, 68, -24, -69, 62, -66, 69, 2, - -29, 69, 58, 69, -37, -37, 52, -117, -23, 68, - 53, -23, 61, 41, 55, 52, -117, -23, 53, -117, - 61, -117, 61, -37, -38, 24, 25, 26, 27, 28, - 29, 30, 31, 32, 33, 34, 35, -36, 61, 53, - 41, 5, 52, 52, -37, 5, -115, 66, -37, -65, - 69, 2, -33, -27, -29, 62, 69, 61, 69, -56, - 52, 55, -23, 52, -117, -23, 5, -117, -14, 69, - -13, -15, -9, -12, -21, -117, -117, -92, 5, 42, - -86, -85, -84, -10, -72, -76, 2, 14, -62, 37, - -53, 60, -41, 7, 8, 9, 10, 11, 12, 13, - 15, 16, 17, 18, 22, 23, 24, 25, 26, 27, - 28, 29, 30, 31, 32, 33, 34, 35, 36, 38, - 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, - 49, 50, 5, -117, -117, -106, -37, 61, -117, -23, - -93, -117, -36, 42, 5, 5, -116, -23, 53, 50, - 49, -64, -55, 60, -49, -58, -43, -48, 7, 8, - 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, - 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, - 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, - 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, - 50, 69, 61, 69, -29, 69, 5, -83, 55, 5, - -117, -117, -23, 69, -13, -12, 61, 52, 56, -84, - 53, -21, -37, -37, 60, -50, -117, -23, -59, -117, - 55, -23, -116, -23, -116, -115, -59, 67, -56, -49, - -48, 60, -33, 55, -23, 56, -83, -117, -23, -117, - -25, -117, 52, 52, -116, -23, 52, 55, -23, -37, - -117, 62, -83, 55, -116, -117, 55, 67, -83, 55, - -117, 56, -117, -25, 5, -116, 5, -83, 55, 56, - -83, -114, -117, 56, -83, -117, -116, -116, -23, 55, - -23, 56, -83, 56, 56, -113, -112, -11, 2, -117, - 56, -116, -83, 55, 56, -117, -112, -117, 56, -83, - 56, + 40, 41, 46, 47, 48, 49, -65, -64, -7, -66, + -56, 52, 2, -27, 42, -57, 7, 8, 9, 10, + 11, 12, 14, 15, 16, 17, 18, 19, 20, 21, + 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, + 43, 44, 45, 46, 47, 48, 49, -69, -68, -43, + -45, 2, 20, 19, 18, -34, -30, 59, -25, 7, + 8, 9, 10, 11, 12, 13, 14, 15, 16, 
17, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, + 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, + 41, 42, 43, 44, 45, 46, 47, 48, 49, -73, + -72, -7, -74, 52, 2, 47, 52, -40, 2, -39, + -38, -13, -22, 67, 6, 16, 5, 6, 16, 5, + 55, -50, 52, -32, 36, -32, 36, -32, 36, -22, + -60, -58, 5, -22, -22, -60, -54, -70, -28, 62, + 59, 55, -64, 52, 51, -61, -59, -21, 5, 64, + 55, -68, 52, -32, -32, -32, -22, -28, 59, 55, + -72, 52, -22, 55, 55, 60, 52, -40, 2, 60, + 52, 53, 67, -19, 62, -18, -28, 2, -22, -22, + -22, -22, -22, -22, 51, 52, -15, 67, 60, 40, + 54, 51, 52, 52, 60, -23, 23, 24, 25, 26, + 27, 28, 29, 30, 31, 32, 33, 34, -28, -21, + 52, 60, 40, 5, -22, -22, -22, 51, -28, -75, + 65, -17, 67, -19, 62, 68, -37, 2, -19, 62, + 2, -40, 68, 57, 68, 51, 51, 51, 51, 51, + 51, 5, 52, -9, -8, -14, -60, 5, 41, -53, + -52, -7, -42, -46, 2, -35, 36, -31, 59, -26, + 7, 8, 9, 10, 11, 12, 14, 15, 16, 17, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, + 31, 32, 33, 34, 35, 37, 38, 39, 40, 41, + 42, 43, 44, 45, 46, 47, 48, 49, 5, -54, + 60, 52, -15, -61, -21, 41, 51, 51, 51, 5, + 49, 48, -32, 68, -36, 2, -16, 62, 2, 68, + 68, 60, 2, 61, 61, -28, 5, 5, 5, 5, + 5, 5, 52, -15, 68, 60, 51, 55, -52, 52, + -22, -22, -28, 59, 52, -15, -32, 52, 5, 5, + 5, 52, -15, -75, -32, 66, 68, 68, 60, -40, + 2, 61, -37, 61, 68, 52, -15, 54, -15, 52, + -15, 54, -15, 52, -15, 54, -15, 52, -9, -16, + 51, 51, -28, 52, 61, 52, -15, 52, -15, 52, + -15, 52, 52, 54, 66, -36, 61, 61, 52, -51, + 54, 52, -51, 54, 52, -51, 54, 5, 5, 52, + 52, 52, -11, -10, -7, 52, 2, 60, 60, 55, + -51, 55, -51, 55, -51, 52, -15, 54, -15, 55, + -10, 52, -36, -36, 55, 55, 55, 52, -51, 54, + 55, -51, 55, } var protoDef = [...]int16{ - -2, -2, -2, -2, 3, 0, 0, 0, 20, 18, - 4, 5, 0, 0, -2, 9, 10, 11, 12, 13, - 14, 15, 16, 17, 0, 0, 0, 0, 0, 0, - 0, 19, 0, 78, 0, 8, 21, 0, 0, 21, - 30, 519, 520, 521, 522, 523, 524, 525, 526, 527, - 528, 529, 530, 531, 532, 533, 534, 535, 536, 537, - 538, 539, 540, 541, 542, 543, 544, 545, 546, 547, - 548, 549, 550, 551, 552, 553, 554, 555, 556, 557, - 558, 559, 560, 561, 562, 0, 63, 0, 59, -2, - 56, 57, 0, 0, 0, 0, 122, 0, 32, 0, - 36, -2, 0, 24, 79, 25, 26, 21, 21, 29, - 0, 0, -2, 64, 58, 0, 21, 21, -2, 123, - 33, -2, 38, 21, 27, 28, 31, 21, 66, 67, - 68, 69, 70, 71, 0, 72, 0, 74, 65, 0, - -2, 0, -2, 0, -2, 252, 253, 254, 256, 0, - 0, 134, 135, 136, 126, 0, 41, 312, 313, 314, - 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, - 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, - 335, 336, 337, 338, 339, 340, 341, 342, 343, 344, - 345, 346, 347, 348, 349, 350, 351, 0, -2, 55, - 0, 81, 82, -2, 85, 90, 0, 94, 0, 73, - 75, 76, 77, 21, -2, 228, 229, 230, 231, 232, - 233, 234, 235, 236, 237, 238, 239, 0, 0, 0, - 0, 0, 0, 204, 124, 0, 305, 39, 279, 280, + -2, -2, -2, -2, 0, 6, 7, 8, 9, 10, + 11, 12, 13, 14, 16, 0, 0, 0, 0, 0, + 0, 0, -2, 5, 0, 15, 0, 0, 0, 48, + 0, 22, 380, 381, 382, 383, 384, 385, 386, 387, + 388, 389, 390, 391, 392, 393, 394, 395, 396, 397, + 398, 399, 400, 401, 402, 403, 404, 405, 406, 407, + 408, 409, 410, 411, 412, 413, 414, 415, 416, 417, + 418, 419, 420, 421, 422, 0, 31, 33, 0, 0, + 0, 0, 87, 0, 0, 0, 18, 0, 0, 49, + 21, 0, 0, 0, 0, -2, -2, -2, 88, -2, + 17, 19, 20, 23, 0, 35, 36, 37, 38, 39, + -2, 40, 0, 0, 45, 32, 34, 0, 184, 186, + 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, + 198, 0, 0, 0, 0, 0, 0, 0, 0, 164, + 89, 0, 253, 24, 227, 228, 229, 230, 231, 232, + 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, + 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, + 254, 255, 256, 257, 258, 259, 0, 172, 174, 175, + 176, 
177, 179, 0, 0, 166, 300, 301, 302, 303, + 304, 305, 306, 307, 308, 309, 310, 311, 312, 313, + 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, + 324, 325, 326, 327, 328, 329, 330, 331, 332, 333, + 334, 335, 336, 337, 338, 339, 340, 0, 201, 203, + 204, 206, 0, 0, 0, 0, 91, 0, 26, 260, + 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, + 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, - 291, 292, 293, 294, 295, 296, 297, 298, 299, 300, - 301, 302, 303, 304, 306, 307, 308, 309, 310, 311, - 21, -2, 217, 218, 219, 220, 221, 0, 0, 206, - 352, 353, 354, 355, 356, 357, 358, 359, 360, 361, - 362, 363, 364, 365, 366, 367, 368, 369, 370, 371, - 372, 373, 374, 375, 376, 377, 378, 379, 380, 381, - 382, 383, 384, 385, 386, 387, 388, 389, 390, 391, - 392, 393, 21, 251, 255, 0, 0, 130, 0, 45, - 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, - 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, - 453, 454, 455, 456, 457, 458, 459, 460, 461, 462, - 463, 464, 465, 466, 467, 468, 469, 470, 471, 472, - 473, 474, 475, 0, 0, 127, 21, -2, 265, 266, - 267, 268, 0, 80, 84, 86, 87, 88, 89, 0, - 0, 92, 105, 106, 107, 0, 0, 0, 0, 224, - 227, 0, 0, 21, 0, 191, 193, 0, 21, 0, - 21, 21, 209, 211, 0, 125, 0, 213, 216, 0, - 0, 196, 198, 201, 0, 248, 0, 0, 0, 131, - 0, 42, 261, 264, 0, 93, 98, 99, 100, 101, - 102, 0, 104, 0, 91, 0, 109, 0, 118, 0, - 120, 95, 0, 97, 0, 21, 0, 246, 21, 0, - 21, 21, 0, 0, -2, 0, 174, 21, 21, 207, - 0, 208, 0, 40, 0, 177, 178, 179, 180, 181, - 182, 183, 184, 185, 186, 187, 188, 21, 0, 21, - 0, 202, 0, 0, 46, 23, 0, 0, 103, 0, - 111, 0, 113, 115, 116, 108, 117, 0, 119, 0, - 0, 21, 0, 0, 244, 21, 21, 247, 0, 138, - 139, 0, 143, -2, 147, 189, 190, 192, 194, 195, - 0, -2, 158, 159, 160, 161, 163, 0, 0, 0, - 128, 0, 43, 394, 395, 396, 397, 398, 399, 400, - 401, 402, 403, 404, 405, 406, 407, 408, 409, 410, - 411, 412, 413, 414, 415, 416, 417, 418, 419, 420, - 421, 422, 423, 424, 425, 426, 427, 428, 429, 430, - 431, 432, 21, 175, 203, 210, 212, 0, 222, 21, - 197, 205, 199, 200, 0, 23, 259, 23, 22, 0, - 0, 0, 132, 0, 47, 0, 51, -2, 476, 477, - 478, 479, 480, 481, 482, 483, 484, 485, 486, 487, - 488, 489, 490, 491, 492, 493, 494, 495, 496, 497, - 498, 499, 500, 501, 502, 503, 504, 505, 506, 507, - 508, 509, 510, 511, 512, 513, 514, 515, 516, 517, - 518, 110, 0, 112, 121, 96, 0, 0, 21, 21, - 245, 242, 21, 137, 140, -2, 145, 0, 21, 157, - 162, 0, 23, 0, 0, 129, 172, 21, 0, 223, - 21, 0, 257, 23, 260, 21, 0, 272, 133, 48, - -2, 53, 114, 21, 0, 21, 0, 240, 21, 243, - 146, 154, 0, 0, 166, 23, 0, 21, 0, 44, - 173, 176, 0, 21, 258, 269, 21, 271, 0, 21, - 152, 21, 241, 23, 23, 167, 0, 0, 21, 148, - 0, 0, -2, 21, 0, 153, 54, 164, 23, 21, - 0, 170, 0, 149, 21, -2, 276, 277, 278, 150, - 21, 165, 0, 21, 171, 270, 275, 151, 168, 0, - 169, + 291, 292, 293, 294, 295, 296, 297, 298, 299, 0, + 209, 211, 212, 213, 215, 0, 30, 0, 60, -2, + 55, 0, 72, 0, 41, 44, 47, 42, 43, 46, + 182, 183, 197, 0, 409, 0, 409, 0, 409, 0, + 0, 151, 153, 0, 0, 0, 0, 168, 90, 0, + 0, 170, 171, 178, 0, 0, 156, 158, 161, 0, + 199, 200, 205, 0, 0, 0, 0, 92, 0, 207, + 208, 214, 0, 50, 51, 58, 59, 53, 60, 56, + 57, 0, 0, 69, -2, 78, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 149, 0, 0, 0, 0, + -2, 0, 163, 167, 0, 0, 137, 138, 139, 140, + 141, 142, 143, 144, 145, 146, 147, 148, 25, 0, + 165, 0, 0, 162, 0, 0, 0, 0, 27, 0, + 0, 61, 0, 68, -2, 62, 0, 0, 76, -2, + 60, 0, 73, 0, 75, 0, 0, 0, 0, 0, + 0, 0, 150, 0, 112, 0, 152, 154, 155, 0, + 123, 125, 126, 127, 129, 0, 366, 93, 0, 28, + 341, 342, 343, 344, 345, 
346, 347, 348, 349, 350, + 351, 352, 353, 354, 355, 356, 357, 358, 359, 360, + 361, 362, 363, 364, 365, 367, 368, 369, 370, 371, + 372, 373, 374, 375, 376, 377, 378, 379, 0, 169, + 0, 180, 0, 157, 159, 160, 0, 0, 0, 0, + 0, 421, 0, 63, 0, 0, 81, -2, 60, 64, + 67, 0, 60, 71, 79, 0, 0, 0, 0, 0, + 0, 0, 101, 0, 111, 0, 0, 121, 122, 128, + 0, 0, 94, 0, 134, 0, 0, 181, 0, 0, + 0, 109, 0, 0, 0, 219, 65, 66, 0, 0, + 60, 70, 77, 80, 74, 95, 0, -2, 0, 96, + 0, -2, 0, 97, 0, -2, 0, 102, 113, 114, + 0, 0, 29, 135, 136, 103, 0, 104, 0, 105, + 0, 110, 216, -2, 218, 82, 83, 85, 98, 0, + -2, 99, 0, -2, 100, 0, -2, 0, 0, 106, + 107, 108, 0, 221, 223, 224, 226, 0, 0, 115, + 0, 116, 0, 117, 0, 130, 0, -2, 0, 217, + 220, 225, 84, 86, 118, 119, 120, 131, 0, -2, + 132, 0, 133, } var protoTok1 = [...]int8{ 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 77, 3, 75, 74, 73, 71, 3, - 66, 67, 70, 64, 61, 65, 60, 58, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, 54, 53, - 63, 52, 62, 59, 76, 3, 3, 3, 3, 3, + 3, 3, 3, 76, 3, 74, 73, 72, 70, 3, + 65, 66, 69, 63, 60, 64, 59, 57, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 53, 52, + 62, 51, 61, 58, 75, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 68, 57, 69, 72, 3, 79, 3, 3, 3, + 3, 67, 56, 68, 71, 3, 78, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 55, 3, 56, 78, + 3, 3, 3, 54, 3, 55, 77, } var protoTok2 = [...]int8{ @@ -926,7 +867,7 @@ var protoTok2 = [...]int8{ 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, - 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, + 42, 43, 44, 45, 46, 47, 48, 49, 50, } var protoTok3 = [...]int8{ @@ -1279,1380 +1220,1188 @@ protodefault: protoDollar = protoS[protopt-1 : protopt+1] { lex := protolex.(*protoLex) - protoVAL.file = ast.NewFileNodeWithEdition(lex.info, protoDollar[1].ed, nil, lex.eof) + protoVAL.file = ast.NewFileNode(lex.info, nil, protoDollar[1].fileDecls, lex.eof) lex.res = protoVAL.file } case 3: - protoDollar = protoS[protopt-1 : protopt+1] + protoDollar = protoS[protopt-2 : protopt+1] { lex := protolex.(*protoLex) - protoVAL.file = ast.NewFileNode(lex.info, nil, protoDollar[1].fileElements, lex.eof) + protoVAL.file = ast.NewFileNode(lex.info, protoDollar[1].syn, protoDollar[2].fileDecls, lex.eof) lex.res = protoVAL.file } case 4: - protoDollar = protoS[protopt-2 : protopt+1] + protoDollar = protoS[protopt-0 : protopt+1] { lex := protolex.(*protoLex) - protoVAL.file = ast.NewFileNode(lex.info, protoDollar[1].syn, protoDollar[2].fileElements, lex.eof) + protoVAL.file = ast.NewFileNode(lex.info, nil, nil, lex.eof) lex.res = protoVAL.file } case 5: protoDollar = protoS[protopt-2 : protopt+1] { - lex := protolex.(*protoLex) - protoVAL.file = ast.NewFileNodeWithEdition(lex.info, protoDollar[1].ed, protoDollar[2].fileElements, lex.eof) - lex.res = protoVAL.file + if protoDollar[2].fileDecl != nil { + protoVAL.fileDecls = append(protoDollar[1].fileDecls, protoDollar[2].fileDecl) + } else { + protoVAL.fileDecls = protoDollar[1].fileDecls + } } case 6: - protoDollar = protoS[protopt-0 : protopt+1] + protoDollar = protoS[protopt-1 : protopt+1] { - lex := protolex.(*protoLex) - protoVAL.file = ast.NewFileNode(lex.info, nil, nil, lex.eof) - lex.res = protoVAL.file + if protoDollar[1].fileDecl != nil { + protoVAL.fileDecls = []ast.FileElement{protoDollar[1].fileDecl} + } else { + protoVAL.fileDecls = nil + } } case 7: - protoDollar = protoS[protopt-2 : protopt+1] + protoDollar = 
protoS[protopt-1 : protopt+1] { - protoVAL.fileElements = prependRunes(toFileElement, protoDollar[1].bs, protoDollar[2].fileElements) + protoVAL.fileDecl = protoDollar[1].imprt } case 8: - protoDollar = protoS[protopt-2 : protopt+1] + protoDollar = protoS[protopt-1 : protopt+1] { - protoVAL.fileElements = append(protoDollar[1].fileElements, protoDollar[2].fileElements...) + protoVAL.fileDecl = protoDollar[1].pkg } case 9: protoDollar = protoS[protopt-1 : protopt+1] { - protoVAL.fileElements = protoDollar[1].fileElements + protoVAL.fileDecl = protoDollar[1].opt } case 10: protoDollar = protoS[protopt-1 : protopt+1] { - protoVAL.fileElements = toElements[ast.FileElement](toFileElement, protoDollar[1].imprt.Node, protoDollar[1].imprt.Runes) + protoVAL.fileDecl = protoDollar[1].msg } case 11: protoDollar = protoS[protopt-1 : protopt+1] { - protoVAL.fileElements = toElements[ast.FileElement](toFileElement, protoDollar[1].pkg.Node, protoDollar[1].pkg.Runes) + protoVAL.fileDecl = protoDollar[1].en } case 12: protoDollar = protoS[protopt-1 : protopt+1] { - protoVAL.fileElements = toElements[ast.FileElement](toFileElement, protoDollar[1].opt.Node, protoDollar[1].opt.Runes) + protoVAL.fileDecl = protoDollar[1].extend } case 13: protoDollar = protoS[protopt-1 : protopt+1] { - protoVAL.fileElements = toElements[ast.FileElement](toFileElement, protoDollar[1].msg.Node, protoDollar[1].msg.Runes) + protoVAL.fileDecl = protoDollar[1].svc } case 14: protoDollar = protoS[protopt-1 : protopt+1] { - protoVAL.fileElements = toElements[ast.FileElement](toFileElement, protoDollar[1].en.Node, protoDollar[1].en.Runes) + protoVAL.fileDecl = ast.NewEmptyDeclNode(protoDollar[1].b) } case 15: - protoDollar = protoS[protopt-1 : protopt+1] + protoDollar = protoS[protopt-2 : protopt+1] { - protoVAL.fileElements = toElements[ast.FileElement](toFileElement, protoDollar[1].extend.Node, protoDollar[1].extend.Runes) + protoVAL.fileDecl = nil } case 16: protoDollar = protoS[protopt-1 : protopt+1] { - protoVAL.fileElements = toElements[ast.FileElement](toFileElement, protoDollar[1].svc.Node, protoDollar[1].svc.Runes) + protoVAL.fileDecl = nil } case 17: - protoDollar = protoS[protopt-1 : protopt+1] + protoDollar = protoS[protopt-4 : protopt+1] { - protoVAL.fileElements = nil + protoVAL.syn = ast.NewSyntaxNode(protoDollar[1].id.ToKeyword(), protoDollar[2].b, protoDollar[3].str.toStringValueNode(), protoDollar[4].b) } case 18: - protoDollar = protoS[protopt-1 : protopt+1] + protoDollar = protoS[protopt-3 : protopt+1] { - protoVAL.bs = []*ast.RuneNode{protoDollar[1].b} + protoVAL.imprt = ast.NewImportNode(protoDollar[1].id.ToKeyword(), nil, nil, protoDollar[2].str.toStringValueNode(), protoDollar[3].b) } case 19: - protoDollar = protoS[protopt-2 : protopt+1] + protoDollar = protoS[protopt-4 : protopt+1] { - protoVAL.bs = append(protoDollar[1].bs, protoDollar[2].b) + protoVAL.imprt = ast.NewImportNode(protoDollar[1].id.ToKeyword(), nil, protoDollar[2].id.ToKeyword(), protoDollar[3].str.toStringValueNode(), protoDollar[4].b) } case 20: - protoDollar = protoS[protopt-1 : protopt+1] + protoDollar = protoS[protopt-4 : protopt+1] { - protoVAL.bs = protoDollar[1].bs + protoVAL.imprt = ast.NewImportNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id.ToKeyword(), nil, protoDollar[3].str.toStringValueNode(), protoDollar[4].b) } case 21: - protoDollar = protoS[protopt-0 : protopt+1] + protoDollar = protoS[protopt-3 : protopt+1] { - protoVAL.bs = nil + protoVAL.pkg = ast.NewPackageNode(protoDollar[1].id.ToKeyword(), 
protoDollar[2].cid.toIdentValueNode(nil), protoDollar[3].b) } case 22: protoDollar = protoS[protopt-1 : protopt+1] { - protoVAL.b = protoDollar[1].b + protoVAL.cid = &identList{protoDollar[1].id, nil, nil} } case 23: - protoDollar = protoS[protopt-0 : protopt+1] + protoDollar = protoS[protopt-3 : protopt+1] { - protolex.(*protoLex).Error("syntax error: expecting ';'") - protoVAL.b = nil + protoVAL.cid = &identList{protoDollar[1].id, protoDollar[2].b, protoDollar[3].cid} } case 24: - protoDollar = protoS[protopt-4 : protopt+1] + protoDollar = protoS[protopt-1 : protopt+1] { - protoVAL.syn = ast.NewSyntaxNode(protoDollar[1].id.ToKeyword(), protoDollar[2].b, toStringValueNode(protoDollar[3].str), protoDollar[4].b) + protoVAL.cid = &identList{protoDollar[1].id, nil, nil} } case 25: - protoDollar = protoS[protopt-4 : protopt+1] + protoDollar = protoS[protopt-3 : protopt+1] { - protoVAL.ed = ast.NewEditionNode(protoDollar[1].id.ToKeyword(), protoDollar[2].b, toStringValueNode(protoDollar[3].str), protoDollar[4].b) + protoVAL.cid = &identList{protoDollar[1].id, protoDollar[2].b, protoDollar[3].cid} } case 26: - protoDollar = protoS[protopt-3 : protopt+1] + protoDollar = protoS[protopt-1 : protopt+1] { - semi, extra := protolex.(*protoLex).requireSemicolon(protoDollar[3].bs) - protoVAL.imprt = newNodeWithRunes(ast.NewImportNode(protoDollar[1].id.ToKeyword(), nil, nil, toStringValueNode(protoDollar[2].str), semi), extra...) + protoVAL.cid = &identList{protoDollar[1].id, nil, nil} } case 27: - protoDollar = protoS[protopt-4 : protopt+1] + protoDollar = protoS[protopt-3 : protopt+1] { - semi, extra := protolex.(*protoLex).requireSemicolon(protoDollar[4].bs) - protoVAL.imprt = newNodeWithRunes(ast.NewImportNode(protoDollar[1].id.ToKeyword(), nil, protoDollar[2].id.ToKeyword(), toStringValueNode(protoDollar[3].str), semi), extra...) + protoVAL.cid = &identList{protoDollar[1].id, protoDollar[2].b, protoDollar[3].cid} } case 28: - protoDollar = protoS[protopt-4 : protopt+1] + protoDollar = protoS[protopt-1 : protopt+1] { - semi, extra := protolex.(*protoLex).requireSemicolon(protoDollar[4].bs) - protoVAL.imprt = newNodeWithRunes(ast.NewImportNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id.ToKeyword(), nil, toStringValueNode(protoDollar[3].str), semi), extra...) + protoVAL.cid = &identList{protoDollar[1].id, nil, nil} } case 29: protoDollar = protoS[protopt-3 : protopt+1] { - semi, extra := protolex.(*protoLex).requireSemicolon(protoDollar[3].bs) - protoVAL.pkg = newNodeWithRunes(ast.NewPackageNode(protoDollar[1].id.ToKeyword(), protoDollar[2].cid.toIdentValueNode(nil), semi), extra...) 
+ protoVAL.cid = &identList{protoDollar[1].id, protoDollar[2].b, protoDollar[3].cid} } case 30: - protoDollar = protoS[protopt-1 : protopt+1] + protoDollar = protoS[protopt-5 : protopt+1] { - protoVAL.cid = &identSlices{idents: []*ast.IdentNode{protoDollar[1].id}} + refs, dots := protoDollar[2].optNms.toNodes() + optName := ast.NewOptionNameNode(refs, dots) + protoVAL.opt = ast.NewOptionNode(protoDollar[1].id.ToKeyword(), optName, protoDollar[3].b, protoDollar[4].v, protoDollar[5].b) } case 31: - protoDollar = protoS[protopt-3 : protopt+1] + protoDollar = protoS[protopt-1 : protopt+1] { - protoDollar[1].cid.idents = append(protoDollar[1].cid.idents, protoDollar[3].id) - protoDollar[1].cid.dots = append(protoDollar[1].cid.dots, protoDollar[2].b) - protoVAL.cid = protoDollar[1].cid + protoVAL.optNms = &fieldRefList{protoDollar[1].ref, nil, nil} } case 32: - protoDollar = protoS[protopt-1 : protopt+1] + protoDollar = protoS[protopt-3 : protopt+1] { - protoVAL.cid = &identSlices{idents: []*ast.IdentNode{protoDollar[1].cidPart.Node}, dots: protoDollar[1].cidPart.Runes} + protoVAL.optNms = &fieldRefList{protoDollar[1].ref, protoDollar[2].b, protoDollar[3].optNms} } case 33: - protoDollar = protoS[protopt-2 : protopt+1] - { - protoDollar[1].cid.idents = append(protoDollar[1].cid.idents, protoDollar[2].cidPart.Node) - protoDollar[1].cid.dots = append(protoDollar[1].cid.dots, protoDollar[2].cidPart.Runes...) - protoVAL.cid = protoDollar[1].cid - } - case 34: protoDollar = protoS[protopt-1 : protopt+1] { - protoVAL.cid = &identSlices{idents: []*ast.IdentNode{protoDollar[1].cidPart.Node}, dots: protoDollar[1].cidPart.Runes} - } - case 35: - protoDollar = protoS[protopt-2 : protopt+1] - { - protoDollar[1].cid.idents = append(protoDollar[1].cid.idents, protoDollar[2].cidPart.Node) - protoDollar[1].cid.dots = append(protoDollar[1].cid.dots, protoDollar[2].cidPart.Runes...) 
- protoVAL.cid = protoDollar[1].cid + protoVAL.ref = ast.NewFieldReferenceNode(protoDollar[1].id) } - case 36: - protoDollar = protoS[protopt-1 : protopt+1] + case 34: + protoDollar = protoS[protopt-3 : protopt+1] { - protoVAL.cidPart = newNodeWithRunes(protoDollar[1].id) + protoVAL.ref = ast.NewExtensionFieldReferenceNode(protoDollar[1].b, protoDollar[2].tid, protoDollar[3].b) } case 37: protoDollar = protoS[protopt-1 : protopt+1] { - protolex.(*protoLex).Error("syntax error: unexpected '.'") - protoVAL.cidPart = protoDollar[1].cidPart - } - case 38: - protoDollar = protoS[protopt-2 : protopt+1] - { - protoVAL.cidPart = newNodeWithRunes(protoDollar[1].id, protoDollar[2].b) + protoVAL.v = protoDollar[1].str.toStringValueNode() } case 39: protoDollar = protoS[protopt-1 : protopt+1] { - protoVAL.cid = &identSlices{idents: []*ast.IdentNode{protoDollar[1].id}} + protoVAL.v = protoDollar[1].id } case 40: - protoDollar = protoS[protopt-3 : protopt+1] + protoDollar = protoS[protopt-1 : protopt+1] { - protoDollar[1].cid.idents = append(protoDollar[1].cid.idents, protoDollar[3].id) - protoDollar[1].cid.dots = append(protoDollar[1].cid.dots, protoDollar[2].b) - protoVAL.cid = protoDollar[1].cid + protoVAL.v = protoDollar[1].f } case 41: - protoDollar = protoS[protopt-1 : protopt+1] + protoDollar = protoS[protopt-2 : protopt+1] { - protoVAL.cid = &identSlices{idents: []*ast.IdentNode{protoDollar[1].id}} + protoVAL.v = ast.NewSignedFloatLiteralNode(protoDollar[1].b, protoDollar[2].f) } case 42: - protoDollar = protoS[protopt-3 : protopt+1] + protoDollar = protoS[protopt-2 : protopt+1] { - protoDollar[1].cid.idents = append(protoDollar[1].cid.idents, protoDollar[3].id) - protoDollar[1].cid.dots = append(protoDollar[1].cid.dots, protoDollar[2].b) - protoVAL.cid = protoDollar[1].cid + protoVAL.v = ast.NewSignedFloatLiteralNode(protoDollar[1].b, protoDollar[2].f) } case 43: - protoDollar = protoS[protopt-1 : protopt+1] + protoDollar = protoS[protopt-2 : protopt+1] { - protoVAL.cid = &identSlices{idents: []*ast.IdentNode{protoDollar[1].id}} + f := ast.NewSpecialFloatLiteralNode(protoDollar[2].id.ToKeyword()) + protoVAL.v = ast.NewSignedFloatLiteralNode(protoDollar[1].b, f) } case 44: - protoDollar = protoS[protopt-3 : protopt+1] + protoDollar = protoS[protopt-2 : protopt+1] { - protoDollar[1].cid.idents = append(protoDollar[1].cid.idents, protoDollar[3].id) - protoDollar[1].cid.dots = append(protoDollar[1].cid.dots, protoDollar[2].b) - protoVAL.cid = protoDollar[1].cid + f := ast.NewSpecialFloatLiteralNode(protoDollar[2].id.ToKeyword()) + protoVAL.v = ast.NewSignedFloatLiteralNode(protoDollar[1].b, f) } case 45: protoDollar = protoS[protopt-1 : protopt+1] { - protoVAL.cid = &identSlices{idents: []*ast.IdentNode{protoDollar[1].id}} + protoVAL.v = protoDollar[1].i } case 46: - protoDollar = protoS[protopt-3 : protopt+1] + protoDollar = protoS[protopt-2 : protopt+1] { - protoDollar[1].cid.idents = append(protoDollar[1].cid.idents, protoDollar[3].id) - protoDollar[1].cid.dots = append(protoDollar[1].cid.dots, protoDollar[2].b) - protoVAL.cid = protoDollar[1].cid + protoVAL.v = ast.NewPositiveUintLiteralNode(protoDollar[1].b, protoDollar[2].i) } case 47: - protoDollar = protoS[protopt-1 : protopt+1] + protoDollar = protoS[protopt-2 : protopt+1] { - protoVAL.cid = &identSlices{idents: []*ast.IdentNode{protoDollar[1].cidPart.Node}, dots: protoDollar[1].cidPart.Runes} + if protoDollar[2].i.Val > math.MaxInt64+1 { + // can't represent as int so treat as float literal + protoVAL.v = 
ast.NewSignedFloatLiteralNode(protoDollar[1].b, protoDollar[2].i) + } else { + protoVAL.v = ast.NewNegativeIntLiteralNode(protoDollar[1].b, protoDollar[2].i) + } } case 48: - protoDollar = protoS[protopt-2 : protopt+1] + protoDollar = protoS[protopt-1 : protopt+1] { - protoDollar[1].cid.idents = append(protoDollar[1].cid.idents, protoDollar[2].cidPart.Node) - protoDollar[1].cid.dots = append(protoDollar[1].cid.dots, protoDollar[2].cidPart.Runes...) - protoVAL.cid = protoDollar[1].cid + protoVAL.str = &stringList{protoDollar[1].s, nil} } case 49: - protoDollar = protoS[protopt-1 : protopt+1] + protoDollar = protoS[protopt-2 : protopt+1] { - protoVAL.cid = &identSlices{idents: []*ast.IdentNode{protoDollar[1].cidPart.Node}, dots: protoDollar[1].cidPart.Runes} + protoVAL.str = &stringList{protoDollar[1].s, protoDollar[2].str} } case 50: - protoDollar = protoS[protopt-2 : protopt+1] + protoDollar = protoS[protopt-3 : protopt+1] { - protoDollar[1].cid.idents = append(protoDollar[1].cid.idents, protoDollar[2].cidPart.Node) - protoDollar[1].cid.dots = append(protoDollar[1].cid.dots, protoDollar[2].cidPart.Runes...) - protoVAL.cid = protoDollar[1].cid + fields, delims := protoDollar[2].msgLit.toNodes() + protoVAL.v = ast.NewMessageLiteralNode(protoDollar[1].b, fields, delims, protoDollar[3].b) } case 51: - protoDollar = protoS[protopt-1 : protopt+1] + protoDollar = protoS[protopt-3 : protopt+1] { - protoVAL.cidPart = newNodeWithRunes(protoDollar[1].id) + protoVAL.v = nil } case 52: protoDollar = protoS[protopt-1 : protopt+1] { - protolex.(*protoLex).Error("syntax error: unexpected '.'") - protoVAL.cidPart = protoDollar[1].cidPart + if protoDollar[1].msgEntry != nil { + protoVAL.msgLit = &messageFieldList{protoDollar[1].msgEntry, nil} + } else { + protoVAL.msgLit = nil + } } case 53: protoDollar = protoS[protopt-2 : protopt+1] { - protoVAL.cidPart = newNodeWithRunes(protoDollar[1].id, protoDollar[2].b) + if protoDollar[1].msgEntry != nil { + protoVAL.msgLit = &messageFieldList{protoDollar[1].msgEntry, protoDollar[2].msgLit} + } else { + protoVAL.msgLit = protoDollar[2].msgLit + } } case 54: - protoDollar = protoS[protopt-5 : protopt+1] + protoDollar = protoS[protopt-0 : protopt+1] { - optName := ast.NewOptionNameNode(protoDollar[2].optNms.refs, protoDollar[2].optNms.dots) - protoVAL.optRaw = ast.NewOptionNode(protoDollar[1].id.ToKeyword(), optName, protoDollar[3].b, protoDollar[4].v, protoDollar[5].b) + protoVAL.msgLit = nil } case 55: - protoDollar = protoS[protopt-5 : protopt+1] + protoDollar = protoS[protopt-1 : protopt+1] { - optName := ast.NewOptionNameNode(protoDollar[2].optNms.refs, protoDollar[2].optNms.dots) - semi, extra := protolex.(*protoLex).requireSemicolon(protoDollar[5].bs) - protoVAL.opt = newNodeWithRunes(ast.NewOptionNode(protoDollar[1].id.ToKeyword(), optName, protoDollar[3].b, protoDollar[4].v, semi), extra...) 
+ if protoDollar[1].msgField != nil { + protoVAL.msgEntry = &messageFieldEntry{protoDollar[1].msgField, nil} + } else { + protoVAL.msgEntry = nil + } } case 56: - protoDollar = protoS[protopt-1 : protopt+1] + protoDollar = protoS[protopt-2 : protopt+1] { - protoVAL.refRaw = ast.NewFieldReferenceNode(protoDollar[1].id) + if protoDollar[1].msgField != nil { + protoVAL.msgEntry = &messageFieldEntry{protoDollar[1].msgField, protoDollar[2].b} + } else { + protoVAL.msgEntry = nil + } } case 57: - protoDollar = protoS[protopt-1 : protopt+1] + protoDollar = protoS[protopt-2 : protopt+1] { - protoVAL.refRaw = protoDollar[1].refRaw + if protoDollar[1].msgField != nil { + protoVAL.msgEntry = &messageFieldEntry{protoDollar[1].msgField, protoDollar[2].b} + } else { + protoVAL.msgEntry = nil + } } case 58: protoDollar = protoS[protopt-2 : protopt+1] { - protoVAL.ref = newNodeWithRunes(protoDollar[1].refRaw, protoDollar[2].b) + protoVAL.msgEntry = nil } case 59: - protoDollar = protoS[protopt-1 : protopt+1] + protoDollar = protoS[protopt-2 : protopt+1] { - protoVAL.ref = newNodeWithRunes(protoDollar[1].refRaw) + protoVAL.msgEntry = nil } case 60: protoDollar = protoS[protopt-1 : protopt+1] { - protolex.(*protoLex).Error("syntax error: unexpected '.'") - protoVAL.ref = protoDollar[1].ref + protoVAL.msgEntry = nil } case 61: - protoDollar = protoS[protopt-1 : protopt+1] + protoDollar = protoS[protopt-3 : protopt+1] { - protoVAL.optNms = &fieldRefSlices{refs: []*ast.FieldReferenceNode{protoDollar[1].ref.Node}, dots: protoDollar[1].ref.Runes} + if protoDollar[1].ref != nil { + protoVAL.msgField = ast.NewMessageFieldNode(protoDollar[1].ref, protoDollar[2].b, protoDollar[3].v) + } else { + protoVAL.msgField = nil + } } case 62: - protoDollar = protoS[protopt-2 : protopt+1] + protoDollar = protoS[protopt-3 : protopt+1] { - protoDollar[1].optNms.refs = append(protoDollar[1].optNms.refs, protoDollar[2].ref.Node) - protoDollar[1].optNms.dots = append(protoDollar[1].optNms.dots, protoDollar[2].ref.Runes...) - protoVAL.optNms = protoDollar[1].optNms + if protoDollar[1].ref != nil { + val := ast.NewArrayLiteralNode(protoDollar[2].b, nil, nil, protoDollar[3].b) + protoVAL.msgField = ast.NewMessageFieldNode(protoDollar[1].ref, nil, val) + } else { + protoVAL.msgField = nil + } } case 63: - protoDollar = protoS[protopt-1 : protopt+1] + protoDollar = protoS[protopt-4 : protopt+1] { - protoVAL.optNms = &fieldRefSlices{refs: []*ast.FieldReferenceNode{protoDollar[1].ref.Node}, dots: protoDollar[1].ref.Runes} + if protoDollar[1].ref != nil { + val := ast.NewArrayLiteralNode(protoDollar[3].b, nil, nil, protoDollar[4].b) + protoVAL.msgField = ast.NewMessageFieldNode(protoDollar[1].ref, protoDollar[2].b, val) + } else { + protoVAL.msgField = nil + } } case 64: - protoDollar = protoS[protopt-2 : protopt+1] + protoDollar = protoS[protopt-4 : protopt+1] { - protoDollar[1].optNms.refs = append(protoDollar[1].optNms.refs, protoDollar[2].ref.Node) - protoDollar[1].optNms.dots = append(protoDollar[1].optNms.dots, protoDollar[2].ref.Runes...) 
- protoVAL.optNms = protoDollar[1].optNms + if protoDollar[1].ref != nil { + vals, commas := protoDollar[3].sl.toNodes() + val := ast.NewArrayLiteralNode(protoDollar[2].b, vals, commas, protoDollar[4].b) + protoVAL.msgField = ast.NewMessageFieldNode(protoDollar[1].ref, nil, val) + } else { + protoVAL.msgField = nil + } } case 65: - protoDollar = protoS[protopt-3 : protopt+1] + protoDollar = protoS[protopt-5 : protopt+1] + { + if protoDollar[1].ref != nil { + vals, commas := protoDollar[4].sl.toNodes() + val := ast.NewArrayLiteralNode(protoDollar[3].b, vals, commas, protoDollar[5].b) + protoVAL.msgField = ast.NewMessageFieldNode(protoDollar[1].ref, protoDollar[2].b, val) + } else { + protoVAL.msgField = nil + } + } + case 66: + protoDollar = protoS[protopt-5 : protopt+1] { - protoVAL.refRaw = ast.NewExtensionFieldReferenceNode(protoDollar[1].b, protoDollar[2].tid, protoDollar[3].b) + protoVAL.msgField = nil + } + case 67: + protoDollar = protoS[protopt-4 : protopt+1] + { + protoVAL.msgField = nil } case 68: - protoDollar = protoS[protopt-1 : protopt+1] + protoDollar = protoS[protopt-3 : protopt+1] + { + if protoDollar[1].ref != nil && protoDollar[3].v != nil { + protoVAL.msgField = ast.NewMessageFieldNode(protoDollar[1].ref, protoDollar[2].b, protoDollar[3].v) + } else { + protoVAL.msgField = nil + } + } + case 69: + protoDollar = protoS[protopt-2 : protopt+1] + { + if protoDollar[1].ref != nil && protoDollar[2].v != nil { + protoVAL.msgField = ast.NewMessageFieldNode(protoDollar[1].ref, nil, protoDollar[2].v) + } else { + protoVAL.msgField = nil + } + } + case 70: + protoDollar = protoS[protopt-5 : protopt+1] { - protoVAL.v = toStringValueNode(protoDollar[1].str) + protoVAL.msgField = nil } case 71: - protoDollar = protoS[protopt-1 : protopt+1] + protoDollar = protoS[protopt-4 : protopt+1] { - protoVAL.v = protoDollar[1].id + protoVAL.msgField = nil } case 72: protoDollar = protoS[protopt-1 : protopt+1] { - protoVAL.v = protoDollar[1].f + protoVAL.ref = ast.NewFieldReferenceNode(protoDollar[1].id) } case 73: - protoDollar = protoS[protopt-2 : protopt+1] + protoDollar = protoS[protopt-3 : protopt+1] { - protoVAL.v = ast.NewSignedFloatLiteralNode(protoDollar[1].b, protoDollar[2].f) + protoVAL.ref = ast.NewExtensionFieldReferenceNode(protoDollar[1].b, protoDollar[2].cid.toIdentValueNode(nil), protoDollar[3].b) } case 74: - protoDollar = protoS[protopt-1 : protopt+1] + protoDollar = protoS[protopt-5 : protopt+1] { - protoVAL.v = protoDollar[1].i + protoVAL.ref = ast.NewAnyTypeReferenceNode(protoDollar[1].b, protoDollar[2].cid.toIdentValueNode(nil), protoDollar[3].b, protoDollar[4].cid.toIdentValueNode(nil), protoDollar[5].b) } case 75: - protoDollar = protoS[protopt-2 : protopt+1] + protoDollar = protoS[protopt-3 : protopt+1] { - if protoDollar[2].i.Val > math.MaxInt64+1 { - // can't represent as int so treat as float literal - protoVAL.v = ast.NewSignedFloatLiteralNode(protoDollar[1].b, protoDollar[2].i) - } else { - protoVAL.v = ast.NewNegativeIntLiteralNode(protoDollar[1].b, protoDollar[2].i) - } + protoVAL.ref = nil } case 76: - protoDollar = protoS[protopt-2 : protopt+1] + protoDollar = protoS[protopt-1 : protopt+1] { - f := ast.NewSpecialFloatLiteralNode(protoDollar[2].id.ToKeyword()) - protoVAL.v = ast.NewSignedFloatLiteralNode(protoDollar[1].b, f) + if protoDollar[1].v == nil { + protoVAL.sl = nil + } else { + protoVAL.sl = &valueList{protoDollar[1].v, nil, nil} + } } case 77: - protoDollar = protoS[protopt-2 : protopt+1] + protoDollar = protoS[protopt-3 : protopt+1] { - f := 
ast.NewSpecialFloatLiteralNode(protoDollar[2].id.ToKeyword()) - protoVAL.v = ast.NewSignedFloatLiteralNode(protoDollar[1].b, f) + if protoDollar[1].v == nil { + protoVAL.sl = nil + } else { + protoVAL.sl = &valueList{protoDollar[1].v, protoDollar[2].b, protoDollar[3].sl} + } } case 78: protoDollar = protoS[protopt-1 : protopt+1] { - protoVAL.str = []*ast.StringLiteralNode{protoDollar[1].s} + protoVAL.v = protoDollar[1].v } case 79: - protoDollar = protoS[protopt-2 : protopt+1] + protoDollar = protoS[protopt-3 : protopt+1] { - protoVAL.str = append(protoDollar[1].str, protoDollar[2].s) + fields, delims := protoDollar[2].msgLit.toNodes() + protoVAL.v = ast.NewMessageLiteralNode(protoDollar[1].b, fields, delims, protoDollar[3].b) } case 80: protoDollar = protoS[protopt-3 : protopt+1] { - if protoDollar[2].msgLitFlds == nil { - protoVAL.v = ast.NewMessageLiteralNode(protoDollar[1].b, nil, nil, protoDollar[3].b) - } else { - fields, delimiters := protoDollar[2].msgLitFlds.toNodes() - protoVAL.v = ast.NewMessageLiteralNode(protoDollar[1].b, fields, delimiters, protoDollar[3].b) - } + protoVAL.v = nil } case 81: - protoDollar = protoS[protopt-2 : protopt+1] - { - protoVAL.v = ast.NewMessageLiteralNode(protoDollar[1].b, nil, nil, protoDollar[2].b) - } - case 84: - protoDollar = protoS[protopt-2 : protopt+1] - { - if protoDollar[1].msgLitFlds != nil { - protoDollar[1].msgLitFlds.next = protoDollar[2].msgLitFlds - protoVAL.msgLitFlds = protoDollar[1].msgLitFlds - } else { - protoVAL.msgLitFlds = protoDollar[2].msgLitFlds - } - } - case 85: - protoDollar = protoS[protopt-1 : protopt+1] - { - if protoDollar[1].msgLitFld != nil { - protoVAL.msgLitFlds = &messageFieldList{field: protoDollar[1].msgLitFld} - } else { - protoVAL.msgLitFlds = nil - } - } - case 86: - protoDollar = protoS[protopt-2 : protopt+1] - { - if protoDollar[1].msgLitFld != nil { - protoVAL.msgLitFlds = &messageFieldList{field: protoDollar[1].msgLitFld, delimiter: protoDollar[2].b} - } else { - protoVAL.msgLitFlds = nil - } - } - case 87: - protoDollar = protoS[protopt-2 : protopt+1] - { - if protoDollar[1].msgLitFld != nil { - protoVAL.msgLitFlds = &messageFieldList{field: protoDollar[1].msgLitFld, delimiter: protoDollar[2].b} - } else { - protoVAL.msgLitFlds = nil - } - } - case 88: - protoDollar = protoS[protopt-2 : protopt+1] - { - protoVAL.msgLitFlds = nil - } - case 89: - protoDollar = protoS[protopt-2 : protopt+1] - { - protoVAL.msgLitFlds = nil - } - case 90: protoDollar = protoS[protopt-1 : protopt+1] { - protoVAL.msgLitFlds = nil + protoVAL.sl = &valueList{protoDollar[1].v, nil, nil} } - case 91: + case 82: protoDollar = protoS[protopt-3 : protopt+1] { - if protoDollar[1].refRaw != nil && protoDollar[2].b != nil { - protoVAL.msgLitFld = ast.NewMessageFieldNode(protoDollar[1].refRaw, protoDollar[2].b, protoDollar[3].v) - } else { - protoVAL.msgLitFld = nil - } - } - case 92: - protoDollar = protoS[protopt-2 : protopt+1] - { - if protoDollar[1].refRaw != nil && protoDollar[2].v != nil { - protoVAL.msgLitFld = ast.NewMessageFieldNode(protoDollar[1].refRaw, nil, protoDollar[2].v) - } else { - protoVAL.msgLitFld = nil - } + protoVAL.sl = &valueList{protoDollar[1].v, protoDollar[2].b, protoDollar[3].sl} } - case 93: + case 83: protoDollar = protoS[protopt-3 : protopt+1] { - protoVAL.msgLitFld = nil + fields, delims := protoDollar[2].msgLit.toNodes() + msg := ast.NewMessageLiteralNode(protoDollar[1].b, fields, delims, protoDollar[3].b) + protoVAL.sl = &valueList{msg, nil, nil} } - case 94: - protoDollar = protoS[protopt-1 : 
protopt+1] + case 84: + protoDollar = protoS[protopt-5 : protopt+1] { - protoVAL.refRaw = ast.NewFieldReferenceNode(protoDollar[1].id) + fields, delims := protoDollar[2].msgLit.toNodes() + msg := ast.NewMessageLiteralNode(protoDollar[1].b, fields, delims, protoDollar[3].b) + protoVAL.sl = &valueList{msg, protoDollar[4].b, protoDollar[5].sl} } - case 95: + case 85: protoDollar = protoS[protopt-3 : protopt+1] { - protoVAL.refRaw = ast.NewExtensionFieldReferenceNode(protoDollar[1].b, protoDollar[2].cid.toIdentValueNode(nil), protoDollar[3].b) + protoVAL.sl = nil } - case 96: + case 86: protoDollar = protoS[protopt-5 : protopt+1] { - protoVAL.refRaw = ast.NewAnyTypeReferenceNode(protoDollar[1].b, protoDollar[2].cid.toIdentValueNode(nil), protoDollar[3].b, protoDollar[4].cid.toIdentValueNode(nil), protoDollar[5].b) - } - case 97: - protoDollar = protoS[protopt-3 : protopt+1] - { - protoVAL.refRaw = nil + protoVAL.sl = protoDollar[5].sl } - case 101: + case 87: protoDollar = protoS[protopt-1 : protopt+1] { - protoVAL.v = toStringValueNode(protoDollar[1].str) + protoVAL.tid = protoDollar[1].cid.toIdentValueNode(nil) } - case 103: + case 88: protoDollar = protoS[protopt-2 : protopt+1] { - kw := protoDollar[2].id.ToKeyword() - switch strings.ToLower(kw.Val) { - case "inf", "infinity", "nan": - // these are acceptable - default: - // anything else is not - protolex.(*protoLex).Error(`only identifiers "inf", "infinity", or "nan" may appear after negative sign`) - } - // we'll validate the identifier later - f := ast.NewSpecialFloatLiteralNode(kw) - protoVAL.v = ast.NewSignedFloatLiteralNode(protoDollar[1].b, f) + protoVAL.tid = protoDollar[2].cid.toIdentValueNode(protoDollar[1].b) } - case 104: + case 89: protoDollar = protoS[protopt-1 : protopt+1] { - protoVAL.v = protoDollar[1].id - } - case 108: - protoDollar = protoS[protopt-3 : protopt+1] - { - if protoDollar[2].msgLitFlds == nil { - protoVAL.v = ast.NewMessageLiteralNode(protoDollar[1].b, nil, nil, protoDollar[3].b) - } else { - fields, delimiters := protoDollar[2].msgLitFlds.toNodes() - protoVAL.v = ast.NewMessageLiteralNode(protoDollar[1].b, fields, delimiters, protoDollar[3].b) - } - } - case 109: - protoDollar = protoS[protopt-2 : protopt+1] - { - protoVAL.v = ast.NewMessageLiteralNode(protoDollar[1].b, nil, nil, protoDollar[2].b) - } - case 110: - protoDollar = protoS[protopt-3 : protopt+1] - { - if protoDollar[2].sl == nil { - protoVAL.v = ast.NewArrayLiteralNode(protoDollar[1].b, nil, nil, protoDollar[3].b) - } else { - protoVAL.v = ast.NewArrayLiteralNode(protoDollar[1].b, protoDollar[2].sl.vals, protoDollar[2].sl.commas, protoDollar[3].b) - } + protoVAL.tid = protoDollar[1].cid.toIdentValueNode(nil) } - case 111: + case 90: protoDollar = protoS[protopt-2 : protopt+1] { - protoVAL.v = ast.NewArrayLiteralNode(protoDollar[1].b, nil, nil, protoDollar[2].b) - } - case 112: - protoDollar = protoS[protopt-3 : protopt+1] - { - protoVAL.v = ast.NewArrayLiteralNode(protoDollar[1].b, nil, nil, protoDollar[3].b) + protoVAL.tid = protoDollar[2].cid.toIdentValueNode(protoDollar[1].b) } - case 113: + case 91: protoDollar = protoS[protopt-1 : protopt+1] { - protoVAL.sl = &valueSlices{vals: []ast.ValueNode{protoDollar[1].v}} - } - case 114: - protoDollar = protoS[protopt-3 : protopt+1] - { - protoDollar[1].sl.vals = append(protoDollar[1].sl.vals, protoDollar[3].v) - protoDollar[1].sl.commas = append(protoDollar[1].sl.commas, protoDollar[2].b) - protoVAL.sl = protoDollar[1].sl - } - case 117: - protoDollar = protoS[protopt-3 : protopt+1] - { - if 
protoDollar[2].sl == nil { - protoVAL.v = ast.NewArrayLiteralNode(protoDollar[1].b, nil, nil, protoDollar[3].b) - } else { - protoVAL.v = ast.NewArrayLiteralNode(protoDollar[1].b, protoDollar[2].sl.vals, protoDollar[2].sl.commas, protoDollar[3].b) - } + protoVAL.tid = protoDollar[1].cid.toIdentValueNode(nil) } - case 118: + case 92: protoDollar = protoS[protopt-2 : protopt+1] { - protoVAL.v = ast.NewArrayLiteralNode(protoDollar[1].b, nil, nil, protoDollar[2].b) - } - case 119: - protoDollar = protoS[protopt-3 : protopt+1] - { - protoVAL.v = ast.NewArrayLiteralNode(protoDollar[1].b, nil, nil, protoDollar[3].b) - } - case 120: - protoDollar = protoS[protopt-1 : protopt+1] - { - protoVAL.sl = &valueSlices{vals: []ast.ValueNode{protoDollar[1].v}} - } - case 121: - protoDollar = protoS[protopt-3 : protopt+1] - { - protoDollar[1].sl.vals = append(protoDollar[1].sl.vals, protoDollar[3].v) - protoDollar[1].sl.commas = append(protoDollar[1].sl.commas, protoDollar[2].b) - protoVAL.sl = protoDollar[1].sl + protoVAL.tid = protoDollar[2].cid.toIdentValueNode(protoDollar[1].b) } - case 122: + case 93: protoDollar = protoS[protopt-1 : protopt+1] { protoVAL.tid = protoDollar[1].cid.toIdentValueNode(nil) } - case 123: + case 94: protoDollar = protoS[protopt-2 : protopt+1] { protoVAL.tid = protoDollar[2].cid.toIdentValueNode(protoDollar[1].b) } - case 124: - protoDollar = protoS[protopt-1 : protopt+1] + case 95: + protoDollar = protoS[protopt-6 : protopt+1] { - protoVAL.tid = protoDollar[1].cid.toIdentValueNode(nil) + protoVAL.fld = ast.NewFieldNode(protoDollar[1].id.ToKeyword(), protoDollar[2].tid, protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, nil, protoDollar[6].b) } - case 125: - protoDollar = protoS[protopt-2 : protopt+1] + case 96: + protoDollar = protoS[protopt-6 : protopt+1] { - protoVAL.tid = protoDollar[2].cid.toIdentValueNode(protoDollar[1].b) + protoVAL.fld = ast.NewFieldNode(protoDollar[1].id.ToKeyword(), protoDollar[2].tid, protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, nil, protoDollar[6].b) } - case 126: - protoDollar = protoS[protopt-1 : protopt+1] + case 97: + protoDollar = protoS[protopt-6 : protopt+1] { - protoVAL.tid = protoDollar[1].cid.toIdentValueNode(nil) + protoVAL.fld = ast.NewFieldNode(protoDollar[1].id.ToKeyword(), protoDollar[2].tid, protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, nil, protoDollar[6].b) } - case 127: - protoDollar = protoS[protopt-2 : protopt+1] + case 98: + protoDollar = protoS[protopt-7 : protopt+1] { - protoVAL.tid = protoDollar[2].cid.toIdentValueNode(protoDollar[1].b) + protoVAL.fld = ast.NewFieldNode(protoDollar[1].id.ToKeyword(), protoDollar[2].tid, protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, protoDollar[6].cmpctOpts, protoDollar[7].b) } - case 128: - protoDollar = protoS[protopt-1 : protopt+1] + case 99: + protoDollar = protoS[protopt-7 : protopt+1] { - protoVAL.tid = protoDollar[1].cid.toIdentValueNode(nil) + protoVAL.fld = ast.NewFieldNode(protoDollar[1].id.ToKeyword(), protoDollar[2].tid, protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, protoDollar[6].cmpctOpts, protoDollar[7].b) } - case 129: - protoDollar = protoS[protopt-2 : protopt+1] + case 100: + protoDollar = protoS[protopt-7 : protopt+1] { - protoVAL.tid = protoDollar[2].cid.toIdentValueNode(protoDollar[1].b) + protoVAL.fld = ast.NewFieldNode(protoDollar[1].id.ToKeyword(), protoDollar[2].tid, protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, protoDollar[6].cmpctOpts, protoDollar[7].b) } - case 130: - protoDollar = protoS[protopt-1 : protopt+1] + 
case 101: + protoDollar = protoS[protopt-5 : protopt+1] { - protoVAL.tid = protoDollar[1].cid.toIdentValueNode(nil) + protoVAL.fld = ast.NewFieldNode(nil, protoDollar[1].tid, protoDollar[2].id, protoDollar[3].b, protoDollar[4].i, nil, protoDollar[5].b) } - case 131: - protoDollar = protoS[protopt-2 : protopt+1] + case 102: + protoDollar = protoS[protopt-6 : protopt+1] { - protoVAL.tid = protoDollar[2].cid.toIdentValueNode(protoDollar[1].b) + protoVAL.fld = ast.NewFieldNode(nil, protoDollar[1].tid, protoDollar[2].id, protoDollar[3].b, protoDollar[4].i, protoDollar[5].cmpctOpts, protoDollar[6].b) } - case 132: - protoDollar = protoS[protopt-1 : protopt+1] + case 103: + protoDollar = protoS[protopt-6 : protopt+1] { - protoVAL.tid = protoDollar[1].cid.toIdentValueNode(nil) + protoVAL.fld = ast.NewFieldNode(protoDollar[1].id.ToKeyword(), protoDollar[2].tid, protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, nil, protoDollar[6].b) } - case 133: - protoDollar = protoS[protopt-2 : protopt+1] + case 104: + protoDollar = protoS[protopt-6 : protopt+1] { - protoVAL.tid = protoDollar[2].cid.toIdentValueNode(protoDollar[1].b) + protoVAL.fld = ast.NewFieldNode(protoDollar[1].id.ToKeyword(), protoDollar[2].tid, protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, nil, protoDollar[6].b) } - case 137: - protoDollar = protoS[protopt-3 : protopt+1] + case 105: + protoDollar = protoS[protopt-6 : protopt+1] { - protoVAL.cmpctOpts = ast.NewCompactOptionsNode(protoDollar[1].b, protoDollar[2].opts.options, protoDollar[2].opts.commas, protoDollar[3].b) + protoVAL.fld = ast.NewFieldNode(protoDollar[1].id.ToKeyword(), protoDollar[2].tid, protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, nil, protoDollar[6].b) } - case 138: - protoDollar = protoS[protopt-2 : protopt+1] + case 106: + protoDollar = protoS[protopt-7 : protopt+1] { - protolex.(*protoLex).Error("compact options must have at least one option") - protoVAL.cmpctOpts = ast.NewCompactOptionsNode(protoDollar[1].b, nil, nil, protoDollar[2].b) + protoVAL.fld = ast.NewFieldNode(protoDollar[1].id.ToKeyword(), protoDollar[2].tid, protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, protoDollar[6].cmpctOpts, protoDollar[7].b) } - case 139: - protoDollar = protoS[protopt-1 : protopt+1] + case 107: + protoDollar = protoS[protopt-7 : protopt+1] { - protoVAL.opts = &compactOptionSlices{options: []*ast.OptionNode{protoDollar[1].opt.Node}, commas: protoDollar[1].opt.Runes} + protoVAL.fld = ast.NewFieldNode(protoDollar[1].id.ToKeyword(), protoDollar[2].tid, protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, protoDollar[6].cmpctOpts, protoDollar[7].b) } - case 140: - protoDollar = protoS[protopt-2 : protopt+1] + case 108: + protoDollar = protoS[protopt-7 : protopt+1] { - protoDollar[1].opts.options = append(protoDollar[1].opts.options, protoDollar[2].opt.Node) - protoDollar[1].opts.commas = append(protoDollar[1].opts.commas, protoDollar[2].opt.Runes...) 
- protoVAL.opts = protoDollar[1].opts + protoVAL.fld = ast.NewFieldNode(protoDollar[1].id.ToKeyword(), protoDollar[2].tid, protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, protoDollar[6].cmpctOpts, protoDollar[7].b) } - case 141: - protoDollar = protoS[protopt-1 : protopt+1] + case 109: + protoDollar = protoS[protopt-5 : protopt+1] { - protoVAL.opts = &compactOptionSlices{options: []*ast.OptionNode{protoDollar[1].opt.Node}, commas: protoDollar[1].opt.Runes} + protoVAL.fld = ast.NewFieldNode(nil, protoDollar[1].tid, protoDollar[2].id, protoDollar[3].b, protoDollar[4].i, nil, protoDollar[5].b) } - case 142: - protoDollar = protoS[protopt-2 : protopt+1] + case 110: + protoDollar = protoS[protopt-6 : protopt+1] { - protoDollar[1].opts.options = append(protoDollar[1].opts.options, protoDollar[2].opt.Node) - protoDollar[1].opts.commas = append(protoDollar[1].opts.commas, protoDollar[2].opt.Runes...) - protoVAL.opts = protoDollar[1].opts + protoVAL.fld = ast.NewFieldNode(nil, protoDollar[1].tid, protoDollar[2].id, protoDollar[3].b, protoDollar[4].i, protoDollar[5].cmpctOpts, protoDollar[6].b) } - case 143: - protoDollar = protoS[protopt-1 : protopt+1] + case 111: + protoDollar = protoS[protopt-3 : protopt+1] { - protoVAL.opt = newNodeWithRunes(protoDollar[1].optRaw) + opts, commas := protoDollar[2].opts.toNodes() + protoVAL.cmpctOpts = ast.NewCompactOptionsNode(protoDollar[1].b, opts, commas, protoDollar[3].b) } - case 144: + case 112: protoDollar = protoS[protopt-1 : protopt+1] { - protolex.(*protoLex).Error("syntax error: unexpected ','") - protoVAL.opt = protoDollar[1].opt + protoVAL.opts = &compactOptionList{protoDollar[1].opt, nil, nil} } - case 145: - protoDollar = protoS[protopt-2 : protopt+1] + case 113: + protoDollar = protoS[protopt-3 : protopt+1] { - protoVAL.opt = newNodeWithRunes(protoDollar[1].optRaw, protoDollar[2].b) + protoVAL.opts = &compactOptionList{protoDollar[1].opt, protoDollar[2].b, protoDollar[3].opts} } - case 146: + case 114: protoDollar = protoS[protopt-3 : protopt+1] { - optName := ast.NewOptionNameNode(protoDollar[1].optNms.refs, protoDollar[1].optNms.dots) - protoVAL.optRaw = ast.NewCompactOptionNode(optName, protoDollar[2].b, protoDollar[3].v) + refs, dots := protoDollar[1].optNms.toNodes() + optName := ast.NewOptionNameNode(refs, dots) + protoVAL.opt = ast.NewCompactOptionNode(optName, protoDollar[2].b, protoDollar[3].v) } - case 147: - protoDollar = protoS[protopt-1 : protopt+1] + case 115: + protoDollar = protoS[protopt-8 : protopt+1] { - optName := ast.NewOptionNameNode(protoDollar[1].optNms.refs, protoDollar[1].optNms.dots) - protolex.(*protoLex).Error("compact option must have a value") - protoVAL.optRaw = ast.NewCompactOptionNode(optName, nil, nil) + protoVAL.grp = ast.NewGroupNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id.ToKeyword(), protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, nil, protoDollar[6].b, protoDollar[7].msgDecls, protoDollar[8].b) } - case 148: + case 116: protoDollar = protoS[protopt-8 : protopt+1] { - protoVAL.grp = ast.NewGroupNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id.ToKeyword(), protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, nil, protoDollar[6].b, protoDollar[7].msgElements, protoDollar[8].b) + protoVAL.grp = ast.NewGroupNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id.ToKeyword(), protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, nil, protoDollar[6].b, protoDollar[7].msgDecls, protoDollar[8].b) } - case 149: - protoDollar = protoS[protopt-9 : protopt+1] + case 117: + protoDollar = 
protoS[protopt-8 : protopt+1] { - protoVAL.grp = ast.NewGroupNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id.ToKeyword(), protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, protoDollar[6].cmpctOpts, protoDollar[7].b, protoDollar[8].msgElements, protoDollar[9].b) + protoVAL.grp = ast.NewGroupNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id.ToKeyword(), protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, nil, protoDollar[6].b, protoDollar[7].msgDecls, protoDollar[8].b) } - case 150: + case 118: protoDollar = protoS[protopt-9 : protopt+1] { - protoVAL.msgGrp = newNodeWithRunes(ast.NewGroupNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id.ToKeyword(), protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, nil, protoDollar[6].b, protoDollar[7].msgElements, protoDollar[8].b), protoDollar[9].bs...) + protoVAL.grp = ast.NewGroupNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id.ToKeyword(), protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, protoDollar[6].cmpctOpts, protoDollar[7].b, protoDollar[8].msgDecls, protoDollar[9].b) } - case 151: - protoDollar = protoS[protopt-10 : protopt+1] - { - protoVAL.msgGrp = newNodeWithRunes(ast.NewGroupNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id.ToKeyword(), protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, protoDollar[6].cmpctOpts, protoDollar[7].b, protoDollar[8].msgElements, protoDollar[9].b), protoDollar[10].bs...) - } - case 152: - protoDollar = protoS[protopt-7 : protopt+1] - { - protoVAL.msgGrp = newNodeWithRunes(ast.NewGroupNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id.ToKeyword(), protoDollar[3].id, nil, nil, nil, protoDollar[4].b, protoDollar[5].msgElements, protoDollar[6].b), protoDollar[7].bs...) - } - case 153: - protoDollar = protoS[protopt-8 : protopt+1] + case 119: + protoDollar = protoS[protopt-9 : protopt+1] { - protoVAL.msgGrp = newNodeWithRunes(ast.NewGroupNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id.ToKeyword(), protoDollar[3].id, nil, nil, protoDollar[4].cmpctOpts, protoDollar[5].b, protoDollar[6].msgElements, protoDollar[7].b), protoDollar[8].bs...) + protoVAL.grp = ast.NewGroupNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id.ToKeyword(), protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, protoDollar[6].cmpctOpts, protoDollar[7].b, protoDollar[8].msgDecls, protoDollar[9].b) } - case 154: - protoDollar = protoS[protopt-6 : protopt+1] + case 120: + protoDollar = protoS[protopt-9 : protopt+1] { - protoVAL.oo = newNodeWithRunes(ast.NewOneofNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id, protoDollar[3].b, protoDollar[4].ooElements, protoDollar[5].b), protoDollar[6].bs...) 
+ protoVAL.grp = ast.NewGroupNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id.ToKeyword(), protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, protoDollar[6].cmpctOpts, protoDollar[7].b, protoDollar[8].msgDecls, protoDollar[9].b) } - case 155: - protoDollar = protoS[protopt-0 : protopt+1] + case 121: + protoDollar = protoS[protopt-5 : protopt+1] { - protoVAL.ooElements = nil + protoVAL.oo = ast.NewOneOfNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id, protoDollar[3].b, protoDollar[4].ooDecls, protoDollar[5].b) } - case 157: + case 122: protoDollar = protoS[protopt-2 : protopt+1] { - if protoDollar[2].ooElement != nil { - protoVAL.ooElements = append(protoDollar[1].ooElements, protoDollar[2].ooElement) + if protoDollar[2].ooDecl != nil { + protoVAL.ooDecls = append(protoDollar[1].ooDecls, protoDollar[2].ooDecl) } else { - protoVAL.ooElements = protoDollar[1].ooElements + protoVAL.ooDecls = protoDollar[1].ooDecls } } - case 158: + case 123: protoDollar = protoS[protopt-1 : protopt+1] { - if protoDollar[1].ooElement != nil { - protoVAL.ooElements = []ast.OneofElement{protoDollar[1].ooElement} + if protoDollar[1].ooDecl != nil { + protoVAL.ooDecls = []ast.OneOfElement{protoDollar[1].ooDecl} } else { - protoVAL.ooElements = nil + protoVAL.ooDecls = nil } } - case 159: + case 124: + protoDollar = protoS[protopt-0 : protopt+1] + { + protoVAL.ooDecls = nil + } + case 125: protoDollar = protoS[protopt-1 : protopt+1] { - protoVAL.ooElement = protoDollar[1].optRaw + protoVAL.ooDecl = protoDollar[1].opt } - case 160: + case 126: protoDollar = protoS[protopt-1 : protopt+1] { - protoVAL.ooElement = protoDollar[1].fld + protoVAL.ooDecl = protoDollar[1].fld } - case 161: + case 127: protoDollar = protoS[protopt-1 : protopt+1] { - protoVAL.ooElement = protoDollar[1].grp + protoVAL.ooDecl = protoDollar[1].grp } - case 162: + case 128: protoDollar = protoS[protopt-2 : protopt+1] { - protoVAL.ooElement = nil + protoVAL.ooDecl = nil } - case 163: + case 129: protoDollar = protoS[protopt-1 : protopt+1] { - protoVAL.ooElement = nil + protoVAL.ooDecl = nil } - case 164: + case 130: protoDollar = protoS[protopt-5 : protopt+1] { protoVAL.fld = ast.NewFieldNode(nil, protoDollar[1].tid, protoDollar[2].id, protoDollar[3].b, protoDollar[4].i, nil, protoDollar[5].b) } - case 165: + case 131: protoDollar = protoS[protopt-6 : protopt+1] { protoVAL.fld = ast.NewFieldNode(nil, protoDollar[1].tid, protoDollar[2].id, protoDollar[3].b, protoDollar[4].i, protoDollar[5].cmpctOpts, protoDollar[6].b) } - case 166: - protoDollar = protoS[protopt-3 : protopt+1] - { - protoVAL.fld = ast.NewFieldNode(nil, protoDollar[1].tid, protoDollar[2].id, nil, nil, nil, protoDollar[3].b) - } - case 167: - protoDollar = protoS[protopt-4 : protopt+1] - { - protoVAL.fld = ast.NewFieldNode(nil, protoDollar[1].tid, protoDollar[2].id, nil, nil, protoDollar[3].cmpctOpts, protoDollar[4].b) - } - case 168: + case 132: protoDollar = protoS[protopt-7 : protopt+1] { - protoVAL.grp = ast.NewGroupNode(nil, protoDollar[1].id.ToKeyword(), protoDollar[2].id, protoDollar[3].b, protoDollar[4].i, nil, protoDollar[5].b, protoDollar[6].msgElements, protoDollar[7].b) + protoVAL.grp = ast.NewGroupNode(nil, protoDollar[1].id.ToKeyword(), protoDollar[2].id, protoDollar[3].b, protoDollar[4].i, nil, protoDollar[5].b, protoDollar[6].msgDecls, protoDollar[7].b) } - case 169: + case 133: protoDollar = protoS[protopt-8 : protopt+1] { - protoVAL.grp = ast.NewGroupNode(nil, protoDollar[1].id.ToKeyword(), protoDollar[2].id, protoDollar[3].b, protoDollar[4].i, 
protoDollar[5].cmpctOpts, protoDollar[6].b, protoDollar[7].msgElements, protoDollar[8].b) + protoVAL.grp = ast.NewGroupNode(nil, protoDollar[1].id.ToKeyword(), protoDollar[2].id, protoDollar[3].b, protoDollar[4].i, protoDollar[5].cmpctOpts, protoDollar[6].b, protoDollar[7].msgDecls, protoDollar[8].b) } - case 170: - protoDollar = protoS[protopt-5 : protopt+1] - { - protoVAL.grp = ast.NewGroupNode(nil, protoDollar[1].id.ToKeyword(), protoDollar[2].id, nil, nil, nil, protoDollar[3].b, protoDollar[4].msgElements, protoDollar[5].b) - } - case 171: - protoDollar = protoS[protopt-6 : protopt+1] - { - protoVAL.grp = ast.NewGroupNode(nil, protoDollar[1].id.ToKeyword(), protoDollar[2].id, nil, nil, protoDollar[3].cmpctOpts, protoDollar[4].b, protoDollar[5].msgElements, protoDollar[6].b) - } - case 172: + case 134: protoDollar = protoS[protopt-5 : protopt+1] { - semi, extra := protolex.(*protoLex).requireSemicolon(protoDollar[5].bs) - protoVAL.mapFld = newNodeWithRunes(ast.NewMapFieldNode(protoDollar[1].mapType, protoDollar[2].id, protoDollar[3].b, protoDollar[4].i, nil, semi), extra...) + protoVAL.mapFld = ast.NewMapFieldNode(protoDollar[1].mapType, protoDollar[2].id, protoDollar[3].b, protoDollar[4].i, nil, protoDollar[5].b) } - case 173: + case 135: protoDollar = protoS[protopt-6 : protopt+1] { - semi, extra := protolex.(*protoLex).requireSemicolon(protoDollar[6].bs) - protoVAL.mapFld = newNodeWithRunes(ast.NewMapFieldNode(protoDollar[1].mapType, protoDollar[2].id, protoDollar[3].b, protoDollar[4].i, protoDollar[5].cmpctOpts, semi), extra...) - } - case 174: - protoDollar = protoS[protopt-3 : protopt+1] - { - semi, extra := protolex.(*protoLex).requireSemicolon(protoDollar[3].bs) - protoVAL.mapFld = newNodeWithRunes(ast.NewMapFieldNode(protoDollar[1].mapType, protoDollar[2].id, nil, nil, nil, semi), extra...) - } - case 175: - protoDollar = protoS[protopt-4 : protopt+1] - { - semi, extra := protolex.(*protoLex).requireSemicolon(protoDollar[4].bs) - protoVAL.mapFld = newNodeWithRunes(ast.NewMapFieldNode(protoDollar[1].mapType, protoDollar[2].id, nil, nil, protoDollar[3].cmpctOpts, semi), extra...) + protoVAL.mapFld = ast.NewMapFieldNode(protoDollar[1].mapType, protoDollar[2].id, protoDollar[3].b, protoDollar[4].i, protoDollar[5].cmpctOpts, protoDollar[6].b) } - case 176: + case 136: protoDollar = protoS[protopt-6 : protopt+1] { protoVAL.mapType = ast.NewMapTypeNode(protoDollar[1].id.ToKeyword(), protoDollar[2].b, protoDollar[3].id, protoDollar[4].b, protoDollar[5].tid, protoDollar[6].b) } - case 189: - protoDollar = protoS[protopt-4 : protopt+1] + case 149: + protoDollar = protoS[protopt-3 : protopt+1] { - // TODO: Tolerate a missing semicolon here. This currnelty creates a shift/reduce conflict - // between `extensions 1 to 10` and `extensions 1` followed by `to = 10`. - protoVAL.ext = newNodeWithRunes(ast.NewExtensionRangeNode(protoDollar[1].id.ToKeyword(), protoDollar[2].rngs.ranges, protoDollar[2].rngs.commas, nil, protoDollar[3].b), protoDollar[4].bs...) + ranges, commas := protoDollar[2].rngs.toNodes() + protoVAL.ext = ast.NewExtensionRangeNode(protoDollar[1].id.ToKeyword(), ranges, commas, nil, protoDollar[3].b) } - case 190: + case 150: protoDollar = protoS[protopt-4 : protopt+1] { - semi, extra := protolex.(*protoLex).requireSemicolon(protoDollar[4].bs) - protoVAL.ext = newNodeWithRunes(ast.NewExtensionRangeNode(protoDollar[1].id.ToKeyword(), protoDollar[2].rngs.ranges, protoDollar[2].rngs.commas, protoDollar[3].cmpctOpts, semi), extra...) 
+ ranges, commas := protoDollar[2].rngs.toNodes() + protoVAL.ext = ast.NewExtensionRangeNode(protoDollar[1].id.ToKeyword(), ranges, commas, protoDollar[3].cmpctOpts, protoDollar[4].b) } - case 191: + case 151: protoDollar = protoS[protopt-1 : protopt+1] { - protoVAL.rngs = &rangeSlices{ranges: []*ast.RangeNode{protoDollar[1].rng}} + protoVAL.rngs = &rangeList{protoDollar[1].rng, nil, nil} } - case 192: + case 152: protoDollar = protoS[protopt-3 : protopt+1] { - protoDollar[1].rngs.ranges = append(protoDollar[1].rngs.ranges, protoDollar[3].rng) - protoDollar[1].rngs.commas = append(protoDollar[1].rngs.commas, protoDollar[2].b) - protoVAL.rngs = protoDollar[1].rngs + protoVAL.rngs = &rangeList{protoDollar[1].rng, protoDollar[2].b, protoDollar[3].rngs} } - case 193: + case 153: protoDollar = protoS[protopt-1 : protopt+1] { protoVAL.rng = ast.NewRangeNode(protoDollar[1].i, nil, nil, nil) } - case 194: + case 154: protoDollar = protoS[protopt-3 : protopt+1] { protoVAL.rng = ast.NewRangeNode(protoDollar[1].i, protoDollar[2].id.ToKeyword(), protoDollar[3].i, nil) } - case 195: + case 155: protoDollar = protoS[protopt-3 : protopt+1] { protoVAL.rng = ast.NewRangeNode(protoDollar[1].i, protoDollar[2].id.ToKeyword(), nil, protoDollar[3].id.ToKeyword()) } - case 196: + case 156: protoDollar = protoS[protopt-1 : protopt+1] { - protoVAL.rngs = &rangeSlices{ranges: []*ast.RangeNode{protoDollar[1].rng}} + protoVAL.rngs = &rangeList{protoDollar[1].rng, nil, nil} } - case 197: + case 157: protoDollar = protoS[protopt-3 : protopt+1] { - protoDollar[1].rngs.ranges = append(protoDollar[1].rngs.ranges, protoDollar[3].rng) - protoDollar[1].rngs.commas = append(protoDollar[1].rngs.commas, protoDollar[2].b) - protoVAL.rngs = protoDollar[1].rngs + protoVAL.rngs = &rangeList{protoDollar[1].rng, protoDollar[2].b, protoDollar[3].rngs} } - case 198: + case 158: protoDollar = protoS[protopt-1 : protopt+1] { protoVAL.rng = ast.NewRangeNode(protoDollar[1].il, nil, nil, nil) } - case 199: + case 159: protoDollar = protoS[protopt-3 : protopt+1] { protoVAL.rng = ast.NewRangeNode(protoDollar[1].il, protoDollar[2].id.ToKeyword(), protoDollar[3].il, nil) } - case 200: + case 160: protoDollar = protoS[protopt-3 : protopt+1] { protoVAL.rng = ast.NewRangeNode(protoDollar[1].il, protoDollar[2].id.ToKeyword(), nil, protoDollar[3].id.ToKeyword()) } - case 201: + case 161: protoDollar = protoS[protopt-1 : protopt+1] { protoVAL.il = protoDollar[1].i } - case 202: + case 162: protoDollar = protoS[protopt-2 : protopt+1] { protoVAL.il = ast.NewNegativeIntLiteralNode(protoDollar[1].b, protoDollar[2].i) } - case 203: - protoDollar = protoS[protopt-4 : protopt+1] - { - // TODO: Tolerate a missing semicolon here. This currnelty creates a shift/reduce conflict - // between `reserved 1 to 10` and `reserved 1` followed by `to = 10`. - protoVAL.resvd = newNodeWithRunes(ast.NewReservedRangesNode(protoDollar[1].id.ToKeyword(), protoDollar[2].rngs.ranges, protoDollar[2].rngs.commas, protoDollar[3].b), protoDollar[4].bs...) - } - case 205: - protoDollar = protoS[protopt-4 : protopt+1] + case 163: + protoDollar = protoS[protopt-3 : protopt+1] { - // TODO: Tolerate a missing semicolon here. This currnelty creates a shift/reduce conflict - // between `reserved 1 to 10` and `reserved 1` followed by `to = 10`. - protoVAL.resvd = newNodeWithRunes(ast.NewReservedRangesNode(protoDollar[1].id.ToKeyword(), protoDollar[2].rngs.ranges, protoDollar[2].rngs.commas, protoDollar[3].b), protoDollar[4].bs...) 
+ ranges, commas := protoDollar[2].rngs.toNodes() + protoVAL.resvd = ast.NewReservedRangesNode(protoDollar[1].id.ToKeyword(), ranges, commas, protoDollar[3].b) } - case 207: + case 165: protoDollar = protoS[protopt-3 : protopt+1] { - semi, extra := protolex.(*protoLex).requireSemicolon(protoDollar[3].bs) - protoVAL.resvd = newNodeWithRunes(ast.NewReservedNamesNode(protoDollar[1].id.ToKeyword(), protoDollar[2].names.names, protoDollar[2].names.commas, semi), extra...) + ranges, commas := protoDollar[2].rngs.toNodes() + protoVAL.resvd = ast.NewReservedRangesNode(protoDollar[1].id.ToKeyword(), ranges, commas, protoDollar[3].b) } - case 208: + case 167: protoDollar = protoS[protopt-3 : protopt+1] { - semi, extra := protolex.(*protoLex).requireSemicolon(protoDollar[3].bs) - protoVAL.resvd = newNodeWithRunes(ast.NewReservedIdentifiersNode(protoDollar[1].id.ToKeyword(), protoDollar[2].names.idents, protoDollar[2].names.commas, semi), extra...) + names, commas := protoDollar[2].names.toNodes() + protoVAL.resvd = ast.NewReservedNamesNode(protoDollar[1].id.ToKeyword(), names, commas, protoDollar[3].b) } - case 209: + case 168: protoDollar = protoS[protopt-1 : protopt+1] { - protoVAL.names = &nameSlices{names: []ast.StringValueNode{toStringValueNode(protoDollar[1].str)}} + protoVAL.names = &nameList{protoDollar[1].str.toStringValueNode(), nil, nil} } - case 210: + case 169: protoDollar = protoS[protopt-3 : protopt+1] { - protoDollar[1].names.names = append(protoDollar[1].names.names, toStringValueNode(protoDollar[3].str)) - protoDollar[1].names.commas = append(protoDollar[1].names.commas, protoDollar[2].b) - protoVAL.names = protoDollar[1].names + protoVAL.names = &nameList{protoDollar[1].str.toStringValueNode(), protoDollar[2].b, protoDollar[3].names} } - case 211: - protoDollar = protoS[protopt-1 : protopt+1] - { - protoVAL.names = &nameSlices{idents: []*ast.IdentNode{protoDollar[1].id}} - } - case 212: - protoDollar = protoS[protopt-3 : protopt+1] + case 170: + protoDollar = protoS[protopt-5 : protopt+1] { - protoDollar[1].names.idents = append(protoDollar[1].names.idents, protoDollar[3].id) - protoDollar[1].names.commas = append(protoDollar[1].names.commas, protoDollar[2].b) - protoVAL.names = protoDollar[1].names + protoVAL.en = ast.NewEnumNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id, protoDollar[3].b, protoDollar[4].enDecls, protoDollar[5].b) } - case 213: - protoDollar = protoS[protopt-6 : protopt+1] + case 171: + protoDollar = protoS[protopt-2 : protopt+1] { - protoVAL.en = newNodeWithRunes(ast.NewEnumNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id, protoDollar[3].b, protoDollar[4].enElements, protoDollar[5].b), protoDollar[6].bs...) 
+ if protoDollar[2].enDecl != nil { + protoVAL.enDecls = append(protoDollar[1].enDecls, protoDollar[2].enDecl) + } else { + protoVAL.enDecls = protoDollar[1].enDecls + } } - case 214: + case 172: protoDollar = protoS[protopt-1 : protopt+1] { - protoVAL.enElements = prependRunes(toEnumElement, protoDollar[1].bs, nil) + if protoDollar[1].enDecl != nil { + protoVAL.enDecls = []ast.EnumElement{protoDollar[1].enDecl} + } else { + protoVAL.enDecls = nil + } } - case 215: - protoDollar = protoS[protopt-2 : protopt+1] + case 173: + protoDollar = protoS[protopt-0 : protopt+1] { - protoVAL.enElements = prependRunes(toEnumElement, protoDollar[1].bs, protoDollar[2].enElements) + protoVAL.enDecls = nil } - case 216: - protoDollar = protoS[protopt-2 : protopt+1] + case 174: + protoDollar = protoS[protopt-1 : protopt+1] { - protoVAL.enElements = append(protoDollar[1].enElements, protoDollar[2].enElements...) + protoVAL.enDecl = protoDollar[1].opt } - case 217: + case 175: protoDollar = protoS[protopt-1 : protopt+1] { - protoVAL.enElements = protoDollar[1].enElements + protoVAL.enDecl = protoDollar[1].env } - case 218: + case 176: protoDollar = protoS[protopt-1 : protopt+1] { - protoVAL.enElements = toElements[ast.EnumElement](toEnumElement, protoDollar[1].opt.Node, protoDollar[1].opt.Runes) + protoVAL.enDecl = protoDollar[1].resvd } - case 219: + case 177: protoDollar = protoS[protopt-1 : protopt+1] { - protoVAL.enElements = toElements[ast.EnumElement](toEnumElement, protoDollar[1].env.Node, protoDollar[1].env.Runes) + protoVAL.enDecl = ast.NewEmptyDeclNode(protoDollar[1].b) } - case 220: - protoDollar = protoS[protopt-1 : protopt+1] + case 178: + protoDollar = protoS[protopt-2 : protopt+1] { - protoVAL.enElements = toElements[ast.EnumElement](toEnumElement, protoDollar[1].resvd.Node, protoDollar[1].resvd.Runes) + protoVAL.enDecl = nil } - case 221: + case 179: protoDollar = protoS[protopt-1 : protopt+1] { - protoVAL.enElements = nil + protoVAL.enDecl = nil } - case 222: + case 180: protoDollar = protoS[protopt-4 : protopt+1] { - semi, extra := protolex.(*protoLex).requireSemicolon(protoDollar[4].bs) - protoVAL.env = newNodeWithRunes(ast.NewEnumValueNode(protoDollar[1].id, protoDollar[2].b, protoDollar[3].il, nil, semi), extra...) + protoVAL.env = ast.NewEnumValueNode(protoDollar[1].id, protoDollar[2].b, protoDollar[3].il, nil, protoDollar[4].b) } - case 223: + case 181: protoDollar = protoS[protopt-5 : protopt+1] { - semi, extra := protolex.(*protoLex).requireSemicolon(protoDollar[5].bs) - protoVAL.env = newNodeWithRunes(ast.NewEnumValueNode(protoDollar[1].id, protoDollar[2].b, protoDollar[3].il, protoDollar[4].cmpctOpts, semi), extra...) - } - case 224: - protoDollar = protoS[protopt-6 : protopt+1] - { - protoVAL.msg = newNodeWithRunes(ast.NewMessageNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id, protoDollar[3].b, protoDollar[4].msgElements, protoDollar[5].b), protoDollar[6].bs...) 
- } - case 225: - protoDollar = protoS[protopt-1 : protopt+1] - { - protoVAL.msgElements = prependRunes(toMessageElement, protoDollar[1].bs, nil) + protoVAL.env = ast.NewEnumValueNode(protoDollar[1].id, protoDollar[2].b, protoDollar[3].il, protoDollar[4].cmpctOpts, protoDollar[5].b) } - case 226: - protoDollar = protoS[protopt-2 : protopt+1] + case 182: + protoDollar = protoS[protopt-5 : protopt+1] { - protoVAL.msgElements = prependRunes(toMessageElement, protoDollar[1].bs, protoDollar[2].msgElements) + protoVAL.msg = ast.NewMessageNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id, protoDollar[3].b, protoDollar[4].msgDecls, protoDollar[5].b) } - case 227: + case 183: protoDollar = protoS[protopt-2 : protopt+1] { - protoVAL.msgElements = append(protoDollar[1].msgElements, protoDollar[2].msgElements...) + if protoDollar[2].msgDecl != nil { + protoVAL.msgDecls = append(protoDollar[1].msgDecls, protoDollar[2].msgDecl) + } else { + protoVAL.msgDecls = protoDollar[1].msgDecls + } } - case 228: + case 184: protoDollar = protoS[protopt-1 : protopt+1] { - protoVAL.msgElements = protoDollar[1].msgElements + if protoDollar[1].msgDecl != nil { + protoVAL.msgDecls = []ast.MessageElement{protoDollar[1].msgDecl} + } else { + protoVAL.msgDecls = nil + } } - case 229: - protoDollar = protoS[protopt-1 : protopt+1] + case 185: + protoDollar = protoS[protopt-0 : protopt+1] { - protoVAL.msgElements = toElements[ast.MessageElement](toMessageElement, protoDollar[1].msgFld.Node, protoDollar[1].msgFld.Runes) + protoVAL.msgDecls = nil } - case 230: + case 186: protoDollar = protoS[protopt-1 : protopt+1] { - protoVAL.msgElements = toElements[ast.MessageElement](toMessageElement, protoDollar[1].en.Node, protoDollar[1].en.Runes) + protoVAL.msgDecl = protoDollar[1].fld } - case 231: + case 187: protoDollar = protoS[protopt-1 : protopt+1] { - protoVAL.msgElements = toElements[ast.MessageElement](toMessageElement, protoDollar[1].msg.Node, protoDollar[1].msg.Runes) + protoVAL.msgDecl = protoDollar[1].en } - case 232: + case 188: protoDollar = protoS[protopt-1 : protopt+1] { - protoVAL.msgElements = toElements[ast.MessageElement](toMessageElement, protoDollar[1].extend.Node, protoDollar[1].extend.Runes) + protoVAL.msgDecl = protoDollar[1].msg } - case 233: + case 189: protoDollar = protoS[protopt-1 : protopt+1] { - protoVAL.msgElements = toElements[ast.MessageElement](toMessageElement, protoDollar[1].ext.Node, protoDollar[1].ext.Runes) + protoVAL.msgDecl = protoDollar[1].extend } - case 234: + case 190: protoDollar = protoS[protopt-1 : protopt+1] { - protoVAL.msgElements = toElements[ast.MessageElement](toMessageElement, protoDollar[1].msgGrp.Node, protoDollar[1].msgGrp.Runes) + protoVAL.msgDecl = protoDollar[1].ext } - case 235: + case 191: protoDollar = protoS[protopt-1 : protopt+1] { - protoVAL.msgElements = toElements[ast.MessageElement](toMessageElement, protoDollar[1].opt.Node, protoDollar[1].opt.Runes) + protoVAL.msgDecl = protoDollar[1].grp } - case 236: + case 192: protoDollar = protoS[protopt-1 : protopt+1] { - protoVAL.msgElements = toElements[ast.MessageElement](toMessageElement, protoDollar[1].oo.Node, protoDollar[1].oo.Runes) + protoVAL.msgDecl = protoDollar[1].opt } - case 237: + case 193: protoDollar = protoS[protopt-1 : protopt+1] { - protoVAL.msgElements = toElements[ast.MessageElement](toMessageElement, protoDollar[1].mapFld.Node, protoDollar[1].mapFld.Runes) + protoVAL.msgDecl = protoDollar[1].oo } - case 238: + case 194: protoDollar = protoS[protopt-1 : protopt+1] { - protoVAL.msgElements = 
toElements[ast.MessageElement](toMessageElement, protoDollar[1].resvd.Node, protoDollar[1].resvd.Runes) + protoVAL.msgDecl = protoDollar[1].mapFld } - case 239: + case 195: protoDollar = protoS[protopt-1 : protopt+1] { - protoVAL.msgElements = nil + protoVAL.msgDecl = protoDollar[1].resvd } - case 240: - protoDollar = protoS[protopt-6 : protopt+1] - { - semis, extra := protolex.(*protoLex).requireSemicolon(protoDollar[6].bs) - protoVAL.msgFld = newNodeWithRunes(ast.NewFieldNode(protoDollar[1].id.ToKeyword(), protoDollar[2].tid, protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, nil, semis), extra...) - } - case 241: - protoDollar = protoS[protopt-7 : protopt+1] - { - semis, extra := protolex.(*protoLex).requireSemicolon(protoDollar[7].bs) - protoVAL.msgFld = newNodeWithRunes(ast.NewFieldNode(protoDollar[1].id.ToKeyword(), protoDollar[2].tid, protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, protoDollar[6].cmpctOpts, semis), extra...) - } - case 242: - protoDollar = protoS[protopt-5 : protopt+1] + case 196: + protoDollar = protoS[protopt-1 : protopt+1] { - semis, extra := protolex.(*protoLex).requireSemicolon(protoDollar[5].bs) - protoVAL.msgFld = newNodeWithRunes(ast.NewFieldNode(nil, protoDollar[1].tid, protoDollar[2].id, protoDollar[3].b, protoDollar[4].i, nil, semis), extra...) + protoVAL.msgDecl = ast.NewEmptyDeclNode(protoDollar[1].b) } - case 243: - protoDollar = protoS[protopt-6 : protopt+1] + case 197: + protoDollar = protoS[protopt-2 : protopt+1] { - semis, extra := protolex.(*protoLex).requireSemicolon(protoDollar[6].bs) - protoVAL.msgFld = newNodeWithRunes(ast.NewFieldNode(nil, protoDollar[1].tid, protoDollar[2].id, protoDollar[3].b, protoDollar[4].i, protoDollar[5].cmpctOpts, semis), extra...) + protoVAL.msgDecl = nil } - case 244: - protoDollar = protoS[protopt-4 : protopt+1] + case 198: + protoDollar = protoS[protopt-1 : protopt+1] { - semis, extra := protolex.(*protoLex).requireSemicolon(protoDollar[4].bs) - protoVAL.msgFld = newNodeWithRunes(ast.NewFieldNode(protoDollar[1].id.ToKeyword(), protoDollar[2].tid, protoDollar[3].id, nil, nil, nil, semis), extra...) + protoVAL.msgDecl = nil } - case 245: + case 199: protoDollar = protoS[protopt-5 : protopt+1] { - semis, extra := protolex.(*protoLex).requireSemicolon(protoDollar[5].bs) - protoVAL.msgFld = newNodeWithRunes(ast.NewFieldNode(protoDollar[1].id.ToKeyword(), protoDollar[2].tid, protoDollar[3].id, nil, nil, protoDollar[4].cmpctOpts, semis), extra...) + protoVAL.extend = ast.NewExtendNode(protoDollar[1].id.ToKeyword(), protoDollar[2].tid, protoDollar[3].b, protoDollar[4].extDecls, protoDollar[5].b) } - case 246: - protoDollar = protoS[protopt-3 : protopt+1] - { - semis, extra := protolex.(*protoLex).requireSemicolon(protoDollar[3].bs) - protoVAL.msgFld = newNodeWithRunes(ast.NewFieldNode(nil, protoDollar[1].tid, protoDollar[2].id, nil, nil, nil, semis), extra...) - } - case 247: - protoDollar = protoS[protopt-4 : protopt+1] - { - semis, extra := protolex.(*protoLex).requireSemicolon(protoDollar[4].bs) - protoVAL.msgFld = newNodeWithRunes(ast.NewFieldNode(nil, protoDollar[1].tid, protoDollar[2].id, nil, nil, protoDollar[3].cmpctOpts, semis), extra...) - } - case 248: - protoDollar = protoS[protopt-6 : protopt+1] - { - protoVAL.extend = newNodeWithRunes(ast.NewExtendNode(protoDollar[1].id.ToKeyword(), protoDollar[2].tid, protoDollar[3].b, protoDollar[4].extElements, protoDollar[5].b), protoDollar[6].bs...) 
- } - case 249: - protoDollar = protoS[protopt-0 : protopt+1] - { - protoVAL.extElements = nil - } - case 251: + case 200: protoDollar = protoS[protopt-2 : protopt+1] { - if protoDollar[2].extElement != nil { - protoVAL.extElements = append(protoDollar[1].extElements, protoDollar[2].extElement) + if protoDollar[2].extDecl != nil { + protoVAL.extDecls = append(protoDollar[1].extDecls, protoDollar[2].extDecl) } else { - protoVAL.extElements = protoDollar[1].extElements + protoVAL.extDecls = protoDollar[1].extDecls } } - case 252: + case 201: protoDollar = protoS[protopt-1 : protopt+1] { - if protoDollar[1].extElement != nil { - protoVAL.extElements = []ast.ExtendElement{protoDollar[1].extElement} + if protoDollar[1].extDecl != nil { + protoVAL.extDecls = []ast.ExtendElement{protoDollar[1].extDecl} } else { - protoVAL.extElements = nil + protoVAL.extDecls = nil } } - case 253: - protoDollar = protoS[protopt-1 : protopt+1] + case 202: + protoDollar = protoS[protopt-0 : protopt+1] { - protoVAL.extElement = protoDollar[1].fld + protoVAL.extDecls = nil } - case 254: + case 203: protoDollar = protoS[protopt-1 : protopt+1] { - protoVAL.extElement = protoDollar[1].grp - } - case 255: - protoDollar = protoS[protopt-2 : protopt+1] - { - protoVAL.extElement = nil + protoVAL.extDecl = protoDollar[1].fld } - case 256: + case 204: protoDollar = protoS[protopt-1 : protopt+1] { - protoVAL.extElement = nil + protoVAL.extDecl = protoDollar[1].grp } - case 257: - protoDollar = protoS[protopt-6 : protopt+1] + case 205: + protoDollar = protoS[protopt-2 : protopt+1] { - protoVAL.fld = ast.NewFieldNode(protoDollar[1].id.ToKeyword(), protoDollar[2].tid, protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, nil, protoDollar[6].b) + protoVAL.extDecl = nil } - case 258: - protoDollar = protoS[protopt-7 : protopt+1] + case 206: + protoDollar = protoS[protopt-1 : protopt+1] { - protoVAL.fld = ast.NewFieldNode(protoDollar[1].id.ToKeyword(), protoDollar[2].tid, protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, protoDollar[6].cmpctOpts, protoDollar[7].b) + protoVAL.extDecl = nil } - case 259: + case 207: protoDollar = protoS[protopt-5 : protopt+1] { - protoVAL.fld = ast.NewFieldNode(nil, protoDollar[1].tid, protoDollar[2].id, protoDollar[3].b, protoDollar[4].i, nil, protoDollar[5].b) - } - case 260: - protoDollar = protoS[protopt-6 : protopt+1] - { - protoVAL.fld = ast.NewFieldNode(nil, protoDollar[1].tid, protoDollar[2].id, protoDollar[3].b, protoDollar[4].i, protoDollar[5].cmpctOpts, protoDollar[6].b) + protoVAL.svc = ast.NewServiceNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id, protoDollar[3].b, protoDollar[4].svcDecls, protoDollar[5].b) } - case 261: - protoDollar = protoS[protopt-6 : protopt+1] + case 208: + protoDollar = protoS[protopt-2 : protopt+1] { - protoVAL.svc = newNodeWithRunes(ast.NewServiceNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id, protoDollar[3].b, protoDollar[4].svcElements, protoDollar[5].b), protoDollar[6].bs...) 
+ if protoDollar[2].svcDecl != nil { + protoVAL.svcDecls = append(protoDollar[1].svcDecls, protoDollar[2].svcDecl) + } else { + protoVAL.svcDecls = protoDollar[1].svcDecls + } } - case 262: + case 209: protoDollar = protoS[protopt-1 : protopt+1] { - protoVAL.svcElements = prependRunes(toServiceElement, protoDollar[1].bs, nil) + if protoDollar[1].svcDecl != nil { + protoVAL.svcDecls = []ast.ServiceElement{protoDollar[1].svcDecl} + } else { + protoVAL.svcDecls = nil + } } - case 263: - protoDollar = protoS[protopt-2 : protopt+1] + case 210: + protoDollar = protoS[protopt-0 : protopt+1] { - protoVAL.svcElements = prependRunes(toServiceElement, protoDollar[1].bs, protoDollar[2].svcElements) + protoVAL.svcDecls = nil } - case 264: - protoDollar = protoS[protopt-2 : protopt+1] + case 211: + protoDollar = protoS[protopt-1 : protopt+1] { - protoVAL.svcElements = append(protoDollar[1].svcElements, protoDollar[2].svcElements...) + protoVAL.svcDecl = protoDollar[1].opt } - case 265: + case 212: protoDollar = protoS[protopt-1 : protopt+1] { - protoVAL.svcElements = protoDollar[1].svcElements + protoVAL.svcDecl = protoDollar[1].mtd } - case 266: + case 213: protoDollar = protoS[protopt-1 : protopt+1] { - protoVAL.svcElements = toElements[ast.ServiceElement](toServiceElement, protoDollar[1].opt.Node, protoDollar[1].opt.Runes) + protoVAL.svcDecl = ast.NewEmptyDeclNode(protoDollar[1].b) } - case 267: - protoDollar = protoS[protopt-1 : protopt+1] + case 214: + protoDollar = protoS[protopt-2 : protopt+1] { - protoVAL.svcElements = toElements[ast.ServiceElement](toServiceElement, protoDollar[1].mtd.Node, protoDollar[1].mtd.Runes) + protoVAL.svcDecl = nil } - case 268: + case 215: protoDollar = protoS[protopt-1 : protopt+1] { - protoVAL.svcElements = nil + protoVAL.svcDecl = nil } - case 269: + case 216: protoDollar = protoS[protopt-6 : protopt+1] { - semi, extra := protolex.(*protoLex).requireSemicolon(protoDollar[6].bs) - protoVAL.mtd = newNodeWithRunes(ast.NewRPCNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id, protoDollar[3].mtdMsgType, protoDollar[4].id.ToKeyword(), protoDollar[5].mtdMsgType, semi), extra...) + protoVAL.mtd = ast.NewRPCNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id, protoDollar[3].rpcType, protoDollar[4].id.ToKeyword(), protoDollar[5].rpcType, protoDollar[6].b) } - case 270: - protoDollar = protoS[protopt-9 : protopt+1] + case 217: + protoDollar = protoS[protopt-8 : protopt+1] { - protoVAL.mtd = newNodeWithRunes(ast.NewRPCNodeWithBody(protoDollar[1].id.ToKeyword(), protoDollar[2].id, protoDollar[3].mtdMsgType, protoDollar[4].id.ToKeyword(), protoDollar[5].mtdMsgType, protoDollar[6].b, protoDollar[7].mtdElements, protoDollar[8].b), protoDollar[9].bs...) 
+ protoVAL.mtd = ast.NewRPCNodeWithBody(protoDollar[1].id.ToKeyword(), protoDollar[2].id, protoDollar[3].rpcType, protoDollar[4].id.ToKeyword(), protoDollar[5].rpcType, protoDollar[6].b, protoDollar[7].rpcDecls, protoDollar[8].b) } - case 271: + case 218: protoDollar = protoS[protopt-4 : protopt+1] { - protoVAL.mtdMsgType = ast.NewRPCTypeNode(protoDollar[1].b, protoDollar[2].id.ToKeyword(), protoDollar[3].tid, protoDollar[4].b) + protoVAL.rpcType = ast.NewRPCTypeNode(protoDollar[1].b, protoDollar[2].id.ToKeyword(), protoDollar[3].tid, protoDollar[4].b) } - case 272: + case 219: protoDollar = protoS[protopt-3 : protopt+1] { - protoVAL.mtdMsgType = ast.NewRPCTypeNode(protoDollar[1].b, nil, protoDollar[2].tid, protoDollar[3].b) + protoVAL.rpcType = ast.NewRPCTypeNode(protoDollar[1].b, nil, protoDollar[2].tid, protoDollar[3].b) } - case 273: - protoDollar = protoS[protopt-1 : protopt+1] + case 220: + protoDollar = protoS[protopt-2 : protopt+1] { - protoVAL.mtdElements = prependRunes(toMethodElement, protoDollar[1].bs, nil) + if protoDollar[2].rpcDecl != nil { + protoVAL.rpcDecls = append(protoDollar[1].rpcDecls, protoDollar[2].rpcDecl) + } else { + protoVAL.rpcDecls = protoDollar[1].rpcDecls + } } - case 274: - protoDollar = protoS[protopt-2 : protopt+1] + case 221: + protoDollar = protoS[protopt-1 : protopt+1] { - protoVAL.mtdElements = prependRunes(toMethodElement, protoDollar[1].bs, protoDollar[2].mtdElements) + if protoDollar[1].rpcDecl != nil { + protoVAL.rpcDecls = []ast.RPCElement{protoDollar[1].rpcDecl} + } else { + protoVAL.rpcDecls = nil + } } - case 275: - protoDollar = protoS[protopt-2 : protopt+1] + case 222: + protoDollar = protoS[protopt-0 : protopt+1] { - protoVAL.mtdElements = append(protoDollar[1].mtdElements, protoDollar[2].mtdElements...) + protoVAL.rpcDecls = nil } - case 276: + case 223: protoDollar = protoS[protopt-1 : protopt+1] { - protoVAL.mtdElements = protoDollar[1].mtdElements + protoVAL.rpcDecl = protoDollar[1].opt } - case 277: + case 224: protoDollar = protoS[protopt-1 : protopt+1] { - protoVAL.mtdElements = toElements[ast.RPCElement](toMethodElement, protoDollar[1].opt.Node, protoDollar[1].opt.Runes) + protoVAL.rpcDecl = ast.NewEmptyDeclNode(protoDollar[1].b) } - case 278: + case 225: + protoDollar = protoS[protopt-2 : protopt+1] + { + protoVAL.rpcDecl = nil + } + case 226: protoDollar = protoS[protopt-1 : protopt+1] { - protoVAL.mtdElements = nil + protoVAL.rpcDecl = nil } } goto protostack /* stack new state and value */ diff --git a/vendor/github.com/bufbuild/protocompile/parser/result.go b/vendor/github.com/bufbuild/protocompile/parser/result.go index 4aa83e7d..89afa2a1 100644 --- a/vendor/github.com/bufbuild/protocompile/parser/result.go +++ b/vendor/github.com/bufbuild/protocompile/parser/result.go @@ -1,4 +1,4 @@ -// Copyright 2020-2024 Buf Technologies, Inc. +// Copyright 2020-2022 Buf Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -16,19 +16,15 @@ package parser import ( "bytes" - "fmt" "math" - "sort" "strings" "unicode" "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/types/descriptorpb" "github.com/bufbuild/protocompile/ast" "github.com/bufbuild/protocompile/internal" - "github.com/bufbuild/protocompile/internal/editions" "github.com/bufbuild/protocompile/reporter" ) @@ -36,15 +32,14 @@ type result struct { file *ast.FileNode proto *descriptorpb.FileDescriptorProto - nodes map[proto.Message]ast.Node - ifNoAST *ast.NoSourceNode + nodes map[proto.Message]ast.Node } // ResultWithoutAST returns a parse result that has no AST. All methods for // looking up AST nodes return a placeholder node that contains only the filename // in position information. func ResultWithoutAST(proto *descriptorpb.FileDescriptorProto) Result { - return &result{proto: proto, ifNoAST: ast.NewNoSourceNode(proto.GetName())} + return &result{proto: proto} } // ResultFromAST constructs a descriptor proto from the given AST. The returned @@ -89,47 +84,24 @@ func (r *result) createFileDescriptor(filename string, file *ast.FileNode, handl r.putFileNode(fd, file) - var syntax protoreflect.Syntax - switch { - case file.Syntax != nil: - switch file.Syntax.Syntax.AsString() { - case "proto3": - syntax = protoreflect.Proto3 - case "proto2": - syntax = protoreflect.Proto2 - default: + isProto3 := false + if file.Syntax != nil { + if file.Syntax.Syntax.AsString() == "proto3" { + isProto3 = true + } else if file.Syntax.Syntax.AsString() != "proto2" { nodeInfo := file.NodeInfo(file.Syntax.Syntax) - if handler.HandleErrorf(nodeInfo, `syntax value must be "proto2" or "proto3"`) != nil { + if handler.HandleErrorf(nodeInfo.Start(), `syntax value must be "proto2" or "proto3"`) != nil { return } } - // proto2 is the default, so no need to set for that value - if syntax != protoreflect.Proto2 { + // proto2 is the default, so no need to set unless proto3 + if isProto3 { fd.Syntax = proto.String(file.Syntax.Syntax.AsString()) } - case file.Edition != nil: - edition := file.Edition.Edition.AsString() - syntax = protoreflect.Editions - - fd.Syntax = proto.String("editions") - editionEnum, ok := editions.SupportedEditions[edition] - if !ok { - nodeInfo := file.NodeInfo(file.Edition.Edition) - editionStrs := make([]string, 0, len(editions.SupportedEditions)) - for supportedEdition := range editions.SupportedEditions { - editionStrs = append(editionStrs, fmt.Sprintf("%q", supportedEdition)) - } - sort.Strings(editionStrs) - if handler.HandleErrorf(nodeInfo, `edition value %q not recognized; should be one of [%s]`, edition, strings.Join(editionStrs, ",")) != nil { - return - } - } - fd.Edition = editionEnum.Enum() - default: - syntax = protoreflect.Proto2 + } else { nodeInfo := file.NodeInfo(file) - handler.HandleWarningWithPos(nodeInfo, ErrNoSyntax) + handler.HandleWarningWithPos(nodeInfo.Start(), ErrNoSyntax) } for _, decl := range file.Decls { @@ -138,9 +110,9 @@ func (r *result) createFileDescriptor(filename string, file *ast.FileNode, handl } switch decl := decl.(type) { case *ast.EnumNode: - fd.EnumType = append(fd.EnumType, r.asEnumDescriptor(decl, syntax, handler)) + fd.EnumType = append(fd.EnumType, r.asEnumDescriptor(decl, handler)) case *ast.ExtendNode: - r.addExtensions(decl, &fd.Extension, &fd.MessageType, syntax, handler, 0) + r.addExtensions(decl, &fd.Extension, &fd.MessageType, isProto3, handler, 0) case *ast.ImportNode: index := len(fd.Dependency) fd.Dependency = append(fd.Dependency, 
decl.Name.AsString()) @@ -150,7 +122,7 @@ func (r *result) createFileDescriptor(filename string, file *ast.FileNode, handl fd.WeakDependency = append(fd.WeakDependency, int32(index)) } case *ast.MessageNode: - fd.MessageType = append(fd.MessageType, r.asMessageDescriptor(decl, syntax, handler, 1)) + fd.MessageType = append(fd.MessageType, r.asMessageDescriptor(decl, isProto3, handler, 1)) case *ast.OptionNode: if fd.Options == nil { fd.Options = &descriptorpb.FileOptions{} @@ -161,20 +133,20 @@ func (r *result) createFileDescriptor(filename string, file *ast.FileNode, handl case *ast.PackageNode: if fd.Package != nil { nodeInfo := file.NodeInfo(decl) - if handler.HandleErrorf(nodeInfo, "files should have only one package declaration") != nil { + if handler.HandleErrorf(nodeInfo.Start(), "files should have only one package declaration") != nil { return } } pkgName := string(decl.Name.AsIdentifier()) if len(pkgName) >= 512 { nodeInfo := file.NodeInfo(decl.Name) - if handler.HandleErrorf(nodeInfo, "package name (with whitespace removed) must be less than 512 characters long") != nil { + if handler.HandleErrorf(nodeInfo.Start(), "package name (with whitespace removed) must be less than 512 characters long") != nil { return } } if strings.Count(pkgName, ".") > 100 { nodeInfo := file.NodeInfo(decl.Name) - if handler.HandleErrorf(nodeInfo, "package name may not contain more than 100 periods") != nil { + if handler.HandleErrorf(nodeInfo.Start(), "package name may not contain more than 100 periods") != nil { return } } @@ -263,7 +235,7 @@ func (r *result) asUninterpretedOptionName(parts []*ast.FieldReferenceNode) []*d return ret } -func (r *result) addExtensions(ext *ast.ExtendNode, flds *[]*descriptorpb.FieldDescriptorProto, msgs *[]*descriptorpb.DescriptorProto, syntax protoreflect.Syntax, handler *reporter.Handler, depth int) { +func (r *result) addExtensions(ext *ast.ExtendNode, flds *[]*descriptorpb.FieldDescriptorProto, msgs *[]*descriptorpb.DescriptorProto, isProto3 bool, handler *reporter.Handler, depth int) { extendee := string(ext.Extendee.AsIdentifier()) count := 0 for _, decl := range ext.Decls { @@ -271,13 +243,13 @@ func (r *result) addExtensions(ext *ast.ExtendNode, flds *[]*descriptorpb.FieldD case *ast.FieldNode: count++ // use higher limit since we don't know yet whether extendee is messageset wire format - fd := r.asFieldDescriptor(decl, internal.MaxTag, syntax, handler) + fd := r.asFieldDescriptor(decl, internal.MaxTag, isProto3, handler) fd.Extendee = proto.String(extendee) *flds = append(*flds, fd) case *ast.GroupNode: count++ // ditto: use higher limit right now - fd, md := r.asGroupDescriptors(decl, syntax, internal.MaxTag, handler, depth+1) + fd, md := r.asGroupDescriptors(decl, isProto3, internal.MaxTag, handler, depth+1) fd.Extendee = proto.String(extendee) *flds = append(*flds, fd) *msgs = append(*msgs, md) @@ -285,7 +257,7 @@ func (r *result) addExtensions(ext *ast.ExtendNode, flds *[]*descriptorpb.FieldD } if count == 0 { nodeInfo := r.file.NodeInfo(ext) - _ = handler.HandleErrorf(nodeInfo, "extend sections must define at least one extension") + _ = handler.HandleErrorf(nodeInfo.Start(), "extend sections must define at least one extension") } } @@ -303,20 +275,17 @@ func asLabel(lbl *ast.FieldLabel) *descriptorpb.FieldDescriptorProto_Label { } } -func (r *result) asFieldDescriptor(node *ast.FieldNode, maxTag int32, syntax protoreflect.Syntax, handler *reporter.Handler) *descriptorpb.FieldDescriptorProto { - var tag *int32 - if node.Tag != nil { - if err := 
r.checkTag(node.Tag, node.Tag.Val, maxTag); err != nil { - _ = handler.HandleError(err) - } - tag = proto.Int32(int32(node.Tag.Val)) +func (r *result) asFieldDescriptor(node *ast.FieldNode, maxTag int32, isProto3 bool, handler *reporter.Handler) *descriptorpb.FieldDescriptorProto { + tag := node.Tag.Val + if err := r.checkTag(node.Tag, tag, maxTag); err != nil { + _ = handler.HandleError(err) } - fd := newFieldDescriptor(node.Name.Val, string(node.FldType.AsIdentifier()), tag, asLabel(&node.Label)) + fd := newFieldDescriptor(node.Name.Val, string(node.FldType.AsIdentifier()), int32(tag), asLabel(&node.Label)) r.putFieldNode(fd, node) if opts := node.Options.GetElements(); len(opts) > 0 { fd.Options = &descriptorpb.FieldOptions{UninterpretedOption: r.asUninterpretedOptions(opts)} } - if syntax == protoreflect.Proto3 && fd.Label != nil && fd.GetLabel() == descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL { + if isProto3 && fd.Label != nil && fd.GetLabel() == descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL { fd.Proto3Optional = proto.Bool(true) } return fd @@ -340,11 +309,11 @@ var fieldTypes = map[string]descriptorpb.FieldDescriptorProto_Type{ "bytes": descriptorpb.FieldDescriptorProto_TYPE_BYTES, } -func newFieldDescriptor(name string, fieldType string, tag *int32, lbl *descriptorpb.FieldDescriptorProto_Label) *descriptorpb.FieldDescriptorProto { +func newFieldDescriptor(name string, fieldType string, tag int32, lbl *descriptorpb.FieldDescriptorProto_Label) *descriptorpb.FieldDescriptorProto { fd := &descriptorpb.FieldDescriptorProto{ Name: proto.String(name), JsonName: proto.String(internal.JSONName(name)), - Number: tag, + Number: proto.Int32(tag), Label: lbl, } t, ok := fieldTypes[fieldType] @@ -359,23 +328,20 @@ func newFieldDescriptor(name string, fieldType string, tag *int32, lbl *descript return fd } -func (r *result) asGroupDescriptors(group *ast.GroupNode, syntax protoreflect.Syntax, maxTag int32, handler *reporter.Handler, depth int) (*descriptorpb.FieldDescriptorProto, *descriptorpb.DescriptorProto) { - var tag *int32 - if group.Tag != nil { - if err := r.checkTag(group.Tag, group.Tag.Val, maxTag); err != nil { - _ = handler.HandleError(err) - } - tag = proto.Int32(int32(group.Tag.Val)) +func (r *result) asGroupDescriptors(group *ast.GroupNode, isProto3 bool, maxTag int32, handler *reporter.Handler, depth int) (*descriptorpb.FieldDescriptorProto, *descriptorpb.DescriptorProto) { + tag := group.Tag.Val + if err := r.checkTag(group.Tag, tag, maxTag); err != nil { + _ = handler.HandleError(err) } if !unicode.IsUpper(rune(group.Name.Val[0])) { nameNodeInfo := r.file.NodeInfo(group.Name) - _ = handler.HandleErrorf(nameNodeInfo, "group %s should have a name that starts with a capital letter", group.Name.Val) + _ = handler.HandleErrorf(nameNodeInfo.Start(), "group %s should have a name that starts with a capital letter", group.Name.Val) } fieldName := strings.ToLower(group.Name.Val) fd := &descriptorpb.FieldDescriptorProto{ Name: proto.String(fieldName), JsonName: proto.String(internal.JSONName(fieldName)), - Number: tag, + Number: proto.Int32(int32(tag)), Label: asLabel(&group.Label), Type: descriptorpb.FieldDescriptorProto_TYPE_GROUP.Enum(), TypeName: proto.String(group.Name.Val), @@ -385,35 +351,30 @@ func (r *result) asGroupDescriptors(group *ast.GroupNode, syntax protoreflect.Sy fd.Options = &descriptorpb.FieldOptions{UninterpretedOption: r.asUninterpretedOptions(opts)} } md := &descriptorpb.DescriptorProto{Name: proto.String(group.Name.Val)} - groupMsg := group.AsMessage() - 
r.putMessageNode(md, groupMsg) + r.putMessageNode(md, group) // don't bother processing body if we've exceeded depth - if r.checkDepth(depth, groupMsg, handler) { - r.addMessageBody(md, &group.MessageBody, syntax, handler, depth) + if r.checkDepth(depth, group, handler) { + r.addMessageBody(md, &group.MessageBody, isProto3, handler, depth) } return fd, md } -func (r *result) asMapDescriptors(mapField *ast.MapFieldNode, syntax protoreflect.Syntax, maxTag int32, handler *reporter.Handler, depth int) (*descriptorpb.FieldDescriptorProto, *descriptorpb.DescriptorProto) { - var tag *int32 - if mapField.Tag != nil { - if err := r.checkTag(mapField.Tag, mapField.Tag.Val, maxTag); err != nil { - _ = handler.HandleError(err) - } - tag = proto.Int32(int32(mapField.Tag.Val)) +func (r *result) asMapDescriptors(mapField *ast.MapFieldNode, isProto3 bool, maxTag int32, handler *reporter.Handler, depth int) (*descriptorpb.FieldDescriptorProto, *descriptorpb.DescriptorProto) { + tag := mapField.Tag.Val + if err := r.checkTag(mapField.Tag, tag, maxTag); err != nil { + _ = handler.HandleError(err) } - mapEntry := mapField.AsMessage() - r.checkDepth(depth, mapEntry, handler) + r.checkDepth(depth, mapField, handler) var lbl *descriptorpb.FieldDescriptorProto_Label - if syntax == protoreflect.Proto2 { + if !isProto3 { lbl = descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL.Enum() } - keyFd := newFieldDescriptor("key", mapField.MapType.KeyType.Val, proto.Int32(1), lbl) + keyFd := newFieldDescriptor("key", mapField.MapType.KeyType.Val, 1, lbl) r.putFieldNode(keyFd, mapField.KeyField()) - valFd := newFieldDescriptor("value", string(mapField.MapType.ValueType.AsIdentifier()), proto.Int32(2), lbl) + valFd := newFieldDescriptor("value", string(mapField.MapType.ValueType.AsIdentifier()), 2, lbl) r.putFieldNode(valFd, mapField.ValueField()) entryName := internal.InitCap(internal.JSONName(mapField.Name.Val)) + "Entry" - fd := newFieldDescriptor(mapField.Name.Val, entryName, tag, descriptorpb.FieldDescriptorProto_LABEL_REPEATED.Enum()) + fd := newFieldDescriptor(mapField.Name.Val, entryName, int32(tag), descriptorpb.FieldDescriptorProto_LABEL_REPEATED.Enum()) if opts := mapField.Options.GetElements(); len(opts) > 0 { fd.Options = &descriptorpb.FieldOptions{UninterpretedOption: r.asUninterpretedOptions(opts)} } @@ -423,7 +384,7 @@ func (r *result) asMapDescriptors(mapField *ast.MapFieldNode, syntax protoreflec Options: &descriptorpb.MessageOptions{MapEntry: proto.Bool(true)}, Field: []*descriptorpb.FieldDescriptorProto{keyFd, valFd}, } - r.putMessageNode(md, mapEntry) + r.putMessageNode(md, mapField) return fd, md } @@ -439,7 +400,7 @@ func (r *result) asExtensionRanges(node *ast.ExtensionRangeNode, maxTag int32, h if len(opts) > 0 { er.Options = &descriptorpb.ExtensionRangeOptions{UninterpretedOption: opts} } - r.putExtensionRangeNode(er, node, rng) + r.putExtensionRangeNode(er, rng) ers[i] = er } return ers @@ -449,7 +410,7 @@ func (r *result) asEnumValue(ev *ast.EnumValueNode, handler *reporter.Handler) * num, ok := ast.AsInt32(ev.Number, math.MinInt32, math.MaxInt32) if !ok { numberNodeInfo := r.file.NodeInfo(ev.Number) - _ = handler.HandleErrorf(numberNodeInfo, "value %d is out of range: should be between %d and %d", ev.Number.Value(), math.MinInt32, math.MaxInt32) + _ = handler.HandleErrorf(numberNodeInfo.Start(), "value %d is out of range: should be between %d and %d", ev.Number.Value(), math.MinInt32, math.MaxInt32) } evd := &descriptorpb.EnumValueDescriptorProto{Name: proto.String(ev.Name.Val), Number: 
proto.Int32(num)} r.putEnumValueNode(evd, ev) @@ -486,10 +447,9 @@ func (r *result) asMethodDescriptor(node *ast.RPCNode) *descriptorpb.MethodDescr return md } -func (r *result) asEnumDescriptor(en *ast.EnumNode, syntax protoreflect.Syntax, handler *reporter.Handler) *descriptorpb.EnumDescriptorProto { +func (r *result) asEnumDescriptor(en *ast.EnumNode, handler *reporter.Handler) *descriptorpb.EnumDescriptorProto { ed := &descriptorpb.EnumDescriptorProto{Name: proto.String(en.Name.Val)} r.putEnumNode(ed, en) - rsvdNames := map[string]ast.SourcePos{} for _, decl := range en.Decls { switch decl := decl.(type) { case *ast.OptionNode: @@ -500,7 +460,9 @@ func (r *result) asEnumDescriptor(en *ast.EnumNode, syntax protoreflect.Syntax, case *ast.EnumValueNode: ed.Value = append(ed.Value, r.asEnumValue(decl, handler)) case *ast.ReservedNode: - r.addReservedNames(&ed.ReservedName, decl, syntax, handler, rsvdNames) + for _, n := range decl.Names { + ed.ReservedName = append(ed.ReservedName, n.AsString()) + } for _, rng := range decl.Ranges { ed.ReservedRange = append(ed.ReservedRange, r.asEnumReservedRange(rng, handler)) } @@ -519,65 +481,30 @@ func (r *result) asEnumReservedRange(rng *ast.RangeNode, handler *reporter.Handl return rr } -func (r *result) asMessageDescriptor(node *ast.MessageNode, syntax protoreflect.Syntax, handler *reporter.Handler, depth int) *descriptorpb.DescriptorProto { +func (r *result) asMessageDescriptor(node *ast.MessageNode, isProto3 bool, handler *reporter.Handler, depth int) *descriptorpb.DescriptorProto { msgd := &descriptorpb.DescriptorProto{Name: proto.String(node.Name.Val)} r.putMessageNode(msgd, node) // don't bother processing body if we've exceeded depth if r.checkDepth(depth, node, handler) { - r.addMessageBody(msgd, &node.MessageBody, syntax, handler, depth) + r.addMessageBody(msgd, &node.MessageBody, isProto3, handler, depth) } return msgd } -func (r *result) addReservedNames(names *[]string, node *ast.ReservedNode, syntax protoreflect.Syntax, handler *reporter.Handler, alreadyReserved map[string]ast.SourcePos) { - if syntax == protoreflect.Editions { - if len(node.Names) > 0 { - nameNodeInfo := r.file.NodeInfo(node.Names[0]) - _ = handler.HandleErrorf(nameNodeInfo, `must use identifiers, not string literals, to reserved names with editions`) - } - for _, n := range node.Identifiers { - name := string(n.AsIdentifier()) - nameNodeInfo := r.file.NodeInfo(n) - if existing, ok := alreadyReserved[name]; ok { - _ = handler.HandleErrorf(nameNodeInfo, "name %q is already reserved at %s", name, existing) - continue - } - alreadyReserved[name] = nameNodeInfo.Start() - *names = append(*names, name) - } - return - } - - if len(node.Identifiers) > 0 { - nameNodeInfo := r.file.NodeInfo(node.Identifiers[0]) - _ = handler.HandleErrorf(nameNodeInfo, `must use string literals, not identifiers, to reserved names with proto2 and proto3`) - } - for _, n := range node.Names { - name := n.AsString() - nameNodeInfo := r.file.NodeInfo(n) - if existing, ok := alreadyReserved[name]; ok { - _ = handler.HandleErrorf(nameNodeInfo, "name %q is already reserved at %s", name, existing) - continue - } - alreadyReserved[name] = nameNodeInfo.Start() - *names = append(*names, name) - } -} - func (r *result) checkDepth(depth int, node ast.MessageDeclNode, handler *reporter.Handler) bool { if depth < 32 { return true } n := ast.Node(node) - if grp, ok := n.(*ast.SyntheticGroupMessageNode); ok { + if grp, ok := n.(*ast.GroupNode); ok { // pinpoint the group keyword if the source is a group n = 
grp.Keyword } - _ = handler.HandleErrorf(r.file.NodeInfo(n), "message nesting depth must be less than 32") + _ = handler.HandleErrorf(r.file.NodeInfo(n).Start(), "message nesting depth must be less than 32") return false } -func (r *result) addMessageBody(msgd *descriptorpb.DescriptorProto, body *ast.MessageBody, syntax protoreflect.Syntax, handler *reporter.Handler, depth int) { +func (r *result) addMessageBody(msgd *descriptorpb.DescriptorProto, body *ast.MessageBody, isProto3 bool, handler *reporter.Handler, depth int) { // first process any options for _, decl := range body.Decls { if opt, ok := decl.(*ast.OptionNode); ok { @@ -595,40 +522,40 @@ func (r *result) addMessageBody(msgd *descriptorpb.DescriptorProto, body *ast.Me if err != nil { return } else if messageSetOpt != nil { - if syntax == protoreflect.Proto3 { + if isProto3 { node := r.OptionNode(messageSetOpt) nodeInfo := r.file.NodeInfo(node) - _ = handler.HandleErrorf(nodeInfo, "messages with message-set wire format are not allowed with proto3 syntax") + _ = handler.HandleErrorf(nodeInfo.Start(), "messages with message-set wire format are not allowed with proto3 syntax") } maxTag = internal.MaxTag // higher limit for messageset wire format } - rsvdNames := map[string]ast.SourcePos{} + rsvdNames := map[string]int{} // now we can process the rest for _, decl := range body.Decls { switch decl := decl.(type) { case *ast.EnumNode: - msgd.EnumType = append(msgd.EnumType, r.asEnumDescriptor(decl, syntax, handler)) + msgd.EnumType = append(msgd.EnumType, r.asEnumDescriptor(decl, handler)) case *ast.ExtendNode: - r.addExtensions(decl, &msgd.Extension, &msgd.NestedType, syntax, handler, depth) + r.addExtensions(decl, &msgd.Extension, &msgd.NestedType, isProto3, handler, depth) case *ast.ExtensionRangeNode: msgd.ExtensionRange = append(msgd.ExtensionRange, r.asExtensionRanges(decl, maxTag, handler)...) 
case *ast.FieldNode: - fd := r.asFieldDescriptor(decl, maxTag, syntax, handler) + fd := r.asFieldDescriptor(decl, maxTag, isProto3, handler) msgd.Field = append(msgd.Field, fd) case *ast.MapFieldNode: - fd, md := r.asMapDescriptors(decl, syntax, maxTag, handler, depth+1) + fd, md := r.asMapDescriptors(decl, isProto3, maxTag, handler, depth+1) msgd.Field = append(msgd.Field, fd) msgd.NestedType = append(msgd.NestedType, md) case *ast.GroupNode: - fd, md := r.asGroupDescriptors(decl, syntax, maxTag, handler, depth+1) + fd, md := r.asGroupDescriptors(decl, isProto3, maxTag, handler, depth+1) msgd.Field = append(msgd.Field, fd) msgd.NestedType = append(msgd.NestedType, md) - case *ast.OneofNode: + case *ast.OneOfNode: oodIndex := len(msgd.OneofDecl) ood := &descriptorpb.OneofDescriptorProto{Name: proto.String(decl.Name.Val)} - r.putOneofNode(ood, decl) + r.putOneOfNode(ood, decl) msgd.OneofDecl = append(msgd.OneofDecl, ood) ooFields := 0 for _, oodecl := range decl.Decls { @@ -639,12 +566,12 @@ func (r *result) addMessageBody(msgd *descriptorpb.DescriptorProto, body *ast.Me } ood.Options.UninterpretedOption = append(ood.Options.UninterpretedOption, r.asUninterpretedOption(oodecl)) case *ast.FieldNode: - fd := r.asFieldDescriptor(oodecl, maxTag, syntax, handler) + fd := r.asFieldDescriptor(oodecl, maxTag, isProto3, handler) fd.OneofIndex = proto.Int32(int32(oodIndex)) msgd.Field = append(msgd.Field, fd) ooFields++ case *ast.GroupNode: - fd, md := r.asGroupDescriptors(oodecl, syntax, maxTag, handler, depth+1) + fd, md := r.asGroupDescriptors(oodecl, isProto3, maxTag, handler, depth+1) fd.OneofIndex = proto.Int32(int32(oodIndex)) msgd.Field = append(msgd.Field, fd) msgd.NestedType = append(msgd.NestedType, md) @@ -653,12 +580,20 @@ func (r *result) addMessageBody(msgd *descriptorpb.DescriptorProto, body *ast.Me } if ooFields == 0 { declNodeInfo := r.file.NodeInfo(decl) - _ = handler.HandleErrorf(declNodeInfo, "oneof must contain at least one field") + _ = handler.HandleErrorf(declNodeInfo.Start(), "oneof must contain at least one field") } case *ast.MessageNode: - msgd.NestedType = append(msgd.NestedType, r.asMessageDescriptor(decl, syntax, handler, depth+1)) + msgd.NestedType = append(msgd.NestedType, r.asMessageDescriptor(decl, isProto3, handler, depth+1)) case *ast.ReservedNode: - r.addReservedNames(&msgd.ReservedName, decl, syntax, handler, rsvdNames) + for _, n := range decl.Names { + count := rsvdNames[n.AsString()] + if count == 1 { // already seen + nameNodeInfo := r.file.NodeInfo(n) + _ = handler.HandleErrorf(nameNodeInfo.Start(), "name %q is reserved multiple times", n.AsString()) + } + rsvdNames[n.AsString()] = count + 1 + msgd.ReservedName = append(msgd.ReservedName, n.AsString()) + } for _, rng := range decl.Ranges { msgd.ReservedRange = append(msgd.ReservedRange, r.asMessageReservedRange(rng, maxTag, handler)) } @@ -669,24 +604,24 @@ func (r *result) addMessageBody(msgd *descriptorpb.DescriptorProto, body *ast.Me if len(msgd.Field) > 0 { node := r.FieldNode(msgd.Field[0]) nodeInfo := r.file.NodeInfo(node) - _ = handler.HandleErrorf(nodeInfo, "messages with message-set wire format cannot contain non-extension fields") + _ = handler.HandleErrorf(nodeInfo.Start(), "messages with message-set wire format cannot contain non-extension fields") } if len(msgd.ExtensionRange) == 0 { node := r.OptionNode(messageSetOpt) nodeInfo := r.file.NodeInfo(node) - _ = handler.HandleErrorf(nodeInfo, "messages with message-set wire format must contain at least one extension range") + _ = 
handler.HandleErrorf(nodeInfo.Start(), "messages with message-set wire format must contain at least one extension range") } } // process any proto3_optional fields - if syntax == protoreflect.Proto3 { + if isProto3 { r.processProto3OptionalFields(msgd) } } func (r *result) isMessageSetWireFormat(scope string, md *descriptorpb.DescriptorProto, handler *reporter.Handler) (*descriptorpb.UninterpretedOption, error) { uo := md.GetOptions().GetUninterpretedOption() - index, err := internal.FindOption(r, handler.HandleErrorf, scope, uo, "message_set_wire_format") + index, err := internal.FindOption(r, handler, scope, uo, "message_set_wire_format") if err != nil { return nil, err } @@ -705,7 +640,7 @@ func (r *result) isMessageSetWireFormat(scope string, md *descriptorpb.Descripto default: optNode := r.OptionNode(opt) optNodeInfo := r.file.NodeInfo(optNode.GetValue()) - return nil, handler.HandleErrorf(optNodeInfo, "%s: expecting bool value for message_set_wire_format option", scope) + return nil, handler.HandleErrorf(optNodeInfo.Start(), "%s: expecting bool value for message_set_wire_format option", scope) } } @@ -725,7 +660,7 @@ func (r *result) getRangeBounds(rng *ast.RangeNode, minVal, maxVal int32, handle if !ok { checkOrder = false startValNodeInfo := r.file.NodeInfo(rng.StartVal) - _ = handler.HandleErrorf(startValNodeInfo, "range start %d is out of range: should be between %d and %d", rng.StartValue(), minVal, maxVal) + _ = handler.HandleErrorf(startValNodeInfo.Start(), "range start %d is out of range: should be between %d and %d", rng.StartValue(), minVal, maxVal) } end, ok := rng.EndValueAsInt32(minVal, maxVal) @@ -733,13 +668,13 @@ func (r *result) getRangeBounds(rng *ast.RangeNode, minVal, maxVal int32, handle checkOrder = false if rng.EndVal != nil { endValNodeInfo := r.file.NodeInfo(rng.EndVal) - _ = handler.HandleErrorf(endValNodeInfo, "range end %d is out of range: should be between %d and %d", rng.EndValue(), minVal, maxVal) + _ = handler.HandleErrorf(endValNodeInfo.Start(), "range end %d is out of range: should be between %d and %d", rng.EndValue(), minVal, maxVal) } } if checkOrder && start > end { rangeStartNodeInfo := r.file.NodeInfo(rng.RangeStart()) - _ = handler.HandleErrorf(rangeStartNodeInfo, "range, %d to %d, is invalid: start must be <= end", start, end) + _ = handler.HandleErrorf(rangeStartNodeInfo.Start(), "range, %d to %d, is invalid: start must be <= end", start, end) } return start, end @@ -765,11 +700,11 @@ func (r *result) asServiceDescriptor(svc *ast.ServiceNode) *descriptorpb.Service func (r *result) checkTag(n ast.Node, v uint64, maxTag int32) error { switch { case v < 1: - return reporter.Errorf(r.file.NodeInfo(n), "tag number %d must be greater than zero", v) + return reporter.Errorf(r.file.NodeInfo(n).Start(), "tag number %d must be greater than zero", v) case v > uint64(maxTag): - return reporter.Errorf(r.file.NodeInfo(n), "tag number %d is higher than max allowed tag number (%d)", v, maxTag) + return reporter.Errorf(r.file.NodeInfo(n).Start(), "tag number %d is higher than max allowed tag number (%d)", v, maxTag) case v >= internal.SpecialReservedStart && v <= internal.SpecialReservedEnd: - return reporter.Errorf(r.file.NodeInfo(n), "tag number %d is in disallowed reserved range %d-%d", v, internal.SpecialReservedStart, internal.SpecialReservedEnd) + return reporter.Errorf(r.file.NodeInfo(n).Start(), "tag number %d is in disallowed reserved range %d-%d", v, internal.SpecialReservedStart, internal.SpecialReservedEnd) default: return nil } @@ -834,112 
+769,105 @@ func (r *result) processProto3OptionalFields(msgd *descriptorpb.DescriptorProto) ood := &descriptorpb.OneofDescriptorProto{Name: proto.String(ooName)} msgd.OneofDecl = append(msgd.OneofDecl, ood) ooident := r.FieldNode(fd).(*ast.FieldNode) //nolint:errcheck - r.putOneofNode(ood, ast.NewSyntheticOneof(ooident)) + r.putOneOfNode(ood, ast.NewSyntheticOneOf(ooident)) } } } func (r *result) Node(m proto.Message) ast.Node { if r.nodes == nil { - return r.ifNoAST + return ast.NewNoSourceNode(r.proto.GetName()) } return r.nodes[m] } func (r *result) FileNode() ast.FileDeclNode { if r.nodes == nil { - return r.ifNoAST + return ast.NewNoSourceNode(r.proto.GetName()) } return r.nodes[r.proto].(ast.FileDeclNode) } func (r *result) OptionNode(o *descriptorpb.UninterpretedOption) ast.OptionDeclNode { if r.nodes == nil { - return r.ifNoAST + return ast.NewNoSourceNode(r.proto.GetName()) } return r.nodes[o].(ast.OptionDeclNode) } func (r *result) OptionNamePartNode(o *descriptorpb.UninterpretedOption_NamePart) ast.Node { if r.nodes == nil { - return r.ifNoAST + return ast.NewNoSourceNode(r.proto.GetName()) } return r.nodes[o] } func (r *result) MessageNode(m *descriptorpb.DescriptorProto) ast.MessageDeclNode { if r.nodes == nil { - return r.ifNoAST + return ast.NewNoSourceNode(r.proto.GetName()) } return r.nodes[m].(ast.MessageDeclNode) } func (r *result) FieldNode(f *descriptorpb.FieldDescriptorProto) ast.FieldDeclNode { if r.nodes == nil { - return r.ifNoAST + return ast.NewNoSourceNode(r.proto.GetName()) } return r.nodes[f].(ast.FieldDeclNode) } -func (r *result) OneofNode(o *descriptorpb.OneofDescriptorProto) ast.OneofDeclNode { - if r.nodes == nil { - return r.ifNoAST - } - return r.nodes[o].(ast.OneofDeclNode) -} - -func (r *result) ExtensionsNode(e *descriptorpb.DescriptorProto_ExtensionRange) ast.NodeWithOptions { +func (r *result) OneOfNode(o *descriptorpb.OneofDescriptorProto) ast.Node { if r.nodes == nil { - return r.ifNoAST + return ast.NewNoSourceNode(r.proto.GetName()) } - return r.nodes[asExtsNode(e)].(ast.NodeWithOptions) + return r.nodes[o] } func (r *result) ExtensionRangeNode(e *descriptorpb.DescriptorProto_ExtensionRange) ast.RangeDeclNode { if r.nodes == nil { - return r.ifNoAST + return ast.NewNoSourceNode(r.proto.GetName()) } return r.nodes[e].(ast.RangeDeclNode) } func (r *result) MessageReservedRangeNode(rr *descriptorpb.DescriptorProto_ReservedRange) ast.RangeDeclNode { if r.nodes == nil { - return r.ifNoAST + return ast.NewNoSourceNode(r.proto.GetName()) } return r.nodes[rr].(ast.RangeDeclNode) } -func (r *result) EnumNode(e *descriptorpb.EnumDescriptorProto) ast.NodeWithOptions { +func (r *result) EnumNode(e *descriptorpb.EnumDescriptorProto) ast.Node { if r.nodes == nil { - return r.ifNoAST + return ast.NewNoSourceNode(r.proto.GetName()) } - return r.nodes[e].(ast.NodeWithOptions) + return r.nodes[e] } func (r *result) EnumValueNode(e *descriptorpb.EnumValueDescriptorProto) ast.EnumValueDeclNode { if r.nodes == nil { - return r.ifNoAST + return ast.NewNoSourceNode(r.proto.GetName()) } return r.nodes[e].(ast.EnumValueDeclNode) } func (r *result) EnumReservedRangeNode(rr *descriptorpb.EnumDescriptorProto_EnumReservedRange) ast.RangeDeclNode { if r.nodes == nil { - return r.ifNoAST + return ast.NewNoSourceNode(r.proto.GetName()) } return r.nodes[rr].(ast.RangeDeclNode) } -func (r *result) ServiceNode(s *descriptorpb.ServiceDescriptorProto) ast.NodeWithOptions { +func (r *result) ServiceNode(s *descriptorpb.ServiceDescriptorProto) ast.Node { if r.nodes == nil { - 
return r.ifNoAST + return ast.NewNoSourceNode(r.proto.GetName()) } - return r.nodes[s].(ast.NodeWithOptions) + return r.nodes[s] } func (r *result) MethodNode(m *descriptorpb.MethodDescriptorProto) ast.RPCDeclNode { if r.nodes == nil { - return r.ifNoAST + return ast.NewNoSourceNode(r.proto.GetName()) } return r.nodes[m].(ast.RPCDeclNode) } @@ -964,12 +892,11 @@ func (r *result) putFieldNode(f *descriptorpb.FieldDescriptorProto, n ast.FieldD r.nodes[f] = n } -func (r *result) putOneofNode(o *descriptorpb.OneofDescriptorProto, n ast.OneofDeclNode) { +func (r *result) putOneOfNode(o *descriptorpb.OneofDescriptorProto, n ast.OneOfDeclNode) { r.nodes[o] = n } -func (r *result) putExtensionRangeNode(e *descriptorpb.DescriptorProto_ExtensionRange, er *ast.ExtensionRangeNode, n *ast.RangeNode) { - r.nodes[asExtsNode(e)] = er +func (r *result) putExtensionRangeNode(e *descriptorpb.DescriptorProto_ExtensionRange, n *ast.RangeNode) { r.nodes[e] = n } @@ -999,14 +926,3 @@ func (r *result) putMethodNode(m *descriptorpb.MethodDescriptorProto, n *ast.RPC // NB: If we ever add other put*Node methods, to index other kinds of elements in the descriptor // proto hierarchy, we need to update the index recreation logic in clone.go, too. - -func asExtsNode(er *descriptorpb.DescriptorProto_ExtensionRange) proto.Message { - return extsParent{er} -} - -// a simple marker type that allows us to have two distinct keys in a map for -// the same ExtensionRange proto -- one for the range itself and another to -// associate with the enclosing/parent AST node. -type extsParent struct { - *descriptorpb.DescriptorProto_ExtensionRange -} diff --git a/vendor/github.com/bufbuild/protocompile/parser/validate.go b/vendor/github.com/bufbuild/protocompile/parser/validate.go index 64ebdaa3..494a00ef 100644 --- a/vendor/github.com/bufbuild/protocompile/parser/validate.go +++ b/vendor/github.com/bufbuild/protocompile/parser/validate.go @@ -1,4 +1,4 @@ -// Copyright 2020-2024 Buf Technologies, Inc. +// Copyright 2020-2022 Buf Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -30,55 +30,26 @@ import ( func validateBasic(res *result, handler *reporter.Handler) { fd := res.proto - var syntax protoreflect.Syntax - switch fd.GetSyntax() { - case "", "proto2": - syntax = protoreflect.Proto2 - case "proto3": - syntax = protoreflect.Proto3 - case "editions": - syntax = protoreflect.Editions - // TODO: default: error? 
- } + isProto3 := fd.GetSyntax() == "proto3" if err := validateImports(res, handler); err != nil { return } - if err := validateNoFeatures(res, syntax, "file options", fd.Options.GetUninterpretedOption(), handler); err != nil { - return - } - _ = walk.DescriptorProtos(fd, func(name protoreflect.FullName, d proto.Message) error { switch d := d.(type) { case *descriptorpb.DescriptorProto: - if err := validateMessage(res, syntax, name, d, handler); err != nil { + if err := validateMessage(res, isProto3, name, d, handler); err != nil { // exit func is not called when enter returns error return err } - case *descriptorpb.FieldDescriptorProto: - if err := validateField(res, syntax, name, d, handler); err != nil { - return err - } - case *descriptorpb.OneofDescriptorProto: - if err := validateNoFeatures(res, syntax, fmt.Sprintf("oneof %s", name), d.Options.GetUninterpretedOption(), handler); err != nil { - return err - } case *descriptorpb.EnumDescriptorProto: - if err := validateEnum(res, syntax, name, d, handler); err != nil { - return err - } - case *descriptorpb.EnumValueDescriptorProto: - if err := validateNoFeatures(res, syntax, fmt.Sprintf("enum value %s", name), d.Options.GetUninterpretedOption(), handler); err != nil { + if err := validateEnum(res, isProto3, name, d, handler); err != nil { return err } - case *descriptorpb.ServiceDescriptorProto: - if err := validateNoFeatures(res, syntax, fmt.Sprintf("service %s", name), d.Options.GetUninterpretedOption(), handler); err != nil { - return err - } - case *descriptorpb.MethodDescriptorProto: - if err := validateNoFeatures(res, syntax, fmt.Sprintf("method %s", name), d.Options.GetUninterpretedOption(), handler); err != nil { + case *descriptorpb.FieldDescriptorProto: + if err := validateField(res, isProto3, name, d, handler); err != nil { return err } } @@ -97,56 +68,52 @@ func validateImports(res *result, handler *reporter.Handler) error { if !ok { continue } - info := fileNode.NodeInfo(decl) + startPos := fileNode.NodeInfo(decl).Start() name := imp.Name.AsString() if prev, ok := imports[name]; ok { - return handler.HandleErrorf(info, "%q was already imported at %v", name, prev) - } - imports[name] = info.Start() - } - return nil -} - -func validateNoFeatures(res *result, syntax protoreflect.Syntax, scope string, opts []*descriptorpb.UninterpretedOption, handler *reporter.Handler) error { - if syntax == protoreflect.Editions { - // Editions is allowed to use features - return nil - } - if index, err := internal.FindFirstOption(res, handler.HandleErrorf, scope, opts, "features"); err != nil { - return err - } else if index >= 0 { - optNode := res.OptionNode(opts[index]) - optNameNodeInfo := res.file.NodeInfo(optNode.GetName()) - if err := handler.HandleErrorf(optNameNodeInfo, "%s: option 'features' may only be used with editions but file uses %s syntax", scope, syntax); err != nil { - return err + return handler.HandleErrorf(startPos, "%q was already imported at %v", name, prev) } + imports[name] = startPos } return nil } -func validateMessage(res *result, syntax protoreflect.Syntax, name protoreflect.FullName, md *descriptorpb.DescriptorProto, handler *reporter.Handler) error { +func validateMessage(res *result, isProto3 bool, name protoreflect.FullName, md *descriptorpb.DescriptorProto, handler *reporter.Handler) error { scope := fmt.Sprintf("message %s", name) - if syntax == protoreflect.Proto3 && len(md.ExtensionRange) > 0 { + if isProto3 && len(md.ExtensionRange) > 0 { n := res.ExtensionRangeNode(md.ExtensionRange[0]) nInfo := 
res.file.NodeInfo(n) - if err := handler.HandleErrorf(nInfo, "%s: extension ranges are not allowed in proto3", scope); err != nil { + if err := handler.HandleErrorf(nInfo.Start(), "%s: extension ranges are not allowed in proto3", scope); err != nil { return err } } - if index, err := internal.FindOption(res, handler.HandleErrorf, scope, md.Options.GetUninterpretedOption(), "map_entry"); err != nil { + if index, err := internal.FindOption(res, handler, scope, md.Options.GetUninterpretedOption(), "map_entry"); err != nil { return err } else if index >= 0 { - optNode := res.OptionNode(md.Options.GetUninterpretedOption()[index]) - optNameNodeInfo := res.file.NodeInfo(optNode.GetName()) - if err := handler.HandleErrorf(optNameNodeInfo, "%s: map_entry option should not be set explicitly; use map type instead", scope); err != nil { - return err + opt := md.Options.UninterpretedOption[index] + optn := res.OptionNode(opt) + md.Options.UninterpretedOption = internal.RemoveOption(md.Options.UninterpretedOption, index) + valid := false + if opt.IdentifierValue != nil { + if opt.GetIdentifierValue() == "true" { + valid = true + optionNodeInfo := res.file.NodeInfo(optn.GetValue()) + if err := handler.HandleErrorf(optionNodeInfo.Start(), "%s: map_entry option should not be set explicitly; use map type instead", scope); err != nil { + return err + } + } else if opt.GetIdentifierValue() == "false" { + valid = true + md.Options.MapEntry = proto.Bool(false) + } + } + if !valid { + optionNodeInfo := res.file.NodeInfo(optn.GetValue()) + if err := handler.HandleErrorf(optionNodeInfo.Start(), "%s: expecting bool value for map_entry option", scope); err != nil { + return err + } } - } - - if err := validateNoFeatures(res, syntax, scope, md.Options.GetUninterpretedOption(), handler); err != nil { - return err } // reserved ranges should not overlap @@ -159,7 +126,7 @@ func validateMessage(res *result, syntax protoreflect.Syntax, name protoreflect. for i := 1; i < len(rsvd); i++ { if rsvd[i].start < rsvd[i-1].end { rangeNodeInfo := res.file.NodeInfo(rsvd[i].node) - if err := handler.HandleErrorf(rangeNodeInfo, "%s: reserved ranges overlap: %d to %d and %d to %d", scope, rsvd[i-1].start, rsvd[i-1].end-1, rsvd[i].start, rsvd[i].end-1); err != nil { + if err := handler.HandleErrorf(rangeNodeInfo.Start(), "%s: reserved ranges overlap: %d to %d and %d to %d", scope, rsvd[i-1].start, rsvd[i-1].end-1, rsvd[i].start, rsvd[i].end-1); err != nil { return err } } @@ -168,9 +135,6 @@ func validateMessage(res *result, syntax protoreflect.Syntax, name protoreflect. // extensions ranges should not overlap exts := make(tagRanges, len(md.ExtensionRange)) for i, r := range md.ExtensionRange { - if err := validateNoFeatures(res, syntax, scope, r.Options.GetUninterpretedOption(), handler); err != nil { - return err - } n := res.ExtensionRangeNode(r) exts[i] = tagRange{start: r.GetStart(), end: r.GetEnd(), node: n} } @@ -178,7 +142,7 @@ func validateMessage(res *result, syntax protoreflect.Syntax, name protoreflect. 
for i := 1; i < len(exts); i++ { if exts[i].start < exts[i-1].end { rangeNodeInfo := res.file.NodeInfo(exts[i].node) - if err := handler.HandleErrorf(rangeNodeInfo, "%s: extension ranges overlap: %d to %d and %d to %d", scope, exts[i-1].start, exts[i-1].end-1, exts[i].start, exts[i].end-1); err != nil { + if err := handler.HandleErrorf(rangeNodeInfo.Start(), "%s: extension ranges overlap: %d to %d and %d to %d", scope, exts[i-1].start, exts[i-1].end-1, exts[i].start, exts[i].end-1); err != nil { return err } } @@ -189,16 +153,16 @@ func validateMessage(res *result, syntax protoreflect.Syntax, name protoreflect. for i < len(rsvd) && j < len(exts) { if rsvd[i].start >= exts[j].start && rsvd[i].start < exts[j].end || exts[j].start >= rsvd[i].start && exts[j].start < rsvd[i].end { - var span ast.SourceSpan + var pos ast.SourcePos if rsvd[i].start >= exts[j].start && rsvd[i].start < exts[j].end { rangeNodeInfo := res.file.NodeInfo(rsvd[i].node) - span = rangeNodeInfo + pos = rangeNodeInfo.Start() } else { rangeNodeInfo := res.file.NodeInfo(exts[j].node) - span = rangeNodeInfo + pos = rangeNodeInfo.Start() } // ranges overlap - if err := handler.HandleErrorf(span, "%s: extension range %d to %d overlaps reserved range %d to %d", scope, exts[j].start, exts[j].end-1, rsvd[i].start, rsvd[i].end-1); err != nil { + if err := handler.HandleErrorf(pos, "%s: extension range %d to %d overlaps reserved range %d to %d", scope, exts[j].start, exts[j].end-1, rsvd[i].start, rsvd[i].end-1); err != nil { return err } } @@ -217,7 +181,7 @@ func validateMessage(res *result, syntax protoreflect.Syntax, name protoreflect. if !isIdentifier(n) { node := findMessageReservedNameNode(res.MessageNode(md), n) nodeInfo := res.file.NodeInfo(node) - if err := handler.HandleErrorf(nodeInfo, "%s: reserved name %q is not a valid identifier", scope, n); err != nil { + if err := handler.HandleErrorf(nodeInfo.Start(), "%s: reserved name %q is not a valid identifier", scope, n); err != nil { return err } } @@ -228,13 +192,13 @@ func validateMessage(res *result, syntax protoreflect.Syntax, name protoreflect. fn := res.FieldNode(fld) if _, ok := rsvdNames[fld.GetName()]; ok { fieldNameNodeInfo := res.file.NodeInfo(fn.FieldName()) - if err := handler.HandleErrorf(fieldNameNodeInfo, "%s: field %s is using a reserved name", scope, fld.GetName()); err != nil { + if err := handler.HandleErrorf(fieldNameNodeInfo.Start(), "%s: field %s is using a reserved name", scope, fld.GetName()); err != nil { return err } } if existing := fieldTags[fld.GetNumber()]; existing != "" { fieldTagNodeInfo := res.file.NodeInfo(fn.FieldTag()) - if err := handler.HandleErrorf(fieldTagNodeInfo, "%s: fields %s and %s both have the same tag %d", scope, existing, fld.GetName(), fld.GetNumber()); err != nil { + if err := handler.HandleErrorf(fieldTagNodeInfo.Start(), "%s: fields %s and %s both have the same tag %d", scope, existing, fld.GetName(), fld.GetNumber()); err != nil { return err } } @@ -243,7 +207,7 @@ func validateMessage(res *result, syntax protoreflect.Syntax, name protoreflect. 
r := sort.Search(len(rsvd), func(index int) bool { return rsvd[index].end > fld.GetNumber() }) if r < len(rsvd) && rsvd[r].start <= fld.GetNumber() { fieldTagNodeInfo := res.file.NodeInfo(fn.FieldTag()) - if err := handler.HandleErrorf(fieldTagNodeInfo, "%s: field %s is using tag %d which is in reserved range %d to %d", scope, fld.GetName(), fld.GetNumber(), rsvd[r].start, rsvd[r].end-1); err != nil { + if err := handler.HandleErrorf(fieldTagNodeInfo.Start(), "%s: field %s is using tag %d which is in reserved range %d to %d", scope, fld.GetName(), fld.GetNumber(), rsvd[r].start, rsvd[r].end-1); err != nil { return err } } @@ -251,7 +215,7 @@ func validateMessage(res *result, syntax protoreflect.Syntax, name protoreflect. e := sort.Search(len(exts), func(index int) bool { return exts[index].end > fld.GetNumber() }) if e < len(exts) && exts[e].start <= fld.GetNumber() { fieldTagNodeInfo := res.file.NodeInfo(fn.FieldTag()) - if err := handler.HandleErrorf(fieldTagNodeInfo, "%s: field %s is using tag %d which is in extension range %d to %d", scope, fld.GetName(), fld.GetNumber(), exts[e].start, exts[e].end-1); err != nil { + if err := handler.HandleErrorf(fieldTagNodeInfo.Start(), "%s: field %s is using tag %d which is in extension range %d to %d", scope, fld.GetName(), fld.GetNumber(), exts[e].start, exts[e].end-1); err != nil { return err } } @@ -287,7 +251,7 @@ func findMessageReservedNameNode(msgNode ast.MessageDeclNode, name string) ast.N switch msgNode := msgNode.(type) { case *ast.MessageNode: decls = msgNode.Decls - case *ast.SyntheticGroupMessageNode: + case *ast.GroupNode: decls = msgNode.Decls default: // leave decls empty @@ -314,24 +278,20 @@ func findReservedNameNode[T ast.Node](parent ast.Node, decls []T, name string) a return parent } -func validateEnum(res *result, syntax protoreflect.Syntax, name protoreflect.FullName, ed *descriptorpb.EnumDescriptorProto, handler *reporter.Handler) error { +func validateEnum(res *result, isProto3 bool, name protoreflect.FullName, ed *descriptorpb.EnumDescriptorProto, handler *reporter.Handler) error { scope := fmt.Sprintf("enum %s", name) if len(ed.Value) == 0 { enNode := res.EnumNode(ed) enNodeInfo := res.file.NodeInfo(enNode) - if err := handler.HandleErrorf(enNodeInfo, "%s: enums must define at least one value", scope); err != nil { + if err := handler.HandleErrorf(enNodeInfo.Start(), "%s: enums must define at least one value", scope); err != nil { return err } } - if err := validateNoFeatures(res, syntax, scope, ed.Options.GetUninterpretedOption(), handler); err != nil { - return err - } - allowAlias := false var allowAliasOpt *descriptorpb.UninterpretedOption - if index, err := internal.FindOption(res, handler.HandleErrorf, scope, ed.Options.GetUninterpretedOption(), "allow_alias"); err != nil { + if index, err := internal.FindOption(res, handler, scope, ed.Options.GetUninterpretedOption(), "allow_alias"); err != nil { return err } else if index >= 0 { allowAliasOpt = ed.Options.UninterpretedOption[index] @@ -347,16 +307,16 @@ func validateEnum(res *result, syntax protoreflect.Syntax, name protoreflect.Ful if !valid { optNode := res.OptionNode(allowAliasOpt) optNodeInfo := res.file.NodeInfo(optNode.GetValue()) - if err := handler.HandleErrorf(optNodeInfo, "%s: expecting bool value for allow_alias option", scope); err != nil { + if err := handler.HandleErrorf(optNodeInfo.Start(), "%s: expecting bool value for allow_alias option", scope); err != nil { return err } } } - if syntax == protoreflect.Proto3 && len(ed.Value) > 0 && 
ed.Value[0].GetNumber() != 0 { + if isProto3 && len(ed.Value) > 0 && ed.Value[0].GetNumber() != 0 { evNode := res.EnumValueNode(ed.Value[0]) evNodeInfo := res.file.NodeInfo(evNode.GetNumber()) - if err := handler.HandleErrorf(evNodeInfo, "%s: proto3 requires that first value of enum have numeric value zero", scope); err != nil { + if err := handler.HandleErrorf(evNodeInfo.Start(), "%s: proto3 requires that first value in enum have numeric value of 0", scope); err != nil { return err } } @@ -372,7 +332,7 @@ func validateEnum(res *result, syntax protoreflect.Syntax, name protoreflect.Ful } else { evNode := res.EnumValueNode(evd) evNodeInfo := res.file.NodeInfo(evNode.GetNumber()) - if err := handler.HandleErrorf(evNodeInfo, "%s: values %s and %s both have the same numeric value %d; use allow_alias option if intentional", scope, existing, evd.GetName(), evd.GetNumber()); err != nil { + if err := handler.HandleErrorf(evNodeInfo.Start(), "%s: values %s and %s both have the same numeric value %d; use allow_alias option if intentional", scope, existing, evd.GetName(), evd.GetNumber()); err != nil { return err } } @@ -382,7 +342,7 @@ func validateEnum(res *result, syntax protoreflect.Syntax, name protoreflect.Ful if allowAlias && !hasAlias { optNode := res.OptionNode(allowAliasOpt) optNodeInfo := res.file.NodeInfo(optNode.GetValue()) - if err := handler.HandleErrorf(optNodeInfo, "%s: allow_alias is true but no values are aliases", scope); err != nil { + if err := handler.HandleErrorf(optNodeInfo.Start(), "%s: allow_alias is true but no values are aliases", scope); err != nil { return err } } @@ -397,7 +357,7 @@ func validateEnum(res *result, syntax protoreflect.Syntax, name protoreflect.Ful for i := 1; i < len(rsvd); i++ { if rsvd[i].start <= rsvd[i-1].end { rangeNodeInfo := res.file.NodeInfo(rsvd[i].node) - if err := handler.HandleErrorf(rangeNodeInfo, "%s: reserved ranges overlap: %d to %d and %d to %d", scope, rsvd[i-1].start, rsvd[i-1].end, rsvd[i].start, rsvd[i].end); err != nil { + if err := handler.HandleErrorf(rangeNodeInfo.Start(), "%s: reserved ranges overlap: %d to %d and %d to %d", scope, rsvd[i-1].start, rsvd[i-1].end, rsvd[i].start, rsvd[i].end); err != nil { return err } } @@ -411,7 +371,7 @@ func validateEnum(res *result, syntax protoreflect.Syntax, name protoreflect.Ful if !isIdentifier(n) { node := findEnumReservedNameNode(res.EnumNode(ed), n) nodeInfo := res.file.NodeInfo(node) - if err := handler.HandleErrorf(nodeInfo, "%s: reserved name %q is not a valid identifier", scope, n); err != nil { + if err := handler.HandleErrorf(nodeInfo.Start(), "%s: reserved name %q is not a valid identifier", scope, n); err != nil { return err } } @@ -421,7 +381,7 @@ func validateEnum(res *result, syntax protoreflect.Syntax, name protoreflect.Ful evn := res.EnumValueNode(ev) if _, ok := rsvdNames[ev.GetName()]; ok { enumValNodeInfo := res.file.NodeInfo(evn.GetName()) - if err := handler.HandleErrorf(enumValNodeInfo, "%s: value %s is using a reserved name", scope, ev.GetName()); err != nil { + if err := handler.HandleErrorf(enumValNodeInfo.Start(), "%s: value %s is using a reserved name", scope, ev.GetName()); err != nil { return err } } @@ -429,7 +389,7 @@ func validateEnum(res *result, syntax protoreflect.Syntax, name protoreflect.Ful r := sort.Search(len(rsvd), func(index int) bool { return rsvd[index].end >= ev.GetNumber() }) if r < len(rsvd) && rsvd[r].start <= ev.GetNumber() { enumValNodeInfo := res.file.NodeInfo(evn.GetNumber()) - if err := handler.HandleErrorf(enumValNodeInfo, "%s: value 
%s is using number %d which is in reserved range %d to %d", scope, ev.GetName(), ev.GetNumber(), rsvd[r].start, rsvd[r].end); err != nil { + if err := handler.HandleErrorf(enumValNodeInfo.Start(), "%s: value %s is using number %d which is in reserved range %d to %d", scope, ev.GetName(), ev.GetNumber(), rsvd[r].start, rsvd[r].end); err != nil { return err } } @@ -447,76 +407,47 @@ func findEnumReservedNameNode(enumNode ast.Node, name string) ast.Node { return findReservedNameNode(enumNode, decls, name) } -func validateField(res *result, syntax protoreflect.Syntax, name protoreflect.FullName, fld *descriptorpb.FieldDescriptorProto, handler *reporter.Handler) error { - var scope string - if fld.Extendee != nil { - scope = fmt.Sprintf("extension %s", name) - } else { - scope = fmt.Sprintf("field %s", name) - } +func validateField(res *result, isProto3 bool, name protoreflect.FullName, fld *descriptorpb.FieldDescriptorProto, handler *reporter.Handler) error { + scope := fmt.Sprintf("field %s", name) node := res.FieldNode(fld) - if fld.Number == nil { - fieldTagNodeInfo := res.file.NodeInfo(node) - if err := handler.HandleErrorf(fieldTagNodeInfo, "%s: missing field tag number", scope); err != nil { - return err - } - } - if syntax != protoreflect.Proto2 { + if isProto3 { if fld.GetType() == descriptorpb.FieldDescriptorProto_TYPE_GROUP { groupNodeInfo := res.file.NodeInfo(node.GetGroupKeyword()) - if err := handler.HandleErrorf(groupNodeInfo, "%s: groups are not allowed in proto3 or editions", scope); err != nil { + if err := handler.HandleErrorf(groupNodeInfo.Start(), "%s: groups are not allowed in proto3", scope); err != nil { return err } } else if fld.Label != nil && fld.GetLabel() == descriptorpb.FieldDescriptorProto_LABEL_REQUIRED { fieldLabelNodeInfo := res.file.NodeInfo(node.FieldLabel()) - if err := handler.HandleErrorf(fieldLabelNodeInfo, "%s: label 'required' is not allowed in proto3 or editions", scope); err != nil { + if err := handler.HandleErrorf(fieldLabelNodeInfo.Start(), "%s: label 'required' is not allowed in proto3", scope); err != nil { return err } } - if syntax == protoreflect.Editions { - if fld.Label != nil && fld.GetLabel() == descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL { - fieldLabelNodeInfo := res.file.NodeInfo(node.FieldLabel()) - if err := handler.HandleErrorf(fieldLabelNodeInfo, "%s: label 'optional' is not allowed in editions; use option features.field_presence instead", scope); err != nil { - return err - } - } - if index, err := internal.FindOption(res, handler.HandleErrorf, scope, fld.Options.GetUninterpretedOption(), "packed"); err != nil { - return err - } else if index >= 0 { - optNode := res.OptionNode(fld.Options.GetUninterpretedOption()[index]) - optNameNodeInfo := res.file.NodeInfo(optNode.GetName()) - if err := handler.HandleErrorf(optNameNodeInfo, "%s: packed option is not allowed in editions; use option features.repeated_field_encoding instead", scope); err != nil { - return err - } - } - } else if syntax == protoreflect.Proto3 { - if index, err := internal.FindOption(res, handler.HandleErrorf, scope, fld.Options.GetUninterpretedOption(), "default"); err != nil { + if index, err := internal.FindOption(res, handler, scope, fld.Options.GetUninterpretedOption(), "default"); err != nil { + return err + } else if index >= 0 { + optNode := res.OptionNode(fld.Options.GetUninterpretedOption()[index]) + optNameNodeInfo := res.file.NodeInfo(optNode.GetName()) + if err := handler.HandleErrorf(optNameNodeInfo.Start(), "%s: default values are not allowed in 
proto3", scope); err != nil { return err - } else if index >= 0 { - optNode := res.OptionNode(fld.Options.GetUninterpretedOption()[index]) - optNameNodeInfo := res.file.NodeInfo(optNode.GetName()) - if err := handler.HandleErrorf(optNameNodeInfo, "%s: default values are not allowed in proto3", scope); err != nil { - return err - } } } } else { if fld.Label == nil && fld.OneofIndex == nil { fieldNameNodeInfo := res.file.NodeInfo(node.FieldName()) - if err := handler.HandleErrorf(fieldNameNodeInfo, "%s: field has no label; proto2 requires explicit 'optional' label", scope); err != nil { + if err := handler.HandleErrorf(fieldNameNodeInfo.Start(), "%s: field has no label; proto2 requires explicit 'optional' label", scope); err != nil { return err } } if fld.GetExtendee() != "" && fld.Label != nil && fld.GetLabel() == descriptorpb.FieldDescriptorProto_LABEL_REQUIRED { fieldLabelNodeInfo := res.file.NodeInfo(node.FieldLabel()) - if err := handler.HandleErrorf(fieldLabelNodeInfo, "%s: extension fields cannot be 'required'", scope); err != nil { + if err := handler.HandleErrorf(fieldLabelNodeInfo.Start(), "%s: extension fields cannot be 'required'", scope); err != nil { return err } } } - return validateNoFeatures(res, syntax, scope, fld.Options.GetUninterpretedOption(), handler) + return nil } type tagRange struct { diff --git a/vendor/github.com/bufbuild/protocompile/protoutil/editions.go b/vendor/github.com/bufbuild/protocompile/protoutil/editions.go deleted file mode 100644 index fb21dff6..00000000 --- a/vendor/github.com/bufbuild/protocompile/protoutil/editions.go +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright 2020-2024 Buf Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package protoutil - -import ( - "fmt" - - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/types/descriptorpb" - "google.golang.org/protobuf/types/dynamicpb" - - "github.com/bufbuild/protocompile/internal/editions" -) - -// GetFeatureDefault gets the default value for the given feature and the given -// edition. The given feature must represent a field of the google.protobuf.FeatureSet -// message and must not be an extension. -// -// If the given field is from a dynamically built descriptor (i.e. it's containing -// message descriptor is different from the linked-in descriptor for -// [*descriptorpb.FeatureSet]), the returned value may be a dynamic value. In such -// cases, the value may not be directly usable using [protoreflect.Message.Set] with -// an instance of [*descriptorpb.FeatureSet] and must instead be used with a -// [*dynamicpb.Message]. -// -// To get the default value of a custom feature, use [GetCustomFeatureDefault] -// instead. 
-func GetFeatureDefault(edition descriptorpb.Edition, feature protoreflect.FieldDescriptor) (protoreflect.Value, error) { - if feature.ContainingMessage().FullName() != editions.FeatureSetDescriptor.FullName() { - return protoreflect.Value{}, fmt.Errorf("feature %s is a field of %s but should be a field of %s", - feature.Name(), feature.ContainingMessage().FullName(), editions.FeatureSetDescriptor.FullName()) - } - var msgType protoreflect.MessageType - if feature.ContainingMessage() == editions.FeatureSetDescriptor { - msgType = editions.FeatureSetType - } else { - msgType = dynamicpb.NewMessageType(feature.ContainingMessage()) - } - return editions.GetFeatureDefault(edition, msgType, feature) -} - -// GetCustomFeatureDefault gets the default value for the given custom feature -// and given edition. A custom feature is a field whose containing message is the -// type of an extension field of google.protobuf.FeatureSet. The given extension -// describes that extension field and message type. The given feature must be a -// field of that extension's message type. -func GetCustomFeatureDefault(edition descriptorpb.Edition, extension protoreflect.ExtensionType, feature protoreflect.FieldDescriptor) (protoreflect.Value, error) { - extDesc := extension.TypeDescriptor() - if extDesc.ContainingMessage().FullName() != editions.FeatureSetDescriptor.FullName() { - return protoreflect.Value{}, fmt.Errorf("extension %s does not extend %s", extDesc.FullName(), editions.FeatureSetDescriptor.FullName()) - } - if extDesc.Message() == nil { - return protoreflect.Value{}, fmt.Errorf("extensions of %s should be messages; %s is instead %s", - editions.FeatureSetDescriptor.FullName(), extDesc.FullName(), extDesc.Kind().String()) - } - if feature.IsExtension() { - return protoreflect.Value{}, fmt.Errorf("feature %s is an extension, but feature extension %s may not itself have extensions", - feature.FullName(), extDesc.FullName()) - } - if feature.ContainingMessage().FullName() != extDesc.Message().FullName() { - return protoreflect.Value{}, fmt.Errorf("feature %s is a field of %s but should be a field of %s", - feature.Name(), feature.ContainingMessage().FullName(), extDesc.Message().FullName()) - } - if feature.ContainingMessage() != extDesc.Message() { - return protoreflect.Value{}, fmt.Errorf("feature %s has a different message descriptor from the given extension type for %s", - feature.Name(), extDesc.Message().FullName()) - } - return editions.GetFeatureDefault(edition, extension.Zero().Message().Type(), feature) -} - -// ResolveFeature resolves a feature for the given descriptor. -// -// If the given element is in a proto2 or proto3 syntax file, this skips -// resolution and just returns the relevant default (since such files are not -// allowed to override features). If neither the given element nor any of its -// ancestors override the given feature, the relevant default is returned. -// -// This has the same caveat as GetFeatureDefault if the given feature is from a -// dynamically built descriptor. -func ResolveFeature(element protoreflect.Descriptor, feature protoreflect.FieldDescriptor) (protoreflect.Value, error) { - edition := editions.GetEdition(element) - defaultVal, err := GetFeatureDefault(edition, feature) - if err != nil { - return protoreflect.Value{}, err - } - return resolveFeature(edition, defaultVal, element, feature) -} - -// ResolveCustomFeature resolves a custom feature for the given extension and -// field descriptor. 
-// -// The given extension must be an extension of google.protobuf.FeatureSet that -// represents a non-repeated message value. The given feature is a field in -// that extension's message type. -// -// If the given element is in a proto2 or proto3 syntax file, this skips -// resolution and just returns the relevant default (since such files are not -// allowed to override features). If neither the given element nor any of its -// ancestors override the given feature, the relevant default is returned. -func ResolveCustomFeature(element protoreflect.Descriptor, extension protoreflect.ExtensionType, feature protoreflect.FieldDescriptor) (protoreflect.Value, error) { - edition := editions.GetEdition(element) - defaultVal, err := GetCustomFeatureDefault(edition, extension, feature) - if err != nil { - return protoreflect.Value{}, err - } - return resolveFeature(edition, defaultVal, element, extension.TypeDescriptor(), feature) -} - -func resolveFeature( - edition descriptorpb.Edition, - defaultVal protoreflect.Value, - element protoreflect.Descriptor, - fields ...protoreflect.FieldDescriptor, -) (protoreflect.Value, error) { - if edition == descriptorpb.Edition_EDITION_PROTO2 || edition == descriptorpb.Edition_EDITION_PROTO3 { - // these syntax levels can't specify features, so we can short-circuit the search - // through the descriptor hierarchy for feature overrides - return defaultVal, nil - } - val, err := editions.ResolveFeature(element, fields...) - if err != nil { - return protoreflect.Value{}, err - } - if val.IsValid() { - return val, nil - } - return defaultVal, nil -} diff --git a/vendor/github.com/bufbuild/protocompile/protoutil/protos.go b/vendor/github.com/bufbuild/protocompile/protoutil/protos.go index 9c559993..ad804426 100644 --- a/vendor/github.com/bufbuild/protocompile/protoutil/protos.go +++ b/vendor/github.com/bufbuild/protocompile/protoutil/protos.go @@ -1,4 +1,4 @@ -// Copyright 2020-2024 Buf Technologies, Inc. +// Copyright 2020-2022 Buf Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,12 +14,11 @@ // Package protoutil contains useful functions for interacting with descriptors. // For now these include only functions for efficiently converting descriptors -// produced by the compiler to descriptor protos and functions for resolving -// "features" (a core concept of Protobuf Editions). +// produced by the compiler to descriptor protos. // // Despite the fact that descriptor protos are mutable, calling code should NOT // mutate any of the protos returned from this package. For efficiency, some -// values returned from this package may reference internal state of a compiler +// protos returned from this package may be part of internal state of a compiler // result, and mutating the proto could corrupt or invalidate parts of that // result. 
package protoutil @@ -46,7 +45,7 @@ import ( // FileDescriptor | FileDescriptorProto() // MessageDescriptor | MessageDescriptorProto() // FieldDescriptor | FieldDescriptorProto() -// OneofDescriptor | OneofDescriptorProto() +// OneofDescriptor | OneOfDescriptorProto() // EnumDescriptor | EnumDescriptorProto() // EnumValueDescriptor | EnumValueDescriptorProto() // ServiceDescriptor | ServiceDescriptorProto() diff --git a/vendor/github.com/bufbuild/protocompile/reporter/errors.go b/vendor/github.com/bufbuild/protocompile/reporter/errors.go index 3a70a43e..2932c79f 100644 --- a/vendor/github.com/bufbuild/protocompile/reporter/errors.go +++ b/vendor/github.com/bufbuild/protocompile/reporter/errors.go @@ -1,4 +1,4 @@ -// Copyright 2020-2024 Buf Technologies, Inc. +// Copyright 2020-2022 Buf Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -30,45 +30,39 @@ var ErrInvalidSource = errors.New("parse failed: invalid proto source") // about the location in the file that caused the error. type ErrorWithPos interface { error - ast.SourceSpan - // GetPosition returns the start source position that caused the underlying error. + // GetPosition returns the source position that caused the underlying error. GetPosition() ast.SourcePos // Unwrap returns the underlying error. Unwrap() error } // Error creates a new ErrorWithPos from the given error and source position. -func Error(span ast.SourceSpan, err error) ErrorWithPos { - var ewp ErrorWithPos - if errors.As(err, &ewp) { - // replace existing position with given one - return &errorWithSpan{SourceSpan: span, underlying: ewp.Unwrap()} - } - return &errorWithSpan{SourceSpan: span, underlying: err} +func Error(pos ast.SourcePos, err error) ErrorWithPos { + return errorWithSourcePos{pos: pos, underlying: err} } // Errorf creates a new ErrorWithPos whose underlying error is created using the // given message format and arguments (via fmt.Errorf). -func Errorf(span ast.SourceSpan, format string, args ...interface{}) ErrorWithPos { - return Error(span, fmt.Errorf(format, args...)) +func Errorf(pos ast.SourcePos, format string, args ...interface{}) ErrorWithPos { + return errorWithSourcePos{pos: pos, underlying: fmt.Errorf(format, args...)} } -type errorWithSpan struct { - ast.SourceSpan +type errorWithSourcePos struct { underlying error + pos ast.SourcePos } -func (e *errorWithSpan) Error() string { +func (e errorWithSourcePos) Error() string { sourcePos := e.GetPosition() return fmt.Sprintf("%s: %v", sourcePos, e.underlying) } -func (e *errorWithSpan) GetPosition() ast.SourcePos { - return e.Start() +func (e errorWithSourcePos) GetPosition() ast.SourcePos { + return e.pos } -func (e *errorWithSpan) Unwrap() error { +func (e errorWithSourcePos) Unwrap() error { return e.underlying } -var _ ErrorWithPos = (*errorWithSpan)(nil) +var _ ErrorWithPos = errorWithSourcePos{} diff --git a/vendor/github.com/bufbuild/protocompile/reporter/reporter.go b/vendor/github.com/bufbuild/protocompile/reporter/reporter.go index 8e906406..d3a31686 100644 --- a/vendor/github.com/bufbuild/protocompile/reporter/reporter.go +++ b/vendor/github.com/bufbuild/protocompile/reporter/reporter.go @@ -1,4 +1,4 @@ -// Copyright 2020-2024 Buf Technologies, Inc. +// Copyright 2020-2022 Buf Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -152,8 +152,14 @@ func (h *Handler) HandleError(err error) error { // If the handler has already aborted (by returning a non-nil error from a prior // call to HandleError or HandleErrorf), that same error is returned and the // given error is not reported. -func (h *Handler) HandleErrorWithPos(span ast.SourceSpan, err error) error { - return h.HandleError(Error(span, err)) +func (h *Handler) HandleErrorWithPos(pos ast.SourcePos, err error) error { + if ewp, ok := err.(ErrorWithPos); ok { + // replace existing position with given one + err = errorWithSourcePos{pos: pos, underlying: ewp.Unwrap()} + } else { + err = errorWithSourcePos{pos: pos, underlying: err} + } + return h.HandleError(err) } // HandleErrorf handles an error with the given source position, creating the @@ -162,8 +168,8 @@ func (h *Handler) HandleErrorWithPos(span ast.SourceSpan, err error) error { // If the handler has already aborted (by returning a non-nil error from a call // to HandleError or HandleErrorf), that same error is returned and the given // error is not reported. -func (h *Handler) HandleErrorf(span ast.SourceSpan, format string, args ...interface{}) error { - return h.HandleError(Errorf(span, format, args...)) +func (h *Handler) HandleErrorf(pos ast.SourcePos, format string, args ...interface{}) error { + return h.HandleError(Errorf(pos, format, args...)) } // HandleWarning handles the given warning. This will delegate to the handler's @@ -184,14 +190,21 @@ func (h *Handler) HandleWarning(err ErrorWithPos) { // HandleWarningWithPos handles a warning with the given source position. This will // delegate to the handler's configured reporter. -func (h *Handler) HandleWarningWithPos(span ast.SourceSpan, err error) { - h.HandleWarning(Error(span, err)) +func (h *Handler) HandleWarningWithPos(pos ast.SourcePos, err error) { + ewp, ok := err.(ErrorWithPos) + if ok { + // replace existing position with given one + ewp = errorWithSourcePos{pos: pos, underlying: ewp.Unwrap()} + } else { + ewp = errorWithSourcePos{pos: pos, underlying: err} + } + h.HandleWarning(ewp) } // HandleWarningf handles a warning with the given source position, creating the // actual error value using the given message format and arguments. -func (h *Handler) HandleWarningf(span ast.SourceSpan, format string, args ...interface{}) { - h.HandleWarning(Errorf(span, format, args...)) +func (h *Handler) HandleWarningf(pos ast.SourcePos, format string, args ...interface{}) { + h.HandleWarning(Errorf(pos, format, args...)) } // Error returns the handler result. If any errors have been reported then this diff --git a/vendor/github.com/bufbuild/protocompile/resolver.go b/vendor/github.com/bufbuild/protocompile/resolver.go index 400d554b..6838ef0f 100644 --- a/vendor/github.com/bufbuild/protocompile/resolver.go +++ b/vendor/github.com/bufbuild/protocompile/resolver.go @@ -1,4 +1,4 @@ -// Copyright 2020-2024 Buf Technologies, Inc. +// Copyright 2020-2022 Buf Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -180,27 +180,6 @@ func SourceAccessorFromMap(srcs map[string]string) func(string) (io.ReadCloser, // WithStandardImports returns a new resolver that knows about the same standard // imports that are included with protoc. -// -// Note that this uses the descriptors embedded in generated code in the packages -// of the Protobuf Go module, except for "google/protobuf/cpp_features.proto" and -// "google/protobuf/java_features.proto". 
For those two files, compiled descriptors -// are embedded in this module because there is no package in the Protobuf Go module -// that contains generated code for those files. This resolver also provides results -// for the "google/protobuf/go_features.proto", which is technically not a standard -// file (it is not included with protoc) but is included in generated code in the -// Protobuf Go module. -// -// As of v0.14.0 of this module (and v1.34.2 of the Protobuf Go module and v27.0 of -// Protobuf), the contents of the standard import "google/protobuf/descriptor.proto" -// contain extension declarations which are *absent* from the descriptors that this -// resolver returns. That is because extension declarations are only retained in -// source, not at runtime, which means they are not available in the embedded -// descriptors in generated code. -// -// To use versions of the standard imports that *do* include these extension -// declarations, see wellknownimports.WithStandardImports instead. As of this -// writing, the declarations are only needed to prevent source files from -// illegally re-defining the custom features for C++, Java, and Go. func WithStandardImports(r Resolver) Resolver { return ResolverFunc(func(name string) (SearchResult, error) { res, err := r.FindFileByPath(name) diff --git a/vendor/github.com/bufbuild/protocompile/sourceinfo/source_code_info.go b/vendor/github.com/bufbuild/protocompile/sourceinfo/source_code_info.go index 3b0ae657..6f32f28d 100644 --- a/vendor/github.com/bufbuild/protocompile/sourceinfo/source_code_info.go +++ b/vendor/github.com/bufbuild/protocompile/sourceinfo/source_code_info.go @@ -1,4 +1,4 @@ -// Copyright 2020-2024 Buf Technologies, Inc. +// Copyright 2020-2022 Buf Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -29,128 +29,44 @@ import ( "github.com/bufbuild/protocompile/ast" "github.com/bufbuild/protocompile/internal" + "github.com/bufbuild/protocompile/options" ) -// OptionIndex is a mapping of AST nodes that define options to corresponding -// paths into the containing file descriptor. The path is a sequence of field -// tags and indexes that define a traversal path from the root (the file -// descriptor) to the resolved option field. The info also includes similar -// information about child elements, for options whose values are composite -// (like a list or message literal). -type OptionIndex map[*ast.OptionNode]*OptionSourceInfo - -// OptionSourceInfo describes the source info path for an option value and -// contains information about the value's descendants in the AST. -type OptionSourceInfo struct { - // The source info path to this element. If this element represents a - // declaration with an array-literal value, the last element of the - // path is the index of the first item in the array. - // - // This path is relative to the options message. So the first element - // is a field number of the options message. - // - // If the first element is negative, it indicates the number of path - // components to remove from the path to the relevant options. This is - // used for field pseudo-options, so that the path indicates a field on - // the descriptor, which is a parent of the options message (since that - // is how the pseudo-options are actually stored). 
- Path []int32 - // Children can be an *ArrayLiteralSourceInfo, a *MessageLiteralSourceInfo, - // or nil, depending on whether the option's value is an - // [*ast.ArrayLiteralNode], an [*ast.MessageLiteralNode], or neither. - // For [*ast.ArrayLiteralNode] values, this is only populated if the - // value is a non-empty array of messages. (Empty arrays and arrays - // of scalar values do not need any additional info.) - Children OptionChildrenSourceInfo -} - -// OptionChildrenSourceInfo represents source info paths for child elements of -// an option value. -type OptionChildrenSourceInfo interface { - isChildSourceInfo() -} - -// ArrayLiteralSourceInfo represents source info paths for the child -// elements of an [*ast.ArrayLiteralNode]. This value is only useful for -// non-empty array literals that contain messages. -type ArrayLiteralSourceInfo struct { - Elements []OptionSourceInfo -} - -func (*ArrayLiteralSourceInfo) isChildSourceInfo() {} - -// MessageLiteralSourceInfo represents source info paths for the child -// elements of an [*ast.MessageLiteralNode]. -type MessageLiteralSourceInfo struct { - Fields map[*ast.MessageFieldNode]*OptionSourceInfo -} - -func (*MessageLiteralSourceInfo) isChildSourceInfo() {} - // GenerateSourceInfo generates source code info for the given AST. If the given // opts is present, it can generate source code info for interpreted options. // Otherwise, any options in the AST will get source code info as uninterpreted // options. -func GenerateSourceInfo(file *ast.FileNode, opts OptionIndex, genOpts ...GenerateOption) *descriptorpb.SourceCodeInfo { - if file == nil { - return nil - } - sci := sourceCodeInfo{file: file, commentsUsed: map[ast.SourcePos]struct{}{}} - for _, sourceInfoOpt := range genOpts { - sourceInfoOpt.apply(&sci) - } - generateSourceInfoForFile(opts, &sci, file) - return &descriptorpb.SourceCodeInfo{Location: sci.locs} -} - -// GenerateOption represents an option for how source code info is generated. -type GenerateOption interface { - apply(*sourceCodeInfo) -} - -// WithExtraComments will result in source code info that contains extra comments. -// By default, comments are only generated for full declarations. Inline comments -// around elements of a declaration are not included in source code info. This option -// changes that behavior so that as many comments as possible are described in the -// source code info. -func WithExtraComments() GenerateOption { - return extraCommentsOption{} -} - -// WithExtraOptionLocations will result in source code info that contains extra -// locations to describe elements inside of a message literal. By default, option -// values are treated as opaque, so the only locations included are for the entire -// option value. But with this option, paths to the various fields set inside a -// message literal will also have locations. This makes it possible for usages of -// the source code info to report precise locations for specific fields inside the -// value. -func WithExtraOptionLocations() GenerateOption { - return extraOptionLocationsOption{} +// +// This includes comments only for locations that represent complete declarations. +// This is the same behavior as protoc, the reference compiler for Protocol Buffers. 
+func GenerateSourceInfo(file *ast.FileNode, opts options.Index) *descriptorpb.SourceCodeInfo { + return generateSourceInfo(file, opts, false) } -type extraCommentsOption struct{} - -func (e extraCommentsOption) apply(info *sourceCodeInfo) { - info.extraComments = true +// GenerateSourceInfoWithExtraComments generates source code info for the given +// AST. If the given opts is present, it can generate source code info for +// interpreted options. Otherwise, any options in the AST will get source code +// info as uninterpreted options. +// +// This includes comments for all locations. This is still lossy, but less so as +// it preserves far more comments from the source file. +func GenerateSourceInfoWithExtraComments(file *ast.FileNode, opts options.Index) *descriptorpb.SourceCodeInfo { + return generateSourceInfo(file, opts, true) } -type extraOptionLocationsOption struct{} - -func (e extraOptionLocationsOption) apply(info *sourceCodeInfo) { - info.extraOptionLocs = true -} +func generateSourceInfo(file *ast.FileNode, opts options.Index, extraComments bool) *descriptorpb.SourceCodeInfo { + if file == nil { + return nil + } -func generateSourceInfoForFile(opts OptionIndex, sci *sourceCodeInfo, file *ast.FileNode) { - path := make([]int32, 0, 16) + sci := sourceCodeInfo{file: file, commentsUsed: map[ast.SourcePos]struct{}{}, extraComments: extraComments} + path := make([]int32, 0, 10) sci.newLocWithoutComments(file, nil) if file.Syntax != nil { sci.newLocWithComments(file.Syntax, append(path, internal.FileSyntaxTag)) } - if file.Edition != nil { - sci.newLocWithComments(file.Edition, append(path, internal.FileEditionTag)) - } var depIndex, pubDepIndex, weakDepIndex, optIndex, msgIndex, enumIndex, extendIndex, svcIndex int32 @@ -169,39 +85,44 @@ func generateSourceInfoForFile(opts OptionIndex, sci *sourceCodeInfo, file *ast. 
case *ast.PackageNode: sci.newLocWithComments(child, append(path, internal.FilePackageTag)) case *ast.OptionNode: - generateSourceCodeInfoForOption(opts, sci, child, false, &optIndex, append(path, internal.FileOptionsTag)) + generateSourceCodeInfoForOption(opts, &sci, child, false, &optIndex, append(path, internal.FileOptionsTag)) case *ast.MessageNode: - generateSourceCodeInfoForMessage(opts, sci, child, nil, append(path, internal.FileMessagesTag, msgIndex)) + generateSourceCodeInfoForMessage(opts, &sci, child, nil, append(path, internal.FileMessagesTag, msgIndex)) msgIndex++ case *ast.EnumNode: - generateSourceCodeInfoForEnum(opts, sci, child, append(path, internal.FileEnumsTag, enumIndex)) + generateSourceCodeInfoForEnum(opts, &sci, child, append(path, internal.FileEnumsTag, enumIndex)) enumIndex++ case *ast.ExtendNode: - extsPath := append(path, internal.FileExtensionsTag) //nolint:gocritic // intentionally creating new slice var - // we clone the path here so that append can't mutate extsPath, since they may share storage - msgsPath := append(internal.ClonePath(path), internal.FileMessagesTag) - generateSourceCodeInfoForExtensions(opts, sci, child, &extendIndex, &msgIndex, extsPath, msgsPath) + generateSourceCodeInfoForExtensions(opts, &sci, child, &extendIndex, &msgIndex, append(path, internal.FileExtensionsTag), append(dup(path), internal.FileMessagesTag)) case *ast.ServiceNode: - generateSourceCodeInfoForService(opts, sci, child, append(path, internal.FileServicesTag, svcIndex)) + generateSourceCodeInfoForService(opts, &sci, child, append(path, internal.FileServicesTag, svcIndex)) svcIndex++ } } + + return &descriptorpb.SourceCodeInfo{Location: sci.locs} } -func generateSourceCodeInfoForOption(opts OptionIndex, sci *sourceCodeInfo, n *ast.OptionNode, compact bool, uninterpIndex *int32, path []int32) { +func generateSourceCodeInfoForOption(opts options.Index, sci *sourceCodeInfo, n *ast.OptionNode, compact bool, uninterpIndex *int32, path []int32) { if !compact { sci.newLocWithoutComments(n, path) } - optInfo := opts[n] - if optInfo != nil { - fullPath := combinePathsForOption(path, optInfo.Path) + subPath := opts[n] + if len(subPath) > 0 { + p := make([]int32, len(path), len(path)+len(subPath)) + copy(p, path) + if subPath[0] == -1 { + // used by "default" and "json_name" field pseudo-options + // to attribute path to parent element (since those are + // stored directly on the descriptor, not its options) + subPath = subPath[1:] + p = p[:len(path)-1] + } + p = append(p, subPath...) if compact { - sci.newLoc(n, fullPath) + sci.newLoc(n, p) } else { - sci.newLocWithComments(n, fullPath) - } - if sci.extraOptionLocs { - generateSourceInfoForOptionChildren(sci, n.Val, path, fullPath, optInfo.Children) + sci.newLocWithComments(n, p) } return } @@ -237,76 +158,7 @@ func generateSourceCodeInfoForOption(opts OptionIndex, sci *sourceCodeInfo, n *a } } -func combinePathsForOption(prefix, optionPath []int32) []int32 { - fullPath := make([]int32, len(prefix), len(prefix)+len(optionPath)) - copy(fullPath, prefix) - if optionPath[0] == -1 { - // used by "default" and "json_name" field pseudo-options - // to attribute path to parent element (since those are - // stored directly on the descriptor, not its options) - optionPath = optionPath[1:] - fullPath = fullPath[:len(prefix)-1] - } - return append(fullPath, optionPath...) 
-} - -func generateSourceInfoForOptionChildren(sci *sourceCodeInfo, n ast.ValueNode, pathPrefix, path []int32, childInfo OptionChildrenSourceInfo) { - switch childInfo := childInfo.(type) { - case *ArrayLiteralSourceInfo: - if arrayLiteral, ok := n.(*ast.ArrayLiteralNode); ok { - for i, val := range arrayLiteral.Elements { - elementInfo := childInfo.Elements[i] - fullPath := combinePathsForOption(pathPrefix, elementInfo.Path) - sci.newLoc(val, fullPath) - generateSourceInfoForOptionChildren(sci, val, pathPrefix, fullPath, elementInfo.Children) - } - } - case *MessageLiteralSourceInfo: - if msgLiteral, ok := n.(*ast.MessageLiteralNode); ok { - for _, fieldNode := range msgLiteral.Elements { - fieldInfo, ok := childInfo.Fields[fieldNode] - if !ok { - continue - } - fullPath := combinePathsForOption(pathPrefix, fieldInfo.Path) - locationNode := ast.Node(fieldNode) - if fieldNode.Name.IsAnyTypeReference() && fullPath[len(fullPath)-1] == internal.AnyValueTag { - // This is a special expanded Any. So also insert a location - // for the type URL field. - typeURLPath := make([]int32, len(fullPath)) - copy(typeURLPath, fullPath) - typeURLPath[len(typeURLPath)-1] = internal.AnyTypeURLTag - sci.newLoc(fieldNode.Name, fullPath) - // And create the next location so it's just the value, - // not the full field definition. - locationNode = fieldNode.Val - } - _, isArrayLiteral := fieldNode.Val.(*ast.ArrayLiteralNode) - if !isArrayLiteral { - // We don't include this with an array literal since the path - // is to the first element of the array. If we added it here, - // it would be redundant with the child info we add next, and - // it wouldn't be entirely correct since it only indicates the - // index of the first element in the array (and not the others). - sci.newLoc(locationNode, fullPath) - } - generateSourceInfoForOptionChildren(sci, fieldNode.Val, pathPrefix, fullPath, fieldInfo.Children) - } - } - case nil: - if arrayLiteral, ok := n.(*ast.ArrayLiteralNode); ok { - // an array literal without child source info is an array of scalars - for i, val := range arrayLiteral.Elements { - // last element of path is starting index for array literal - elementPath := append(([]int32)(nil), path...) 
- elementPath[len(elementPath)-1] += int32(i) - sci.newLoc(val, elementPath) - } - } - } -} - -func generateSourceCodeInfoForMessage(opts OptionIndex, sci *sourceCodeInfo, n ast.MessageDeclNode, fieldPath []int32, path []int32) { +func generateSourceCodeInfoForMessage(opts options.Index, sci *sourceCodeInfo, n ast.MessageDeclNode, fieldPath []int32, path []int32) { var openBrace ast.Node var decls []ast.MessageElement @@ -314,10 +166,10 @@ func generateSourceCodeInfoForMessage(opts OptionIndex, sci *sourceCodeInfo, n a case *ast.MessageNode: openBrace = n.OpenBrace decls = n.Decls - case *ast.SyntheticGroupMessageNode: + case *ast.GroupNode: openBrace = n.OpenBrace decls = n.Decls - case *ast.SyntheticMapEntryNode: + case *ast.MapFieldNode: sci.newLoc(n, path) // map entry so nothing else to do return @@ -331,7 +183,7 @@ func generateSourceCodeInfoForMessage(opts OptionIndex, sci *sourceCodeInfo, n a sci.newLoc(n.MessageName(), append(fieldPath, internal.FieldTypeNameTag)) } - var optIndex, fieldIndex, oneofIndex, extendIndex, nestedMsgIndex int32 + var optIndex, fieldIndex, oneOfIndex, extendIndex, nestedMsgIndex int32 var nestedEnumIndex, extRangeIndex, reservedRangeIndex, reservedNameIndex int32 for _, child := range decls { switch child := child.(type) { @@ -341,25 +193,19 @@ func generateSourceCodeInfoForMessage(opts OptionIndex, sci *sourceCodeInfo, n a generateSourceCodeInfoForField(opts, sci, child, append(path, internal.MessageFieldsTag, fieldIndex)) fieldIndex++ case *ast.GroupNode: - fldPath := append(path, internal.MessageFieldsTag, fieldIndex) //nolint:gocritic // intentionally creating new slice var + fldPath := path + fldPath = append(fldPath, internal.MessageFieldsTag, fieldIndex) generateSourceCodeInfoForField(opts, sci, child, fldPath) fieldIndex++ - // we clone the path here so that append can't mutate fldPath, since they may share storage - msgPath := append(internal.ClonePath(path), internal.MessageNestedMessagesTag, nestedMsgIndex) - generateSourceCodeInfoForMessage(opts, sci, child.AsMessage(), fldPath, msgPath) + generateSourceCodeInfoForMessage(opts, sci, child, fldPath, append(dup(path), internal.MessageNestedMessagesTag, nestedMsgIndex)) nestedMsgIndex++ case *ast.MapFieldNode: generateSourceCodeInfoForField(opts, sci, child, append(path, internal.MessageFieldsTag, fieldIndex)) fieldIndex++ nestedMsgIndex++ - case *ast.OneofNode: - fldsPath := append(path, internal.MessageFieldsTag) //nolint:gocritic // intentionally creating new slice var - // we clone the path here and below so that append ops can't mutate - // fldPath or msgsPath, since they may otherwise share storage - msgsPath := append(internal.ClonePath(path), internal.MessageNestedMessagesTag) - ooPath := append(internal.ClonePath(path), internal.MessageOneofsTag, oneofIndex) - generateSourceCodeInfoForOneof(opts, sci, child, &fieldIndex, &nestedMsgIndex, fldsPath, msgsPath, ooPath) - oneofIndex++ + case *ast.OneOfNode: + generateSourceCodeInfoForOneOf(opts, sci, child, &fieldIndex, &nestedMsgIndex, append(path, internal.MessageFieldsTag), append(dup(path), internal.MessageNestedMessagesTag), append(dup(path), internal.MessageOneOfsTag, oneOfIndex)) + oneOfIndex++ case *ast.MessageNode: generateSourceCodeInfoForMessage(opts, sci, child, nil, append(path, internal.MessageNestedMessagesTag, nestedMsgIndex)) nestedMsgIndex++ @@ -367,10 +213,7 @@ func generateSourceCodeInfoForMessage(opts OptionIndex, sci *sourceCodeInfo, n a generateSourceCodeInfoForEnum(opts, sci, child, append(path, 
internal.MessageEnumsTag, nestedEnumIndex)) nestedEnumIndex++ case *ast.ExtendNode: - extsPath := append(path, internal.MessageExtensionsTag) //nolint:gocritic // intentionally creating new slice var - // we clone the path here so that append can't mutate extsPath, since they may share storage - msgsPath := append(internal.ClonePath(path), internal.MessageNestedMessagesTag) - generateSourceCodeInfoForExtensions(opts, sci, child, &extendIndex, &nestedMsgIndex, extsPath, msgsPath) + generateSourceCodeInfoForExtensions(opts, sci, child, &extendIndex, &nestedMsgIndex, append(path, internal.MessageExtensionsTag), append(dup(path), internal.MessageNestedMessagesTag)) case *ast.ExtensionRangeNode: generateSourceCodeInfoForExtensionRanges(opts, sci, child, &extRangeIndex, append(path, internal.MessageExtensionRangesTag)) case *ast.ReservedNode: @@ -396,7 +239,7 @@ func generateSourceCodeInfoForMessage(opts OptionIndex, sci *sourceCodeInfo, n a } } -func generateSourceCodeInfoForEnum(opts OptionIndex, sci *sourceCodeInfo, n *ast.EnumNode, path []int32) { +func generateSourceCodeInfoForEnum(opts options.Index, sci *sourceCodeInfo, n *ast.EnumNode, path []int32) { sci.newBlockLocWithComments(n, n.OpenBrace, path) sci.newLoc(n.Name, append(path, internal.EnumNameTag)) @@ -431,7 +274,7 @@ func generateSourceCodeInfoForEnum(opts OptionIndex, sci *sourceCodeInfo, n *ast } } -func generateSourceCodeInfoForEnumValue(opts OptionIndex, sci *sourceCodeInfo, n *ast.EnumValueNode, path []int32) { +func generateSourceCodeInfoForEnumValue(opts options.Index, sci *sourceCodeInfo, n *ast.EnumValueNode, path []int32) { sci.newLocWithComments(n, path) sci.newLoc(n.Name, append(path, internal.EnumValNameTag)) sci.newLoc(n.Number, append(path, internal.EnumValNumberTag)) @@ -461,7 +304,7 @@ func generateSourceCodeInfoForReservedRange(sci *sourceCodeInfo, n *ast.RangeNod } } -func generateSourceCodeInfoForExtensions(opts OptionIndex, sci *sourceCodeInfo, n *ast.ExtendNode, extendIndex, msgIndex *int32, extendPath, msgPath []int32) { +func generateSourceCodeInfoForExtensions(opts options.Index, sci *sourceCodeInfo, n *ast.ExtendNode, extendIndex, msgIndex *int32, extendPath, msgPath []int32) { sci.newBlockLocWithComments(n, n.OpenBrace, extendPath) for _, decl := range n.Decls { switch decl := decl.(type) { @@ -473,21 +316,21 @@ func generateSourceCodeInfoForExtensions(opts OptionIndex, sci *sourceCodeInfo, fldPath = append(fldPath, *extendIndex) generateSourceCodeInfoForField(opts, sci, decl, fldPath) *extendIndex++ - generateSourceCodeInfoForMessage(opts, sci, decl.AsMessage(), fldPath, append(msgPath, *msgIndex)) + generateSourceCodeInfoForMessage(opts, sci, decl, fldPath, append(msgPath, *msgIndex)) *msgIndex++ } } } -func generateSourceCodeInfoForOneof(opts OptionIndex, sci *sourceCodeInfo, n *ast.OneofNode, fieldIndex, nestedMsgIndex *int32, fieldPath, nestedMsgPath, oneofPath []int32) { - sci.newBlockLocWithComments(n, n.OpenBrace, oneofPath) - sci.newLoc(n.Name, append(oneofPath, internal.OneofNameTag)) +func generateSourceCodeInfoForOneOf(opts options.Index, sci *sourceCodeInfo, n *ast.OneOfNode, fieldIndex, nestedMsgIndex *int32, fieldPath, nestedMsgPath, oneOfPath []int32) { + sci.newBlockLocWithComments(n, n.OpenBrace, oneOfPath) + sci.newLoc(n.Name, append(oneOfPath, internal.OneOfNameTag)) var optIndex int32 for _, child := range n.Decls { switch child := child.(type) { case *ast.OptionNode: - generateSourceCodeInfoForOption(opts, sci, child, false, &optIndex, append(oneofPath, internal.OneofOptionsTag)) + 
generateSourceCodeInfoForOption(opts, sci, child, false, &optIndex, append(oneOfPath, internal.OneOfOptionsTag)) case *ast.FieldNode: generateSourceCodeInfoForField(opts, sci, child, append(fieldPath, *fieldIndex)) *fieldIndex++ @@ -496,13 +339,13 @@ func generateSourceCodeInfoForOneof(opts OptionIndex, sci *sourceCodeInfo, n *as fldPath = append(fldPath, *fieldIndex) generateSourceCodeInfoForField(opts, sci, child, fldPath) *fieldIndex++ - generateSourceCodeInfoForMessage(opts, sci, child.AsMessage(), fldPath, append(nestedMsgPath, *nestedMsgIndex)) + generateSourceCodeInfoForMessage(opts, sci, child, fldPath, append(nestedMsgPath, *nestedMsgIndex)) *nestedMsgIndex++ } } } -func generateSourceCodeInfoForField(opts OptionIndex, sci *sourceCodeInfo, n ast.FieldDeclNode, path []int32) { +func generateSourceCodeInfoForField(opts options.Index, sci *sourceCodeInfo, n ast.FieldDeclNode, path []int32) { var fieldType string if f, ok := n.(*ast.FieldNode); ok { fieldType = string(f.FldType.AsIdentifier()) @@ -554,7 +397,7 @@ func generateSourceCodeInfoForField(opts OptionIndex, sci *sourceCodeInfo, n ast } } -func generateSourceCodeInfoForExtensionRanges(opts OptionIndex, sci *sourceCodeInfo, n *ast.ExtensionRangeNode, extRangeIndex *int32, path []int32) { +func generateSourceCodeInfoForExtensionRanges(opts options.Index, sci *sourceCodeInfo, n *ast.ExtensionRangeNode, extRangeIndex *int32, path []int32) { sci.newLocWithComments(n, path) startExtRangeIndex := *extRangeIndex for _, child := range n.Ranges { @@ -587,7 +430,7 @@ func generateSourceCodeInfoForExtensionRanges(opts OptionIndex, sci *sourceCodeI } } -func generateSourceCodeInfoForService(opts OptionIndex, sci *sourceCodeInfo, n *ast.ServiceNode, path []int32) { +func generateSourceCodeInfoForService(opts options.Index, sci *sourceCodeInfo, n *ast.ServiceNode, path []int32) { sci.newBlockLocWithComments(n, n.OpenBrace, path) sci.newLoc(n.Name, append(path, internal.ServiceNameTag)) var optIndex, rpcIndex int32 @@ -602,7 +445,7 @@ func generateSourceCodeInfoForService(opts OptionIndex, sci *sourceCodeInfo, n * } } -func generateSourceCodeInfoForMethod(opts OptionIndex, sci *sourceCodeInfo, n *ast.RPCNode, path []int32) { +func generateSourceCodeInfoForMethod(opts options.Index, sci *sourceCodeInfo, n *ast.RPCNode, path []int32) { if n.OpenBrace != nil { sci.newBlockLocWithComments(n, n.OpenBrace, path) } else { @@ -629,14 +472,15 @@ func generateSourceCodeInfoForMethod(opts OptionIndex, sci *sourceCodeInfo, n *a } type sourceCodeInfo struct { - file *ast.FileNode - extraComments bool - extraOptionLocs bool - locs []*descriptorpb.SourceCodeInfo_Location - commentsUsed map[ast.SourcePos]struct{} + file *ast.FileNode + extraComments bool + locs []*descriptorpb.SourceCodeInfo_Location + commentsUsed map[ast.SourcePos]struct{} } func (sci *sourceCodeInfo) newLocWithoutComments(n ast.Node, path []int32) { + dup := make([]int32, len(path)) + copy(dup, path) var start, end ast.SourcePos if n == sci.file { // For files, we don't want to consider trailing EOF token @@ -659,7 +503,7 @@ func (sci *sourceCodeInfo) newLocWithoutComments(n ast.Node, path []int32) { start, end = info.Start(), info.End() } sci.locs = append(sci.locs, &descriptorpb.SourceCodeInfo_Location{ - Path: internal.ClonePath(path), + Path: dup, Span: makeSpan(start, end), }) } @@ -667,9 +511,11 @@ func (sci *sourceCodeInfo) newLocWithoutComments(n ast.Node, path []int32) { func (sci *sourceCodeInfo) newLoc(n ast.Node, path []int32) { info := sci.file.NodeInfo(n) if 
!sci.extraComments { + dup := make([]int32, len(path)) + copy(dup, path) start, end := info.Start(), info.End() sci.locs = append(sci.locs, &descriptorpb.SourceCodeInfo_Location{ - Path: internal.ClonePath(path), + Path: dup, Span: makeSpan(start, end), }) } else { @@ -730,11 +576,13 @@ func (sci *sourceCodeInfo) newLocWithGivenComments(nodeInfo ast.NodeInfo, detach detached[i] = sci.combineComments(cmts) } + dup := make([]int32, len(path)) + copy(dup, path) sci.locs = append(sci.locs, &descriptorpb.SourceCodeInfo_Location{ LeadingDetachedComments: detached, LeadingComments: lead, TrailingComments: trail, - Path: internal.ClonePath(path), + Path: dup, Span: makeSpan(nodeInfo.Start(), nodeInfo.End()), }) } @@ -960,3 +808,7 @@ func (sci *sourceCodeInfo) combineComments(comments comments) string { } return buf.String() } + +func dup(p []int32) []int32 { + return append(([]int32)(nil), p...) +} diff --git a/vendor/github.com/bufbuild/protocompile/std_imports.go b/vendor/github.com/bufbuild/protocompile/std_imports.go index a31232ac..dcf8553a 100644 --- a/vendor/github.com/bufbuild/protocompile/std_imports.go +++ b/vendor/github.com/bufbuild/protocompile/std_imports.go @@ -1,4 +1,4 @@ -// Copyright 2020-2024 Buf Technologies, Inc. +// Copyright 2020-2022 Buf Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -17,8 +17,7 @@ package protocompile import ( "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" - _ "google.golang.org/protobuf/types/gofeaturespb" // link in packages that include the standard protos included with protoc. - _ "google.golang.org/protobuf/types/known/anypb" + _ "google.golang.org/protobuf/types/known/anypb" // link in packages that include the standard protos included with protoc. _ "google.golang.org/protobuf/types/known/apipb" _ "google.golang.org/protobuf/types/known/durationpb" _ "google.golang.org/protobuf/types/known/emptypb" @@ -29,8 +28,6 @@ import ( _ "google.golang.org/protobuf/types/known/typepb" _ "google.golang.org/protobuf/types/known/wrapperspb" _ "google.golang.org/protobuf/types/pluginpb" - - "github.com/bufbuild/protocompile/internal/featuresext" ) // All files that are included with protoc are also included with this package @@ -47,7 +44,6 @@ func init() { "google/protobuf/duration.proto", "google/protobuf/empty.proto", "google/protobuf/field_mask.proto", - "google/protobuf/go_features.proto", "google/protobuf/source_context.proto", "google/protobuf/struct.proto", "google/protobuf/timestamp.proto", @@ -63,34 +59,4 @@ func init() { } standardImports[fn] = fd } - - otherFeatures := []struct { - Name string - GetDescriptor func() (protoreflect.FileDescriptor, error) - }{ - { - Name: "google/protobuf/cpp_features.proto", - GetDescriptor: featuresext.CppFeaturesDescriptor, - }, - { - Name: "google/protobuf/java_features.proto", - GetDescriptor: featuresext.JavaFeaturesDescriptor, - }, - } - for _, feature := range otherFeatures { - // First see if the program has generated Go code for this - // file linked in: - fd, err := protoregistry.GlobalFiles.FindFileByPath(feature.Name) - if err == nil { - standardImports[feature.Name] = fd - continue - } - fd, err = feature.GetDescriptor() - if err != nil { - // For these extensions to FeatureSet, we are lenient. If - // we can't load them, just ignore them. 
- continue - } - standardImports[feature.Name] = fd - } } diff --git a/vendor/github.com/bufbuild/protocompile/supported_editions.go b/vendor/github.com/bufbuild/protocompile/supported_editions.go deleted file mode 100644 index 72bd51f1..00000000 --- a/vendor/github.com/bufbuild/protocompile/supported_editions.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2020-2024 Buf Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package protocompile - -import ( - "google.golang.org/protobuf/types/descriptorpb" - - "github.com/bufbuild/protocompile/internal/editions" -) - -// IsEditionSupported returns true if this module can compile sources for -// the given edition. This returns true for the special EDITION_PROTO2 and -// EDITION_PROTO3 as well as all actual editions supported. -func IsEditionSupported(edition descriptorpb.Edition) bool { - return edition == descriptorpb.Edition_EDITION_PROTO2 || - edition == descriptorpb.Edition_EDITION_PROTO3 || - (edition >= editions.MinSupportedEdition && edition <= editions.MaxSupportedEdition) -} diff --git a/vendor/github.com/bufbuild/protocompile/walk/walk.go b/vendor/github.com/bufbuild/protocompile/walk/walk.go index 244fa720..e7a1ab3b 100644 --- a/vendor/github.com/bufbuild/protocompile/walk/walk.go +++ b/vendor/github.com/bufbuild/protocompile/walk/walk.go @@ -1,4 +1,4 @@ -// Copyright 2020-2024 Buf Technologies, Inc. +// Copyright 2020-2022 Buf Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -65,18 +65,36 @@ func Descriptors(file protoreflect.FileDescriptor, fn func(protoreflect.Descript // The exit function is called using a post-order traversal, where the function // is called for a descriptor only after it is called for any descendants. 
func DescriptorsEnterAndExit(file protoreflect.FileDescriptor, enter, exit func(protoreflect.Descriptor) error) error { - if err := walkContainer(file, enter, exit); err != nil { - return err + for i := 0; i < file.Messages().Len(); i++ { + msg := file.Messages().Get(i) + if err := messageDescriptor(msg, enter, exit); err != nil { + return err + } + } + for i := 0; i < file.Enums().Len(); i++ { + en := file.Enums().Get(i) + if err := enumDescriptor(en, enter, exit); err != nil { + return err + } + } + for i := 0; i < file.Extensions().Len(); i++ { + ext := file.Extensions().Get(i) + if err := enter(ext); err != nil { + return err + } + if exit != nil { + if err := exit(ext); err != nil { + return err + } + } } - services := file.Services() - for i, length := 0, services.Len(); i < length; i++ { - svc := services.Get(i) + for i := 0; i < file.Services().Len(); i++ { + svc := file.Services().Get(i) if err := enter(svc); err != nil { return err } - methods := svc.Methods() - for i, length := 0, methods.Len(); i < length; i++ { - mtd := methods.Get(i) + for i := 0; i < svc.Methods().Len(); i++ { + mtd := svc.Methods().Get(i) if err := enter(mtd); err != nil { return err } @@ -95,49 +113,12 @@ func DescriptorsEnterAndExit(file protoreflect.FileDescriptor, enter, exit func( return nil } -type container interface { - Messages() protoreflect.MessageDescriptors - Enums() protoreflect.EnumDescriptors - Extensions() protoreflect.ExtensionDescriptors -} - -func walkContainer(container container, enter, exit func(protoreflect.Descriptor) error) error { - messages := container.Messages() - for i, length := 0, messages.Len(); i < length; i++ { - msg := messages.Get(i) - if err := messageDescriptor(msg, enter, exit); err != nil { - return err - } - } - enums := container.Enums() - for i, length := 0, enums.Len(); i < length; i++ { - en := enums.Get(i) - if err := enumDescriptor(en, enter, exit); err != nil { - return err - } - } - exts := container.Extensions() - for i, length := 0, exts.Len(); i < length; i++ { - ext := exts.Get(i) - if err := enter(ext); err != nil { - return err - } - if exit != nil { - if err := exit(ext); err != nil { - return err - } - } - } - return nil -} - func messageDescriptor(msg protoreflect.MessageDescriptor, enter, exit func(protoreflect.Descriptor) error) error { if err := enter(msg); err != nil { return err } - fields := msg.Fields() - for i, length := 0, fields.Len(); i < length; i++ { - fld := fields.Get(i) + for i := 0; i < msg.Fields().Len(); i++ { + fld := msg.Fields().Get(i) if err := enter(fld); err != nil { return err } @@ -147,9 +128,8 @@ func messageDescriptor(msg protoreflect.MessageDescriptor, enter, exit func(prot } } } - oneofs := msg.Oneofs() - for i, length := 0, oneofs.Len(); i < length; i++ { - oo := oneofs.Get(i) + for i := 0; i < msg.Oneofs().Len(); i++ { + oo := msg.Oneofs().Get(i) if err := enter(oo); err != nil { return err } @@ -159,8 +139,28 @@ func messageDescriptor(msg protoreflect.MessageDescriptor, enter, exit func(prot } } } - if err := walkContainer(msg, enter, exit); err != nil { - return err + for i := 0; i < msg.Messages().Len(); i++ { + nested := msg.Messages().Get(i) + if err := messageDescriptor(nested, enter, exit); err != nil { + return err + } + } + for i := 0; i < msg.Enums().Len(); i++ { + en := msg.Enums().Get(i) + if err := enumDescriptor(en, enter, exit); err != nil { + return err + } + } + for i := 0; i < msg.Extensions().Len(); i++ { + ext := msg.Extensions().Get(i) + if err := enter(ext); err != nil { + return err + } + if 
exit != nil { + if err := exit(ext); err != nil { + return err + } + } } if exit != nil { if err := exit(msg); err != nil { @@ -174,9 +174,8 @@ func enumDescriptor(en protoreflect.EnumDescriptor, enter, exit func(protoreflec if err := enter(en); err != nil { return err } - vals := en.Values() - for i, length := 0, vals.Len(); i < length; i++ { - enVal := vals.Get(i) + for i := 0; i < en.Values().Len(); i++ { + enVal := en.Values().Get(i) if err := enter(enVal); err != nil { return err } @@ -236,12 +235,12 @@ func DescriptorProtos(file *descriptorpb.FileDescriptorProto, fn func(protorefle // the function is called for a descriptor proto only after it is called for any // descendants. func DescriptorProtosEnterAndExit(file *descriptorpb.FileDescriptorProto, enter, exit func(protoreflect.FullName, proto.Message) error) error { - enterWithPath := func(n protoreflect.FullName, _ protoreflect.SourcePath, m proto.Message) error { + enterWithPath := func(n protoreflect.FullName, p protoreflect.SourcePath, m proto.Message) error { return enter(n, m) } - var exitWithPath func(protoreflect.FullName, protoreflect.SourcePath, proto.Message) error + var exitWithPath func(n protoreflect.FullName, p protoreflect.SourcePath, m proto.Message) error if exit != nil { - exitWithPath = func(n protoreflect.FullName, _ protoreflect.SourcePath, m proto.Message) error { + exitWithPath = func(n protoreflect.FullName, p protoreflect.SourcePath, m proto.Message) error { return exit(n, m) } } @@ -360,7 +359,7 @@ func (w *protoWalker) walkDescriptorProto(prefix string, path protoreflect.Sourc var p protoreflect.SourcePath if w.usePath { p = path - p = append(p, internal.MessageOneofsTag, int32(i)) + p = append(p, internal.MessageOneOfsTag, int32(i)) } fqn := prefix + oo.GetName() if err := w.enter(protoreflect.FullName(fqn), p, oo); err != nil { diff --git a/vendor/github.com/go-ping/ping/.editorconfig b/vendor/github.com/go-ping/ping/.editorconfig deleted file mode 100644 index 57abfdc2..00000000 --- a/vendor/github.com/go-ping/ping/.editorconfig +++ /dev/null @@ -1,16 +0,0 @@ -# https://editorconfig.org - -root = true - -[*] -end_of_line = lf -insert_final_newline = true -trim_trailing_whitespace = true -charset = utf-8 -indent_style = space - -[Makefile] -indent_style = tab - -[*.go] -indent_style = tab diff --git a/vendor/github.com/go-ping/ping/.gitignore b/vendor/github.com/go-ping/ping/.gitignore deleted file mode 100644 index f527a0e0..00000000 --- a/vendor/github.com/go-ping/ping/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -/ping -/dist diff --git a/vendor/github.com/go-ping/ping/.golangci.yml b/vendor/github.com/go-ping/ping/.golangci.yml deleted file mode 100644 index eb311f81..00000000 --- a/vendor/github.com/go-ping/ping/.golangci.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -issues: - exclude-rules: - - path: _test.go - linters: - - errcheck diff --git a/vendor/github.com/go-ping/ping/.goreleaser.yml b/vendor/github.com/go-ping/ping/.goreleaser.yml deleted file mode 100644 index 3c5cc0ac..00000000 --- a/vendor/github.com/go-ping/ping/.goreleaser.yml +++ /dev/null @@ -1,46 +0,0 @@ -project_name: ping -before: - hooks: - - go mod download -builds: -- binary: ping - dir: cmd/ping - goarch: - - amd64 - - arm - - arm64 - goarm: - - 6 - - 7 - goos: - - darwin - - freebsd - - linux - - windows -archives: -- files: - - LICENSE - - README.md - format_overrides: - - goos: windows - format: zip - wrap_in_directory: true -# TODO: Decide if we want packages (name conflcits with /bin/ping?) 
-# nfpms: -# homepage: https://github.com/go-ping/ping -# maintainer: 'Go Ping Maintainers ' -# description: Ping written in Go. -# license: MIT -# formats: -# - deb -# - rpm -checksum: - name_template: 'checksums.txt' -snapshot: - name_template: "{{ .Tag }}-{{ .ShortCommit }}" -changelog: - sort: asc - filters: - exclude: - - '^docs:' - - '^test:' diff --git a/vendor/github.com/go-ping/ping/CONTRIBUTING.md b/vendor/github.com/go-ping/ping/CONTRIBUTING.md deleted file mode 100644 index 001998de..00000000 --- a/vendor/github.com/go-ping/ping/CONTRIBUTING.md +++ /dev/null @@ -1,44 +0,0 @@ -# Contributing - -First off, thanks for taking the time to contribute! - -Remember that this is open source software so please consider the other people who will read your code. -Make it look nice for them, document your logic in comments and add or update the unit test cases. - -This library is used by various other projects, companies and individuals in live production environments so please discuss any breaking changes with us before making them. -Feel free to join us in the #go-ping channel of the [Gophers Slack](https://invite.slack.golangbridge.org/). - -## Pull Requests - -[Fork the repo on GitHub](https://github.com/go-ping/ping/fork) and clone it to your local machine. - -```bash -git clone https://github.com/YOUR_USERNAME/ping.git && cd ping -``` - -Here is a guide on [how to configure a remote repository](https://docs.github.com/en/free-pro-team@latest/github/collaborating-with-issues-and-pull-requests/configuring-a-remote-for-a-fork). - -Check out a new branch, make changes, run tests, commit & sign-off, then push branch to your fork. - -```bash -$ git checkout -b -# edit files -$ make style vet test -$ git add -$ git commit -s -$ git push -``` - -Open a [new pull request](https://github.com/go-ping/ping/compare) in the main `go-ping/ping` repository. -Please describe the purpose of your PR and remember link it to any related issues. - -*We may ask you to rebase your feature branch or squash the commits in order to keep the history clean.* - -## Development Guides - -- Run `make style vet test` before committing your changes. -- Document your logic in code comments. -- Add tests for bug fixes and new features. -- Use UNIX-style (LF) line endings. -- End every file with a single blank line. -- Use the UTF-8 character set. diff --git a/vendor/github.com/go-ping/ping/LICENSE b/vendor/github.com/go-ping/ping/LICENSE deleted file mode 100644 index 5584bb00..00000000 --- a/vendor/github.com/go-ping/ping/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2016 Cameron Sparr and contributors. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/go-ping/ping/Makefile b/vendor/github.com/go-ping/ping/Makefile deleted file mode 100644 index d4b4bce7..00000000 --- a/vendor/github.com/go-ping/ping/Makefile +++ /dev/null @@ -1,32 +0,0 @@ -GO ?= go -GOFMT ?= $(GO)fmt -GOOPTS ?= -GO111MODULE := -pkgs = ./... - -all: style vet build test - -.PHONY: build -build: - @echo ">> building ping" - GO111MODULE=$(GO111MODULE) $(GO) build $(GOOPTS) ./cmd/ping - -.PHONY: style -style: - @echo ">> checking code style" - @fmtRes=$$($(GOFMT) -d $$(find . -path ./vendor -prune -o -name '*.go' -print)); \ - if [ -n "$${fmtRes}" ]; then \ - echo "gofmt checking failed!"; echo "$${fmtRes}"; echo; \ - echo "Please ensure you are using $$($(GO) version) for formatting code."; \ - exit 1; \ - fi - -.PHONY: test -test: - @echo ">> running all tests" - GO111MODULE=$(GO111MODULE) $(GO) test -race -cover $(GOOPTS) $(pkgs) - -.PHONY: vet -vet: - @echo ">> vetting code" - GO111MODULE=$(GO111MODULE) $(GO) vet $(GOOPTS) $(pkgs) diff --git a/vendor/github.com/go-ping/ping/README.md b/vendor/github.com/go-ping/ping/README.md deleted file mode 100644 index 8074587b..00000000 --- a/vendor/github.com/go-ping/ping/README.md +++ /dev/null @@ -1,141 +0,0 @@ -# go-ping -[![PkgGoDev](https://pkg.go.dev/badge/github.com/go-ping/ping)](https://pkg.go.dev/github.com/go-ping/ping) -[![Circle CI](https://circleci.com/gh/go-ping/ping.svg?style=svg)](https://circleci.com/gh/go-ping/ping) - -A simple but powerful ICMP echo (ping) library for Go, inspired by -[go-fastping](https://github.com/tatsushid/go-fastping). - -Here is a very simple example that sends and receives three packets: - -```go -pinger, err := ping.NewPinger("www.google.com") -if err != nil { - panic(err) -} -pinger.Count = 3 -err = pinger.Run() // Blocks until finished. -if err != nil { - panic(err) -} -stats := pinger.Statistics() // get send/receive/duplicate/rtt stats -``` - -Here is an example that emulates the traditional UNIX ping command: - -```go -pinger, err := ping.NewPinger("www.google.com") -if err != nil { - panic(err) -} - -// Listen for Ctrl-C. -c := make(chan os.Signal, 1) -signal.Notify(c, os.Interrupt) -go func() { - for _ = range c { - pinger.Stop() - } -}() - -pinger.OnRecv = func(pkt *ping.Packet) { - fmt.Printf("%d bytes from %s: icmp_seq=%d time=%v\n", - pkt.Nbytes, pkt.IPAddr, pkt.Seq, pkt.Rtt) -} - -pinger.OnDuplicateRecv = func(pkt *ping.Packet) { - fmt.Printf("%d bytes from %s: icmp_seq=%d time=%v ttl=%v (DUP!)\n", - pkt.Nbytes, pkt.IPAddr, pkt.Seq, pkt.Rtt, pkt.Ttl) -} - -pinger.OnFinish = func(stats *ping.Statistics) { - fmt.Printf("\n--- %s ping statistics ---\n", stats.Addr) - fmt.Printf("%d packets transmitted, %d packets received, %v%% packet loss\n", - stats.PacketsSent, stats.PacketsRecv, stats.PacketLoss) - fmt.Printf("round-trip min/avg/max/stddev = %v/%v/%v/%v\n", - stats.MinRtt, stats.AvgRtt, stats.MaxRtt, stats.StdDevRtt) -} - -fmt.Printf("PING %s (%s):\n", pinger.Addr(), pinger.IPAddr()) -err = pinger.Run() -if err != nil { - panic(err) -} -``` - -It sends ICMP Echo Request packet(s) and waits for an Echo Reply in -response. 
If it receives a response, it calls the `OnRecv` callback -unless a packet with that sequence number has already been received, -in which case it calls the `OnDuplicateRecv` callback. When it's -finished, it calls the `OnFinish` callback. - -For a full ping example, see -[cmd/ping/ping.go](https://github.com/go-ping/ping/blob/master/cmd/ping/ping.go). - -## Installation - -``` -go get -u github.com/go-ping/ping -``` - -To install the native Go ping executable: - -```bash -go get -u github.com/go-ping/ping/... -$GOPATH/bin/ping -``` - -## Supported Operating Systems - -### Linux -This library attempts to send an "unprivileged" ping via UDP. On Linux, -this must be enabled with the following sysctl command: - -``` -sudo sysctl -w net.ipv4.ping_group_range="0 2147483647" -``` - -If you do not wish to do this, you can call `pinger.SetPrivileged(true)` -in your code and then use setcap on your binary to allow it to bind to -raw sockets (or just run it as root): - -``` -setcap cap_net_raw=+ep /path/to/your/compiled/binary -``` - -See [this blog](https://sturmflut.github.io/linux/ubuntu/2015/01/17/unprivileged-icmp-sockets-on-linux/) -and the Go [x/net/icmp](https://godoc.org/golang.org/x/net/icmp) package -for more details. - -### Windows - -You must use `pinger.SetPrivileged(true)`, otherwise you will receive -the following error: - -``` -socket: The requested protocol has not been configured into the system, or no implementation for it exists. -``` - -Despite the method name, this should work without the need to elevate -privileges and has been tested on Windows 10. Please note that accessing -packet TTL values is not supported due to limitations in the Go -x/net/ipv4 and x/net/ipv6 packages. - -### Plan 9 from Bell Labs - -There is no support for Plan 9. This is because the entire `x/net/ipv4` -and `x/net/ipv6` packages are not implemented by the Go programming -language. - -## Maintainers and Getting Help: - -This repo was originally in the personal account of -[sparrc](https://github.com/sparrc), but is now maintained by the -[go-ping organization](https://github.com/go-ping). - -For support and help, you usually find us in the #go-ping channel of -Gophers Slack. See https://invite.slack.golangbridge.org/ for an invite -to the Gophers Slack org. - -## Contributing - -Refer to [CONTRIBUTING.md](https://github.com/go-ping/ping/blob/master/CONTRIBUTING.md) diff --git a/vendor/github.com/go-ping/ping/logger.go b/vendor/github.com/go-ping/ping/logger.go deleted file mode 100644 index be3d4fa2..00000000 --- a/vendor/github.com/go-ping/ping/logger.go +++ /dev/null @@ -1,53 +0,0 @@ -package ping - -import "log" - -type Logger interface { - Fatalf(format string, v ...interface{}) - Errorf(format string, v ...interface{}) - Warnf(format string, v ...interface{}) - Infof(format string, v ...interface{}) - Debugf(format string, v ...interface{}) -} - -type StdLogger struct { - Logger *log.Logger -} - -func (l StdLogger) Fatalf(format string, v ...interface{}) { - l.Logger.Printf("FATAL: "+format, v...) -} - -func (l StdLogger) Errorf(format string, v ...interface{}) { - l.Logger.Printf("ERROR: "+format, v...) -} - -func (l StdLogger) Warnf(format string, v ...interface{}) { - l.Logger.Printf("WARN: "+format, v...) -} - -func (l StdLogger) Infof(format string, v ...interface{}) { - l.Logger.Printf("INFO: "+format, v...) -} - -func (l StdLogger) Debugf(format string, v ...interface{}) { - l.Logger.Printf("DEBUG: "+format, v...) 
-} - -type NoopLogger struct { -} - -func (l NoopLogger) Fatalf(format string, v ...interface{}) { -} - -func (l NoopLogger) Errorf(format string, v ...interface{}) { -} - -func (l NoopLogger) Warnf(format string, v ...interface{}) { -} - -func (l NoopLogger) Infof(format string, v ...interface{}) { -} - -func (l NoopLogger) Debugf(format string, v ...interface{}) { -} diff --git a/vendor/github.com/go-ping/ping/packetconn.go b/vendor/github.com/go-ping/ping/packetconn.go deleted file mode 100644 index 6a972f5a..00000000 --- a/vendor/github.com/go-ping/ping/packetconn.go +++ /dev/null @@ -1,103 +0,0 @@ -package ping - -import ( - "net" - "runtime" - "time" - - "golang.org/x/net/icmp" - "golang.org/x/net/ipv4" - "golang.org/x/net/ipv6" -) - -type packetConn interface { - Close() error - ICMPRequestType() icmp.Type - ReadFrom(b []byte) (n int, ttl int, src net.Addr, err error) - SetFlagTTL() error - SetReadDeadline(t time.Time) error - WriteTo(b []byte, dst net.Addr) (int, error) - SetTTL(ttl int) -} - -type icmpConn struct { - c *icmp.PacketConn - ttl int -} - -func (c *icmpConn) Close() error { - return c.c.Close() -} - -func (c *icmpConn) SetTTL(ttl int) { - c.ttl = ttl -} - -func (c *icmpConn) SetReadDeadline(t time.Time) error { - return c.c.SetReadDeadline(t) -} - -func (c *icmpConn) WriteTo(b []byte, dst net.Addr) (int, error) { - if c.c.IPv6PacketConn() != nil { - if err := c.c.IPv6PacketConn().SetHopLimit(c.ttl); err != nil { - return 0, err - } - } - if c.c.IPv4PacketConn() != nil { - if err := c.c.IPv4PacketConn().SetTTL(c.ttl); err != nil { - return 0, err - } - } - - return c.c.WriteTo(b, dst) -} - -type icmpv4Conn struct { - icmpConn -} - -func (c *icmpv4Conn) SetFlagTTL() error { - err := c.c.IPv4PacketConn().SetControlMessage(ipv4.FlagTTL, true) - if runtime.GOOS == "windows" { - return nil - } - return err -} - -func (c *icmpv4Conn) ReadFrom(b []byte) (int, int, net.Addr, error) { - ttl := -1 - n, cm, src, err := c.c.IPv4PacketConn().ReadFrom(b) - if cm != nil { - ttl = cm.TTL - } - return n, ttl, src, err -} - -func (c icmpv4Conn) ICMPRequestType() icmp.Type { - return ipv4.ICMPTypeEcho -} - -type icmpV6Conn struct { - icmpConn -} - -func (c *icmpV6Conn) SetFlagTTL() error { - err := c.c.IPv6PacketConn().SetControlMessage(ipv6.FlagHopLimit, true) - if runtime.GOOS == "windows" { - return nil - } - return err -} - -func (c *icmpV6Conn) ReadFrom(b []byte) (int, int, net.Addr, error) { - ttl := -1 - n, cm, src, err := c.c.IPv6PacketConn().ReadFrom(b) - if cm != nil { - ttl = cm.HopLimit - } - return n, ttl, src, err -} - -func (c icmpV6Conn) ICMPRequestType() icmp.Type { - return ipv6.ICMPTypeEchoRequest -} diff --git a/vendor/github.com/go-ping/ping/ping.go b/vendor/github.com/go-ping/ping/ping.go deleted file mode 100644 index ef277ab9..00000000 --- a/vendor/github.com/go-ping/ping/ping.go +++ /dev/null @@ -1,820 +0,0 @@ -// Package ping is a simple but powerful ICMP echo (ping) library. -// -// Here is a very simple example that sends and receives three packets: -// -// pinger, err := ping.NewPinger("www.google.com") -// if err != nil { -// panic(err) -// } -// pinger.Count = 3 -// err = pinger.Run() // blocks until finished -// if err != nil { -// panic(err) -// } -// stats := pinger.Statistics() // get send/receive/rtt stats -// -// Here is an example that emulates the traditional UNIX ping command: -// -// pinger, err := ping.NewPinger("www.google.com") -// if err != nil { -// panic(err) -// } -// // Listen for Ctrl-C. 
-// c := make(chan os.Signal, 1) -// signal.Notify(c, os.Interrupt) -// go func() { -// for _ = range c { -// pinger.Stop() -// } -// }() -// pinger.OnRecv = func(pkt *ping.Packet) { -// fmt.Printf("%d bytes from %s: icmp_seq=%d time=%v\n", -// pkt.Nbytes, pkt.IPAddr, pkt.Seq, pkt.Rtt) -// } -// pinger.OnFinish = func(stats *ping.Statistics) { -// fmt.Printf("\n--- %s ping statistics ---\n", stats.Addr) -// fmt.Printf("%d packets transmitted, %d packets received, %v%% packet loss\n", -// stats.PacketsSent, stats.PacketsRecv, stats.PacketLoss) -// fmt.Printf("round-trip min/avg/max/stddev = %v/%v/%v/%v\n", -// stats.MinRtt, stats.AvgRtt, stats.MaxRtt, stats.StdDevRtt) -// } -// fmt.Printf("PING %s (%s):\n", pinger.Addr(), pinger.IPAddr()) -// err = pinger.Run() -// if err != nil { -// panic(err) -// } -// -// It sends ICMP Echo Request packet(s) and waits for an Echo Reply in response. -// If it receives a response, it calls the OnRecv callback. When it's finished, -// it calls the OnFinish callback. -// -// For a full ping example, see "cmd/ping/ping.go". -// -package ping - -import ( - "bytes" - "errors" - "fmt" - "log" - "math" - "math/rand" - "net" - "sync" - "sync/atomic" - "syscall" - "time" - - "github.com/google/uuid" - "golang.org/x/net/icmp" - "golang.org/x/net/ipv4" - "golang.org/x/net/ipv6" - "golang.org/x/sync/errgroup" -) - -const ( - timeSliceLength = 8 - trackerLength = len(uuid.UUID{}) - protocolICMP = 1 - protocolIPv6ICMP = 58 -) - -var ( - ipv4Proto = map[string]string{"icmp": "ip4:icmp", "udp": "udp4"} - ipv6Proto = map[string]string{"icmp": "ip6:ipv6-icmp", "udp": "udp6"} -) - -// New returns a new Pinger struct pointer. -func New(addr string) *Pinger { - r := rand.New(rand.NewSource(getSeed())) - firstUUID := uuid.New() - var firstSequence = map[uuid.UUID]map[int]struct{}{} - firstSequence[firstUUID] = make(map[int]struct{}) - return &Pinger{ - Count: -1, - Interval: time.Second, - RecordRtts: true, - Size: timeSliceLength + trackerLength, - Timeout: time.Duration(math.MaxInt64), - - addr: addr, - done: make(chan interface{}), - id: r.Intn(math.MaxUint16), - trackerUUIDs: []uuid.UUID{firstUUID}, - ipaddr: nil, - ipv4: false, - network: "ip", - protocol: "udp", - awaitingSequences: firstSequence, - TTL: 64, - logger: StdLogger{Logger: log.New(log.Writer(), log.Prefix(), log.Flags())}, - } -} - -// NewPinger returns a new Pinger and resolves the address. -func NewPinger(addr string) (*Pinger, error) { - p := New(addr) - return p, p.Resolve() -} - -// Pinger represents a packet sender/receiver. -type Pinger struct { - // Interval is the wait time between each packet send. Default is 1s. - Interval time.Duration - - // Timeout specifies a timeout before ping exits, regardless of how many - // packets have been received. - Timeout time.Duration - - // Count tells pinger to stop after sending (and receiving) Count echo - // packets. If this option is not specified, pinger will operate until - // interrupted. - Count int - - // Debug runs in debug mode - Debug bool - - // Number of packets sent - PacketsSent int - - // Number of packets received - PacketsRecv int - - // Number of duplicate packets received - PacketsRecvDuplicates int - - // Round trip time statistics - minRtt time.Duration - maxRtt time.Duration - avgRtt time.Duration - stdDevRtt time.Duration - stddevm2 time.Duration - statsMu sync.RWMutex - - // If true, keep a record of rtts of all received packets. - // Set to false to avoid memory bloat for long running pings. 
- RecordRtts bool - - // rtts is all of the Rtts - rtts []time.Duration - - // OnSetup is called when Pinger has finished setting up the listening socket - OnSetup func() - - // OnSend is called when Pinger sends a packet - OnSend func(*Packet) - - // OnRecv is called when Pinger receives and processes a packet - OnRecv func(*Packet) - - // OnFinish is called when Pinger exits - OnFinish func(*Statistics) - - // OnDuplicateRecv is called when a packet is received that has already been received. - OnDuplicateRecv func(*Packet) - - // Size of packet being sent - Size int - - // Tracker: Used to uniquely identify packets - Deprecated - Tracker uint64 - - // Source is the source IP address - Source string - - // Channel and mutex used to communicate when the Pinger should stop between goroutines. - done chan interface{} - lock sync.Mutex - - ipaddr *net.IPAddr - addr string - - // trackerUUIDs is the list of UUIDs being used for sending packets. - trackerUUIDs []uuid.UUID - - ipv4 bool - id int - sequence int - // awaitingSequences are in-flight sequence numbers we keep track of to help remove duplicate receipts - awaitingSequences map[uuid.UUID]map[int]struct{} - // network is one of "ip", "ip4", or "ip6". - network string - // protocol is "icmp" or "udp". - protocol string - - logger Logger - - TTL int -} - -type packet struct { - bytes []byte - nbytes int - ttl int -} - -// Packet represents a received and processed ICMP echo packet. -type Packet struct { - // Rtt is the round-trip time it took to ping. - Rtt time.Duration - - // IPAddr is the address of the host being pinged. - IPAddr *net.IPAddr - - // Addr is the string address of the host being pinged. - Addr string - - // NBytes is the number of bytes in the message. - Nbytes int - - // Seq is the ICMP sequence number. - Seq int - - // TTL is the Time To Live on the packet. - Ttl int - - // ID is the ICMP identifier. - ID int -} - -// Statistics represent the stats of a currently running or finished -// pinger operation. -type Statistics struct { - // PacketsRecv is the number of packets received. - PacketsRecv int - - // PacketsSent is the number of packets sent. - PacketsSent int - - // PacketsRecvDuplicates is the number of duplicate responses there were to a sent packet. - PacketsRecvDuplicates int - - // PacketLoss is the percentage of packets lost. - PacketLoss float64 - - // IPAddr is the address of the host being pinged. - IPAddr *net.IPAddr - - // Addr is the string address of the host being pinged. - Addr string - - // Rtts is all of the round-trip times sent via this pinger. - Rtts []time.Duration - - // MinRtt is the minimum round-trip time sent via this pinger. - MinRtt time.Duration - - // MaxRtt is the maximum round-trip time sent via this pinger. - MaxRtt time.Duration - - // AvgRtt is the average round-trip time sent via this pinger. - AvgRtt time.Duration - - // StdDevRtt is the standard deviation of the round-trip times sent via - // this pinger. 
- StdDevRtt time.Duration -} - -func (p *Pinger) updateStatistics(pkt *Packet) { - p.statsMu.Lock() - defer p.statsMu.Unlock() - - p.PacketsRecv++ - if p.RecordRtts { - p.rtts = append(p.rtts, pkt.Rtt) - } - - if p.PacketsRecv == 1 || pkt.Rtt < p.minRtt { - p.minRtt = pkt.Rtt - } - - if pkt.Rtt > p.maxRtt { - p.maxRtt = pkt.Rtt - } - - pktCount := time.Duration(p.PacketsRecv) - // welford's online method for stddev - // https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm - delta := pkt.Rtt - p.avgRtt - p.avgRtt += delta / pktCount - delta2 := pkt.Rtt - p.avgRtt - p.stddevm2 += delta * delta2 - - p.stdDevRtt = time.Duration(math.Sqrt(float64(p.stddevm2 / pktCount))) -} - -// SetIPAddr sets the ip address of the target host. -func (p *Pinger) SetIPAddr(ipaddr *net.IPAddr) { - p.ipv4 = isIPv4(ipaddr.IP) - - p.ipaddr = ipaddr - p.addr = ipaddr.String() -} - -// IPAddr returns the ip address of the target host. -func (p *Pinger) IPAddr() *net.IPAddr { - return p.ipaddr -} - -// Resolve does the DNS lookup for the Pinger address and sets IP protocol. -func (p *Pinger) Resolve() error { - if len(p.addr) == 0 { - return errors.New("addr cannot be empty") - } - ipaddr, err := net.ResolveIPAddr(p.network, p.addr) - if err != nil { - return err - } - - p.ipv4 = isIPv4(ipaddr.IP) - - p.ipaddr = ipaddr - - return nil -} - -// SetAddr resolves and sets the ip address of the target host, addr can be a -// DNS name like "www.google.com" or IP like "127.0.0.1". -func (p *Pinger) SetAddr(addr string) error { - oldAddr := p.addr - p.addr = addr - err := p.Resolve() - if err != nil { - p.addr = oldAddr - return err - } - return nil -} - -// Addr returns the string ip address of the target host. -func (p *Pinger) Addr() string { - return p.addr -} - -// SetNetwork allows configuration of DNS resolution. -// * "ip" will automatically select IPv4 or IPv6. -// * "ip4" will select IPv4. -// * "ip6" will select IPv6. -func (p *Pinger) SetNetwork(n string) { - switch n { - case "ip4": - p.network = "ip4" - case "ip6": - p.network = "ip6" - default: - p.network = "ip" - } -} - -// SetPrivileged sets the type of ping pinger will send. -// false means pinger will send an "unprivileged" UDP ping. -// true means pinger will send a "privileged" raw ICMP ping. -// NOTE: setting to true requires that it be run with super-user privileges. -func (p *Pinger) SetPrivileged(privileged bool) { - if privileged { - p.protocol = "icmp" - } else { - p.protocol = "udp" - } -} - -// Privileged returns whether pinger is running in privileged mode. -func (p *Pinger) Privileged() bool { - return p.protocol == "icmp" -} - -// SetLogger sets the logger to be used to log events from the pinger. -func (p *Pinger) SetLogger(logger Logger) { - p.logger = logger -} - -// SetID sets the ICMP identifier. -func (p *Pinger) SetID(id int) { - p.id = id -} - -// ID returns the ICMP identifier. -func (p *Pinger) ID() int { - return p.id -} - -// Run runs the pinger. This is a blocking function that will exit when it's -// done. If Count or Interval are not specified, it will run continuously until -// it is interrupted. 
-func (p *Pinger) Run() error { - var conn packetConn - var err error - if p.Size < timeSliceLength+trackerLength { - return fmt.Errorf("size %d is less than minimum required size %d", p.Size, timeSliceLength+trackerLength) - } - if p.ipaddr == nil { - err = p.Resolve() - } - if err != nil { - return err - } - if conn, err = p.listen(); err != nil { - return err - } - defer conn.Close() - - conn.SetTTL(p.TTL) - return p.run(conn) -} - -func (p *Pinger) run(conn packetConn) error { - if err := conn.SetFlagTTL(); err != nil { - return err - } - defer p.finish() - - recv := make(chan *packet, 5) - defer close(recv) - - if handler := p.OnSetup; handler != nil { - handler() - } - - var g errgroup.Group - - g.Go(func() error { - defer p.Stop() - return p.recvICMP(conn, recv) - }) - - g.Go(func() error { - defer p.Stop() - return p.runLoop(conn, recv) - }) - - return g.Wait() -} - -func (p *Pinger) runLoop( - conn packetConn, - recvCh <-chan *packet, -) error { - logger := p.logger - if logger == nil { - logger = NoopLogger{} - } - - timeout := time.NewTicker(p.Timeout) - interval := time.NewTicker(p.Interval) - defer func() { - interval.Stop() - timeout.Stop() - }() - - if err := p.sendICMP(conn); err != nil { - return err - } - - for { - select { - case <-p.done: - return nil - - case <-timeout.C: - return nil - - case r := <-recvCh: - err := p.processPacket(r) - if err != nil { - // FIXME: this logs as FATAL but continues - logger.Fatalf("processing received packet: %s", err) - } - - case <-interval.C: - if p.Count > 0 && p.PacketsSent >= p.Count { - interval.Stop() - continue - } - err := p.sendICMP(conn) - if err != nil { - // FIXME: this logs as FATAL but continues - logger.Fatalf("sending packet: %s", err) - } - } - if p.Count > 0 && p.PacketsRecv >= p.Count { - return nil - } - } -} - -func (p *Pinger) Stop() { - p.lock.Lock() - defer p.lock.Unlock() - - open := true - select { - case _, open = <-p.done: - default: - } - - if open { - close(p.done) - } -} - -func (p *Pinger) finish() { - handler := p.OnFinish - if handler != nil { - s := p.Statistics() - handler(s) - } -} - -// Statistics returns the statistics of the pinger. This can be run while the -// pinger is running or after it is finished. OnFinish calls this function to -// get it's finished statistics. -func (p *Pinger) Statistics() *Statistics { - p.statsMu.RLock() - defer p.statsMu.RUnlock() - sent := p.PacketsSent - loss := float64(sent-p.PacketsRecv) / float64(sent) * 100 - s := Statistics{ - PacketsSent: sent, - PacketsRecv: p.PacketsRecv, - PacketsRecvDuplicates: p.PacketsRecvDuplicates, - PacketLoss: loss, - Rtts: p.rtts, - Addr: p.addr, - IPAddr: p.ipaddr, - MaxRtt: p.maxRtt, - MinRtt: p.minRtt, - AvgRtt: p.avgRtt, - StdDevRtt: p.stdDevRtt, - } - return &s -} - -type expBackoff struct { - baseDelay time.Duration - maxExp int64 - c int64 -} - -func (b *expBackoff) Get() time.Duration { - if b.c < b.maxExp { - b.c++ - } - - return b.baseDelay * time.Duration(rand.Int63n(1< 0 { - t = append(t, bytes.Repeat([]byte{1}, remainSize)...) 
- } - - body := &icmp.Echo{ - ID: p.id, - Seq: p.sequence, - Data: t, - } - - msg := &icmp.Message{ - Type: conn.ICMPRequestType(), - Code: 0, - Body: body, - } - - msgBytes, err := msg.Marshal(nil) - if err != nil { - return err - } - - for { - if _, err := conn.WriteTo(msgBytes, dst); err != nil { - if neterr, ok := err.(*net.OpError); ok { - if neterr.Err == syscall.ENOBUFS { - continue - } - } - return err - } - handler := p.OnSend - if handler != nil { - outPkt := &Packet{ - Nbytes: len(msgBytes), - IPAddr: p.ipaddr, - Addr: p.addr, - Seq: p.sequence, - ID: p.id, - } - handler(outPkt) - } - // mark this sequence as in-flight - p.awaitingSequences[currentUUID][p.sequence] = struct{}{} - p.PacketsSent++ - p.sequence++ - if p.sequence > 65535 { - newUUID := uuid.New() - p.trackerUUIDs = append(p.trackerUUIDs, newUUID) - p.awaitingSequences[newUUID] = make(map[int]struct{}) - p.sequence = 0 - } - break - } - - return nil -} - -func (p *Pinger) listen() (packetConn, error) { - var ( - conn packetConn - err error - ) - - if p.ipv4 { - var c icmpv4Conn - c.c, err = icmp.ListenPacket(ipv4Proto[p.protocol], p.Source) - conn = &c - } else { - var c icmpV6Conn - c.c, err = icmp.ListenPacket(ipv6Proto[p.protocol], p.Source) - conn = &c - } - - if err != nil { - p.Stop() - return nil, err - } - return conn, nil -} - -func bytesToTime(b []byte) time.Time { - var nsec int64 - for i := uint8(0); i < 8; i++ { - nsec += int64(b[i]) << ((7 - i) * 8) - } - return time.Unix(nsec/1000000000, nsec%1000000000) -} - -func isIPv4(ip net.IP) bool { - return len(ip.To4()) == net.IPv4len -} - -func timeToBytes(t time.Time) []byte { - nsec := t.UnixNano() - b := make([]byte, 8) - for i := uint8(0); i < 8; i++ { - b[i] = byte((nsec >> ((7 - i) * 8)) & 0xff) - } - return b -} - -var seed int64 = time.Now().UnixNano() - -// getSeed returns a goroutine-safe unique seed -func getSeed() int64 { - return atomic.AddInt64(&seed, 1) -} diff --git a/vendor/github.com/go-ping/ping/utils_linux.go b/vendor/github.com/go-ping/ping/utils_linux.go deleted file mode 100644 index a3e4e8c8..00000000 --- a/vendor/github.com/go-ping/ping/utils_linux.go +++ /dev/null @@ -1,20 +0,0 @@ -//go:build linux -// +build linux - -package ping - -// Returns the length of an ICMP message. -func (p *Pinger) getMessageLength() int { - return p.Size + 8 -} - -// Attempts to match the ID of an ICMP packet. -func (p *Pinger) matchID(ID int) bool { - // On Linux we can only match ID if we are privileged. - if p.protocol == "icmp" { - if ID != p.id { - return false - } - } - return true -} diff --git a/vendor/github.com/go-ping/ping/utils_other.go b/vendor/github.com/go-ping/ping/utils_other.go deleted file mode 100644 index d229c41a..00000000 --- a/vendor/github.com/go-ping/ping/utils_other.go +++ /dev/null @@ -1,17 +0,0 @@ -//go:build !linux && !windows -// +build !linux,!windows - -package ping - -// Returns the length of an ICMP message. -func (p *Pinger) getMessageLength() int { - return p.Size + 8 -} - -// Attempts to match the ID of an ICMP packet. 
-func (p *Pinger) matchID(ID int) bool { - if ID != p.id { - return false - } - return true -} diff --git a/vendor/github.com/go-ping/ping/utils_windows.go b/vendor/github.com/go-ping/ping/utils_windows.go deleted file mode 100644 index 37ab8b52..00000000 --- a/vendor/github.com/go-ping/ping/utils_windows.go +++ /dev/null @@ -1,25 +0,0 @@ -//go:build windows -// +build windows - -package ping - -import ( - "golang.org/x/net/ipv4" - "golang.org/x/net/ipv6" -) - -// Returns the length of an ICMP message, plus the IP packet header. -func (p *Pinger) getMessageLength() int { - if p.ipv4 { - return p.Size + 8 + ipv4.HeaderLen - } - return p.Size + 8 + ipv6.HeaderLen -} - -// Attempts to match the ID of an ICMP packet. -func (p *Pinger) matchID(ID int) bool { - if ID != p.id { - return false - } - return true -} diff --git a/vendor/github.com/golang/protobuf/jsonpb/decode.go b/vendor/github.com/golang/protobuf/jsonpb/decode.go index c6f66f10..6c16c255 100644 --- a/vendor/github.com/golang/protobuf/jsonpb/decode.go +++ b/vendor/github.com/golang/protobuf/jsonpb/decode.go @@ -56,7 +56,6 @@ type Unmarshaler struct { // implement JSONPBMarshaler so that the custom format can be produced. // // The JSON unmarshaling must follow the JSON to proto specification: -// // https://developers.google.com/protocol-buffers/docs/proto3#json // // Deprecated: Custom types should implement protobuf reflection instead. diff --git a/vendor/github.com/golang/protobuf/jsonpb/encode.go b/vendor/github.com/golang/protobuf/jsonpb/encode.go index e9438a93..685c80a6 100644 --- a/vendor/github.com/golang/protobuf/jsonpb/encode.go +++ b/vendor/github.com/golang/protobuf/jsonpb/encode.go @@ -55,7 +55,6 @@ type Marshaler struct { // implement JSONPBUnmarshaler so that the custom format can be parsed. // // The JSON marshaling must follow the proto to JSON specification: -// // https://developers.google.com/protocol-buffers/docs/proto3#json // // Deprecated: Custom types should implement protobuf reflection instead. diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go index fdff3fdb..85f9f573 100644 --- a/vendor/github.com/golang/protobuf/ptypes/any.go +++ b/vendor/github.com/golang/protobuf/ptypes/any.go @@ -127,10 +127,9 @@ func Is(any *anypb.Any, m proto.Message) bool { // The allocated message is stored in the embedded proto.Message. // // Example: -// -// var x ptypes.DynamicAny -// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } -// fmt.Printf("unmarshaled message: %v", x.Message) +// var x ptypes.DynamicAny +// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } +// fmt.Printf("unmarshaled message: %v", x.Message) // // Deprecated: Use the any.UnmarshalNew method instead to unmarshal // the any message contents into a new instance of the underlying message. diff --git a/vendor/github.com/google/uuid/.travis.yml b/vendor/github.com/google/uuid/.travis.yml new file mode 100644 index 00000000..d8156a60 --- /dev/null +++ b/vendor/github.com/google/uuid/.travis.yml @@ -0,0 +1,9 @@ +language: go + +go: + - 1.4.3 + - 1.5.3 + - tip + +script: + - go test -v ./... 
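The updateStatistics method in the go-ping/ping code removed above cites Welford's online algorithm for the running standard deviation of round-trip times. As a reference point while reviewing that deletion, here is a minimal, self-contained Go sketch of the same update rule; the RunningStats type and its field names are illustrative only and are not part of this patch or of the go-ping API.

```go
package main

import (
	"fmt"
	"math"
	"time"
)

// RunningStats is a hypothetical helper illustrating Welford's online
// algorithm: it keeps a running mean and sum of squared deviations so the
// standard deviation can be updated one sample at a time, the same way the
// removed updateStatistics tracked avgRtt and stdDevRtt.
type RunningStats struct {
	n    int64
	mean float64
	m2   float64 // sum of squared deviations from the running mean
}

// Add folds one observation (here an RTT) into the running statistics.
func (s *RunningStats) Add(rtt time.Duration) {
	x := float64(rtt)
	s.n++
	delta := x - s.mean
	s.mean += delta / float64(s.n)
	delta2 := x - s.mean
	s.m2 += delta * delta2
}

// StdDev returns the population standard deviation of the samples seen so far.
func (s *RunningStats) StdDev() time.Duration {
	if s.n == 0 {
		return 0
	}
	return time.Duration(math.Sqrt(s.m2 / float64(s.n)))
}

func main() {
	var s RunningStats
	for _, rtt := range []time.Duration{
		12 * time.Millisecond, 15 * time.Millisecond, 11 * time.Millisecond,
	} {
		s.Add(rtt)
	}
	fmt.Println("stddev:", s.StdDev())
}
```

The single-pass update avoids storing every RTT, which is why the original code could offer RecordRtts=false for long-running pings without losing the stddev statistic.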
diff --git a/vendor/github.com/google/uuid/CHANGELOG.md b/vendor/github.com/google/uuid/CHANGELOG.md deleted file mode 100644 index 7ed347d3..00000000 --- a/vendor/github.com/google/uuid/CHANGELOG.md +++ /dev/null @@ -1,21 +0,0 @@ -# Changelog - -## [1.4.0](https://github.com/google/uuid/compare/v1.3.1...v1.4.0) (2023-10-26) - - -### Features - -* UUIDs slice type with Strings() convenience method ([#133](https://github.com/google/uuid/issues/133)) ([cd5fbbd](https://github.com/google/uuid/commit/cd5fbbdd02f3e3467ac18940e07e062be1f864b4)) - -### Fixes - -* Clarify that Parse's job is to parse but not necessarily validate strings. (Documents current behavior) - -## [1.3.1](https://github.com/google/uuid/compare/v1.3.0...v1.3.1) (2023-08-18) - - -### Bug Fixes - -* Use .EqualFold() to parse urn prefixed UUIDs ([#118](https://github.com/google/uuid/issues/118)) ([574e687](https://github.com/google/uuid/commit/574e6874943741fb99d41764c705173ada5293f0)) - -## Changelog diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md index a502fdc5..04fdf09f 100644 --- a/vendor/github.com/google/uuid/CONTRIBUTING.md +++ b/vendor/github.com/google/uuid/CONTRIBUTING.md @@ -2,22 +2,6 @@ We definitely welcome patches and contribution to this project! -### Tips - -Commits must be formatted according to the [Conventional Commits Specification](https://www.conventionalcommits.org). - -Always try to include a test case! If it is not possible or not necessary, -please explain why in the pull request description. - -### Releasing - -Commits that would precipitate a SemVer change, as described in the Conventional -Commits Specification, will trigger [`release-please`](https://github.com/google-github-actions/release-please-action) -to create a release candidate pull request. Once submitted, `release-please` -will create a release. - -For tips on how to work with `release-please`, see its documentation. - ### Legal requirements In order to protect both you and ourselves, you will need to sign the diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md index 3e9a6188..f765a46f 100644 --- a/vendor/github.com/google/uuid/README.md +++ b/vendor/github.com/google/uuid/README.md @@ -1,6 +1,6 @@ -# uuid +# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master) The uuid package generates and inspects UUIDs based on -[RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122) +[RFC 4122](http://tools.ietf.org/html/rfc4122) and DCE 1.1: Authentication and Security Services. This package is based on the github.com/pborman/uuid package (previously named @@ -9,12 +9,10 @@ a UUID is a 16 byte array rather than a byte slice. One loss due to this change is the ability to represent an invalid UUID (vs a NIL UUID). 
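Both the old and new wording of the Parse documentation in this vendor downgrade describe the same accepted input forms (standard, URN-prefixed, braced "Microsoft style", and raw hex); only the caveat about validation differs. A small hedged usage sketch of github.com/google/uuid follows, showing those forms side by side; the generated value and printed output are illustrative only.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/google/uuid"
)

func main() {
	// Illustrative inputs only; any syntactically valid UUID behaves the same way.
	id := uuid.New().String()
	inputs := []string{
		id,                              // standard xxxxxxxx-xxxx-... form
		"urn:uuid:" + id,                // URN-prefixed form
		"{" + id + "}",                  // "Microsoft style" braces
		strings.ReplaceAll(id, "-", ""), // raw hex encoding
	}
	for _, in := range inputs {
		u, err := uuid.Parse(in)
		if err != nil {
			fmt.Println("parse failed:", err)
			continue
		}
		fmt.Println(u)
	}
}
```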
###### Install -```sh -go get github.com/google/uuid -``` +`go get github.com/google/uuid` ###### Documentation -[![Go Reference](https://pkg.go.dev/badge/github.com/google/uuid.svg)](https://pkg.go.dev/github.com/google/uuid) +[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid) Full `go doc` style documentation for the package can be viewed online without installing this package by using the GoDoc site here: diff --git a/vendor/github.com/google/uuid/node_js.go b/vendor/github.com/google/uuid/node_js.go index b2a0bc87..24b78edc 100644 --- a/vendor/github.com/google/uuid/node_js.go +++ b/vendor/github.com/google/uuid/node_js.go @@ -7,6 +7,6 @@ package uuid // getHardwareInterface returns nil values for the JS version of the code. -// This removes the "net" dependency, because it is not used in the browser. +// This remvoves the "net" dependency, because it is not used in the browser. // Using the "net" library inflates the size of the transpiled JS code by 673k bytes. func getHardwareInterface(name string) (string, []byte) { return "", nil } diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go index dc75f7d9..a57207ae 100644 --- a/vendor/github.com/google/uuid/uuid.go +++ b/vendor/github.com/google/uuid/uuid.go @@ -56,15 +56,11 @@ func IsInvalidLengthError(err error) bool { return ok } -// Parse decodes s into a UUID or returns an error if it cannot be parsed. Both -// the standard UUID forms defined in RFC 4122 -// (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and -// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx) are decoded. In addition, -// Parse accepts non-standard strings such as the raw hex encoding -// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx and 38 byte "Microsoft style" encodings, -// e.g. {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}. Only the middle 36 bytes are -// examined in the latter case. Parse should not be used to validate strings as -// it parses non-standard encodings as indicated above. +// Parse decodes s into a UUID or returns an error. Both the standard UUID +// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the +// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex +// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx. 
func Parse(s string) (UUID, error) { var uuid UUID switch len(s) { @@ -73,7 +69,7 @@ func Parse(s string) (UUID, error) { // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx case 36 + 9: - if !strings.EqualFold(s[:9], "urn:uuid:") { + if strings.ToLower(s[:9]) != "urn:uuid:" { return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9]) } s = s[9:] @@ -105,8 +101,7 @@ func Parse(s string) (UUID, error) { 9, 11, 14, 16, 19, 21, - 24, 26, 28, 30, 32, 34, - } { + 24, 26, 28, 30, 32, 34} { v, ok := xtob(s[x], s[x+1]) if !ok { return uuid, errors.New("invalid UUID format") @@ -122,7 +117,7 @@ func ParseBytes(b []byte) (UUID, error) { switch len(b) { case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - if !bytes.EqualFold(b[:9], []byte("urn:uuid:")) { + if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) { return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9]) } b = b[9:] @@ -150,8 +145,7 @@ func ParseBytes(b []byte) (UUID, error) { 9, 11, 14, 16, 19, 21, - 24, 26, 28, 30, 32, 34, - } { + 24, 26, 28, 30, 32, 34} { v, ok := xtob(b[x], b[x+1]) if !ok { return uuid, errors.New("invalid UUID format") @@ -298,15 +292,3 @@ func DisableRandPool() { poolMu.Lock() poolPos = randPoolSize } - -// UUIDs is a slice of UUID types. -type UUIDs []UUID - -// Strings returns a string slice containing the string form of each UUID in uuids. -func (uuids UUIDs) Strings() []string { - var uuidStrs = make([]string, len(uuids)) - for i, uuid := range uuids { - uuidStrs[i] = uuid.String() - } - return uuidStrs -} diff --git a/vendor/github.com/jhump/protoreflect/desc/cache.go b/vendor/github.com/jhump/protoreflect/desc/cache.go index 418632b7..e67cf494 100644 --- a/vendor/github.com/jhump/protoreflect/desc/cache.go +++ b/vendor/github.com/jhump/protoreflect/desc/cache.go @@ -46,3 +46,12 @@ func (c mapCache) get(d protoreflect.Descriptor) Descriptor { func (c mapCache) put(key protoreflect.Descriptor, val Descriptor) { c[key] = val } + +type noopCache struct{} + +func (noopCache) get(protoreflect.Descriptor) Descriptor { + return nil +} + +func (noopCache) put(protoreflect.Descriptor, Descriptor) { +} diff --git a/vendor/github.com/jhump/protoreflect/desc/convert.go b/vendor/github.com/jhump/protoreflect/desc/convert.go index 01a6e9ea..9aa72a32 100644 --- a/vendor/github.com/jhump/protoreflect/desc/convert.go +++ b/vendor/github.com/jhump/protoreflect/desc/convert.go @@ -95,7 +95,9 @@ func convertFile(d protoreflect.FileDescriptor, fd *descriptorpb.FileDescriptorP ret.deps = make([]*FileDescriptor, len(fd.GetDependency())) for i := 0; i < d.Imports().Len(); i++ { f := d.Imports().Get(i).FileDescriptor - if c, err := wrapFile(f, cache); err != nil { + if c := cache.get(f); c != nil { + ret.deps[i] = c.(*FileDescriptor) + } else if c, err := wrapFile(f, cache); err != nil { return nil, err } else { ret.deps[i] = c diff --git a/vendor/github.com/jhump/protoreflect/desc/descriptor.go b/vendor/github.com/jhump/protoreflect/desc/descriptor.go index 38b8f51b..6903a3ab 100644 --- a/vendor/github.com/jhump/protoreflect/desc/descriptor.go +++ b/vendor/github.com/jhump/protoreflect/desc/descriptor.go @@ -155,22 +155,10 @@ func (fd *FileDescriptor) String() string { } // IsProto3 returns true if the file declares a syntax of "proto3". -// -// When this returns false, the file is either syntax "proto2" (if -// Edition() returns zero) or the file uses editions. 
func (fd *FileDescriptor) IsProto3() bool { return fd.wrapped.Syntax() == protoreflect.Proto3 } -// Edition returns the edition of the file. If the file does not -// use editions syntax, zero is returned. -func (fd *FileDescriptor) Edition() descriptorpb.Edition { - if fd.wrapped.Syntax() == protoreflect.Editions { - return fd.proto.GetEdition() - } - return 0 -} - // GetDependencies returns all of this file's dependencies. These correspond to // import statements in the file. func (fd *FileDescriptor) GetDependencies() []*FileDescriptor { diff --git a/vendor/github.com/jhump/protoreflect/desc/internal/registry.go b/vendor/github.com/jhump/protoreflect/desc/internal/registry.go index d7259e4a..9f160a3e 100644 --- a/vendor/github.com/jhump/protoreflect/desc/internal/registry.go +++ b/vendor/github.com/jhump/protoreflect/desc/internal/registry.go @@ -6,30 +6,11 @@ import ( "google.golang.org/protobuf/types/dynamicpb" ) -// RegisterExtensionsFromImportedFile registers extensions in the given file as well -// as those in its public imports. So if another file imports the given fd, this adds -// all extensions made visible to that importing file. -// -// All extensions in the given file are made visible to the importing file, and so are -// extensions in any public imports in the given file. -func RegisterExtensionsFromImportedFile(reg *protoregistry.Types, fd protoreflect.FileDescriptor) { - registerTypesForFile(reg, fd, true, true) -} - -// RegisterExtensionsVisibleToFile registers all extensions visible to the given file. -// This includes all extensions defined in fd and as well as extensions defined in the -// files that it imports (and any public imports thereof, etc). -// -// This is effectively the same as registering the extensions in fd and then calling -// RegisterExtensionsFromImportedFile for each file imported by fd. -func RegisterExtensionsVisibleToFile(reg *protoregistry.Types, fd protoreflect.FileDescriptor) { +func RegisterExtensionsForFile(reg *protoregistry.Types, fd protoreflect.FileDescriptor) { registerTypesForFile(reg, fd, true, false) } -// RegisterTypesVisibleToFile registers all types visible to the given file. -// This is the same as RegisterExtensionsVisibleToFile but it also registers -// message and enum types, not just extensions. -func RegisterTypesVisibleToFile(reg *protoregistry.Types, fd protoreflect.FileDescriptor) { +func RegisterTypesForFile(reg *protoregistry.Types, fd protoreflect.FileDescriptor) { registerTypesForFile(reg, fd, false, false) } diff --git a/vendor/github.com/jhump/protoreflect/desc/internal/util.go b/vendor/github.com/jhump/protoreflect/desc/internal/util.go index 595c8720..fcadbd1f 100644 --- a/vendor/github.com/jhump/protoreflect/desc/internal/util.go +++ b/vendor/github.com/jhump/protoreflect/desc/internal/util.go @@ -54,9 +54,6 @@ const ( // File_syntaxTag is the tag number of the syntax element in a file // descriptor proto. File_syntaxTag = 12 - // File_editionTag is the tag number of the edition element in a file - // descriptor proto. - File_editionTag = 14 // Message_nameTag is the tag number of the name element in a message // descriptor proto. 
Message_nameTag = 1 diff --git a/vendor/github.com/jhump/protoreflect/desc/load.go b/vendor/github.com/jhump/protoreflect/desc/load.go index 8776ab0b..193bbe88 100644 --- a/vendor/github.com/jhump/protoreflect/desc/load.go +++ b/vendor/github.com/jhump/protoreflect/desc/load.go @@ -53,6 +53,13 @@ func LoadFileDescriptor(file string) (*FileDescriptor, error) { var fd *FileDescriptor loadedDescriptors.withLock(func(cache descriptorCache) { + // double-check cache, in case it was concurrently added while + // we were waiting for the lock + f := cache.get(d) + if f != nil { + fd = f.(*FileDescriptor) + return + } fd, err = wrapFile(d, cache) }) return fd, err diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast.go index 2b6b1244..0bc09387 100644 --- a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast.go +++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast.go @@ -148,7 +148,7 @@ func convertASTMessageElement(f *ast.FileNode, el ast.MessageElement) ast2.Messa return convertASTField(f, el) case *ast.MapFieldNode: return convertASTMapField(f, el) - case *ast.OneofNode: + case *ast.OneOfNode: return convertASTOneOf(f, el) case *ast.GroupNode: return convertASTGroup(f, el) @@ -241,7 +241,7 @@ func convertASTGroup(f *ast.FileNode, g *ast.GroupNode) *ast2.GroupNode { ) } -func convertASTOneOf(f *ast.FileNode, oo *ast.OneofNode) *ast2.OneOfNode { +func convertASTOneOf(f *ast.FileNode, oo *ast.OneOfNode) *ast2.OneOfNode { decls := make([]ast2.OneOfElement, len(oo.Decls)) for i := range oo.Decls { decls[i] = convertASTOneOfElement(f, oo.Decls[i]) @@ -255,7 +255,7 @@ func convertASTOneOf(f *ast.FileNode, oo *ast.OneofNode) *ast2.OneOfNode { ) } -func convertASTOneOfElement(f *ast.FileNode, el ast.OneofElement) ast2.OneOfElement { +func convertASTOneOfElement(f *ast.FileNode, el ast.OneOfElement) ast2.OneOfElement { switch el := el.(type) { case *ast.OptionNode: return convertASTOption(f, el) @@ -515,6 +515,8 @@ func convertASTValue(f *ast.FileNode, v ast.ValueNode) ast2.ValueNode { return convertASTCompoundStringLiteral(f, v) case *ast.UintLiteralNode: return convertASTUintLiteral(f, v) + case *ast.PositiveUintLiteralNode: + return convertASTPositiveUintLiteral(f, v) case *ast.NegativeIntLiteralNode: return convertASTNegativeIntLiteral(f, v) case *ast.FloatLiteralNode: @@ -591,6 +593,8 @@ func convertASTInt(f *ast.FileNode, n ast.IntValueNode) ast2.IntValueNode { switch n := n.(type) { case *ast.UintLiteralNode: return convertASTUintLiteral(f, n) + case *ast.PositiveUintLiteralNode: + return convertASTPositiveUintLiteral(f, n) case *ast.NegativeIntLiteralNode: return convertASTNegativeIntLiteral(f, n) default: @@ -602,6 +606,10 @@ func convertASTUintLiteral(f *ast.FileNode, n *ast.UintLiteralNode) *ast2.UintLi return ast2.NewUintLiteralNode(n.Val, convertASTTokenInfo(f, n.Token())) } +func convertASTPositiveUintLiteral(f *ast.FileNode, n *ast.PositiveUintLiteralNode) *ast2.PositiveUintLiteralNode { + return ast2.NewPositiveUintLiteralNode(convertASTRune(f, n.Plus), convertASTUintLiteral(f, n.Uint)) +} + func convertASTNegativeIntLiteral(f *ast.FileNode, n *ast.NegativeIntLiteralNode) *ast2.NegativeIntLiteralNode { return ast2.NewNegativeIntLiteralNode(convertASTRune(f, n.Minus), convertASTUintLiteral(f, n.Uint)) } diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/file.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/file.go index 332cb0c3..2cef3c6c 100644 --- 
a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/file.go +++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/file.go @@ -19,8 +19,6 @@ type FileNode struct { Syntax *SyntaxNode // nil if file has no syntax declaration Decls []FileElement - // TODO: add Edition *EditionNode - // Any comments that follow the last token in the file. FinalComments []Comment // Any whitespace at the end of the file (after the last token or diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/values.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/values.go index 91f5a354..c75f4481 100644 --- a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/values.go +++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/values.go @@ -175,10 +175,6 @@ func (n *UintLiteralNode) AsFloat() float64 { } // PositiveUintLiteralNode represents an integer literal with a positive (+) sign. -// -// Deprecated: A valid AST will not contain a node of this type. The Protobuf -// language does not actually allow a numeric literal to have a leading "+" -// positive sign. type PositiveUintLiteralNode struct { compositeNode Plus *RuneNode @@ -188,8 +184,6 @@ type PositiveUintLiteralNode struct { // NewPositiveUintLiteralNode creates a new *PositiveUintLiteralNode. Both // arguments must be non-nil. -// -// Deprecated: The ast.PositiveUintLiteralNode node type should not be used. func NewPositiveUintLiteralNode(sign *RuneNode, i *UintLiteralNode) *PositiveUintLiteralNode { if sign == nil { panic("sign is nil") diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/walk.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/walk.go index e9b85064..53301946 100644 --- a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/walk.go +++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/walk.go @@ -59,9 +59,6 @@ type Visitor struct { VisitFileNode func(*FileNode) (bool, *Visitor) // VisitSyntaxNode is invoked when visiting a *SyntaxNode in the AST. VisitSyntaxNode func(*SyntaxNode) (bool, *Visitor) - - // TODO: add VisitEditionNode - // VisitPackageNode is invoked when visiting a *PackageNode in the AST. VisitPackageNode func(*PackageNode) (bool, *Visitor) // VisitImportNode is invoked when visiting an *ImportNode in the AST. @@ -115,8 +112,6 @@ type Visitor struct { // VisitUintLiteralNode is invoked when visiting a *UintLiteralNode in the AST. VisitUintLiteralNode func(*UintLiteralNode) (bool, *Visitor) // VisitPositiveUintLiteralNode is invoked when visiting a *PositiveUintLiteralNode in the AST. - // - // Deprecated: this node type will not actually be present in an AST. VisitPositiveUintLiteralNode func(*PositiveUintLiteralNode) (bool, *Visitor) // VisitNegativeIntLiteralNode is invoked when visiting a *NegativeIntLiteralNode in the AST. VisitNegativeIntLiteralNode func(*NegativeIntLiteralNode) (bool, *Visitor) diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/errors.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/errors.go index 0ec70bd7..c71d651d 100644 --- a/vendor/github.com/jhump/protoreflect/desc/protoparse/errors.go +++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/errors.go @@ -62,12 +62,8 @@ type ErrorWithPos = reporter.ErrorWithPos // // SourcePos should always be set and never nil. type ErrorWithSourcePos struct { - // These fields are present and exported for backwards-compatibility - // with v1.4 and earlier. 
Underlying error Pos *SourcePos - - reporter.ErrorWithPos } // Error implements the error interface @@ -96,9 +92,8 @@ var _ ErrorWithPos = ErrorWithSourcePos{} func toErrorWithSourcePos(err ErrorWithPos) ErrorWithPos { pos := err.GetPosition() return ErrorWithSourcePos{ - ErrorWithPos: err, - Underlying: err.Unwrap(), - Pos: &pos, + Underlying: err.Unwrap(), + Pos: &pos, } } diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/parser.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/parser.go index 1a6763df..a1312d11 100644 --- a/vendor/github.com/jhump/protoreflect/desc/protoparse/parser.go +++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/parser.go @@ -24,7 +24,6 @@ import ( "google.golang.org/protobuf/types/descriptorpb" "github.com/jhump/protoreflect/desc" - "github.com/jhump/protoreflect/desc/internal" "github.com/jhump/protoreflect/desc/protoparse/ast" ) @@ -40,12 +39,7 @@ func FileContentsFromMap(files map[string]string) FileAccessor { return func(filename string) (io.ReadCloser, error) { contents, ok := files[filename] if !ok { - // Try changing path separators since user-provided - // map may use different separators. - contents, ok = files[filepath.ToSlash(filename)] - if !ok { - return nil, os.ErrNotExist - } + return nil, os.ErrNotExist } return ioutil.NopCloser(strings.NewReader(contents)), nil } @@ -152,24 +146,17 @@ func (p Parser) ParseFiles(filenames ...string) ([]*desc.FileDescriptor, error) srcInfoMode = protocompile.SourceInfoExtraComments } rep := newReporter(p.ErrorReporter, p.WarningReporter) - res, srcSpanAddr := p.getResolver(filenames) + res, srcPosAddr := p.getResolver(filenames) if p.InferImportPaths { // we must first compile everything to protos - results, err := parseToProtosRecursive(res, filenames, reporter.NewHandler(rep), srcSpanAddr) + results, err := parseToProtosRecursive(res, filenames, reporter.NewHandler(rep), srcPosAddr) if err != nil { return nil, err } // then we can infer import paths - var rewritten map[string]string - results, rewritten = fixupFilenames(results) - if len(rewritten) > 0 { - for i := range filenames { - if replace, ok := rewritten[filenames[i]]; ok { - filenames[i] = replace - } - } - } + // TODO: if this re-writes one of the names in filenames, lookups below will break + results = fixupFilenames(results) resolverFromResults := protocompile.ResolverFunc(func(path string) (protocompile.SearchResult, error) { res, ok := results[path] if !ok { @@ -192,10 +179,11 @@ func (p Parser) ParseFiles(filenames ...string) ([]*desc.FileDescriptor, error) } fds := make([]protoreflect.FileDescriptor, len(results)) - alreadySeen := make(map[string]struct{}, len(results)) for i, res := range results { - removeDynamicExtensions(res, alreadySeen) - fds[i] = res + if linkRes, ok := res.(linker.Result); ok { + removeDynamicExtensions(linkRes.FileDescriptorProto()) + } + fds[i] = results[i] } return desc.WrapFiles(fds) } @@ -246,7 +234,6 @@ func (r noCloneParseResult) Clone() parser.Result { // ErrorReporter always returns nil, the parse fails with ErrInvalidSource. func (p Parser) ParseFilesButDoNotLink(filenames ...string) ([]*descriptorpb.FileDescriptorProto, error) { rep := newReporter(p.ErrorReporter, p.WarningReporter) - p.ImportPaths = nil // not used for this "do not link" operation. 
res, _ := p.getResolver(filenames) results, err := parseToProtos(res, filenames, reporter.NewHandler(rep), p.ValidateUnlinkedFiles) if err != nil { @@ -258,15 +245,7 @@ func (p Parser) ParseFilesButDoNotLink(filenames ...string) ([]*descriptorpb.Fil for _, res := range results { resultsMap[res.FileDescriptorProto().GetName()] = res } - var rewritten map[string]string - resultsMap, rewritten = fixupFilenames(resultsMap) - if len(rewritten) > 0 { - for i := range filenames { - if replace, ok := rewritten[filenames[i]]; ok { - filenames[i] = replace - } - } - } + resultsMap = fixupFilenames(resultsMap) for i := range filenames { results[i] = resultsMap[filenames[i]] } @@ -275,17 +254,17 @@ func (p Parser) ParseFilesButDoNotLink(filenames ...string) ([]*descriptorpb.Fil protos := make([]*descriptorpb.FileDescriptorProto, len(results)) for i, res := range results { protos[i] = res.FileDescriptorProto() - var optsIndex sourceinfo.OptionIndex + var optsIndex options.Index if p.InterpretOptionsInUnlinkedFiles { var err error optsIndex, err = options.InterpretUnlinkedOptions(res) if err != nil { return nil, err } - removeDynamicExtensionsFromProto(protos[i]) + removeDynamicExtensions(protos[i]) } if p.IncludeSourceCodeInfo { - protos[i].SourceCodeInfo = sourceinfo.GenerateSourceInfo(res.AST(), optsIndex, sourceinfo.WithExtraComments()) + protos[i].SourceCodeInfo = sourceinfo.GenerateSourceInfo(res.AST(), optsIndex) } } @@ -381,118 +360,51 @@ func parseToProtos(res protocompile.Resolver, filenames []string, rep *reporter. return results, nil } -func parseToProtosRecursive(res protocompile.Resolver, filenames []string, rep *reporter.Handler, srcSpanAddr *ast2.SourceSpan) (map[string]parser.Result, error) { +func parseToProtosRecursive(res protocompile.Resolver, filenames []string, rep *reporter.Handler, srcPosAddr *SourcePos) (map[string]parser.Result, error) { results := make(map[string]parser.Result, len(filenames)) for _, filename := range filenames { - if err := parseToProtoRecursive(res, filename, rep, srcSpanAddr, results); err != nil { - return results, err - } + parseToProtoRecursive(res, filename, rep, srcPosAddr, results) } return results, rep.Error() } -func parseToProtoRecursive(res protocompile.Resolver, filename string, rep *reporter.Handler, srcSpanAddr *ast2.SourceSpan, results map[string]parser.Result) error { +func parseToProtoRecursive(res protocompile.Resolver, filename string, rep *reporter.Handler, srcPosAddr *SourcePos, results map[string]parser.Result) { if _, ok := results[filename]; ok { // already processed this one - return nil + return } results[filename] = nil // placeholder entry - astRoot, parseResult, err := parseToAST(res, filename, rep) - if err != nil { - return err + astRoot, parseResult, _ := parseToAST(res, filename, rep) + if rep.ReporterError() != nil { + return } if parseResult == nil { - parseResult, err = parser.ResultFromAST(astRoot, true, rep) - if err != nil { - return err + parseResult, _ = parser.ResultFromAST(astRoot, true, rep) + if rep.ReporterError() != nil { + return } } results[filename] = parseResult - if astRoot != nil { - // We have an AST, so we use it to recursively examine imports. 
- for _, decl := range astRoot.Decls { - imp, ok := decl.(*ast2.ImportNode) - if !ok { - continue - } - err := func() error { - orig := *srcSpanAddr - *srcSpanAddr = astRoot.NodeInfo(imp.Name) - defer func() { - *srcSpanAddr = orig - }() - - return parseToProtoRecursive(res, imp.Name.AsString(), rep, srcSpanAddr, results) - }() - if err != nil { - return err - } + for _, decl := range astRoot.Decls { + imp, ok := decl.(*ast2.ImportNode) + if !ok { + continue } - return nil - } - - // Without an AST, we must recursively examine the proto. This makes it harder - // (but not necessarily impossible) to get the source location of the import. - fd := parseResult.FileDescriptorProto() - for i, dep := range fd.Dependency { - path := []int32{internal.File_dependencyTag, int32(i)} - err := func() error { - orig := *srcSpanAddr - found := false - for _, loc := range fd.GetSourceCodeInfo().GetLocation() { - if pathsEqual(loc.Path, path) { - start := SourcePos{ - Filename: dep, - Line: int(loc.Span[0]), - Col: int(loc.Span[1]), - } - var end SourcePos - if len(loc.Span) > 3 { - end = SourcePos{ - Filename: dep, - Line: int(loc.Span[2]), - Col: int(loc.Span[3]), - } - } else { - end = SourcePos{ - Filename: dep, - Line: int(loc.Span[0]), - Col: int(loc.Span[2]), - } - } - *srcSpanAddr = ast2.NewSourceSpan(start, end) - found = true - break - } - } - if !found { - *srcSpanAddr = ast2.UnknownSpan(dep) - } + func() { + orig := *srcPosAddr + *srcPosAddr = astRoot.NodeInfo(imp.Name).Start() defer func() { - *srcSpanAddr = orig + *srcPosAddr = orig }() - return parseToProtoRecursive(res, dep, rep, srcSpanAddr, results) + parseToProtoRecursive(res, imp.Name.AsString(), rep, srcPosAddr, results) }() - if err != nil { - return err - } - } - return nil -} - -func pathsEqual(a, b []int32) bool { - if len(a) != len(b) { - return false - } - for i := range a { - if a[i] != b[i] { - return false + if rep.ReporterError() != nil { + return } } - return true } func newReporter(errRep ErrorReporter, warnRep WarningReporter) reporter.Reporter { @@ -517,8 +429,8 @@ func newReporter(errRep ErrorReporter, warnRep WarningReporter) reporter.Reporte return reporter.NewReporter(errRep, warnRep) } -func (p Parser) getResolver(filenames []string) (protocompile.Resolver, *ast2.SourceSpan) { - var srcSpan ast2.SourceSpan +func (p Parser) getResolver(filenames []string) (protocompile.Resolver, *SourcePos) { + var srcPos SourcePos accessor := p.Accessor if accessor == nil { accessor = func(name string) (io.ReadCloser, error) { @@ -533,8 +445,8 @@ func (p Parser) getResolver(filenames []string) (protocompile.Resolver, *ast2.So // errors that don't include the filename that failed are no bueno err = errorWithFilename{filename: filename, underlying: err} } - if srcSpan != nil { - err = reporter.Error(srcSpan, err) + if srcPos.Filename != "" { + err = reporter.Error(srcPos, err) } } return in, err @@ -561,20 +473,26 @@ func (p Parser) getResolver(filenames []string) (protocompile.Resolver, *ast2.So })) } backupResolver := protocompile.WithStandardImports(importResolver) + mustBeSource := make(map[string]struct{}, len(filenames)) + for _, name := range filenames { + mustBeSource[name] = struct{}{} + } return protocompile.CompositeResolver{ sourceResolver, protocompile.ResolverFunc(func(path string) (protocompile.SearchResult, error) { + if _, ok := mustBeSource[path]; ok { + return protocompile.SearchResult{}, os.ErrNotExist + } return backupResolver.FindFileByPath(path) }), - }, &srcSpan + }, &srcPos } -func fixupFilenames(protos 
map[string]parser.Result) (revisedProtos map[string]parser.Result, rewrittenPaths map[string]string) { +func fixupFilenames(protos map[string]parser.Result) map[string]parser.Result { // In the event that the given filenames (keys in the supplied map) do not // match the actual paths used in 'import' statements in the files, we try // to revise names in the protos so that they will match and be linkable. - revisedProtos = make(map[string]parser.Result, len(protos)) - rewrittenPaths = make(map[string]string, len(protos)) + revisedProtos := map[string]parser.Result{} protoPaths := map[string]struct{}{} // TODO: this is O(n^2) but could likely be O(n) with a clever data structure (prefix tree that is indexed backwards?) @@ -584,7 +502,7 @@ func fixupFilenames(protos map[string]parser.Result) (revisedProtos map[string]p candidatesAvailable[name] = struct{}{} for _, f := range protos { for _, imp := range f.FileDescriptorProto().Dependency { - if strings.HasSuffix(name, imp) || strings.HasSuffix(imp, name) { + if strings.HasSuffix(name, imp) { candidates := importCandidates[imp] if candidates == nil { candidates = map[string]struct{}{} @@ -612,62 +530,37 @@ func fixupFilenames(protos map[string]parser.Result) (revisedProtos map[string]p if best == "" { best = c } else { - // NB: We can't actually tell which file is supposed to match - // this import. So we prefer the longest name. On a tie, we - // choose the lexically earliest match. + // HACK: we can't actually tell which files is supposed to match + // this import, so arbitrarily pick the "shorter" one (fewest + // path elements) or, on a tie, the lexically earlier one minLen := strings.Count(best, string(filepath.Separator)) cLen := strings.Count(c, string(filepath.Separator)) - if cLen > minLen || (cLen == minLen && c < best) { + if cLen < minLen || (cLen == minLen && c < best) { best = c } } } if best != "" { - if len(best) > len(imp) { - prefix := best[:len(best)-len(imp)] + prefix := best[:len(best)-len(imp)] + if len(prefix) > 0 { protoPaths[prefix] = struct{}{} } f := protos[best] f.FileDescriptorProto().Name = proto.String(imp) revisedProtos[imp] = f - rewrittenPaths[best] = imp delete(candidatesAvailable, best) - - // If other candidates are actually references to the same file, remove them. - for c := range candidates { - if _, ok := candidatesAvailable[c]; !ok { - // already used this candidate and re-written its filename accordingly - continue - } - possibleDup := protos[c] - prevName := possibleDup.FileDescriptorProto().Name - possibleDup.FileDescriptorProto().Name = proto.String(imp) - if !proto.Equal(f.FileDescriptorProto(), protos[c].FileDescriptorProto()) { - // not equal: restore name and look at next one - possibleDup.FileDescriptorProto().Name = prevName - continue - } - // This file used a different name but was actually the same file. So - // we prune it from the set. 
- rewrittenPaths[c] = imp - delete(candidatesAvailable, c) - if len(c) > len(imp) { - prefix := c[:len(c)-len(imp)] - protoPaths[prefix] = struct{}{} - } - } } } if len(candidatesAvailable) == 0 { - return revisedProtos, rewrittenPaths + return revisedProtos } if len(protoPaths) == 0 { for c := range candidatesAvailable { revisedProtos[c] = protos[c] } - return revisedProtos, rewrittenPaths + return revisedProtos } // Any remaining candidates are entry-points (not imported by others), so @@ -696,32 +589,15 @@ func fixupFilenames(protos map[string]parser.Result) (revisedProtos map[string]p f.FileDescriptorProto().Name = proto.String(imp) f.FileNode() revisedProtos[imp] = f - rewrittenPaths[c] = imp } else { revisedProtos[c] = protos[c] } } - return revisedProtos, rewrittenPaths -} - -func removeDynamicExtensions(fd protoreflect.FileDescriptor, alreadySeen map[string]struct{}) { - if _, ok := alreadySeen[fd.Path()]; ok { - // already processed - return - } - alreadySeen[fd.Path()] = struct{}{} - res, ok := fd.(linker.Result) - if ok { - removeDynamicExtensionsFromProto(res.FileDescriptorProto()) - } - // also remove extensions from dependencies - for i, length := 0, fd.Imports().Len(); i < length; i++ { - removeDynamicExtensions(fd.Imports().Get(i).FileDescriptor, alreadySeen) - } + return revisedProtos } -func removeDynamicExtensionsFromProto(fd *descriptorpb.FileDescriptorProto) { +func removeDynamicExtensions(fd *descriptorpb.FileDescriptorProto) { // protocompile returns descriptors with dynamic extension fields for custom options. // But protoparse only used known custom options and everything else defined in the // sources would be stored as unrecognized fields. So to bridge the difference in diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/test-source-info.txt b/vendor/github.com/jhump/protoreflect/desc/protoparse/test-source-info.txt index c9bc50b1..ef11ff4a 100644 --- a/vendor/github.com/jhump/protoreflect/desc/protoparse/test-source-info.txt +++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/test-source-info.txt @@ -1542,7 +1542,7 @@ desc_test_complex.proto:31:30 > message_type[1] > extension_range: desc_test_complex.proto:33:9 -desc_test_complex.proto:33:91 +desc_test_complex.proto:33:81 > message_type[1] > extension_range[1]: @@ -1607,42 +1607,42 @@ desc_test_complex.proto:33:61 > message_type[1] > extension_range[1] > options: desc_test_complex.proto:33:62 -desc_test_complex.proto:33:90 +desc_test_complex.proto:33:80 > message_type[1] > extension_range[1] > options > (foo.bar.label): desc_test_complex.proto:33:63 -desc_test_complex.proto:33:89 +desc_test_complex.proto:33:79 > message_type[1] > extension_range[2] > options: desc_test_complex.proto:33:62 -desc_test_complex.proto:33:90 +desc_test_complex.proto:33:80 > message_type[1] > extension_range[2] > options > (foo.bar.label): desc_test_complex.proto:33:63 -desc_test_complex.proto:33:89 +desc_test_complex.proto:33:79 > message_type[1] > extension_range[3] > options: desc_test_complex.proto:33:62 -desc_test_complex.proto:33:90 +desc_test_complex.proto:33:80 > message_type[1] > extension_range[3] > options > (foo.bar.label): desc_test_complex.proto:33:63 -desc_test_complex.proto:33:89 +desc_test_complex.proto:33:79 > message_type[1] > extension_range[4] > options: desc_test_complex.proto:33:62 -desc_test_complex.proto:33:90 +desc_test_complex.proto:33:80 > message_type[1] > extension_range[4] > options > (foo.bar.label): desc_test_complex.proto:33:63 -desc_test_complex.proto:33:89 
+desc_test_complex.proto:33:79 > message_type[1] > nested_type[1]: @@ -5426,17 +5426,17 @@ desc_test_complex.proto:275:99 > message_type[9] > field[0] > options > (foo.bar.string): desc_test_complex.proto:276:17 -desc_test_complex.proto:276:78 +desc_test_complex.proto:276:36 > message_type[9] > field[0] > options > (foo.bar.bytes): -desc_test_complex.proto:276:80 -desc_test_complex.proto:276:139 +desc_test_complex.proto:276:38 +desc_test_complex.proto:276:55 > message_type[9] > field[0] > options > (foo.bar.bool): -desc_test_complex.proto:276:141 -desc_test_complex.proto:276:154 +desc_test_complex.proto:276:57 +desc_test_complex.proto:276:70 > message_type[9] > field[0] > options > (foo.bar.float): diff --git a/vendor/github.com/jhump/protoreflect/desc/sourceinfo/wrappers.go b/vendor/github.com/jhump/protoreflect/desc/sourceinfo/wrappers.go index 3106eaad..8cbb5bbb 100644 --- a/vendor/github.com/jhump/protoreflect/desc/sourceinfo/wrappers.go +++ b/vendor/github.com/jhump/protoreflect/desc/sourceinfo/wrappers.go @@ -2,6 +2,7 @@ package sourceinfo import ( "fmt" + "google.golang.org/protobuf/reflect/protoreflect" ) @@ -15,14 +16,6 @@ type fileDescriptor struct { locs protoreflect.SourceLocations } -func (f fileDescriptor) Edition() int32 { - ed, ok := f.FileDescriptor.(interface{ Edition() int32 }) - if ok { - return ed.Edition() - } - return 0 -} - func (f fileDescriptor) ParentFile() protoreflect.FileDescriptor { return f } diff --git a/vendor/github.com/jhump/protoreflect/desc/wrap.go b/vendor/github.com/jhump/protoreflect/desc/wrap.go index 5491afda..82610a45 100644 --- a/vendor/github.com/jhump/protoreflect/desc/wrap.go +++ b/vendor/github.com/jhump/protoreflect/desc/wrap.go @@ -21,7 +21,7 @@ type DescriptorWrapper interface { // WrapDescriptor wraps the given descriptor, returning a desc.Descriptor // value that represents the same element. func WrapDescriptor(d protoreflect.Descriptor) (Descriptor, error) { - return wrapDescriptor(d, mapCache{}) + return wrapDescriptor(d, noopCache{}) } func wrapDescriptor(d protoreflect.Descriptor, cache descriptorCache) (Descriptor, error) { @@ -65,13 +65,10 @@ func WrapFiles(d []protoreflect.FileDescriptor) ([]*FileDescriptor, error) { // WrapFile wraps the given file descriptor, returning a *desc.FileDescriptor // value that represents the same file. func WrapFile(d protoreflect.FileDescriptor) (*FileDescriptor, error) { - return wrapFile(d, mapCache{}) + return wrapFile(d, noopCache{}) } func wrapFile(d protoreflect.FileDescriptor, cache descriptorCache) (*FileDescriptor, error) { - if res := cache.get(d); res != nil { - return res.(*FileDescriptor), nil - } fdp := protoutil.ProtoFromFileDescriptor(d) return convertFile(d, fdp, cache) } @@ -79,7 +76,7 @@ func wrapFile(d protoreflect.FileDescriptor, cache descriptorCache) (*FileDescri // WrapMessage wraps the given message descriptor, returning a *desc.MessageDescriptor // value that represents the same message. func WrapMessage(d protoreflect.MessageDescriptor) (*MessageDescriptor, error) { - return wrapMessage(d, mapCache{}) + return wrapMessage(d, noopCache{}) } func wrapMessage(d protoreflect.MessageDescriptor, cache descriptorCache) (*MessageDescriptor, error) { @@ -100,7 +97,7 @@ func wrapMessage(d protoreflect.MessageDescriptor, cache descriptorCache) (*Mess // WrapField wraps the given field descriptor, returning a *desc.FieldDescriptor // value that represents the same field. 
func WrapField(d protoreflect.FieldDescriptor) (*FieldDescriptor, error) { - return wrapField(d, mapCache{}) + return wrapField(d, noopCache{}) } func wrapField(d protoreflect.FieldDescriptor, cache descriptorCache) (*FieldDescriptor, error) { @@ -124,7 +121,7 @@ func wrapField(d protoreflect.FieldDescriptor, cache descriptorCache) (*FieldDes // WrapOneOf wraps the given oneof descriptor, returning a *desc.OneOfDescriptor // value that represents the same oneof. func WrapOneOf(d protoreflect.OneofDescriptor) (*OneOfDescriptor, error) { - return wrapOneOf(d, mapCache{}) + return wrapOneOf(d, noopCache{}) } func wrapOneOf(d protoreflect.OneofDescriptor, cache descriptorCache) (*OneOfDescriptor, error) { @@ -141,7 +138,7 @@ func wrapOneOf(d protoreflect.OneofDescriptor, cache descriptorCache) (*OneOfDes // WrapEnum wraps the given enum descriptor, returning a *desc.EnumDescriptor // value that represents the same enum. func WrapEnum(d protoreflect.EnumDescriptor) (*EnumDescriptor, error) { - return wrapEnum(d, mapCache{}) + return wrapEnum(d, noopCache{}) } func wrapEnum(d protoreflect.EnumDescriptor, cache descriptorCache) (*EnumDescriptor, error) { @@ -162,7 +159,7 @@ func wrapEnum(d protoreflect.EnumDescriptor, cache descriptorCache) (*EnumDescri // WrapEnumValue wraps the given enum value descriptor, returning a *desc.EnumValueDescriptor // value that represents the same enum value. func WrapEnumValue(d protoreflect.EnumValueDescriptor) (*EnumValueDescriptor, error) { - return wrapEnumValue(d, mapCache{}) + return wrapEnumValue(d, noopCache{}) } func wrapEnumValue(d protoreflect.EnumValueDescriptor, cache descriptorCache) (*EnumValueDescriptor, error) { @@ -179,7 +176,7 @@ func wrapEnumValue(d protoreflect.EnumValueDescriptor, cache descriptorCache) (* // WrapService wraps the given service descriptor, returning a *desc.ServiceDescriptor // value that represents the same service. func WrapService(d protoreflect.ServiceDescriptor) (*ServiceDescriptor, error) { - return wrapService(d, mapCache{}) + return wrapService(d, noopCache{}) } func wrapService(d protoreflect.ServiceDescriptor, cache descriptorCache) (*ServiceDescriptor, error) { @@ -196,7 +193,7 @@ func wrapService(d protoreflect.ServiceDescriptor, cache descriptorCache) (*Serv // WrapMethod wraps the given method descriptor, returning a *desc.MethodDescriptor // value that represents the same method. 
func WrapMethod(d protoreflect.MethodDescriptor) (*MethodDescriptor, error) { - return wrapMethod(d, mapCache{}) + return wrapMethod(d, noopCache{}) } func wrapMethod(d protoreflect.MethodDescriptor, cache descriptorCache) (*MethodDescriptor, error) { diff --git a/vendor/github.com/jhump/protoreflect/grpcreflect/adapt.go b/vendor/github.com/jhump/protoreflect/grpcreflect/adapt.go deleted file mode 100644 index 0d5615fe..00000000 --- a/vendor/github.com/jhump/protoreflect/grpcreflect/adapt.go +++ /dev/null @@ -1,137 +0,0 @@ -package grpcreflect - -import ( - refv1 "google.golang.org/grpc/reflection/grpc_reflection_v1" - refv1alpha "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" -) - -func toV1Request(v1alpha *refv1alpha.ServerReflectionRequest) *refv1.ServerReflectionRequest { - var v1 refv1.ServerReflectionRequest - v1.Host = v1alpha.Host - switch mr := v1alpha.MessageRequest.(type) { - case *refv1alpha.ServerReflectionRequest_FileByFilename: - v1.MessageRequest = &refv1.ServerReflectionRequest_FileByFilename{ - FileByFilename: mr.FileByFilename, - } - case *refv1alpha.ServerReflectionRequest_FileContainingSymbol: - v1.MessageRequest = &refv1.ServerReflectionRequest_FileContainingSymbol{ - FileContainingSymbol: mr.FileContainingSymbol, - } - case *refv1alpha.ServerReflectionRequest_FileContainingExtension: - if mr.FileContainingExtension != nil { - v1.MessageRequest = &refv1.ServerReflectionRequest_FileContainingExtension{ - FileContainingExtension: &refv1.ExtensionRequest{ - ContainingType: mr.FileContainingExtension.GetContainingType(), - ExtensionNumber: mr.FileContainingExtension.GetExtensionNumber(), - }, - } - } - case *refv1alpha.ServerReflectionRequest_AllExtensionNumbersOfType: - v1.MessageRequest = &refv1.ServerReflectionRequest_AllExtensionNumbersOfType{ - AllExtensionNumbersOfType: mr.AllExtensionNumbersOfType, - } - case *refv1alpha.ServerReflectionRequest_ListServices: - v1.MessageRequest = &refv1.ServerReflectionRequest_ListServices{ - ListServices: mr.ListServices, - } - default: - // no value set - } - return &v1 -} - -func toV1AlphaRequest(v1 *refv1.ServerReflectionRequest) *refv1alpha.ServerReflectionRequest { - var v1alpha refv1alpha.ServerReflectionRequest - v1alpha.Host = v1.Host - switch mr := v1.MessageRequest.(type) { - case *refv1.ServerReflectionRequest_FileByFilename: - if mr != nil { - v1alpha.MessageRequest = &refv1alpha.ServerReflectionRequest_FileByFilename{ - FileByFilename: mr.FileByFilename, - } - } - case *refv1.ServerReflectionRequest_FileContainingSymbol: - if mr != nil { - v1alpha.MessageRequest = &refv1alpha.ServerReflectionRequest_FileContainingSymbol{ - FileContainingSymbol: mr.FileContainingSymbol, - } - } - case *refv1.ServerReflectionRequest_FileContainingExtension: - if mr != nil { - v1alpha.MessageRequest = &refv1alpha.ServerReflectionRequest_FileContainingExtension{ - FileContainingExtension: &refv1alpha.ExtensionRequest{ - ContainingType: mr.FileContainingExtension.GetContainingType(), - ExtensionNumber: mr.FileContainingExtension.GetExtensionNumber(), - }, - } - } - case *refv1.ServerReflectionRequest_AllExtensionNumbersOfType: - if mr != nil { - v1alpha.MessageRequest = &refv1alpha.ServerReflectionRequest_AllExtensionNumbersOfType{ - AllExtensionNumbersOfType: mr.AllExtensionNumbersOfType, - } - } - case *refv1.ServerReflectionRequest_ListServices: - if mr != nil { - v1alpha.MessageRequest = &refv1alpha.ServerReflectionRequest_ListServices{ - ListServices: mr.ListServices, - } - } - default: - // no value set - } - return 
&v1alpha -} - -func toV1AlphaResponse(v1 *refv1.ServerReflectionResponse) *refv1alpha.ServerReflectionResponse { - var v1alpha refv1alpha.ServerReflectionResponse - v1alpha.ValidHost = v1.ValidHost - if v1.OriginalRequest != nil { - v1alpha.OriginalRequest = toV1AlphaRequest(v1.OriginalRequest) - } - switch mr := v1.MessageResponse.(type) { - case *refv1.ServerReflectionResponse_FileDescriptorResponse: - if mr != nil { - v1alpha.MessageResponse = &refv1alpha.ServerReflectionResponse_FileDescriptorResponse{ - FileDescriptorResponse: &refv1alpha.FileDescriptorResponse{ - FileDescriptorProto: mr.FileDescriptorResponse.GetFileDescriptorProto(), - }, - } - } - case *refv1.ServerReflectionResponse_AllExtensionNumbersResponse: - if mr != nil { - v1alpha.MessageResponse = &refv1alpha.ServerReflectionResponse_AllExtensionNumbersResponse{ - AllExtensionNumbersResponse: &refv1alpha.ExtensionNumberResponse{ - BaseTypeName: mr.AllExtensionNumbersResponse.GetBaseTypeName(), - ExtensionNumber: mr.AllExtensionNumbersResponse.GetExtensionNumber(), - }, - } - } - case *refv1.ServerReflectionResponse_ListServicesResponse: - if mr != nil { - svcs := make([]*refv1alpha.ServiceResponse, len(mr.ListServicesResponse.GetService())) - for i, svc := range mr.ListServicesResponse.GetService() { - svcs[i] = &refv1alpha.ServiceResponse{ - Name: svc.GetName(), - } - } - v1alpha.MessageResponse = &refv1alpha.ServerReflectionResponse_ListServicesResponse{ - ListServicesResponse: &refv1alpha.ListServiceResponse{ - Service: svcs, - }, - } - } - case *refv1.ServerReflectionResponse_ErrorResponse: - if mr != nil { - v1alpha.MessageResponse = &refv1alpha.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &refv1alpha.ErrorResponse{ - ErrorCode: mr.ErrorResponse.GetErrorCode(), - ErrorMessage: mr.ErrorResponse.GetErrorMessage(), - }, - } - } - default: - // no value set - } - return &v1alpha -} diff --git a/vendor/github.com/jhump/protoreflect/grpcreflect/client.go b/vendor/github.com/jhump/protoreflect/grpcreflect/client.go index cb6bf568..b0e4bbb0 100644 --- a/vendor/github.com/jhump/protoreflect/grpcreflect/client.go +++ b/vendor/github.com/jhump/protoreflect/grpcreflect/client.go @@ -8,18 +8,17 @@ import ( "reflect" "runtime" "sync" - "sync/atomic" "time" "github.com/golang/protobuf/proto" "google.golang.org/grpc" "google.golang.org/grpc/codes" - refv1 "google.golang.org/grpc/reflection/grpc_reflection_v1" refv1alpha "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" "google.golang.org/grpc/status" "google.golang.org/protobuf/types/descriptorpb" "github.com/jhump/protoreflect/desc" + refv1 "github.com/jhump/protoreflect/grpcreflect/internal/grpc_reflection_v1" "github.com/jhump/protoreflect/internal" ) @@ -63,31 +62,14 @@ const ( ) func symbolNotFound(symbol string, symType symbolType, cause *elementNotFoundError) error { - if cause != nil && cause.kind == elementKindSymbol && cause.name == symbol { - // no need to wrap - if symType != symbolTypeUnknown && cause.symType == symbolTypeUnknown { - // We previously didn't know symbol type but now do? - // Create a new error that has the right symbol type. 
- return &elementNotFoundError{name: symbol, symType: symType, kind: elementKindSymbol} - } - return cause - } return &elementNotFoundError{name: symbol, symType: symType, kind: elementKindSymbol, cause: cause} } func extensionNotFound(extendee string, tag int32, cause *elementNotFoundError) error { - if cause != nil && cause.kind == elementKindExtension && cause.name == extendee && cause.tag == tag { - // no need to wrap - return cause - } return &elementNotFoundError{name: extendee, tag: tag, kind: elementKindExtension, cause: cause} } func fileNotFound(file string, cause *elementNotFoundError) error { - if cause != nil && cause.kind == elementKindFile && cause.name == file { - // no need to wrap - return cause - } return &elementNotFoundError{name: file, kind: elementKindFile, cause: cause} } @@ -98,15 +80,15 @@ func (e *elementNotFoundError) Error() string { if first { first = false } else { - _, _ = fmt.Fprint(&b, "\ncaused by: ") + fmt.Fprint(&b, "\ncaused by: ") } switch e.kind { case elementKindSymbol: - _, _ = fmt.Fprintf(&b, "%s not found: %s", e.symType, e.name) + fmt.Fprintf(&b, "%s not found: %s", e.symType, e.name) case elementKindExtension: - _, _ = fmt.Fprintf(&b, "Extension not found: tag %d for %s", e.tag, e.name) + fmt.Fprintf(&b, "Extension not found: tag %d for %s", e.tag, e.name) default: - _, _ = fmt.Fprintf(&b, "File not found: %s", e.name) + fmt.Fprintf(&b, "File not found: %s", e.name) } } return b.String() @@ -137,11 +119,10 @@ type extDesc struct { // Client is a client connection to a server for performing reflection calls // and resolving remote symbols. type Client struct { - ctx context.Context - now func() time.Time - stubV1 refv1.ServerReflectionClient - stubV1Alpha refv1alpha.ServerReflectionClient - allowMissing atomic.Bool + ctx context.Context + now func() time.Time + stubV1 refv1.ServerReflectionClient + stubV1Alpha refv1alpha.ServerReflectionClient connMu sync.Mutex cancel context.CancelFunc @@ -205,15 +186,6 @@ func NewClientAuto(ctx context.Context, cc grpc.ClientConnInterface) *Client { return newClient(ctx, stubv1, stubv1alpha) } -// AllowMissingFileDescriptors configures the client to allow missing files -// when building descriptors when possible. Missing files are often fatal -// errors, but with this option they can sometimes be worked around. Building -// a schema can only succeed with some files missing if the files in question -// only provide custom options and/or other unused types. -func (cr *Client) AllowMissingFileDescriptors() { - cr.allowMissing.Store(true) -} - // TODO: We should also have a NewClientV1. However that should not refer to internal // generated code. So it will have to wait until the grpc-go team fixes this issue: // https://github.com/grpc/grpc-go/issues/5684 @@ -382,34 +354,16 @@ func (cr *Client) getAndCacheFileDescriptors(req *refv1alpha.ServerReflectionReq } func (cr *Client) descriptorFromProto(fd *descriptorpb.FileDescriptorProto) (*desc.FileDescriptor, error) { - allowMissing := cr.allowMissing.Load() - deps := make([]*desc.FileDescriptor, 0, len(fd.GetDependency())) - var deferredErr error - var missingDeps []int + deps := make([]*desc.FileDescriptor, len(fd.GetDependency())) for i, depName := range fd.GetDependency() { if dep, err := cr.FileByFilename(depName); err != nil { - if _, ok := err.(*elementNotFoundError); !ok || !allowMissing { - return nil, err - } - // We'll ignore for now to see if the file is really necessary. - // (If it only supplies custom options, we can get by without it.) 
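// Illustrative sketch, not part of the vendored diff: listing services over gRPC
// server reflection with this package, roughly how a dial-testing client might
// use it. The address "127.0.0.1:9090" is a hypothetical example; only the
// grpcreflect API visible in the surrounding hunks is assumed.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/jhump/protoreflect/grpcreflect"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	ctx := context.Background()
	conn, err := grpc.Dial("127.0.0.1:9090",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// NewClientAuto probes the v1 reflection service and falls back to v1alpha.
	client := grpcreflect.NewClientAuto(ctx, conn)
	defer client.Reset()

	svcs, err := client.ListServices()
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range svcs {
		fmt.Println("reflected service:", s)
	}
}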
- if deferredErr == nil { - deferredErr = err - } - missingDeps = append(missingDeps, i) + return nil, err } else { - deps = append(deps, dep) + deps[i] = dep } } - if len(missingDeps) > 0 { - fd = fileWithoutDeps(fd, missingDeps) - } d, err := desc.CreateFileDescriptor(fd, deps...) if err != nil { - if deferredErr != nil { - // assume the issue is the missing dep - return nil, deferredErr - } return nil, err } d = cr.cacheFile(d) @@ -561,15 +515,7 @@ func (cr *Client) doSendLocked(attemptCount int, prevErr error, req *refv1alpha. if attemptCount >= 3 && prevErr != nil { return nil, prevErr } - if (status.Code(prevErr) == codes.Unimplemented || - status.Code(prevErr) == codes.Unavailable) && - cr.useV1() { - // If v1 is unimplemented, fallback to v1alpha. - // We also fallback on unavailable because some servers have been - // observed to close the connection/cancel the stream, w/out sending - // back status or headers, when the service name is not known. When - // this happens, the RPC status code is unavailable. - // See https://github.com/fullstorydev/grpcurl/issues/434 + if status.Code(prevErr) == codes.Unimplemented && cr.useV1() { cr.useV1Alpha = true cr.lastTriedV1 = cr.now() } @@ -747,46 +693,6 @@ func (cr *Client) ResolveExtension(extendedType string, extensionNumber int32) ( } } -func fileWithoutDeps(fd *descriptorpb.FileDescriptorProto, missingDeps []int) *descriptorpb.FileDescriptorProto { - // We need to rebuild the file without the missing deps. - fd = proto.Clone(fd).(*descriptorpb.FileDescriptorProto) - newNumDeps := len(fd.GetDependency()) - len(missingDeps) - newDeps := make([]string, 0, newNumDeps) - remapped := make(map[int]int, newNumDeps) - missingIdx := 0 - for i, dep := range fd.GetDependency() { - if missingIdx < len(missingDeps) { - if i == missingDeps[missingIdx] { - // This dep was missing. Skip it. - missingIdx++ - continue - } - } - remapped[i] = len(newDeps) - newDeps = append(newDeps, dep) - } - // Also rebuild public and weak import slices. 
- newPublic := make([]int32, 0, len(fd.GetPublicDependency())) - for _, idx := range fd.GetPublicDependency() { - newIdx, ok := remapped[int(idx)] - if ok { - newPublic = append(newPublic, int32(newIdx)) - } - } - newWeak := make([]int32, 0, len(fd.GetWeakDependency())) - for _, idx := range fd.GetWeakDependency() { - newIdx, ok := remapped[int(idx)] - if ok { - newWeak = append(newWeak, int32(newIdx)) - } - } - - fd.Dependency = newDeps - fd.PublicDependency = newPublic - fd.WeakDependency = newWeak - return fd -} - func findExtension(extendedType string, extensionNumber int32, scope extensionScope) *desc.FieldDescriptor { // search extensions in this scope for _, ext := range scope.extensions() { @@ -852,7 +758,7 @@ type adaptStreamFromV1 struct { } func (a adaptStreamFromV1) Send(request *refv1alpha.ServerReflectionRequest) error { - v1req := toV1Request(request) + v1req := refv1.ToV1Request(request) return a.ServerReflection_ServerReflectionInfoClient.Send(v1req) } @@ -861,5 +767,5 @@ func (a adaptStreamFromV1) Recv() (*refv1alpha.ServerReflectionResponse, error) if err != nil { return nil, err } - return toV1AlphaResponse(v1resp), nil + return refv1.ToV1AlphaResponse(v1resp), nil } diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go b/vendor/github.com/jhump/protoreflect/grpcreflect/internal/grpc_reflection_v1/reflection.pb.go similarity index 56% rename from vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go rename to vendor/github.com/jhump/protoreflect/grpcreflect/internal/grpc_reflection_v1/reflection.pb.go index 6f5c786b..5a11bd19 100644 --- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go +++ b/vendor/github.com/jhump/protoreflect/grpcreflect/internal/grpc_reflection_v1/reflection.pb.go @@ -21,9 +21,14 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.31.0 +// protoc-gen-go v1.28.1-devel // protoc v4.22.0 -// source: grpc/reflection/v1/reflection.proto +// source: grpcreflect/internal/grpc_reflection_v1/reflection.proto + +// NOTE: This package has been changed so that if the "canonical" version of this +// proto is compiled and linked into the same program, it won't result in an init +// failure (which can happen if two different Go packages try to define the same +// proto files/types). package grpc_reflection_v1 @@ -65,7 +70,7 @@ type ServerReflectionRequest struct { func (x *ServerReflectionRequest) Reset() { *x = ServerReflectionRequest{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[0] + mi := &file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -78,7 +83,7 @@ func (x *ServerReflectionRequest) String() string { func (*ServerReflectionRequest) ProtoMessage() {} func (x *ServerReflectionRequest) ProtoReflect() protoreflect.Message { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[0] + mi := &file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_msgTypes[0] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -91,7 +96,7 @@ func (x *ServerReflectionRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ServerReflectionRequest.ProtoReflect.Descriptor instead. 
func (*ServerReflectionRequest) Descriptor() ([]byte, []int) { - return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{0} + return file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{0} } func (x *ServerReflectionRequest) GetHost() string { @@ -209,7 +214,7 @@ type ExtensionRequest struct { func (x *ExtensionRequest) Reset() { *x = ExtensionRequest{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[1] + mi := &file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -222,7 +227,7 @@ func (x *ExtensionRequest) String() string { func (*ExtensionRequest) ProtoMessage() {} func (x *ExtensionRequest) ProtoReflect() protoreflect.Message { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[1] + mi := &file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_msgTypes[1] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -235,7 +240,7 @@ func (x *ExtensionRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ExtensionRequest.ProtoReflect.Descriptor instead. func (*ExtensionRequest) Descriptor() ([]byte, []int) { - return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{1} + return file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{1} } func (x *ExtensionRequest) GetContainingType() string { @@ -275,7 +280,7 @@ type ServerReflectionResponse struct { func (x *ServerReflectionResponse) Reset() { *x = ServerReflectionResponse{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[2] + mi := &file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -288,7 +293,7 @@ func (x *ServerReflectionResponse) String() string { func (*ServerReflectionResponse) ProtoMessage() {} func (x *ServerReflectionResponse) ProtoReflect() protoreflect.Message { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[2] + mi := &file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -301,7 +306,7 @@ func (x *ServerReflectionResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ServerReflectionResponse.ProtoReflect.Descriptor instead. 
func (*ServerReflectionResponse) Descriptor() ([]byte, []int) { - return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{2} + return file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{2} } func (x *ServerReflectionResponse) GetValidHost() string { @@ -409,7 +414,7 @@ type FileDescriptorResponse struct { func (x *FileDescriptorResponse) Reset() { *x = FileDescriptorResponse{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[3] + mi := &file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -422,7 +427,7 @@ func (x *FileDescriptorResponse) String() string { func (*FileDescriptorResponse) ProtoMessage() {} func (x *FileDescriptorResponse) ProtoReflect() protoreflect.Message { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[3] + mi := &file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -435,7 +440,7 @@ func (x *FileDescriptorResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use FileDescriptorResponse.ProtoReflect.Descriptor instead. func (*FileDescriptorResponse) Descriptor() ([]byte, []int) { - return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{3} + return file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{3} } func (x *FileDescriptorResponse) GetFileDescriptorProto() [][]byte { @@ -461,7 +466,7 @@ type ExtensionNumberResponse struct { func (x *ExtensionNumberResponse) Reset() { *x = ExtensionNumberResponse{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[4] + mi := &file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -474,7 +479,7 @@ func (x *ExtensionNumberResponse) String() string { func (*ExtensionNumberResponse) ProtoMessage() {} func (x *ExtensionNumberResponse) ProtoReflect() protoreflect.Message { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[4] + mi := &file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_msgTypes[4] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -487,7 +492,7 @@ func (x *ExtensionNumberResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ExtensionNumberResponse.ProtoReflect.Descriptor instead. 
func (*ExtensionNumberResponse) Descriptor() ([]byte, []int) { - return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{4} + return file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{4} } func (x *ExtensionNumberResponse) GetBaseTypeName() string { @@ -518,7 +523,7 @@ type ListServiceResponse struct { func (x *ListServiceResponse) Reset() { *x = ListServiceResponse{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[5] + mi := &file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -531,7 +536,7 @@ func (x *ListServiceResponse) String() string { func (*ListServiceResponse) ProtoMessage() {} func (x *ListServiceResponse) ProtoReflect() protoreflect.Message { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[5] + mi := &file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -544,7 +549,7 @@ func (x *ListServiceResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ListServiceResponse.ProtoReflect.Descriptor instead. func (*ListServiceResponse) Descriptor() ([]byte, []int) { - return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{5} + return file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{5} } func (x *ListServiceResponse) GetService() []*ServiceResponse { @@ -569,7 +574,7 @@ type ServiceResponse struct { func (x *ServiceResponse) Reset() { *x = ServiceResponse{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[6] + mi := &file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -582,7 +587,7 @@ func (x *ServiceResponse) String() string { func (*ServiceResponse) ProtoMessage() {} func (x *ServiceResponse) ProtoReflect() protoreflect.Message { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[6] + mi := &file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -595,7 +600,7 @@ func (x *ServiceResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ServiceResponse.ProtoReflect.Descriptor instead. 
func (*ServiceResponse) Descriptor() ([]byte, []int) { - return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{6} + return file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{6} } func (x *ServiceResponse) GetName() string { @@ -619,7 +624,7 @@ type ErrorResponse struct { func (x *ErrorResponse) Reset() { *x = ErrorResponse{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[7] + mi := &file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -632,7 +637,7 @@ func (x *ErrorResponse) String() string { func (*ErrorResponse) ProtoMessage() {} func (x *ErrorResponse) ProtoReflect() protoreflect.Message { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[7] + mi := &file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -645,7 +650,7 @@ func (x *ErrorResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ErrorResponse.ProtoReflect.Descriptor instead. func (*ErrorResponse) Descriptor() ([]byte, []int) { - return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{7} + return file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{7} } func (x *ErrorResponse) GetErrorCode() int32 { @@ -662,153 +667,167 @@ func (x *ErrorResponse) GetErrorMessage() string { return "" } -var File_grpc_reflection_v1_reflection_proto protoreflect.FileDescriptor - -var file_grpc_reflection_v1_reflection_proto_rawDesc = []byte{ - 0x0a, 0x23, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x22, 0xf3, 0x02, 0x0a, 0x17, 0x53, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x2a, 0x0a, 0x10, 0x66, 0x69, 0x6c, - 0x65, 0x5f, 0x62, 0x79, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0e, 0x66, 0x69, 0x6c, 0x65, 0x42, 0x79, 0x46, 0x69, 0x6c, - 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x36, 0x0a, 0x16, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, - 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x14, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x53, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x12, 0x62, 0x0a, - 0x19, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, - 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x24, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x17, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, - 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x45, 0x78, 0x74, 0x65, 
0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x12, 0x42, 0x0a, 0x1d, 0x61, 0x6c, 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, - 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x79, - 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x19, 0x61, 0x6c, 0x6c, 0x45, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x4f, - 0x66, 0x54, 0x79, 0x70, 0x65, 0x12, 0x25, 0x0a, 0x0d, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, - 0x6c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x42, 0x11, 0x0a, 0x0f, - 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, - 0x66, 0x0a, 0x10, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, - 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x6f, - 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, - 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0xae, 0x04, 0x0a, 0x18, 0x53, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x6f, - 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, - 0x6f, 0x73, 0x74, 0x12, 0x56, 0x0a, 0x10, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x5f, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, - 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0f, 0x6f, 0x72, 0x69, 0x67, - 0x69, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x66, 0x0a, 0x18, 0x66, - 0x69, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x72, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, - 0x76, 0x31, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, - 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x16, 0x66, 0x69, 0x6c, - 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x72, 0x0a, 0x1e, 0x61, 0x6c, 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x5f, 0x72, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x72, +var File_grpcreflect_internal_grpc_reflection_v1_reflection_proto protoreflect.FileDescriptor + +var file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_rawDesc = []byte{ + 0x0a, 0x38, 0x67, 0x72, 0x70, 0x63, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x2f, 0x69, 0x6e, + 0x74, 0x65, 
0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x72, 0x65, 0x66, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x25, 0x6a, 0x68, 0x75, 0x6d, + 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x2e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, + 0x31, 0x22, 0x86, 0x03, 0x0a, 0x17, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, + 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x6f, 0x73, + 0x74, 0x12, 0x2a, 0x0a, 0x10, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x62, 0x79, 0x5f, 0x66, 0x69, 0x6c, + 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0e, 0x66, + 0x69, 0x6c, 0x65, 0x42, 0x79, 0x46, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x36, 0x0a, + 0x16, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, + 0x5f, 0x73, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, + 0x14, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x53, + 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x12, 0x75, 0x0a, 0x19, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x6a, 0x68, 0x75, 0x6d, 0x70, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, - 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x1b, 0x61, 0x6c, 0x6c, 0x45, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5f, 0x0a, 0x16, 0x6c, 0x69, 0x73, 0x74, 0x5f, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, - 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, - 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x48, 0x00, 0x52, 0x14, 0x6c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x0e, 0x65, 0x72, 0x72, 0x6f, - 0x72, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x12, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, - 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x4c, 0x0a, 0x16, 0x46, 0x69, 0x6c, 0x65, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x32, 0x0a, 0x15, 
0x66, 0x69, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0c, 0x52, 0x13, 0x66, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, - 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6a, 0x0a, 0x17, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, - 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x54, - 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x48, 0x00, 0x52, 0x17, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x69, 0x6e, 0x67, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x42, 0x0a, 0x1d, + 0x61, 0x6c, 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x73, 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x19, 0x61, 0x6c, 0x6c, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x4f, 0x66, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x25, 0x0a, 0x0d, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, 0x6c, 0x69, 0x73, 0x74, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x42, 0x11, 0x0a, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x66, 0x0a, 0x10, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x27, + 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x69, 0x6e, 0x67, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, - 0x65, 0x72, 0x22, 0x54, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3d, 0x0a, 0x07, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x72, 0x70, - 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, - 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0x25, 0x0a, 0x0f, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, - 0x53, 0x0a, 0x0d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x65, 0x72, 0x72, 0x6f, 
0x72, 0x43, 0x6f, 0x64, 0x65, 0x12, - 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x32, 0x89, 0x01, 0x0a, 0x10, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, - 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x75, 0x0a, 0x14, 0x53, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, - 0x6f, 0x12, 0x2b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, - 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, + 0x65, 0x72, 0x22, 0x8e, 0x05, 0x0a, 0x18, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x1d, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, 0x6f, 0x73, 0x74, 0x12, 0x69, + 0x0a, 0x10, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x6a, 0x68, 0x75, 0x6d, 0x70, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, + 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, + 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x79, 0x0a, 0x18, 0x66, 0x69, 0x6c, + 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x72, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x6a, 0x68, + 0x75, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, - 0x42, 0x66, 0x0a, 0x15, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x42, 0x15, 0x53, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x50, 0x01, 0x5a, 0x34, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, - 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x72, 0x65, 0x66, 0x6c, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x16, 0x66, 0x69, + 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x85, 0x01, 0x0a, 0x1e, 0x61, 0x6c, 0x6c, 0x5f, 0x65, 
0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x5f, 0x72, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, + 0x6a, 0x68, 0x75, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x72, 0x65, 0x66, 0x6c, 0x65, + 0x63, 0x74, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, + 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, + 0x1b, 0x61, 0x6c, 0x6c, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, + 0x62, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x72, 0x0a, 0x16, + 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x5f, 0x72, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x6a, + 0x68, 0x75, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, + 0x74, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x14, 0x6c, 0x69, 0x73, 0x74, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x5d, 0x0a, 0x0e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x6a, 0x68, 0x75, 0x6d, 0x70, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, + 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, + 0x52, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, + 0x12, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x4c, 0x0a, 0x16, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, + 0x15, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x13, 0x66, 0x69, + 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x22, 0x6a, 0x0a, 0x17, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x0e, + 0x62, 0x61, 0x73, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x54, 0x79, 0x70, 0x65, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, + 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0f, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0x67, 0x0a, + 0x13, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x50, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, + 0x01, 0x20, 
0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x6a, 0x68, 0x75, 0x6d, 0x70, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x07, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0x25, 0x0a, 0x0f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x53, 0x0a, + 0x0d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, + 0x0a, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x09, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x23, 0x0a, + 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x32, 0xb0, 0x01, 0x0a, 0x10, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x9b, 0x01, 0x0a, 0x14, 0x53, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, + 0x12, 0x3e, 0x2e, 0x6a, 0x68, 0x75, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x72, 0x65, + 0x66, 0x6c, 0x65, 0x63, 0x74, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, + 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x3f, 0x2e, 0x6a, 0x68, 0x75, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x72, 0x65, + 0x66, 0x6c, 0x65, 0x63, 0x74, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, + 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x28, 0x01, 0x30, 0x01, 0x42, 0x66, 0x0a, 0x15, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x42, 0x15, + 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x34, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, + 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, + 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( - file_grpc_reflection_v1_reflection_proto_rawDescOnce sync.Once - file_grpc_reflection_v1_reflection_proto_rawDescData = file_grpc_reflection_v1_reflection_proto_rawDesc + file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_rawDescOnce sync.Once + file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_rawDescData = file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_rawDesc ) -func file_grpc_reflection_v1_reflection_proto_rawDescGZIP() []byte { - file_grpc_reflection_v1_reflection_proto_rawDescOnce.Do(func() { - 
file_grpc_reflection_v1_reflection_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_reflection_v1_reflection_proto_rawDescData) +func file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_rawDescGZIP() []byte { + file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_rawDescOnce.Do(func() { + file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_rawDescData) }) - return file_grpc_reflection_v1_reflection_proto_rawDescData -} - -var file_grpc_reflection_v1_reflection_proto_msgTypes = make([]protoimpl.MessageInfo, 8) -var file_grpc_reflection_v1_reflection_proto_goTypes = []interface{}{ - (*ServerReflectionRequest)(nil), // 0: grpc.reflection.v1.ServerReflectionRequest - (*ExtensionRequest)(nil), // 1: grpc.reflection.v1.ExtensionRequest - (*ServerReflectionResponse)(nil), // 2: grpc.reflection.v1.ServerReflectionResponse - (*FileDescriptorResponse)(nil), // 3: grpc.reflection.v1.FileDescriptorResponse - (*ExtensionNumberResponse)(nil), // 4: grpc.reflection.v1.ExtensionNumberResponse - (*ListServiceResponse)(nil), // 5: grpc.reflection.v1.ListServiceResponse - (*ServiceResponse)(nil), // 6: grpc.reflection.v1.ServiceResponse - (*ErrorResponse)(nil), // 7: grpc.reflection.v1.ErrorResponse -} -var file_grpc_reflection_v1_reflection_proto_depIdxs = []int32{ - 1, // 0: grpc.reflection.v1.ServerReflectionRequest.file_containing_extension:type_name -> grpc.reflection.v1.ExtensionRequest - 0, // 1: grpc.reflection.v1.ServerReflectionResponse.original_request:type_name -> grpc.reflection.v1.ServerReflectionRequest - 3, // 2: grpc.reflection.v1.ServerReflectionResponse.file_descriptor_response:type_name -> grpc.reflection.v1.FileDescriptorResponse - 4, // 3: grpc.reflection.v1.ServerReflectionResponse.all_extension_numbers_response:type_name -> grpc.reflection.v1.ExtensionNumberResponse - 5, // 4: grpc.reflection.v1.ServerReflectionResponse.list_services_response:type_name -> grpc.reflection.v1.ListServiceResponse - 7, // 5: grpc.reflection.v1.ServerReflectionResponse.error_response:type_name -> grpc.reflection.v1.ErrorResponse - 6, // 6: grpc.reflection.v1.ListServiceResponse.service:type_name -> grpc.reflection.v1.ServiceResponse - 0, // 7: grpc.reflection.v1.ServerReflection.ServerReflectionInfo:input_type -> grpc.reflection.v1.ServerReflectionRequest - 2, // 8: grpc.reflection.v1.ServerReflection.ServerReflectionInfo:output_type -> grpc.reflection.v1.ServerReflectionResponse + return file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_rawDescData +} + +var file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_goTypes = []interface{}{ + (*ServerReflectionRequest)(nil), // 0: jhump.protoreflect.grpc.reflection.v1.ServerReflectionRequest + (*ExtensionRequest)(nil), // 1: jhump.protoreflect.grpc.reflection.v1.ExtensionRequest + (*ServerReflectionResponse)(nil), // 2: jhump.protoreflect.grpc.reflection.v1.ServerReflectionResponse + (*FileDescriptorResponse)(nil), // 3: jhump.protoreflect.grpc.reflection.v1.FileDescriptorResponse + (*ExtensionNumberResponse)(nil), // 4: jhump.protoreflect.grpc.reflection.v1.ExtensionNumberResponse + (*ListServiceResponse)(nil), // 5: jhump.protoreflect.grpc.reflection.v1.ListServiceResponse + (*ServiceResponse)(nil), // 6: jhump.protoreflect.grpc.reflection.v1.ServiceResponse + (*ErrorResponse)(nil), // 7: 
jhump.protoreflect.grpc.reflection.v1.ErrorResponse +} +var file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_depIdxs = []int32{ + 1, // 0: jhump.protoreflect.grpc.reflection.v1.ServerReflectionRequest.file_containing_extension:type_name -> jhump.protoreflect.grpc.reflection.v1.ExtensionRequest + 0, // 1: jhump.protoreflect.grpc.reflection.v1.ServerReflectionResponse.original_request:type_name -> jhump.protoreflect.grpc.reflection.v1.ServerReflectionRequest + 3, // 2: jhump.protoreflect.grpc.reflection.v1.ServerReflectionResponse.file_descriptor_response:type_name -> jhump.protoreflect.grpc.reflection.v1.FileDescriptorResponse + 4, // 3: jhump.protoreflect.grpc.reflection.v1.ServerReflectionResponse.all_extension_numbers_response:type_name -> jhump.protoreflect.grpc.reflection.v1.ExtensionNumberResponse + 5, // 4: jhump.protoreflect.grpc.reflection.v1.ServerReflectionResponse.list_services_response:type_name -> jhump.protoreflect.grpc.reflection.v1.ListServiceResponse + 7, // 5: jhump.protoreflect.grpc.reflection.v1.ServerReflectionResponse.error_response:type_name -> jhump.protoreflect.grpc.reflection.v1.ErrorResponse + 6, // 6: jhump.protoreflect.grpc.reflection.v1.ListServiceResponse.service:type_name -> jhump.protoreflect.grpc.reflection.v1.ServiceResponse + 0, // 7: jhump.protoreflect.grpc.reflection.v1.ServerReflection.ServerReflectionInfo:input_type -> jhump.protoreflect.grpc.reflection.v1.ServerReflectionRequest + 2, // 8: jhump.protoreflect.grpc.reflection.v1.ServerReflection.ServerReflectionInfo:output_type -> jhump.protoreflect.grpc.reflection.v1.ServerReflectionResponse 8, // [8:9] is the sub-list for method output_type 7, // [7:8] is the sub-list for method input_type 7, // [7:7] is the sub-list for extension type_name @@ -816,13 +835,13 @@ var file_grpc_reflection_v1_reflection_proto_depIdxs = []int32{ 0, // [0:7] is the sub-list for field type_name } -func init() { file_grpc_reflection_v1_reflection_proto_init() } -func file_grpc_reflection_v1_reflection_proto_init() { - if File_grpc_reflection_v1_reflection_proto != nil { +func init() { file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_init() } +func file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_init() { + if File_grpcreflect_internal_grpc_reflection_v1_reflection_proto != nil { return } if !protoimpl.UnsafeEnabled { - file_grpc_reflection_v1_reflection_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ServerReflectionRequest); i { case 0: return &v.state @@ -834,7 +853,7 @@ func file_grpc_reflection_v1_reflection_proto_init() { return nil } } - file_grpc_reflection_v1_reflection_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ExtensionRequest); i { case 0: return &v.state @@ -846,7 +865,7 @@ func file_grpc_reflection_v1_reflection_proto_init() { return nil } } - file_grpc_reflection_v1_reflection_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ServerReflectionResponse); i { case 0: return &v.state @@ -858,7 +877,7 @@ func file_grpc_reflection_v1_reflection_proto_init() { return nil } } - 
file_grpc_reflection_v1_reflection_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*FileDescriptorResponse); i { case 0: return &v.state @@ -870,7 +889,7 @@ func file_grpc_reflection_v1_reflection_proto_init() { return nil } } - file_grpc_reflection_v1_reflection_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ExtensionNumberResponse); i { case 0: return &v.state @@ -882,7 +901,7 @@ func file_grpc_reflection_v1_reflection_proto_init() { return nil } } - file_grpc_reflection_v1_reflection_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ListServiceResponse); i { case 0: return &v.state @@ -894,7 +913,7 @@ func file_grpc_reflection_v1_reflection_proto_init() { return nil } } - file_grpc_reflection_v1_reflection_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ServiceResponse); i { case 0: return &v.state @@ -906,7 +925,7 @@ func file_grpc_reflection_v1_reflection_proto_init() { return nil } } - file_grpc_reflection_v1_reflection_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ErrorResponse); i { case 0: return &v.state @@ -919,14 +938,14 @@ func file_grpc_reflection_v1_reflection_proto_init() { } } } - file_grpc_reflection_v1_reflection_proto_msgTypes[0].OneofWrappers = []interface{}{ + file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_msgTypes[0].OneofWrappers = []interface{}{ (*ServerReflectionRequest_FileByFilename)(nil), (*ServerReflectionRequest_FileContainingSymbol)(nil), (*ServerReflectionRequest_FileContainingExtension)(nil), (*ServerReflectionRequest_AllExtensionNumbersOfType)(nil), (*ServerReflectionRequest_ListServices)(nil), } - file_grpc_reflection_v1_reflection_proto_msgTypes[2].OneofWrappers = []interface{}{ + file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_msgTypes[2].OneofWrappers = []interface{}{ (*ServerReflectionResponse_FileDescriptorResponse)(nil), (*ServerReflectionResponse_AllExtensionNumbersResponse)(nil), (*ServerReflectionResponse_ListServicesResponse)(nil), @@ -936,18 +955,18 @@ func file_grpc_reflection_v1_reflection_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_grpc_reflection_v1_reflection_proto_rawDesc, + RawDescriptor: file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_rawDesc, NumEnums: 0, NumMessages: 8, NumExtensions: 0, NumServices: 1, }, - GoTypes: file_grpc_reflection_v1_reflection_proto_goTypes, - DependencyIndexes: file_grpc_reflection_v1_reflection_proto_depIdxs, - MessageInfos: file_grpc_reflection_v1_reflection_proto_msgTypes, + GoTypes: file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_goTypes, + DependencyIndexes: file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_depIdxs, + 
MessageInfos:      file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_msgTypes,
 	}.Build()
-	File_grpc_reflection_v1_reflection_proto = out.File
-	file_grpc_reflection_v1_reflection_proto_rawDesc = nil
-	file_grpc_reflection_v1_reflection_proto_goTypes = nil
-	file_grpc_reflection_v1_reflection_proto_depIdxs = nil
+	File_grpcreflect_internal_grpc_reflection_v1_reflection_proto = out.File
+	file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_rawDesc = nil
+	file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_goTypes = nil
+	file_grpcreflect_internal_grpc_reflection_v1_reflection_proto_depIdxs = nil
 }
diff --git a/vendor/github.com/jhump/protoreflect/grpcreflect/internal/grpc_reflection_v1/reflection.proto b/vendor/github.com/jhump/protoreflect/grpcreflect/internal/grpc_reflection_v1/reflection.proto
new file mode 100644
index 00000000..dac86edd
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/grpcreflect/internal/grpc_reflection_v1/reflection.proto
@@ -0,0 +1,150 @@
+// Copyright 2016 The gRPC Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Service exported by server reflection. A more complete description of how
+// server reflection works can be found at
+// https://github.com/grpc/grpc/blob/master/doc/server-reflection.md
+//
+// The canonical version of this proto can be found at
+// https://github.com/grpc/grpc-proto/blob/master/grpc/reflection/v1/reflection.proto
+
+syntax = "proto3";
+
+// NOTE: This package has been changed so that if the "canonical" version of this
+// proto is compiled and linked into the same program, it won't result in an init
+// failure (which can happen if two different Go packages try to define the same
+// proto files/types).
+package jhump.protoreflect.grpc.reflection.v1;
+
+option go_package = "google.golang.org/grpc/reflection/grpc_reflection_v1";
+option java_multiple_files = true;
+option java_package = "io.grpc.reflection.v1";
+option java_outer_classname = "ServerReflectionProto";
+
+service ServerReflection {
+  // The reflection service is structured as a bidirectional stream, ensuring
+  // all related requests go to a single server.
+  rpc ServerReflectionInfo(stream ServerReflectionRequest)
+      returns (stream ServerReflectionResponse);
+}
+
+// The message sent by the client when calling ServerReflectionInfo method.
+message ServerReflectionRequest {
+  string host = 1;
+  // To use reflection service, the client should set one of the following
+  // fields in message_request. The server distinguishes requests by their
+  // defined field and then handles them using corresponding methods.
+  oneof message_request {
+    // Find a proto file by the file name.
+    string file_by_filename = 3;
+
+    // Find the proto file that declares the given fully-qualified symbol name.
+    // This field should be a fully-qualified symbol name
+    // (e.g. <package>.<service>[.<method>] or <package>.<type>).
+    string file_containing_symbol = 4;
+
+    // Find the proto file which defines an extension extending the given
+    // message type with the given field number.
+    ExtensionRequest file_containing_extension = 5;
+
+    // Finds the tag numbers used by all known extensions of the given message
+    // type, and appends them to ExtensionNumberResponse in an undefined order.
+    // Its corresponding method is best-effort: it's not guaranteed that the
+    // reflection service will implement this method, and it's not guaranteed
+    // that this method will provide all extensions. Returns
+    // StatusCode::UNIMPLEMENTED if it's not implemented.
+    // This field should be a fully-qualified type name. The format is
+    // <package>.<type>
+    string all_extension_numbers_of_type = 6;
+
+    // List the full names of registered services. The content will not be
+    // checked.
+    string list_services = 7;
+  }
+}
+
+// The type name and extension number sent by the client when requesting
+// file_containing_extension.
+message ExtensionRequest {
+  // Fully-qualified type name. The format should be <package>.<type>
+  string containing_type = 1;
+  int32 extension_number = 2;
+}
+
+// The message sent by the server to answer ServerReflectionInfo method.
+message ServerReflectionResponse {
+  string valid_host = 1;
+  ServerReflectionRequest original_request = 2;
+  // The server sets one of the following fields according to the message_request
+  // in the request.
+  oneof message_response {
+    // This message is used to answer file_by_filename, file_containing_symbol,
+    // file_containing_extension requests with transitive dependencies.
+    // As the repeated label is not allowed in oneof fields, we use a
+    // FileDescriptorResponse message to encapsulate the repeated fields.
+    // The reflection service is allowed to avoid sending FileDescriptorProtos
+    // that were previously sent in response to earlier requests in the stream.
+    FileDescriptorResponse file_descriptor_response = 4;
+
+    // This message is used to answer all_extension_numbers_of_type requests.
+    ExtensionNumberResponse all_extension_numbers_response = 5;
+
+    // This message is used to answer list_services requests.
+    ListServiceResponse list_services_response = 6;
+
+    // This message is used when an error occurs.
+    ErrorResponse error_response = 7;
+  }
+}
+
+// Serialized FileDescriptorProto messages sent by the server answering
+// a file_by_filename, file_containing_symbol, or file_containing_extension
+// request.
+message FileDescriptorResponse {
+  // Serialized FileDescriptorProto messages. We avoid taking a dependency on
+  // descriptor.proto, which uses proto2 only features, by making them opaque
+  // bytes instead.
+  repeated bytes file_descriptor_proto = 1;
+}
+
+// A list of extension numbers sent by the server answering
+// all_extension_numbers_of_type request.
+message ExtensionNumberResponse {
+  // Full name of the base type, including the package name. The format
+  // is <package>.<type>
+  string base_type_name = 1;
+  repeated int32 extension_number = 2;
+}
+
+// A list of ServiceResponse sent by the server answering list_services request.
+message ListServiceResponse {
+  // The information of each service may be expanded in the future, so we use
+  // ServiceResponse message to encapsulate it.
+  repeated ServiceResponse service = 1;
+}
+
+// The information of a single service used by ListServiceResponse to answer
+// list_services request.
+message ServiceResponse {
+  // Full name of a registered service, including its package name. The format
+  // is <package>.<service>
+  string name = 1;
+}
+
+// The error code and error message sent by the server when an error occurs.
+message ErrorResponse {
+  // This field uses the error codes defined in grpc::StatusCode.
+ int32 error_code = 1; + string error_message = 2; +} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go b/vendor/github.com/jhump/protoreflect/grpcreflect/internal/grpc_reflection_v1/reflection_grpc.pb.go similarity index 76% rename from vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go rename to vendor/github.com/jhump/protoreflect/grpcreflect/internal/grpc_reflection_v1/reflection_grpc.pb.go index 62b56a8b..db204d61 100644 --- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go +++ b/vendor/github.com/jhump/protoreflect/grpcreflect/internal/grpc_reflection_v1/reflection_grpc.pb.go @@ -1,29 +1,8 @@ -// Copyright 2016 The gRPC Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Service exported by server reflection. A more complete description of how -// server reflection works can be found at -// https://github.com/grpc/grpc/blob/master/doc/server-reflection.md -// -// The canonical version of this proto can be found at -// https://github.com/grpc/grpc-proto/blob/master/grpc/reflection/v1/reflection.proto - // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.3.0 +// - protoc-gen-go-grpc v1.2.0 // - protoc v4.22.0 -// source: grpc/reflection/v1/reflection.proto +// source: grpcreflect/internal/grpc_reflection_v1/reflection.proto package grpc_reflection_v1 @@ -39,10 +18,6 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 -const ( - ServerReflection_ServerReflectionInfo_FullMethodName = "/grpc.reflection.v1.ServerReflection/ServerReflectionInfo" -) - // ServerReflectionClient is the client API for ServerReflection service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -61,7 +36,7 @@ func NewServerReflectionClient(cc grpc.ClientConnInterface) ServerReflectionClie } func (c *serverReflectionClient) ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) { - stream, err := c.cc.NewStream(ctx, &ServerReflection_ServiceDesc.Streams[0], ServerReflection_ServerReflectionInfo_FullMethodName, opts...) + stream, err := c.cc.NewStream(ctx, &ServerReflection_ServiceDesc.Streams[0], "/grpc.reflection.v1.ServerReflection/ServerReflectionInfo", opts...) if err != nil { return nil, err } @@ -92,21 +67,23 @@ func (x *serverReflectionServerReflectionInfoClient) Recv() (*ServerReflectionRe } // ServerReflectionServer is the server API for ServerReflection service. -// All implementations should embed UnimplementedServerReflectionServer +// All implementations must embed UnimplementedServerReflectionServer // for forward compatibility type ServerReflectionServer interface { // The reflection service is structured as a bidirectional stream, ensuring // all related requests go to a single server. 
ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error + mustEmbedUnimplementedServerReflectionServer() } -// UnimplementedServerReflectionServer should be embedded to have forward compatible implementations. +// UnimplementedServerReflectionServer must be embedded to have forward compatible implementations. type UnimplementedServerReflectionServer struct { } func (UnimplementedServerReflectionServer) ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error { return status.Errorf(codes.Unimplemented, "method ServerReflectionInfo not implemented") } +func (UnimplementedServerReflectionServer) mustEmbedUnimplementedServerReflectionServer() {} // UnsafeServerReflectionServer may be embedded to opt out of forward compatibility for this service. // Use of this interface is not recommended, as added methods to ServerReflectionServer will @@ -160,5 +137,5 @@ var ServerReflection_ServiceDesc = grpc.ServiceDesc{ ClientStreams: true, }, }, - Metadata: "grpc/reflection/v1/reflection.proto", + Metadata: "grpcreflect/internal/grpc_reflection_v1/reflection.proto", } diff --git a/vendor/github.com/jhump/protoreflect/grpcreflect/internal/grpc_reflection_v1/svc_impl.go b/vendor/github.com/jhump/protoreflect/grpcreflect/internal/grpc_reflection_v1/svc_impl.go new file mode 100644 index 00000000..4e66d581 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/grpcreflect/internal/grpc_reflection_v1/svc_impl.go @@ -0,0 +1,240 @@ +package grpc_reflection_v1 + +import ( + "google.golang.org/grpc" + "google.golang.org/grpc/reflection" + "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" +) + +func Register(svr reflection.GRPCServer) { + reflection.Register(registrarInterceptor{svr}) +} + +type registrarInterceptor struct { + svr reflection.GRPCServer +} + +func (r registrarInterceptor) RegisterService(desc *grpc.ServiceDesc, impl interface{}) { + r.svr.RegisterService(&ServerReflection_ServiceDesc, reflectImpl{svr: impl.(grpc_reflection_v1alpha.ServerReflectionServer)}) +} + +func (r registrarInterceptor) GetServiceInfo() map[string]grpc.ServiceInfo { + // HACK: We're using generated code for a proto file where we hacked the proto package + // to avoid init-time issues (for a future where the grpc module also provides the same + // protos/types). But we've rewritten the service names in the generated code, so that + // we expose the expected service (e.g. w/out the hacked package name). That will lead + // to issues trying to load/resolve descriptors for the hacked service. So we remove + // it from the service info. 
+ info := r.svr.GetServiceInfo() + delete(info, "grpc.reflection.v1.ServerReflection") + return info +} + +type reflectImpl struct { + svr grpc_reflection_v1alpha.ServerReflectionServer + UnimplementedServerReflectionServer +} + +func (r reflectImpl) ServerReflectionInfo(stream ServerReflection_ServerReflectionInfoServer) error { + return r.svr.ServerReflectionInfo(streamImpl{stream}) +} + +type streamImpl struct { + ServerReflection_ServerReflectionInfoServer +} + +func (s streamImpl) Send(response *grpc_reflection_v1alpha.ServerReflectionResponse) error { + return s.ServerReflection_ServerReflectionInfoServer.Send(ToV1Response(response)) +} + +func (s streamImpl) Recv() (*grpc_reflection_v1alpha.ServerReflectionRequest, error) { + resp, err := s.ServerReflection_ServerReflectionInfoServer.Recv() + if err != nil { + return nil, err + } + return ToV1AlphaRequest(resp), nil +} + +func ToV1Request(v1alpha *grpc_reflection_v1alpha.ServerReflectionRequest) *ServerReflectionRequest { + var v1 ServerReflectionRequest + v1.Host = v1alpha.Host + switch mr := v1alpha.MessageRequest.(type) { + case *grpc_reflection_v1alpha.ServerReflectionRequest_FileByFilename: + v1.MessageRequest = &ServerReflectionRequest_FileByFilename{ + FileByFilename: mr.FileByFilename, + } + case *grpc_reflection_v1alpha.ServerReflectionRequest_FileContainingSymbol: + v1.MessageRequest = &ServerReflectionRequest_FileContainingSymbol{ + FileContainingSymbol: mr.FileContainingSymbol, + } + case *grpc_reflection_v1alpha.ServerReflectionRequest_FileContainingExtension: + if mr.FileContainingExtension != nil { + v1.MessageRequest = &ServerReflectionRequest_FileContainingExtension{ + FileContainingExtension: &ExtensionRequest{ + ContainingType: mr.FileContainingExtension.GetContainingType(), + ExtensionNumber: mr.FileContainingExtension.GetExtensionNumber(), + }, + } + } + case *grpc_reflection_v1alpha.ServerReflectionRequest_AllExtensionNumbersOfType: + v1.MessageRequest = &ServerReflectionRequest_AllExtensionNumbersOfType{ + AllExtensionNumbersOfType: mr.AllExtensionNumbersOfType, + } + case *grpc_reflection_v1alpha.ServerReflectionRequest_ListServices: + v1.MessageRequest = &ServerReflectionRequest_ListServices{ + ListServices: mr.ListServices, + } + default: + // no value set + } + return &v1 +} + +func ToV1AlphaRequest(v1 *ServerReflectionRequest) *grpc_reflection_v1alpha.ServerReflectionRequest { + var v1alpha grpc_reflection_v1alpha.ServerReflectionRequest + v1alpha.Host = v1.Host + switch mr := v1.MessageRequest.(type) { + case *ServerReflectionRequest_FileByFilename: + if mr != nil { + v1alpha.MessageRequest = &grpc_reflection_v1alpha.ServerReflectionRequest_FileByFilename{ + FileByFilename: mr.FileByFilename, + } + } + case *ServerReflectionRequest_FileContainingSymbol: + if mr != nil { + v1alpha.MessageRequest = &grpc_reflection_v1alpha.ServerReflectionRequest_FileContainingSymbol{ + FileContainingSymbol: mr.FileContainingSymbol, + } + } + case *ServerReflectionRequest_FileContainingExtension: + if mr != nil { + v1alpha.MessageRequest = &grpc_reflection_v1alpha.ServerReflectionRequest_FileContainingExtension{ + FileContainingExtension: &grpc_reflection_v1alpha.ExtensionRequest{ + ContainingType: mr.FileContainingExtension.GetContainingType(), + ExtensionNumber: mr.FileContainingExtension.GetExtensionNumber(), + }, + } + } + case *ServerReflectionRequest_AllExtensionNumbersOfType: + if mr != nil { + v1alpha.MessageRequest = &grpc_reflection_v1alpha.ServerReflectionRequest_AllExtensionNumbersOfType{ + 
AllExtensionNumbersOfType: mr.AllExtensionNumbersOfType, + } + } + case *ServerReflectionRequest_ListServices: + if mr != nil { + v1alpha.MessageRequest = &grpc_reflection_v1alpha.ServerReflectionRequest_ListServices{ + ListServices: mr.ListServices, + } + } + default: + // no value set + } + return &v1alpha +} + +func ToV1Response(v1alpha *grpc_reflection_v1alpha.ServerReflectionResponse) *ServerReflectionResponse { + var v1 ServerReflectionResponse + v1.ValidHost = v1alpha.ValidHost + if v1alpha.OriginalRequest != nil { + v1.OriginalRequest = ToV1Request(v1alpha.OriginalRequest) + } + switch mr := v1alpha.MessageResponse.(type) { + case *grpc_reflection_v1alpha.ServerReflectionResponse_FileDescriptorResponse: + if mr != nil { + v1.MessageResponse = &ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &FileDescriptorResponse{ + FileDescriptorProto: mr.FileDescriptorResponse.GetFileDescriptorProto(), + }, + } + } + case *grpc_reflection_v1alpha.ServerReflectionResponse_AllExtensionNumbersResponse: + if mr != nil { + v1.MessageResponse = &ServerReflectionResponse_AllExtensionNumbersResponse{ + AllExtensionNumbersResponse: &ExtensionNumberResponse{ + BaseTypeName: mr.AllExtensionNumbersResponse.GetBaseTypeName(), + ExtensionNumber: mr.AllExtensionNumbersResponse.GetExtensionNumber(), + }, + } + } + case *grpc_reflection_v1alpha.ServerReflectionResponse_ListServicesResponse: + if mr != nil { + svcs := make([]*ServiceResponse, len(mr.ListServicesResponse.GetService())) + for i, svc := range mr.ListServicesResponse.GetService() { + svcs[i] = &ServiceResponse{ + Name: svc.GetName(), + } + } + v1.MessageResponse = &ServerReflectionResponse_ListServicesResponse{ + ListServicesResponse: &ListServiceResponse{ + Service: svcs, + }, + } + } + case *grpc_reflection_v1alpha.ServerReflectionResponse_ErrorResponse: + if mr != nil { + v1.MessageResponse = &ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &ErrorResponse{ + ErrorCode: mr.ErrorResponse.GetErrorCode(), + ErrorMessage: mr.ErrorResponse.GetErrorMessage(), + }, + } + } + default: + // no value set + } + return &v1 +} + +func ToV1AlphaResponse(v1 *ServerReflectionResponse) *grpc_reflection_v1alpha.ServerReflectionResponse { + var v1alpha grpc_reflection_v1alpha.ServerReflectionResponse + v1alpha.ValidHost = v1.ValidHost + if v1.OriginalRequest != nil { + v1alpha.OriginalRequest = ToV1AlphaRequest(v1.OriginalRequest) + } + switch mr := v1.MessageResponse.(type) { + case *ServerReflectionResponse_FileDescriptorResponse: + if mr != nil { + v1alpha.MessageResponse = &grpc_reflection_v1alpha.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &grpc_reflection_v1alpha.FileDescriptorResponse{ + FileDescriptorProto: mr.FileDescriptorResponse.GetFileDescriptorProto(), + }, + } + } + case *ServerReflectionResponse_AllExtensionNumbersResponse: + if mr != nil { + v1alpha.MessageResponse = &grpc_reflection_v1alpha.ServerReflectionResponse_AllExtensionNumbersResponse{ + AllExtensionNumbersResponse: &grpc_reflection_v1alpha.ExtensionNumberResponse{ + BaseTypeName: mr.AllExtensionNumbersResponse.GetBaseTypeName(), + ExtensionNumber: mr.AllExtensionNumbersResponse.GetExtensionNumber(), + }, + } + } + case *ServerReflectionResponse_ListServicesResponse: + if mr != nil { + svcs := make([]*grpc_reflection_v1alpha.ServiceResponse, len(mr.ListServicesResponse.GetService())) + for i, svc := range mr.ListServicesResponse.GetService() { + svcs[i] = &grpc_reflection_v1alpha.ServiceResponse{ + Name: svc.GetName(), + } + 
} + v1alpha.MessageResponse = &grpc_reflection_v1alpha.ServerReflectionResponse_ListServicesResponse{ + ListServicesResponse: &grpc_reflection_v1alpha.ListServiceResponse{ + Service: svcs, + }, + } + } + case *ServerReflectionResponse_ErrorResponse: + if mr != nil { + v1alpha.MessageResponse = &grpc_reflection_v1alpha.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &grpc_reflection_v1alpha.ErrorResponse{ + ErrorCode: mr.ErrorResponse.GetErrorCode(), + ErrorMessage: mr.ErrorResponse.GetErrorMessage(), + }, + } + } + default: + // no value set + } + return &v1alpha +} diff --git a/vendor/github.com/prometheus/prometheus/util/strutil/strconv.go b/vendor/github.com/prometheus/prometheus/util/strutil/strconv.go index 8cdd7d48..5d62bac9 100644 --- a/vendor/github.com/prometheus/prometheus/util/strutil/strconv.go +++ b/vendor/github.com/prometheus/prometheus/util/strutil/strconv.go @@ -16,7 +16,6 @@ package strutil import ( "fmt" "net/url" - "strings" "github.com/grafana/regexp" ) @@ -39,26 +38,6 @@ func GraphLinkForExpression(expr string) string { // SanitizeLabelName replaces anything that doesn't match // client_label.LabelNameRE with an underscore. -// Note: this does not handle all Prometheus label name restrictions (such as -// not starting with a digit 0-9), and hence should only be used if the label -// name is prefixed with a known valid string. func SanitizeLabelName(name string) string { return invalidLabelCharRE.ReplaceAllString(name, "_") } - -// SanitizeFullLabelName replaces any invalid character with an underscore, and -// if given an empty string, returns a string containing a single underscore. -func SanitizeFullLabelName(name string) string { - if len(name) == 0 { - return "_" - } - var validSb strings.Builder - for i, b := range name { - if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { - validSb.WriteRune('_') - } else { - validSb.WriteRune(b) - } - } - return validSb.String() -} diff --git a/vendor/github.com/tidwall/tinylru/LICENSE b/vendor/github.com/tidwall/tinylru/LICENSE deleted file mode 100644 index 3c0c6ad8..00000000 --- a/vendor/github.com/tidwall/tinylru/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2020 Josh Baker - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
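The vendored jhump/protoreflect packages added above (grpcreflect for the reflection client, dynamic and grpcdynamic for descriptor-driven messages and stubs) are what let a gRPC dial test discover and invoke services on a target server without compiled stubs. The following is a minimal sketch of that flow, not part of this patch: the target address, the service name example.Greeter, and the method name SayHello are placeholder assumptions, and error handling is kept to a minimum.

// dialtesting_reflect_probe.go - illustrative sketch only (assumed names marked below).
// Connect, discover services via gRPC server reflection, then invoke a method
// using a dynamic message built from the resolved descriptor.
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/jhump/protoreflect/dynamic"
	"github.com/jhump/protoreflect/dynamic/grpcdynamic"
	"github.com/jhump/protoreflect/grpcreflect"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// "localhost:9090" is a placeholder target for the dial test.
	conn, err := grpc.DialContext(ctx, "localhost:9090",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// NewClientAuto negotiates between the v1 and v1alpha reflection services.
	rc := grpcreflect.NewClientAuto(ctx, conn)
	defer rc.Reset()

	// List the services the target exposes over reflection.
	svcs, err := rc.ListServices()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("services:", svcs)

	// Resolve a hypothetical service and call one of its unary methods with an
	// empty request built from the resolved descriptor.
	sd, err := rc.ResolveService("example.Greeter")
	if err != nil {
		log.Fatal(err)
	}
	md := sd.FindMethodByName("SayHello")
	if md == nil {
		log.Fatal("method SayHello not found")
	}
	req := dynamic.NewMessage(md.GetInputType())

	stub := grpcdynamic.NewStub(conn)
	resp, err := stub.InvokeRpc(ctx, md, req)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("response:", resp)
}

In an actual dial test the address, service, and method would come from the task configuration, and the response and latency would be checked against the configured expectations.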
diff --git a/vendor/github.com/tidwall/tinylru/README.md b/vendor/github.com/tidwall/tinylru/README.md deleted file mode 100644 index 0c67dbd2..00000000 --- a/vendor/github.com/tidwall/tinylru/README.md +++ /dev/null @@ -1,54 +0,0 @@ -# `tinylru` - -[![GoDoc](https://godoc.org/github.com/tidwall/tinylru?status.svg)](https://godoc.org/github.com/tidwall/tinylru) - -A fast little LRU cache. - -## Getting Started - -### Installing - -To start using `tinylru`, install Go and run go get: - -``` -$ go get -u github.com/tidwall/tinylru -``` - -This will retrieve the library. - -### Usage - -```go -// Create an LRU cache -var cache tinylru.LRU - -// Set the cache size. This is the maximum number of items that the cache can -// hold before evicting old items. The default size is 256. -cache.Resize(1024) - -// Set a key. Returns the previous value and ok if a previous value exists. -prev, ok := cache.Set("hello", "world") - -// Get a key. Returns the value and ok if the value exists. -value, ok := cache.Get("hello") - -// Delete a key. Returns the deleted value and ok if a previous value exists. -prev, ok := tr.Delete("hello") -``` - -A `Set` function may evict old items when adding a new item while LRU is at -capacity. If you want to know what was evicted then use the `SetEvicted` -function. - -```go -// Set a key and return the evicted item, if any. -prev, ok, evictedKey, evictedValue, evicted := cache.SetEvicted("hello", "jello") -``` - -### Contact - -Josh Baker [@tidwall](https://twitter.com/tidwall) - -### License - -`tinylru` source code is available under the MIT License. diff --git a/vendor/github.com/tidwall/tinylru/lru.go b/vendor/github.com/tidwall/tinylru/lru.go deleted file mode 100644 index 682230ab..00000000 --- a/vendor/github.com/tidwall/tinylru/lru.go +++ /dev/null @@ -1,208 +0,0 @@ -// Copyright 2020 Joshua J Baker. All rights reserved. -// Use of this source code is governed by an MIT-style -// license that can be found in the LICENSE file. - -package tinylru - -import "sync" - -// DefaultSize is the default maximum size of an LRU cache before older items -// get automatically evicted. -const DefaultSize = 256 - -type lruItem struct { - key interface{} // user-defined key - value interface{} // user-defined value - prev *lruItem // prev item in list. More recently used - next *lruItem // next item in list. Less recently used -} - -// LRU implements an LRU cache -type LRU struct { - mu sync.RWMutex // protect all things - size int // max number of items. - items map[interface{}]*lruItem // active items - head *lruItem // head of list - tail *lruItem // tail of list -} - -//go:noinline -func (lru *LRU) init() { - lru.items = make(map[interface{}]*lruItem) - lru.head = new(lruItem) - lru.tail = new(lruItem) - lru.head.next = lru.tail - lru.tail.prev = lru.head - if lru.size == 0 { - lru.size = DefaultSize - } -} - -func (lru *LRU) evict() *lruItem { - item := lru.tail.prev - lru.pop(item) - delete(lru.items, item.key) - return item -} - -func (lru *LRU) pop(item *lruItem) { - item.prev.next = item.next - item.next.prev = item.prev -} - -func (lru *LRU) push(item *lruItem) { - lru.head.next.prev = item - item.next = lru.head.next - item.prev = lru.head - lru.head.next = item -} - -// Resize sets the maximum size of an LRU cache. If this value is less than -// the number of items currently in the cache, then items will be evicted. -// Returns evicted items. -// This operation will panic if the size is less than one. 
-func (lru *LRU) Resize(size int) (evictedKeys []interface{}, - evictedValues []interface{}) { - if size <= 0 { - panic("invalid size") - } - - lru.mu.Lock() - defer lru.mu.Unlock() - for size < len(lru.items) { - item := lru.evict() - evictedKeys = append(evictedKeys, item.key) - evictedValues = append(evictedValues, item.value) - } - lru.size = size - return evictedKeys, evictedValues -} - -// Len returns the length of the lru cache -func (lru *LRU) Len() int { - lru.mu.RLock() - defer lru.mu.RUnlock() - return len(lru.items) -} - -// SetEvicted sets or replaces a value for a key. If this operation causes an -// eviction then the evicted item is returned. -func (lru *LRU) SetEvicted(key interface{}, value interface{}) ( - prev interface{}, replaced bool, evictedKey interface{}, - evictedValue interface{}, evicted bool) { - lru.mu.Lock() - defer lru.mu.Unlock() - if lru.items == nil { - lru.init() - } - item := lru.items[key] - if item == nil { - if len(lru.items) == lru.size { - item = lru.evict() - evictedKey, evictedValue, evicted = item.key, item.value, true - } else { - item = new(lruItem) - } - item.key = key - item.value = value - lru.push(item) - lru.items[key] = item - } else { - prev, replaced = item.value, true - item.value = value - if lru.head.next != item { - lru.pop(item) - lru.push(item) - } - } - return prev, replaced, evictedKey, evictedValue, evicted -} - -// Set or replace a value for a key. -func (lru *LRU) Set(key interface{}, value interface{}) (prev interface{}, - replaced bool) { - prev, replaced, _, _, _ = lru.SetEvicted(key, value) - return prev, replaced -} - -// Get a value for key -func (lru *LRU) Get(key interface{}) (value interface{}, ok bool) { - lru.mu.Lock() - defer lru.mu.Unlock() - item := lru.items[key] - if item == nil { - return nil, false - } - if lru.head.next != item { - lru.pop(item) - lru.push(item) - } - return item.value, true -} - -// Contains returns true if the key exists. -func (lru *LRU) Contains(key interface{}) bool { - lru.mu.RLock() - defer lru.mu.RUnlock() - _, ok := lru.items[key] - return ok -} - -// Peek returns the value for key value without updating -// the recently used status. -func (lru *LRU) Peek(key interface{}) (value interface{}, ok bool) { - lru.mu.RLock() - defer lru.mu.RUnlock() - - if item := lru.items[key]; item != nil { - return item.value, true - } - return nil, false -} - -// Delete a value for a key -func (lru *LRU) Delete(key interface{}) (prev interface{}, deleted bool) { - lru.mu.Lock() - defer lru.mu.Unlock() - item := lru.items[key] - if item == nil { - return nil, false - } - delete(lru.items, key) - lru.pop(item) - return item.value, true -} - -// Range iterates over all key/values in the order of most recently to -// least recently used items. -// It's not safe to call other LRU operations while ranging. -func (lru *LRU) Range(iter func(key interface{}, value interface{}) bool) { - lru.mu.RLock() - defer lru.mu.RUnlock() - if head := lru.head; head != nil { - item := head.next - for item != lru.tail { - if !iter(item.key, item.value) { - return - } - item = item.next - } - } -} - -// Reverse iterates over all key/values in the order of least recently to -// most recently used items. -// It's not safe to call other LRU operations while ranging. 
-func (lru *LRU) Reverse(iter func(key interface{}, value interface{}) bool) { - lru.mu.RLock() - defer lru.mu.RUnlock() - if tail := lru.tail; tail != nil { - item := tail.prev - for item != lru.head { - if !iter(item.key, item.value) { - return - } - item = item.prev - } - } -} diff --git a/vendor/github.com/tidwall/wal/.gitignore b/vendor/github.com/tidwall/wal/.gitignore deleted file mode 100644 index b724714b..00000000 --- a/vendor/github.com/tidwall/wal/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -testlog -.idea -vendor \ No newline at end of file diff --git a/vendor/github.com/tidwall/wal/LICENSE b/vendor/github.com/tidwall/wal/LICENSE deleted file mode 100644 index 22e6fe0a..00000000 --- a/vendor/github.com/tidwall/wal/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2019 Joshua J Baker - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/tidwall/wal/README.md b/vendor/github.com/tidwall/wal/README.md deleted file mode 100644 index 68cc0cd7..00000000 --- a/vendor/github.com/tidwall/wal/README.md +++ /dev/null @@ -1,79 +0,0 @@ -# `wal` -[![GoDoc](https://godoc.org/github.com/tidwall/wal?status.svg)](https://godoc.org/github.com/tidwall/wal) - -A simple and fast write ahead log for Go. - -## Features - -- High durability -- Fast operations -- Monotonic indexes -- Batch writes -- Log truncation from front or back. - -## Getting Started - -### Installing - -To start using `wal`, install Go and run `go get`: - -```sh -$ go get -u github.com/tidwall/wal -``` - -This will retrieve the library. - -### Example - -```go -// open a new log file -log, err := Open("mylog", nil) - -// write some entries -err = log.Write(1, []byte("first entry")) -err = log.Write(2, []byte("second entry")) -err = log.Write(3, []byte("third entry")) - -// read an entry -data, err := log.Read(1) -println(string(data)) // output: first entry - -// close the log -err = log.Close() -``` - -Batch writes: - -```go - -// write three entries as a batch -batch := new(Batch) -batch.Write(1, []byte("first entry")) -batch.Write(2, []byte("second entry")) -batch.Write(3, []byte("third entry")) - -err = log.WriteBatch(batch) -``` - -Truncating: - -```go -// write some entries -err = log.Write(1, []byte("first entry")) -... -err = log.Write(1000, []byte("thousandth entry")) - -// truncate the log from index starting 350 and ending with 950. 
-err = l.TruncateFront(350) -err = l.TruncateBack(950) -``` - - - -## Contact - -Josh Baker [@tidwall](http://twitter.com/tidwall) - -## License - -`wal` source code is available under the MIT [License](/LICENSE). diff --git a/vendor/github.com/tidwall/wal/wal.go b/vendor/github.com/tidwall/wal/wal.go deleted file mode 100644 index 42b8cd29..00000000 --- a/vendor/github.com/tidwall/wal/wal.go +++ /dev/null @@ -1,917 +0,0 @@ -package wal - -import ( - "bytes" - "encoding/base64" - "encoding/binary" - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strconv" - "strings" - "sync" - "unicode/utf8" - "unsafe" - - "github.com/tidwall/gjson" - "github.com/tidwall/tinylru" -) - -var ( - // ErrCorrupt is returns when the log is corrupt. - ErrCorrupt = errors.New("log corrupt") - - // ErrClosed is returned when an operation cannot be completed because - // the log is closed. - ErrClosed = errors.New("log closed") - - // ErrNotFound is returned when an entry is not found. - ErrNotFound = errors.New("not found") - - // ErrOutOfOrder is returned from Write() when the index is not equal to - // LastIndex()+1. It's required that log monotonically grows by one and has - // no gaps. Thus, the series 10,11,12,13,14 is valid, but 10,11,13,14 is - // not because there's a gap between 11 and 13. Also, 10,12,11,13 is not - // valid because 12 and 11 are out of order. - ErrOutOfOrder = errors.New("out of order") - - // ErrOutOfRange is returned from TruncateFront() and TruncateBack() when - // the index not in the range of the log's first and last index. Or, this - // may be returned when the caller is attempting to remove *all* entries; - // The log requires that at least one entry exists following a truncate. - ErrOutOfRange = errors.New("out of range") -) - -// LogFormat is the format of the log files. -type LogFormat byte - -const ( - // Binary format writes entries in binary. This is the default and, unless - // a good reason otherwise, should be used in production. - Binary LogFormat = 0 - // JSON format writes entries as JSON lines. This causes larger, human - // readable files. - JSON LogFormat = 1 -) - -// Options for Log -type Options struct { - // NoSync disables fsync after writes. This is less durable and puts the - // log at risk of data loss when there's a server crash. - NoSync bool - // SegmentSize of each segment. This is just a target value, actual size - // may differ. Default is 20 MB. - SegmentSize int - // LogFormat is the format of the log files. Default is Binary. - LogFormat LogFormat - // SegmentCacheSize is the maximum number of segments that will be held in - // memory for caching. Increasing this value may enhance performance for - // concurrent read operations. Default is 1 - SegmentCacheSize int - // NoCopy allows for the Read() operation to return the raw underlying data - // slice. This is an optimization to help minimize allocations. When this - // option is set, do not modify the returned data because it may affect - // other Read calls. Default false - NoCopy bool - // Perms represents the datafiles modes and permission bits - DirPerms os.FileMode - FilePerms os.FileMode -} - -// DefaultOptions for Open(). -var DefaultOptions = &Options{ - NoSync: false, // Fsync after every write - SegmentSize: 20971520, // 20 MB log segment files. - LogFormat: Binary, // Binary format is small and fast. - SegmentCacheSize: 2, // Number of cached in-memory segments - NoCopy: false, // Make a new copy of data for every Read call. 
- DirPerms: 0750, // Permissions for the created directories - FilePerms: 0640, // Permissions for the created data files -} - -// Log represents a write ahead log -type Log struct { - mu sync.RWMutex - path string // absolute path to log directory - opts Options // log options - closed bool // log is closed - corrupt bool // log may be corrupt - segments []*segment // all known log segments - firstIndex uint64 // index of the first entry in log - lastIndex uint64 // index of the last entry in log - sfile *os.File // tail segment file handle - wbatch Batch // reusable write batch - scache tinylru.LRU // segment entries cache -} - -// segment represents a single segment file. -type segment struct { - path string // path of segment file - index uint64 // first index of segment - ebuf []byte // cached entries buffer - epos []bpos // cached entries positions in buffer -} - -type bpos struct { - pos int // byte position - end int // one byte past pos -} - -// Open a new write ahead log -func Open(path string, opts *Options) (*Log, error) { - if opts == nil { - opts = DefaultOptions - } - if opts.SegmentCacheSize <= 0 { - opts.SegmentCacheSize = DefaultOptions.SegmentCacheSize - } - if opts.SegmentSize <= 0 { - opts.SegmentSize = DefaultOptions.SegmentSize - } - if opts.DirPerms == 0 { - opts.DirPerms = DefaultOptions.DirPerms - } - if opts.FilePerms == 0 { - opts.FilePerms = DefaultOptions.FilePerms - } - - var err error - path, err = abs(path) - if err != nil { - return nil, err - } - l := &Log{path: path, opts: *opts} - l.scache.Resize(l.opts.SegmentCacheSize) - if err := os.MkdirAll(path, l.opts.DirPerms); err != nil { - return nil, err - } - if err := l.load(); err != nil { - return nil, err - } - return l, nil -} - -func abs(path string) (string, error) { - if path == ":memory:" { - return "", errors.New("in-memory log not supported") - } - return filepath.Abs(path) -} - -func (l *Log) pushCache(segIdx int) { - _, _, _, v, evicted := - l.scache.SetEvicted(segIdx, l.segments[segIdx]) - if evicted { - s := v.(*segment) - s.ebuf = nil - s.epos = nil - } -} - -// load all the segments. This operation also cleans up any START/END segments. -func (l *Log) load() error { - fis, err := ioutil.ReadDir(l.path) - if err != nil { - return err - } - startIdx := -1 - endIdx := -1 - for _, fi := range fis { - name := fi.Name() - if fi.IsDir() || len(name) < 20 { - continue - } - index, err := strconv.ParseUint(name[:20], 10, 64) - if err != nil || index == 0 { - continue - } - isStart := len(name) == 26 && strings.HasSuffix(name, ".START") - isEnd := len(name) == 24 && strings.HasSuffix(name, ".END") - if len(name) == 20 || isStart || isEnd { - if isStart { - startIdx = len(l.segments) - } else if isEnd && endIdx == -1 { - endIdx = len(l.segments) - } - l.segments = append(l.segments, &segment{ - index: index, - path: filepath.Join(l.path, name), - }) - } - } - if len(l.segments) == 0 { - // Create a new log - l.segments = append(l.segments, &segment{ - index: 1, - path: filepath.Join(l.path, segmentName(1)), - }) - l.firstIndex = 1 - l.lastIndex = 0 - l.sfile, err = os.OpenFile(l.segments[0].path, os.O_CREATE|os.O_RDWR|os.O_TRUNC, l.opts.FilePerms) - return err - } - // Open existing log. Clean up log if START of END segments exists. 
- if startIdx != -1 { - if endIdx != -1 { - // There should not be a START and END at the same time - return ErrCorrupt - } - // Delete all files leading up to START - for i := 0; i < startIdx; i++ { - if err := os.Remove(l.segments[i].path); err != nil { - return err - } - } - l.segments = append([]*segment{}, l.segments[startIdx:]...) - // Rename the START segment - orgPath := l.segments[0].path - finalPath := orgPath[:len(orgPath)-len(".START")] - err := os.Rename(orgPath, finalPath) - if err != nil { - return err - } - l.segments[0].path = finalPath - } - if endIdx != -1 { - // Delete all files following END - for i := len(l.segments) - 1; i > endIdx; i-- { - if err := os.Remove(l.segments[i].path); err != nil { - return err - } - } - l.segments = append([]*segment{}, l.segments[:endIdx+1]...) - if len(l.segments) > 1 && l.segments[len(l.segments)-2].index == - l.segments[len(l.segments)-1].index { - // remove the segment prior to the END segment because it shares - // the same starting index. - l.segments[len(l.segments)-2] = l.segments[len(l.segments)-1] - l.segments = l.segments[:len(l.segments)-1] - } - // Rename the END segment - orgPath := l.segments[len(l.segments)-1].path - finalPath := orgPath[:len(orgPath)-len(".END")] - err := os.Rename(orgPath, finalPath) - if err != nil { - return err - } - l.segments[len(l.segments)-1].path = finalPath - } - l.firstIndex = l.segments[0].index - // Open the last segment for appending - lseg := l.segments[len(l.segments)-1] - l.sfile, err = os.OpenFile(lseg.path, os.O_WRONLY, l.opts.FilePerms) - if err != nil { - return err - } - if _, err := l.sfile.Seek(0, 2); err != nil { - return err - } - // Load the last segment entries - if err := l.loadSegmentEntries(lseg); err != nil { - return err - } - l.lastIndex = lseg.index + uint64(len(lseg.epos)) - 1 - return nil -} - -// segmentName returns a 20-byte textual representation of an index -// for lexical ordering. This is used for the file names of log segments. -func segmentName(index uint64) string { - return fmt.Sprintf("%020d", index) -} - -// Close the log. -func (l *Log) Close() error { - l.mu.Lock() - defer l.mu.Unlock() - if l.closed { - if l.corrupt { - return ErrCorrupt - } - return ErrClosed - } - if err := l.sfile.Sync(); err != nil { - return err - } - if err := l.sfile.Close(); err != nil { - return err - } - l.closed = true - if l.corrupt { - return ErrCorrupt - } - return nil -} - -// Write an entry to the log. -func (l *Log) Write(index uint64, data []byte) error { - l.mu.Lock() - defer l.mu.Unlock() - if l.corrupt { - return ErrCorrupt - } else if l.closed { - return ErrClosed - } - l.wbatch.Clear() - l.wbatch.Write(index, data) - return l.writeBatch(&l.wbatch) -} - -func (l *Log) appendEntry(dst []byte, index uint64, data []byte) (out []byte, - epos bpos) { - if l.opts.LogFormat == JSON { - return appendJSONEntry(dst, index, data) - } - return appendBinaryEntry(dst, data) -} - -// Cycle the old segment for a new segment. 
-func (l *Log) cycle() error { - if err := l.sfile.Sync(); err != nil { - return err - } - if err := l.sfile.Close(); err != nil { - return err - } - // cache the previous segment - l.pushCache(len(l.segments) - 1) - s := &segment{ - index: l.lastIndex + 1, - path: filepath.Join(l.path, segmentName(l.lastIndex+1)), - } - var err error - l.sfile, err = os.OpenFile(s.path, os.O_CREATE|os.O_RDWR|os.O_TRUNC, l.opts.FilePerms) - if err != nil { - return err - } - l.segments = append(l.segments, s) - return nil -} - -func appendJSONEntry(dst []byte, index uint64, data []byte) (out []byte, - epos bpos) { - // {"index":number,"data":string} - mark := len(dst) - dst = append(dst, `{"index":"`...) - dst = strconv.AppendUint(dst, index, 10) - dst = append(dst, `","data":`...) - dst = appendJSONData(dst, data) - dst = append(dst, '}', '\n') - return dst, bpos{mark, len(dst)} -} - -func appendJSONData(dst []byte, s []byte) []byte { - if utf8.Valid(s) { - b, _ := json.Marshal(*(*string)(unsafe.Pointer(&s))) - dst = append(dst, '"', '+') - return append(dst, b[1:]...) - } - dst = append(dst, '"', '$') - dst = append(dst, base64.URLEncoding.EncodeToString(s)...) - return append(dst, '"') -} - -func appendBinaryEntry(dst []byte, data []byte) (out []byte, epos bpos) { - // data_size + data - pos := len(dst) - dst = appendUvarint(dst, uint64(len(data))) - dst = append(dst, data...) - return dst, bpos{pos, len(dst)} -} - -func appendUvarint(dst []byte, x uint64) []byte { - var buf [10]byte - n := binary.PutUvarint(buf[:], x) - dst = append(dst, buf[:n]...) - return dst -} - -// Batch of entries. Used to write multiple entries at once using WriteBatch(). -type Batch struct { - entries []batchEntry - datas []byte -} - -type batchEntry struct { - index uint64 - size int -} - -// Write an entry to the batch -func (b *Batch) Write(index uint64, data []byte) { - b.entries = append(b.entries, batchEntry{index, len(data)}) - b.datas = append(b.datas, data...) -} - -// Clear the batch for reuse. -func (b *Batch) Clear() { - b.entries = b.entries[:0] - b.datas = b.datas[:0] -} - -// WriteBatch writes the entries in the batch to the log in the order that they -// were added to the batch. The batch is cleared upon a successful return. -func (l *Log) WriteBatch(b *Batch) error { - l.mu.Lock() - defer l.mu.Unlock() - if l.corrupt { - return ErrCorrupt - } else if l.closed { - return ErrClosed - } - if len(b.entries) == 0 { - return nil - } - return l.writeBatch(b) -} - -func (l *Log) writeBatch(b *Batch) error { - // check that all indexes in batch are sane - for i := 0; i < len(b.entries); i++ { - if b.entries[i].index != l.lastIndex+uint64(i+1) { - return ErrOutOfOrder - } - } - // load the tail segment - s := l.segments[len(l.segments)-1] - if len(s.ebuf) > l.opts.SegmentSize { - // tail segment has reached capacity. Close it and create a new one. 
- if err := l.cycle(); err != nil { - return err - } - s = l.segments[len(l.segments)-1] - } - - mark := len(s.ebuf) - datas := b.datas - for i := 0; i < len(b.entries); i++ { - data := datas[:b.entries[i].size] - var epos bpos - s.ebuf, epos = l.appendEntry(s.ebuf, b.entries[i].index, data) - s.epos = append(s.epos, epos) - if len(s.ebuf) >= l.opts.SegmentSize { - // segment has reached capacity, cycle now - if _, err := l.sfile.Write(s.ebuf[mark:]); err != nil { - return err - } - l.lastIndex = b.entries[i].index - if err := l.cycle(); err != nil { - return err - } - s = l.segments[len(l.segments)-1] - mark = 0 - } - datas = datas[b.entries[i].size:] - } - if len(s.ebuf)-mark > 0 { - if _, err := l.sfile.Write(s.ebuf[mark:]); err != nil { - return err - } - l.lastIndex = b.entries[len(b.entries)-1].index - } - if !l.opts.NoSync { - if err := l.sfile.Sync(); err != nil { - return err - } - } - b.Clear() - return nil -} - -// FirstIndex returns the index of the first entry in the log. Returns zero -// when log has no entries. -func (l *Log) FirstIndex() (index uint64, err error) { - l.mu.RLock() - defer l.mu.RUnlock() - if l.corrupt { - return 0, ErrCorrupt - } else if l.closed { - return 0, ErrClosed - } - // We check the lastIndex for zero because the firstIndex is always one or - // more, even when there's no entries - if l.lastIndex == 0 { - return 0, nil - } - return l.firstIndex, nil -} - -// LastIndex returns the index of the last entry in the log. Returns zero when -// log has no entries. -func (l *Log) LastIndex() (index uint64, err error) { - l.mu.RLock() - defer l.mu.RUnlock() - if l.corrupt { - return 0, ErrCorrupt - } else if l.closed { - return 0, ErrClosed - } - if l.lastIndex == 0 { - return 0, nil - } - return l.lastIndex, nil -} - -// findSegment performs a bsearch on the segments -func (l *Log) findSegment(index uint64) int { - i, j := 0, len(l.segments) - for i < j { - h := i + (j-i)/2 - if index >= l.segments[h].index { - i = h + 1 - } else { - j = h - } - } - return i - 1 -} - -func (l *Log) loadSegmentEntries(s *segment) error { - data, err := ioutil.ReadFile(s.path) - if err != nil { - return err - } - ebuf := data - var epos []bpos - var pos int - for exidx := s.index; len(data) > 0; exidx++ { - var n int - if l.opts.LogFormat == JSON { - n, err = loadNextJSONEntry(data) - } else { - n, err = loadNextBinaryEntry(data) - } - if err != nil { - return err - } - data = data[n:] - epos = append(epos, bpos{pos, pos + n}) - pos += n - } - s.ebuf = ebuf - s.epos = epos - return nil -} - -func loadNextJSONEntry(data []byte) (n int, err error) { - // {"index":number,"data":string} - idx := bytes.IndexByte(data, '\n') - if idx == -1 { - return 0, ErrCorrupt - } - line := data[:idx] - dres := gjson.Get(*(*string)(unsafe.Pointer(&line)), "data") - if dres.Type != gjson.String { - return 0, ErrCorrupt - } - return idx + 1, nil -} - -func loadNextBinaryEntry(data []byte) (n int, err error) { - // data_size + data - size, n := binary.Uvarint(data) - if n <= 0 { - return 0, ErrCorrupt - } - if uint64(len(data)-n) < size { - return 0, ErrCorrupt - } - return n + int(size), nil -} - -// loadSegment loads the segment entries into memory, pushes it to the front -// of the lru cache, and returns it. -func (l *Log) loadSegment(index uint64) (*segment, error) { - // check the last segment first. 
- lseg := l.segments[len(l.segments)-1] - if index >= lseg.index { - return lseg, nil - } - // check the most recent cached segment - var rseg *segment - l.scache.Range(func(_, v interface{}) bool { - s := v.(*segment) - if index >= s.index && index < s.index+uint64(len(s.epos)) { - rseg = s - } - return false - }) - if rseg != nil { - return rseg, nil - } - // find in the segment array - idx := l.findSegment(index) - s := l.segments[idx] - if len(s.epos) == 0 { - // load the entries from cache - if err := l.loadSegmentEntries(s); err != nil { - return nil, err - } - } - // push the segment to the front of the cache - l.pushCache(idx) - return s, nil -} - -// Read an entry from the log. Returns a byte slice containing the data entry. -func (l *Log) Read(index uint64) (data []byte, err error) { - l.mu.RLock() - defer l.mu.RUnlock() - if l.corrupt { - return nil, ErrCorrupt - } else if l.closed { - return nil, ErrClosed - } - if index == 0 || index < l.firstIndex || index > l.lastIndex { - return nil, ErrNotFound - } - s, err := l.loadSegment(index) - if err != nil { - return nil, err - } - epos := s.epos[index-s.index] - edata := s.ebuf[epos.pos:epos.end] - if l.opts.LogFormat == JSON { - return readJSON(edata) - } - // binary read - size, n := binary.Uvarint(edata) - if n <= 0 { - return nil, ErrCorrupt - } - if uint64(len(edata)-n) < size { - return nil, ErrCorrupt - } - if l.opts.NoCopy { - data = edata[n : uint64(n)+size] - } else { - data = make([]byte, size) - copy(data, edata[n:]) - } - return data, nil -} - -//go:noinline -func readJSON(edata []byte) ([]byte, error) { - var data []byte - s := gjson.Get(*(*string)(unsafe.Pointer(&edata)), "data").String() - if len(s) > 0 && s[0] == '$' { - var err error - data, err = base64.URLEncoding.DecodeString(s[1:]) - if err != nil { - return nil, ErrCorrupt - } - } else if len(s) > 0 && s[0] == '+' { - data = make([]byte, len(s[1:])) - copy(data, s[1:]) - } else { - return nil, ErrCorrupt - } - return data, nil -} - -// ClearCache clears the segment cache -func (l *Log) ClearCache() error { - l.mu.Lock() - defer l.mu.Unlock() - if l.corrupt { - return ErrCorrupt - } else if l.closed { - return ErrClosed - } - l.clearCache() - return nil -} -func (l *Log) clearCache() { - l.scache.Range(func(_, v interface{}) bool { - s := v.(*segment) - s.ebuf = nil - s.epos = nil - return true - }) - l.scache = tinylru.LRU{} - l.scache.Resize(l.opts.SegmentCacheSize) -} - -// TruncateFront truncates the front of the log by removing all entries that -// are before the provided `index`. In other words the entry at -// `index` becomes the first entry in the log. -func (l *Log) TruncateFront(index uint64) error { - l.mu.Lock() - defer l.mu.Unlock() - if l.corrupt { - return ErrCorrupt - } else if l.closed { - return ErrClosed - } - return l.truncateFront(index) -} -func (l *Log) truncateFront(index uint64) (err error) { - if index == 0 || l.lastIndex == 0 || - index < l.firstIndex || index > l.lastIndex { - return ErrOutOfRange - } - if index == l.firstIndex { - // nothing to truncate - return nil - } - segIdx := l.findSegment(index) - var s *segment - s, err = l.loadSegment(index) - if err != nil { - return err - } - epos := s.epos[index-s.index:] - ebuf := s.ebuf[epos[0].pos:] - // Create a temp file contains the truncated segment. 
- tempName := filepath.Join(l.path, "TEMP") - err = func() error { - f, err := os.OpenFile(tempName, os.O_CREATE|os.O_RDWR|os.O_TRUNC, l.opts.FilePerms) - if err != nil { - return err - } - defer f.Close() - if _, err := f.Write(ebuf); err != nil { - return err - } - if err := f.Sync(); err != nil { - return err - } - return f.Close() - }() - // Rename the TEMP file to it's START file name. - startName := filepath.Join(l.path, segmentName(index)+".START") - if err = os.Rename(tempName, startName); err != nil { - return err - } - // The log was truncated but still needs some file cleanup. Any errors - // following this message will not cause an on-disk data ocorruption, but - // may cause an inconsistency with the current program, so we'll return - // ErrCorrupt so the the user can attempt a recover by calling Close() - // followed by Open(). - defer func() { - if v := recover(); v != nil { - err = ErrCorrupt - l.corrupt = true - } - }() - if segIdx == len(l.segments)-1 { - // Close the tail segment file - if err = l.sfile.Close(); err != nil { - return err - } - } - // Delete truncated segment files - for i := 0; i <= segIdx; i++ { - if err = os.Remove(l.segments[i].path); err != nil { - return err - } - } - // Rename the START file to the final truncated segment name. - newName := filepath.Join(l.path, segmentName(index)) - if err = os.Rename(startName, newName); err != nil { - return err - } - s.path = newName - s.index = index - if segIdx == len(l.segments)-1 { - // Reopen the tail segment file - if l.sfile, err = os.OpenFile(newName, os.O_WRONLY, l.opts.FilePerms); err != nil { - return err - } - var n int64 - if n, err = l.sfile.Seek(0, 2); err != nil { - return err - } - if n != int64(len(ebuf)) { - err = errors.New("invalid seek") - return err - } - // Load the last segment entries - if err = l.loadSegmentEntries(s); err != nil { - return err - } - } - l.segments = append([]*segment{}, l.segments[segIdx:]...) - l.firstIndex = index - l.clearCache() - return nil -} - -// TruncateBack truncates the back of the log by removing all entries that -// are after the provided `index`. In other words the entry at `index` -// becomes the last entry in the log. -func (l *Log) TruncateBack(index uint64) error { - l.mu.Lock() - defer l.mu.Unlock() - if l.corrupt { - return ErrCorrupt - } else if l.closed { - return ErrClosed - } - return l.truncateBack(index) -} - -func (l *Log) truncateBack(index uint64) (err error) { - if index == 0 || l.lastIndex == 0 || - index < l.firstIndex || index > l.lastIndex { - return ErrOutOfRange - } - if index == l.lastIndex { - // nothing to truncate - return nil - } - segIdx := l.findSegment(index) - var s *segment - s, err = l.loadSegment(index) - if err != nil { - return err - } - epos := s.epos[:index-s.index+1] - ebuf := s.ebuf[:epos[len(epos)-1].end] - // Create a temp file contains the truncated segment. - tempName := filepath.Join(l.path, "TEMP") - err = func() error { - f, err := os.OpenFile(tempName, os.O_CREATE|os.O_RDWR|os.O_TRUNC, l.opts.FilePerms) - if err != nil { - return err - } - defer f.Close() - if _, err := f.Write(ebuf); err != nil { - return err - } - if err := f.Sync(); err != nil { - return err - } - return f.Close() - }() - // Rename the TEMP file to it's END file name. - endName := filepath.Join(l.path, segmentName(s.index)+".END") - if err = os.Rename(tempName, endName); err != nil { - return err - } - // The log was truncated but still needs some file cleanup. 
Any errors - // following this message will not cause an on-disk data ocorruption, but - // may cause an inconsistency with the current program, so we'll return - // ErrCorrupt so the the user can attempt a recover by calling Close() - // followed by Open(). - defer func() { - if v := recover(); v != nil { - err = ErrCorrupt - l.corrupt = true - } - }() - - // Close the tail segment file - if err = l.sfile.Close(); err != nil { - return err - } - // Delete truncated segment files - for i := segIdx; i < len(l.segments); i++ { - if err = os.Remove(l.segments[i].path); err != nil { - return err - } - } - // Rename the END file to the final truncated segment name. - newName := filepath.Join(l.path, segmentName(s.index)) - if err = os.Rename(endName, newName); err != nil { - return err - } - // Reopen the tail segment file - if l.sfile, err = os.OpenFile(newName, os.O_WRONLY, l.opts.FilePerms); err != nil { - return err - } - var n int64 - n, err = l.sfile.Seek(0, 2) - if err != nil { - return err - } - if n != int64(len(ebuf)) { - err = errors.New("invalid seek") - return err - } - s.path = newName - l.segments = append([]*segment{}, l.segments[:segIdx+1]...) - l.lastIndex = index - l.clearCache() - if err = l.loadSegmentEntries(s); err != nil { - return err - } - return nil -} - -// Sync performs an fsync on the log. This is not necessary when the -// NoSync option is set to false. -func (l *Log) Sync() error { - l.mu.Lock() - defer l.mu.Unlock() - if l.corrupt { - return ErrCorrupt - } else if l.closed { - return ErrClosed - } - return l.sfile.Sync() -} diff --git a/vendor/golang.org/x/crypto/sha3/hashes_generic.go b/vendor/golang.org/x/crypto/sha3/hashes_generic.go index fe8c8479..c74fc20f 100644 --- a/vendor/golang.org/x/crypto/sha3/hashes_generic.go +++ b/vendor/golang.org/x/crypto/sha3/hashes_generic.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build !gc || purego || !s390x +// +build !gc purego !s390x package sha3 diff --git a/vendor/golang.org/x/crypto/sha3/keccakf.go b/vendor/golang.org/x/crypto/sha3/keccakf.go index ce48b1dd..e5faa375 100644 --- a/vendor/golang.org/x/crypto/sha3/keccakf.go +++ b/vendor/golang.org/x/crypto/sha3/keccakf.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build !amd64 || purego || !gc +// +build !amd64 purego !gc package sha3 diff --git a/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go index b908696b..248a3824 100644 --- a/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go +++ b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build amd64 && !purego && gc +// +build amd64,!purego,gc package sha3 diff --git a/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s index 1f539388..4cfa5438 100644 --- a/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s +++ b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. 
//go:build amd64 && !purego && gc +// +build amd64,!purego,gc // This code was translated into a form compatible with 6a from the public // domain sources at https://github.com/gvanas/KeccakCodePackage @@ -319,9 +320,9 @@ MOVQ rDi, _si(oState); \ MOVQ rDo, _so(oState) \ -// func keccakF1600(a *[25]uint64) +// func keccakF1600(state *[25]uint64) TEXT ·keccakF1600(SB), 0, $200-8 - MOVQ a+0(FP), rpState + MOVQ state+0(FP), rpState // Convert the user state into an internal state NOTQ _be(rpState) diff --git a/vendor/golang.org/x/crypto/sha3/register.go b/vendor/golang.org/x/crypto/sha3/register.go index addfd504..8b4453aa 100644 --- a/vendor/golang.org/x/crypto/sha3/register.go +++ b/vendor/golang.org/x/crypto/sha3/register.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build go1.4 +// +build go1.4 package sha3 diff --git a/vendor/golang.org/x/crypto/sha3/sha3_s390x.go b/vendor/golang.org/x/crypto/sha3/sha3_s390x.go index b4fbbf86..ec26f147 100644 --- a/vendor/golang.org/x/crypto/sha3/sha3_s390x.go +++ b/vendor/golang.org/x/crypto/sha3/sha3_s390x.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build gc && !purego +// +build gc,!purego package sha3 @@ -143,12 +144,6 @@ func (s *asmState) Write(b []byte) (int, error) { // Read squeezes an arbitrary number of bytes from the sponge. func (s *asmState) Read(out []byte) (n int, err error) { - // The 'compute last message digest' instruction only stores the digest - // at the first operand (dst) for SHAKE functions. - if s.function != shake_128 && s.function != shake_256 { - panic("sha3: can only call Read for SHAKE functions") - } - n = len(out) // need to pad if we were absorbing @@ -208,17 +203,8 @@ func (s *asmState) Sum(b []byte) []byte { // Hash the buffer. Note that we don't clear it because we // aren't updating the state. - switch s.function { - case sha3_224, sha3_256, sha3_384, sha3_512: - klmd(s.function, &a, nil, s.buf) - return append(b, a[:s.outputLen]...) - case shake_128, shake_256: - d := make([]byte, s.outputLen, 64) - klmd(s.function, &a, d, s.buf) - return append(b, d[:s.outputLen]...) - default: - panic("sha3: unknown function") - } + klmd(s.function, &a, nil, s.buf) + return append(b, a[:s.outputLen]...) } // Reset resets the Hash to its initial state. diff --git a/vendor/golang.org/x/crypto/sha3/sha3_s390x.s b/vendor/golang.org/x/crypto/sha3/sha3_s390x.s index 826b862c..a0e051b0 100644 --- a/vendor/golang.org/x/crypto/sha3/sha3_s390x.s +++ b/vendor/golang.org/x/crypto/sha3/sha3_s390x.s @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build gc && !purego +// +build gc,!purego #include "textflag.h" diff --git a/vendor/golang.org/x/crypto/sha3/shake_generic.go b/vendor/golang.org/x/crypto/sha3/shake_generic.go index 8d31cf5b..5c0710ef 100644 --- a/vendor/golang.org/x/crypto/sha3/shake_generic.go +++ b/vendor/golang.org/x/crypto/sha3/shake_generic.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build !gc || purego || !s390x +// +build !gc purego !s390x package sha3 diff --git a/vendor/golang.org/x/crypto/sha3/xor.go b/vendor/golang.org/x/crypto/sha3/xor.go index 7337cca8..59c8eb94 100644 --- a/vendor/golang.org/x/crypto/sha3/xor.go +++ b/vendor/golang.org/x/crypto/sha3/xor.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. 
//go:build (!amd64 && !386 && !ppc64le) || purego +// +build !amd64,!386,!ppc64le purego package sha3 diff --git a/vendor/golang.org/x/crypto/sha3/xor_unaligned.go b/vendor/golang.org/x/crypto/sha3/xor_unaligned.go index 870e2d16..1ce60624 100644 --- a/vendor/golang.org/x/crypto/sha3/xor_unaligned.go +++ b/vendor/golang.org/x/crypto/sha3/xor_unaligned.go @@ -3,6 +3,8 @@ // license that can be found in the LICENSE file. //go:build (amd64 || 386 || ppc64le) && !purego +// +build amd64 386 ppc64le +// +build !purego package sha3 diff --git a/vendor/golang.org/x/net/html/doc.go b/vendor/golang.org/x/net/html/doc.go index 3a7e5ab1..2466ae3d 100644 --- a/vendor/golang.org/x/net/html/doc.go +++ b/vendor/golang.org/x/net/html/doc.go @@ -104,7 +104,7 @@ tokenization, and tokenization and tree construction stages of the WHATWG HTML parsing specification respectively. While the tokenizer parses and normalizes individual HTML tokens, only the parser constructs the DOM tree from the tokenized HTML, as described in the tree construction stage of the -specification, dynamically modifying or extending the document's DOM tree. +specification, dynamically modifying or extending the docuemnt's DOM tree. If your use case requires semantically well-formed HTML documents, as defined by the WHATWG specification, the parser should be used rather than the tokenizer. diff --git a/vendor/golang.org/x/net/html/token.go b/vendor/golang.org/x/net/html/token.go index 3c57880d..de67f938 100644 --- a/vendor/golang.org/x/net/html/token.go +++ b/vendor/golang.org/x/net/html/token.go @@ -910,6 +910,9 @@ func (z *Tokenizer) readTagAttrKey() { return } switch c { + case ' ', '\n', '\r', '\t', '\f', '/': + z.pendingAttr[0].end = z.raw.end - 1 + return case '=': if z.pendingAttr[0].start+1 == z.raw.end { // WHATWG 13.2.5.32, if we see an equals sign before the attribute name @@ -917,9 +920,7 @@ func (z *Tokenizer) readTagAttrKey() { continue } fallthrough - case ' ', '\n', '\r', '\t', '\f', '/', '>': - // WHATWG 13.2.5.33 Attribute name state - // We need to reconsume the char in the after attribute name state to support the / character + case '>': z.raw.end-- z.pendingAttr[0].end = z.raw.end return @@ -938,11 +939,6 @@ func (z *Tokenizer) readTagAttrVal() { if z.err != nil { return } - if c == '/' { - // WHATWG 13.2.5.34 After attribute name state - // U+002F SOLIDUS (/) - Switch to the self-closing start tag state. 
- return - } if c != '=' { z.raw.end-- return diff --git a/vendor/golang.org/x/net/http/httpguts/httplex.go b/vendor/golang.org/x/net/http/httpguts/httplex.go index 9b4de940..6e071e85 100644 --- a/vendor/golang.org/x/net/http/httpguts/httplex.go +++ b/vendor/golang.org/x/net/http/httpguts/httplex.go @@ -12,7 +12,7 @@ import ( "golang.org/x/net/idna" ) -var isTokenTable = [256]bool{ +var isTokenTable = [127]bool{ '!': true, '#': true, '$': true, @@ -93,7 +93,12 @@ var isTokenTable = [256]bool{ } func IsTokenRune(r rune) bool { - return r < utf8.RuneSelf && isTokenTable[byte(r)] + i := int(r) + return i < len(isTokenTable) && isTokenTable[i] +} + +func isNotToken(r rune) bool { + return !IsTokenRune(r) } // HeaderValuesContainsToken reports whether any string in values @@ -197,8 +202,8 @@ func ValidHeaderFieldName(v string) bool { if len(v) == 0 { return false } - for i := 0; i < len(v); i++ { - if !isTokenTable[v[i]] { + for _, r := range v { + if !IsTokenRune(r) { return false } } diff --git a/vendor/golang.org/x/net/http2/databuffer.go b/vendor/golang.org/x/net/http2/databuffer.go index e6f55cbd..a3067f8d 100644 --- a/vendor/golang.org/x/net/http2/databuffer.go +++ b/vendor/golang.org/x/net/http2/databuffer.go @@ -20,44 +20,41 @@ import ( // TODO: Benchmark to determine if the pools are necessary. The GC may have // improved enough that we can instead allocate chunks like this: // make([]byte, max(16<<10, expectedBytesRemaining)) -var dataChunkPools = [...]sync.Pool{ - {New: func() interface{} { return new([1 << 10]byte) }}, - {New: func() interface{} { return new([2 << 10]byte) }}, - {New: func() interface{} { return new([4 << 10]byte) }}, - {New: func() interface{} { return new([8 << 10]byte) }}, - {New: func() interface{} { return new([16 << 10]byte) }}, -} +var ( + dataChunkSizeClasses = []int{ + 1 << 10, + 2 << 10, + 4 << 10, + 8 << 10, + 16 << 10, + } + dataChunkPools = [...]sync.Pool{ + {New: func() interface{} { return make([]byte, 1<<10) }}, + {New: func() interface{} { return make([]byte, 2<<10) }}, + {New: func() interface{} { return make([]byte, 4<<10) }}, + {New: func() interface{} { return make([]byte, 8<<10) }}, + {New: func() interface{} { return make([]byte, 16<<10) }}, + } +) func getDataBufferChunk(size int64) []byte { - switch { - case size <= 1<<10: - return dataChunkPools[0].Get().(*[1 << 10]byte)[:] - case size <= 2<<10: - return dataChunkPools[1].Get().(*[2 << 10]byte)[:] - case size <= 4<<10: - return dataChunkPools[2].Get().(*[4 << 10]byte)[:] - case size <= 8<<10: - return dataChunkPools[3].Get().(*[8 << 10]byte)[:] - default: - return dataChunkPools[4].Get().(*[16 << 10]byte)[:] + i := 0 + for ; i < len(dataChunkSizeClasses)-1; i++ { + if size <= int64(dataChunkSizeClasses[i]) { + break + } } + return dataChunkPools[i].Get().([]byte) } func putDataBufferChunk(p []byte) { - switch len(p) { - case 1 << 10: - dataChunkPools[0].Put((*[1 << 10]byte)(p)) - case 2 << 10: - dataChunkPools[1].Put((*[2 << 10]byte)(p)) - case 4 << 10: - dataChunkPools[2].Put((*[4 << 10]byte)(p)) - case 8 << 10: - dataChunkPools[3].Put((*[8 << 10]byte)(p)) - case 16 << 10: - dataChunkPools[4].Put((*[16 << 10]byte)(p)) - default: - panic(fmt.Sprintf("unexpected buffer len=%v", len(p))) + for i, n := range dataChunkSizeClasses { + if len(p) == n { + dataChunkPools[i].Put(p) + return + } } + panic(fmt.Sprintf("unexpected buffer len=%v", len(p))) } // dataBuffer is an io.ReadWriter backed by a list of data chunks. 
diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index 105c3b27..c1f6b90d 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -490,9 +490,6 @@ func terminalReadFrameError(err error) bool { // returned error is ErrFrameTooLarge. Other errors may be of type // ConnectionError, StreamError, or anything else from the underlying // reader. -// -// If ReadFrame returns an error and a non-nil Frame, the Frame's StreamID -// indicates the stream responsible for the error. func (fr *Framer) ReadFrame() (Frame, error) { fr.errDetail = nil if fr.lastFrame != nil { @@ -1513,18 +1510,19 @@ func (mh *MetaHeadersFrame) checkPseudos() error { } func (fr *Framer) maxHeaderStringLen() int { - v := int(fr.maxHeaderListSize()) - if v < 0 { - // If maxHeaderListSize overflows an int, use no limit (0). - return 0 + v := fr.maxHeaderListSize() + if uint32(int(v)) == v { + return int(v) } - return v + // They had a crazy big number for MaxHeaderBytes anyway, + // so give them unlimited header lengths: + return 0 } // readMetaFrame returns 0 or more CONTINUATION frames from fr and // merge them into the provided hf and returns a MetaHeadersFrame // with the decoded hpack values. -func (fr *Framer) readMetaFrame(hf *HeadersFrame) (Frame, error) { +func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { if fr.AllowIllegalReads { return nil, errors.New("illegal use of AllowIllegalReads with ReadMetaHeaders") } @@ -1567,7 +1565,6 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (Frame, error) { if size > remainSize { hdec.SetEmitEnabled(false) mh.Truncated = true - remainSize = 0 return } remainSize -= size @@ -1580,38 +1577,8 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (Frame, error) { var hc headersOrContinuation = hf for { frag := hc.HeaderBlockFragment() - - // Avoid parsing large amounts of headers that we will then discard. - // If the sender exceeds the max header list size by too much, - // skip parsing the fragment and close the connection. - // - // "Too much" is either any CONTINUATION frame after we've already - // exceeded the max header list size (in which case remainSize is 0), - // or a frame whose encoded size is more than twice the remaining - // header list bytes we're willing to accept. - if int64(len(frag)) > int64(2*remainSize) { - if VerboseLogs { - log.Printf("http2: header list too large") - } - // It would be nice to send a RST_STREAM before sending the GOAWAY, - // but the structure of the server's frame writer makes this difficult. - return mh, ConnectionError(ErrCodeProtocol) - } - - // Also close the connection after any CONTINUATION frame following an - // invalid header, since we stop tracking the size of the headers after - // an invalid one. - if invalid != nil { - if VerboseLogs { - log.Printf("http2: invalid header: %v", invalid) - } - // It would be nice to send a RST_STREAM before sending the GOAWAY, - // but the structure of the server's frame writer makes this difficult. 
- return mh, ConnectionError(ErrCodeProtocol) - } - if _, err := hdec.Write(frag); err != nil { - return mh, ConnectionError(ErrCodeCompression) + return nil, ConnectionError(ErrCodeCompression) } if hc.HeadersEnded() { @@ -1628,7 +1595,7 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (Frame, error) { mh.HeadersFrame.invalidate() if err := hdec.Close(); err != nil { - return mh, ConnectionError(ErrCodeCompression) + return nil, ConnectionError(ErrCodeCompression) } if invalid != nil { fr.errDetail = invalid diff --git a/vendor/golang.org/x/net/http2/go111.go b/vendor/golang.org/x/net/http2/go111.go new file mode 100644 index 00000000..5bf62b03 --- /dev/null +++ b/vendor/golang.org/x/net/http2/go111.go @@ -0,0 +1,30 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.11 +// +build go1.11 + +package http2 + +import ( + "net/http/httptrace" + "net/textproto" +) + +func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { + return trace != nil && trace.WroteHeaderField != nil +} + +func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) { + if trace != nil && trace.WroteHeaderField != nil { + trace.WroteHeaderField(k, []string{v}) + } +} + +func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { + if trace != nil { + return trace.Got1xxResponse + } + return nil +} diff --git a/vendor/golang.org/x/net/http2/go115.go b/vendor/golang.org/x/net/http2/go115.go new file mode 100644 index 00000000..908af1ab --- /dev/null +++ b/vendor/golang.org/x/net/http2/go115.go @@ -0,0 +1,27 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.15 +// +build go1.15 + +package http2 + +import ( + "context" + "crypto/tls" +) + +// dialTLSWithContext uses tls.Dialer, added in Go 1.15, to open a TLS +// connection. +func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) { + dialer := &tls.Dialer{ + Config: cfg, + } + cn, err := dialer.DialContext(ctx, network, addr) + if err != nil { + return nil, err + } + tlsCn := cn.(*tls.Conn) // DialContext comment promises this will always succeed + return tlsCn, nil +} diff --git a/vendor/golang.org/x/net/http2/go118.go b/vendor/golang.org/x/net/http2/go118.go new file mode 100644 index 00000000..aca4b2b3 --- /dev/null +++ b/vendor/golang.org/x/net/http2/go118.go @@ -0,0 +1,17 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.18 +// +build go1.18 + +package http2 + +import ( + "crypto/tls" + "net" +) + +func tlsUnderlyingConn(tc *tls.Conn) net.Conn { + return tc.NetConn() +} diff --git a/vendor/golang.org/x/net/http2/not_go111.go b/vendor/golang.org/x/net/http2/not_go111.go new file mode 100644 index 00000000..cc0baa81 --- /dev/null +++ b/vendor/golang.org/x/net/http2/not_go111.go @@ -0,0 +1,21 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !go1.11 +// +build !go1.11 + +package http2 + +import ( + "net/http/httptrace" + "net/textproto" +) + +func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { return false } + +func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {} + +func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { + return nil +} diff --git a/vendor/golang.org/x/net/http2/not_go115.go b/vendor/golang.org/x/net/http2/not_go115.go new file mode 100644 index 00000000..e6c04cf7 --- /dev/null +++ b/vendor/golang.org/x/net/http2/not_go115.go @@ -0,0 +1,31 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.15 +// +build !go1.15 + +package http2 + +import ( + "context" + "crypto/tls" +) + +// dialTLSWithContext opens a TLS connection. +func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) { + cn, err := tls.Dial(network, addr, cfg) + if err != nil { + return nil, err + } + if err := cn.Handshake(); err != nil { + return nil, err + } + if cfg.InsecureSkipVerify { + return cn, nil + } + if err := cn.VerifyHostname(cfg.ServerName); err != nil { + return nil, err + } + return cn, nil +} diff --git a/vendor/golang.org/x/net/http2/not_go118.go b/vendor/golang.org/x/net/http2/not_go118.go new file mode 100644 index 00000000..eab532c9 --- /dev/null +++ b/vendor/golang.org/x/net/http2/not_go118.go @@ -0,0 +1,17 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.18 +// +build !go1.18 + +package http2 + +import ( + "crypto/tls" + "net" +) + +func tlsUnderlyingConn(tc *tls.Conn) net.Conn { + return nil +} diff --git a/vendor/golang.org/x/net/http2/pipe.go b/vendor/golang.org/x/net/http2/pipe.go index 3b9f06b9..684d984f 100644 --- a/vendor/golang.org/x/net/http2/pipe.go +++ b/vendor/golang.org/x/net/http2/pipe.go @@ -77,10 +77,7 @@ func (p *pipe) Read(d []byte) (n int, err error) { } } -var ( - errClosedPipeWrite = errors.New("write on closed buffer") - errUninitializedPipeWrite = errors.New("write on uninitialized buffer") -) +var errClosedPipeWrite = errors.New("write on closed buffer") // Write copies bytes from p into the buffer and wakes a reader. // It is an error to write more data than the buffer can hold. @@ -94,12 +91,6 @@ func (p *pipe) Write(d []byte) (n int, err error) { if p.err != nil || p.breakErr != nil { return 0, errClosedPipeWrite } - // pipe.setBuffer is never invoked, leaving the buffer uninitialized. - // We shouldn't try to write to an uninitialized pipe, - // but returning an error is better than panicking. - if p.b == nil { - return 0, errUninitializedPipeWrite - } return p.b.Write(d) } diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index c5d08108..de60fa88 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -124,7 +124,6 @@ type Server struct { // IdleTimeout specifies how long until idle clients should be // closed with a GOAWAY frame. PING frames are not considered // activity for the purposes of IdleTimeout. - // If zero or negative, there is no timeout. 
IdleTimeout time.Duration // MaxUploadBufferPerConnection is the size of the initial flow @@ -435,7 +434,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { // passes the connection off to us with the deadline already set. // Write deadlines are set per stream in serverConn.newStream. // Disarm the net.Conn write deadline here. - if sc.hs.WriteTimeout > 0 { + if sc.hs.WriteTimeout != 0 { sc.conn.SetWriteDeadline(time.Time{}) } @@ -582,11 +581,9 @@ type serverConn struct { advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client curClientStreams uint32 // number of open streams initiated by the client curPushedStreams uint32 // number of open streams initiated by server push - curHandlers uint32 // number of running handler goroutines maxClientStreamID uint32 // max ever seen from client (odd), or 0 if there have been no client requests maxPushPromiseID uint32 // ID of the last push promise (even), or 0 if there have been no pushes streams map[uint32]*stream - unstartedHandlers []unstartedHandler initialStreamSendWindowSize int32 maxFrameSize int32 peerMaxHeaderListSize uint32 // zero means unknown (default) @@ -732,7 +729,11 @@ func isClosedConnError(err error) bool { return false } - if errors.Is(err, net.ErrClosed) { + // TODO: remove this string search and be more like the Windows + // case below. That might involve modifying the standard library + // to return better error types. + str := err.Error() + if strings.Contains(str, "use of closed network connection") { return true } @@ -921,7 +922,7 @@ func (sc *serverConn) serve() { sc.setConnState(http.StateActive) sc.setConnState(http.StateIdle) - if sc.srv.IdleTimeout > 0 { + if sc.srv.IdleTimeout != 0 { sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer) defer sc.idleTimer.Stop() } @@ -980,8 +981,6 @@ func (sc *serverConn) serve() { return case gracefulShutdownMsg: sc.startGracefulShutdownInternal() - case handlerDoneMsg: - sc.handlerDone() default: panic("unknown timer") } @@ -1021,7 +1020,6 @@ var ( idleTimerMsg = new(serverMessage) shutdownTimerMsg = new(serverMessage) gracefulShutdownMsg = new(serverMessage) - handlerDoneMsg = new(serverMessage) ) func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) } @@ -1478,11 +1476,6 @@ func (sc *serverConn) processFrameFromReader(res readFrameResult) bool { sc.goAway(ErrCodeFlowControl) return true case ConnectionError: - if res.f != nil { - if id := res.f.Header().StreamID; id > sc.maxClientStreamID { - sc.maxClientStreamID = id - } - } sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev) sc.goAway(ErrCode(ev)) return true // goAway will handle shutdown @@ -1639,7 +1632,7 @@ func (sc *serverConn) closeStream(st *stream, err error) { delete(sc.streams, st.id) if len(sc.streams) == 0 { sc.setConnState(http.StateIdle) - if sc.srv.IdleTimeout > 0 { + if sc.srv.IdleTimeout != 0 { sc.idleTimer.Reset(sc.srv.IdleTimeout) } if h1ServerKeepAlivesDisabled(sc.hs) { @@ -2019,12 +2012,13 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { // similar to how the http1 server works. Here it's // technically more like the http1 Server's ReadHeaderTimeout // (in Go 1.8), though. That's a more sane option anyway. 
- if sc.hs.ReadTimeout > 0 { + if sc.hs.ReadTimeout != 0 { sc.conn.SetReadDeadline(time.Time{}) st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout) } - return sc.scheduleHandler(id, rw, req, handler) + go sc.runHandler(rw, req, handler) + return nil } func (sc *serverConn) upgradeRequest(req *http.Request) { @@ -2040,14 +2034,10 @@ func (sc *serverConn) upgradeRequest(req *http.Request) { // Disable any read deadline set by the net/http package // prior to the upgrade. - if sc.hs.ReadTimeout > 0 { + if sc.hs.ReadTimeout != 0 { sc.conn.SetReadDeadline(time.Time{}) } - // This is the first request on the connection, - // so start the handler directly rather than going - // through scheduleHandler. - sc.curHandlers++ go sc.runHandler(rw, req, sc.handler.ServeHTTP) } @@ -2118,7 +2108,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream st.flow.conn = &sc.flow // link to conn-level counter st.flow.add(sc.initialStreamSendWindowSize) st.inflow.init(sc.srv.initialStreamRecvWindowSize()) - if sc.hs.WriteTimeout > 0 { + if sc.hs.WriteTimeout != 0 { st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) } @@ -2288,62 +2278,8 @@ func (sc *serverConn) newResponseWriter(st *stream, req *http.Request) *response return &responseWriter{rws: rws} } -type unstartedHandler struct { - streamID uint32 - rw *responseWriter - req *http.Request - handler func(http.ResponseWriter, *http.Request) -} - -// scheduleHandler starts a handler goroutine, -// or schedules one to start as soon as an existing handler finishes. -func (sc *serverConn) scheduleHandler(streamID uint32, rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) error { - sc.serveG.check() - maxHandlers := sc.advMaxStreams - if sc.curHandlers < maxHandlers { - sc.curHandlers++ - go sc.runHandler(rw, req, handler) - return nil - } - if len(sc.unstartedHandlers) > int(4*sc.advMaxStreams) { - return sc.countError("too_many_early_resets", ConnectionError(ErrCodeEnhanceYourCalm)) - } - sc.unstartedHandlers = append(sc.unstartedHandlers, unstartedHandler{ - streamID: streamID, - rw: rw, - req: req, - handler: handler, - }) - return nil -} - -func (sc *serverConn) handlerDone() { - sc.serveG.check() - sc.curHandlers-- - i := 0 - maxHandlers := sc.advMaxStreams - for ; i < len(sc.unstartedHandlers); i++ { - u := sc.unstartedHandlers[i] - if sc.streams[u.streamID] == nil { - // This stream was reset before its goroutine had a chance to start. - continue - } - if sc.curHandlers >= maxHandlers { - break - } - sc.curHandlers++ - go sc.runHandler(u.rw, u.req, u.handler) - sc.unstartedHandlers[i] = unstartedHandler{} // don't retain references - } - sc.unstartedHandlers = sc.unstartedHandlers[i:] - if len(sc.unstartedHandlers) == 0 { - sc.unstartedHandlers = nil - } -} - // Run on its own goroutine. func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) { - defer sc.sendServeMsg(handlerDoneMsg) didPanic := true defer func() { rw.rws.stream.cancelCtx() @@ -2551,6 +2487,7 @@ type responseWriterState struct { wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet. sentHeader bool // have we sent the header frame? 
handlerDone bool // handler has finished + dirty bool // a Write failed; don't reuse this responseWriterState sentContentLen int64 // non-zero if handler set a Content-Length header wroteBytes int64 @@ -2670,6 +2607,7 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { date: date, }) if err != nil { + rws.dirty = true return 0, err } if endStream { @@ -2690,6 +2628,7 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { if len(p) > 0 || endStream { // only send a 0 byte DATA frame if we're ending the stream. if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil { + rws.dirty = true return 0, err } } @@ -2701,6 +2640,9 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { trailers: rws.trailers, endStream: true, }) + if err != nil { + rws.dirty = true + } return len(p), err } return len(p), nil @@ -2916,12 +2858,14 @@ func (rws *responseWriterState) writeHeader(code int) { h.Del("Transfer-Encoding") } - rws.conn.writeHeaders(rws.stream, &writeResHeaders{ + if rws.conn.writeHeaders(rws.stream, &writeResHeaders{ streamID: rws.stream.id, httpResCode: code, h: h, endStream: rws.handlerDone && !rws.hasTrailers(), - }) + }) != nil { + rws.dirty = true + } return } @@ -2986,10 +2930,19 @@ func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, func (w *responseWriter) handlerDone() { rws := w.rws + dirty := rws.dirty rws.handlerDone = true w.Flush() w.rws = nil - responseWriterStatePool.Put(rws) + if !dirty { + // Only recycle the pool if all prior Write calls to + // the serverConn goroutine completed successfully. If + // they returned earlier due to resets from the peer + // there might still be write goroutines outstanding + // from the serverConn referencing the rws memory. See + // issue 20704. + responseWriterStatePool.Put(rws) + } } // Push errors. @@ -3172,7 +3125,6 @@ func (sc *serverConn) startPush(msg *startPushRequest) { panic(fmt.Sprintf("newWriterAndRequestNoBody(%+v): %v", msg.url, err)) } - sc.curHandlers++ go sc.runHandler(rw, req, sc.handler.ServeHTTP) return promisedID, nil } diff --git a/vendor/golang.org/x/net/http2/testsync.go b/vendor/golang.org/x/net/http2/testsync.go deleted file mode 100644 index 61075bd1..00000000 --- a/vendor/golang.org/x/net/http2/testsync.go +++ /dev/null @@ -1,331 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -package http2 - -import ( - "context" - "sync" - "time" -) - -// testSyncHooks coordinates goroutines in tests. -// -// For example, a call to ClientConn.RoundTrip involves several goroutines, including: -// - the goroutine running RoundTrip; -// - the clientStream.doRequest goroutine, which writes the request; and -// - the clientStream.readLoop goroutine, which reads the response. -// -// Using testSyncHooks, a test can start a RoundTrip and identify when all these goroutines -// are blocked waiting for some condition such as reading the Request.Body or waiting for -// flow control to become available. -// -// The testSyncHooks also manage timers and synthetic time in tests. -// This permits us to, for example, start a request and cause it to time out waiting for -// response headers without resorting to time.Sleep calls. -type testSyncHooks struct { - // active/inactive act as a mutex and condition variable. - // - // - neither chan contains a value: testSyncHooks is locked. 
- // - active contains a value: unlocked, and at least one goroutine is not blocked - // - inactive contains a value: unlocked, and all goroutines are blocked - active chan struct{} - inactive chan struct{} - - // goroutine counts - total int // total goroutines - condwait map[*sync.Cond]int // blocked in sync.Cond.Wait - blocked []*testBlockedGoroutine // otherwise blocked - - // fake time - now time.Time - timers []*fakeTimer - - // Transport testing: Report various events. - newclientconn func(*ClientConn) - newstream func(*clientStream) -} - -// testBlockedGoroutine is a blocked goroutine. -type testBlockedGoroutine struct { - f func() bool // blocked until f returns true - ch chan struct{} // closed when unblocked -} - -func newTestSyncHooks() *testSyncHooks { - h := &testSyncHooks{ - active: make(chan struct{}, 1), - inactive: make(chan struct{}, 1), - condwait: map[*sync.Cond]int{}, - } - h.inactive <- struct{}{} - h.now = time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC) - return h -} - -// lock acquires the testSyncHooks mutex. -func (h *testSyncHooks) lock() { - select { - case <-h.active: - case <-h.inactive: - } -} - -// waitInactive waits for all goroutines to become inactive. -func (h *testSyncHooks) waitInactive() { - for { - <-h.inactive - if !h.unlock() { - break - } - } -} - -// unlock releases the testSyncHooks mutex. -// It reports whether any goroutines are active. -func (h *testSyncHooks) unlock() (active bool) { - // Look for a blocked goroutine which can be unblocked. - blocked := h.blocked[:0] - unblocked := false - for _, b := range h.blocked { - if !unblocked && b.f() { - unblocked = true - close(b.ch) - } else { - blocked = append(blocked, b) - } - } - h.blocked = blocked - - // Count goroutines blocked on condition variables. - condwait := 0 - for _, count := range h.condwait { - condwait += count - } - - if h.total > condwait+len(blocked) { - h.active <- struct{}{} - return true - } else { - h.inactive <- struct{}{} - return false - } -} - -// goRun starts a new goroutine. -func (h *testSyncHooks) goRun(f func()) { - h.lock() - h.total++ - h.unlock() - go func() { - defer func() { - h.lock() - h.total-- - h.unlock() - }() - f() - }() -} - -// blockUntil indicates that a goroutine is blocked waiting for some condition to become true. -// It waits until f returns true before proceeding. -// -// Example usage: -// -// h.blockUntil(func() bool { -// // Is the context done yet? -// select { -// case <-ctx.Done(): -// default: -// return false -// } -// return true -// }) -// // Wait for the context to become done. -// <-ctx.Done() -// -// The function f passed to blockUntil must be non-blocking and idempotent. -func (h *testSyncHooks) blockUntil(f func() bool) { - if f() { - return - } - ch := make(chan struct{}) - h.lock() - h.blocked = append(h.blocked, &testBlockedGoroutine{ - f: f, - ch: ch, - }) - h.unlock() - <-ch -} - -// broadcast is sync.Cond.Broadcast. -func (h *testSyncHooks) condBroadcast(cond *sync.Cond) { - h.lock() - delete(h.condwait, cond) - h.unlock() - cond.Broadcast() -} - -// broadcast is sync.Cond.Wait. -func (h *testSyncHooks) condWait(cond *sync.Cond) { - h.lock() - h.condwait[cond]++ - h.unlock() -} - -// newTimer creates a new fake timer. -func (h *testSyncHooks) newTimer(d time.Duration) timer { - h.lock() - defer h.unlock() - t := &fakeTimer{ - hooks: h, - when: h.now.Add(d), - c: make(chan time.Time), - } - h.timers = append(h.timers, t) - return t -} - -// afterFunc creates a new fake AfterFunc timer. 
-func (h *testSyncHooks) afterFunc(d time.Duration, f func()) timer { - h.lock() - defer h.unlock() - t := &fakeTimer{ - hooks: h, - when: h.now.Add(d), - f: f, - } - h.timers = append(h.timers, t) - return t -} - -func (h *testSyncHooks) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) { - ctx, cancel := context.WithCancel(ctx) - t := h.afterFunc(d, cancel) - return ctx, func() { - t.Stop() - cancel() - } -} - -func (h *testSyncHooks) timeUntilEvent() time.Duration { - h.lock() - defer h.unlock() - var next time.Time - for _, t := range h.timers { - if next.IsZero() || t.when.Before(next) { - next = t.when - } - } - if d := next.Sub(h.now); d > 0 { - return d - } - return 0 -} - -// advance advances time and causes synthetic timers to fire. -func (h *testSyncHooks) advance(d time.Duration) { - h.lock() - defer h.unlock() - h.now = h.now.Add(d) - timers := h.timers[:0] - for _, t := range h.timers { - t := t // remove after go.mod depends on go1.22 - t.mu.Lock() - switch { - case t.when.After(h.now): - timers = append(timers, t) - case t.when.IsZero(): - // stopped timer - default: - t.when = time.Time{} - if t.c != nil { - close(t.c) - } - if t.f != nil { - h.total++ - go func() { - defer func() { - h.lock() - h.total-- - h.unlock() - }() - t.f() - }() - } - } - t.mu.Unlock() - } - h.timers = timers -} - -// A timer wraps a time.Timer, or a synthetic equivalent in tests. -// Unlike time.Timer, timer is single-use: The timer channel is closed when the timer expires. -type timer interface { - C() <-chan time.Time - Stop() bool - Reset(d time.Duration) bool -} - -// timeTimer implements timer using real time. -type timeTimer struct { - t *time.Timer - c chan time.Time -} - -// newTimeTimer creates a new timer using real time. -func newTimeTimer(d time.Duration) timer { - ch := make(chan time.Time) - t := time.AfterFunc(d, func() { - close(ch) - }) - return &timeTimer{t, ch} -} - -// newTimeAfterFunc creates an AfterFunc timer using real time. -func newTimeAfterFunc(d time.Duration, f func()) timer { - return &timeTimer{ - t: time.AfterFunc(d, f), - } -} - -func (t timeTimer) C() <-chan time.Time { return t.c } -func (t timeTimer) Stop() bool { return t.t.Stop() } -func (t timeTimer) Reset(d time.Duration) bool { return t.t.Reset(d) } - -// fakeTimer implements timer using fake time. -type fakeTimer struct { - hooks *testSyncHooks - - mu sync.Mutex - when time.Time // when the timer will fire - c chan time.Time // closed when the timer fires; mutually exclusive with f - f func() // called when the timer fires; mutually exclusive with c -} - -func (t *fakeTimer) C() <-chan time.Time { return t.c } - -func (t *fakeTimer) Stop() bool { - t.mu.Lock() - defer t.mu.Unlock() - stopped := t.when.IsZero() - t.when = time.Time{} - return stopped -} - -func (t *fakeTimer) Reset(d time.Duration) bool { - if t.c != nil || t.f == nil { - panic("fakeTimer only supports Reset on AfterFunc timers") - } - t.mu.Lock() - defer t.mu.Unlock() - t.hooks.lock() - defer t.hooks.unlock() - active := !t.when.IsZero() - t.when = t.hooks.now.Add(d) - if !active { - t.hooks.timers = append(t.hooks.timers, t) - } - return active -} diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index 2fa49490..4515b22c 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -147,12 +147,6 @@ type Transport struct { // waiting for their turn. 
StrictMaxConcurrentStreams bool - // IdleConnTimeout is the maximum amount of time an idle - // (keep-alive) connection will remain idle before closing - // itself. - // Zero means no limit. - IdleConnTimeout time.Duration - // ReadIdleTimeout is the timeout after which a health check using ping // frame will be carried out if no frame is received on the connection. // Note that a ping response will is considered a received frame, so if @@ -184,8 +178,6 @@ type Transport struct { connPoolOnce sync.Once connPoolOrDef ClientConnPool // non-nil version of ConnPool - - syncHooks *testSyncHooks } func (t *Transport) maxHeaderListSize() uint32 { @@ -310,7 +302,7 @@ type ClientConn struct { readerErr error // set before readerDone is closed idleTimeout time.Duration // or 0 for never - idleTimer timer + idleTimer *time.Timer mu sync.Mutex // guards following cond *sync.Cond // hold mu; broadcast on flow/closed changes @@ -352,60 +344,6 @@ type ClientConn struct { werr error // first write error that has occurred hbuf bytes.Buffer // HPACK encoder writes into this henc *hpack.Encoder - - syncHooks *testSyncHooks // can be nil -} - -// Hook points used for testing. -// Outside of tests, cc.syncHooks is nil and these all have minimal implementations. -// Inside tests, see the testSyncHooks function docs. - -// goRun starts a new goroutine. -func (cc *ClientConn) goRun(f func()) { - if cc.syncHooks != nil { - cc.syncHooks.goRun(f) - return - } - go f() -} - -// condBroadcast is cc.cond.Broadcast. -func (cc *ClientConn) condBroadcast() { - if cc.syncHooks != nil { - cc.syncHooks.condBroadcast(cc.cond) - } - cc.cond.Broadcast() -} - -// condWait is cc.cond.Wait. -func (cc *ClientConn) condWait() { - if cc.syncHooks != nil { - cc.syncHooks.condWait(cc.cond) - } - cc.cond.Wait() -} - -// newTimer creates a new time.Timer, or a synthetic timer in tests. -func (cc *ClientConn) newTimer(d time.Duration) timer { - if cc.syncHooks != nil { - return cc.syncHooks.newTimer(d) - } - return newTimeTimer(d) -} - -// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests. -func (cc *ClientConn) afterFunc(d time.Duration, f func()) timer { - if cc.syncHooks != nil { - return cc.syncHooks.afterFunc(d, f) - } - return newTimeAfterFunc(d, f) -} - -func (cc *ClientConn) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) { - if cc.syncHooks != nil { - return cc.syncHooks.contextWithTimeout(ctx, d) - } - return context.WithTimeout(ctx, d) } // clientStream is the state for a single HTTP/2 stream. One of these @@ -487,7 +425,7 @@ func (cs *clientStream) abortStreamLocked(err error) { // TODO(dneil): Clean up tests where cs.cc.cond is nil. if cs.cc.cond != nil { // Wake up writeRequestBody if it is waiting on flow control. 
- cs.cc.condBroadcast() + cs.cc.cond.Broadcast() } } @@ -497,7 +435,7 @@ func (cs *clientStream) abortRequestBodyWrite() { defer cc.mu.Unlock() if cs.reqBody != nil && cs.reqBodyClosed == nil { cs.closeReqBodyLocked() - cc.condBroadcast() + cc.cond.Broadcast() } } @@ -507,10 +445,10 @@ func (cs *clientStream) closeReqBodyLocked() { } cs.reqBodyClosed = make(chan struct{}) reqBodyClosed := cs.reqBodyClosed - cs.cc.goRun(func() { + go func() { cs.reqBody.Close() close(reqBodyClosed) - }) + }() } type stickyErrWriter struct { @@ -599,6 +537,15 @@ func authorityAddr(scheme string, authority string) (addr string) { return net.JoinHostPort(host, port) } +var retryBackoffHook func(time.Duration) *time.Timer + +func backoffNewTimer(d time.Duration) *time.Timer { + if retryBackoffHook != nil { + return retryBackoffHook(d) + } + return time.NewTimer(d) +} + // RoundTripOpt is like RoundTrip, but takes options. func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) { @@ -626,27 +573,13 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res backoff := float64(uint(1) << (uint(retry) - 1)) backoff += backoff * (0.1 * mathrand.Float64()) d := time.Second * time.Duration(backoff) - var tm timer - if t.syncHooks != nil { - tm = t.syncHooks.newTimer(d) - t.syncHooks.blockUntil(func() bool { - select { - case <-tm.C(): - case <-req.Context().Done(): - default: - return false - } - return true - }) - } else { - tm = newTimeTimer(d) - } + timer := backoffNewTimer(d) select { - case <-tm.C(): + case <-timer.C: t.vlogf("RoundTrip retrying after failure: %v", roundTripErr) continue case <-req.Context().Done(): - tm.Stop() + timer.Stop() err = req.Context().Err() } } @@ -725,9 +658,6 @@ func canRetryError(err error) bool { } func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse bool) (*ClientConn, error) { - if t.syncHooks != nil { - return t.newClientConn(nil, singleUse, t.syncHooks) - } host, _, err := net.SplitHostPort(addr) if err != nil { return nil, err @@ -736,7 +666,7 @@ func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse b if err != nil { return nil, err } - return t.newClientConn(tconn, singleUse, nil) + return t.newClientConn(tconn, singleUse) } func (t *Transport) newTLSConfig(host string) *tls.Config { @@ -802,10 +732,10 @@ func (t *Transport) maxEncoderHeaderTableSize() uint32 { } func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { - return t.newClientConn(c, t.disableKeepAlives(), nil) + return t.newClientConn(c, t.disableKeepAlives()) } -func (t *Transport) newClientConn(c net.Conn, singleUse bool, hooks *testSyncHooks) (*ClientConn, error) { +func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { cc := &ClientConn{ t: t, tconn: c, @@ -820,15 +750,10 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool, hooks *testSyncHoo wantSettingsAck: true, pings: make(map[[8]byte]chan struct{}), reqHeaderMu: make(chan struct{}, 1), - syncHooks: hooks, - } - if hooks != nil { - hooks.newclientconn(cc) - c = cc.tconn } if d := t.idleConnTimeout(); d != 0 { cc.idleTimeout = d - cc.idleTimer = cc.afterFunc(d, cc.onIdleTimeout) + cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout) } if VerboseLogs { t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) @@ -893,7 +818,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool, hooks 
*testSyncHoo return nil, cc.werr } - cc.goRun(cc.readLoop) + go cc.readLoop() return cc, nil } @@ -901,7 +826,7 @@ func (cc *ClientConn) healthCheck() { pingTimeout := cc.t.pingTimeout() // We don't need to periodically ping in the health check, because the readLoop of ClientConn will // trigger the healthCheck again if there is no frame received. - ctx, cancel := cc.contextWithTimeout(context.Background(), pingTimeout) + ctx, cancel := context.WithTimeout(context.Background(), pingTimeout) defer cancel() cc.vlogf("http2: Transport sending health check") err := cc.Ping(ctx) @@ -936,20 +861,7 @@ func (cc *ClientConn) setGoAway(f *GoAwayFrame) { } last := f.LastStreamID for streamID, cs := range cc.streams { - if streamID <= last { - // The server's GOAWAY indicates that it received this stream. - // It will either finish processing it, or close the connection - // without doing so. Either way, leave the stream alone for now. - continue - } - if streamID == 1 && cc.goAway.ErrCode != ErrCodeNo { - // Don't retry the first stream on a connection if we get a non-NO error. - // If the server is sending an error on a new connection, - // retrying the request on a new one probably isn't going to work. - cs.abortStreamLocked(fmt.Errorf("http2: Transport received GOAWAY from server ErrCode:%v", cc.goAway.ErrCode)) - } else { - // Aborting the stream with errClentConnGotGoAway indicates that - // the request should be retried on a new connection. + if streamID > last { cs.abortStreamLocked(errClientConnGotGoAway) } } @@ -1106,7 +1018,7 @@ func (cc *ClientConn) forceCloseConn() { if !ok { return } - if nc := tc.NetConn(); nc != nil { + if nc := tlsUnderlyingConn(tc); nc != nil { nc.Close() } } @@ -1144,7 +1056,7 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { // Wait for all in-flight streams to complete or connection to close done := make(chan struct{}) cancelled := false // guarded by cc.mu - cc.goRun(func() { + go func() { cc.mu.Lock() defer cc.mu.Unlock() for { @@ -1156,9 +1068,9 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { if cancelled { break } - cc.condWait() + cc.cond.Wait() } - }) + }() shutdownEnterWaitStateHook() select { case <-done: @@ -1168,7 +1080,7 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { cc.mu.Lock() // Free the goroutine above cancelled = true - cc.condBroadcast() + cc.cond.Broadcast() cc.mu.Unlock() return ctx.Err() } @@ -1206,7 +1118,7 @@ func (cc *ClientConn) closeForError(err error) { for _, cs := range cc.streams { cs.abortStreamLocked(err) } - cc.condBroadcast() + cc.cond.Broadcast() cc.mu.Unlock() cc.closeConn() } @@ -1303,10 +1215,6 @@ func (cc *ClientConn) decrStreamReservationsLocked() { } func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { - return cc.roundTrip(req, nil) -} - -func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) (*http.Response, error) { ctx := req.Context() cs := &clientStream{ cc: cc, @@ -1321,23 +1229,9 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) respHeaderRecv: make(chan struct{}), donec: make(chan struct{}), } - cc.goRun(func() { - cs.doRequest(req) - }) + go cs.doRequest(req) waitDone := func() error { - if cc.syncHooks != nil { - cc.syncHooks.blockUntil(func() bool { - select { - case <-cs.donec: - case <-ctx.Done(): - case <-cs.reqCancel: - default: - return false - } - return true - }) - } select { case <-cs.donec: return nil @@ -1398,24 +1292,7 @@ func (cc *ClientConn) roundTrip(req *http.Request, 
streamf func(*clientStream)) return err } - if streamf != nil { - streamf(cs) - } - for { - if cc.syncHooks != nil { - cc.syncHooks.blockUntil(func() bool { - select { - case <-cs.respHeaderRecv: - case <-cs.abort: - case <-ctx.Done(): - case <-cs.reqCancel: - default: - return false - } - return true - }) - } select { case <-cs.respHeaderRecv: return handleResponseHeaders() @@ -1471,21 +1348,6 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { if cc.reqHeaderMu == nil { panic("RoundTrip on uninitialized ClientConn") // for tests } - var newStreamHook func(*clientStream) - if cc.syncHooks != nil { - newStreamHook = cc.syncHooks.newstream - cc.syncHooks.blockUntil(func() bool { - select { - case cc.reqHeaderMu <- struct{}{}: - <-cc.reqHeaderMu - case <-cs.reqCancel: - case <-ctx.Done(): - default: - return false - } - return true - }) - } select { case cc.reqHeaderMu <- struct{}{}: case <-cs.reqCancel: @@ -1510,10 +1372,6 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { } cc.mu.Unlock() - if newStreamHook != nil { - newStreamHook(cs) - } - // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? if !cc.t.disableCompression() && req.Header.Get("Accept-Encoding") == "" && @@ -1594,30 +1452,15 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { var respHeaderTimer <-chan time.Time var respHeaderRecv chan struct{} if d := cc.responseHeaderTimeout(); d != 0 { - timer := cc.newTimer(d) + timer := time.NewTimer(d) defer timer.Stop() - respHeaderTimer = timer.C() + respHeaderTimer = timer.C respHeaderRecv = cs.respHeaderRecv } // Wait until the peer half-closes its end of the stream, // or until the request is aborted (via context, error, or otherwise), // whichever comes first. for { - if cc.syncHooks != nil { - cc.syncHooks.blockUntil(func() bool { - select { - case <-cs.peerClosed: - case <-respHeaderTimer: - case <-respHeaderRecv: - case <-cs.abort: - case <-ctx.Done(): - case <-cs.reqCancel: - default: - return false - } - return true - }) - } select { case <-cs.peerClosed: return nil @@ -1766,7 +1609,7 @@ func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error { return nil } cc.pendingRequests++ - cc.condWait() + cc.cond.Wait() cc.pendingRequests-- select { case <-cs.abort: @@ -2028,24 +1871,8 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) cs.flow.take(take) return take, nil } - cc.condWait() - } -} - -func validateHeaders(hdrs http.Header) string { - for k, vv := range hdrs { - if !httpguts.ValidHeaderFieldName(k) { - return fmt.Sprintf("name %q", k) - } - for _, v := range vv { - if !httpguts.ValidHeaderFieldValue(v) { - // Don't include the value in the error, - // because it may be sensitive. - return fmt.Sprintf("value for header %q", k) - } - } + cc.cond.Wait() } - return "" } var errNilRequestURL = errors.New("http2: Request.URI is nil") @@ -2085,14 +1912,19 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail } } - // Check for any invalid headers+trailers and return an error before we + // Check for any invalid headers and return an error before we // potentially pollute our hpack state. 
(We want to be able to // continue to reuse the hpack encoder for future requests) - if err := validateHeaders(req.Header); err != "" { - return nil, fmt.Errorf("invalid HTTP header %s", err) - } - if err := validateHeaders(req.Trailer); err != "" { - return nil, fmt.Errorf("invalid HTTP trailer %s", err) + for k, vv := range req.Header { + if !httpguts.ValidHeaderFieldName(k) { + return nil, fmt.Errorf("invalid HTTP header name %q", k) + } + for _, v := range vv { + if !httpguts.ValidHeaderFieldValue(v) { + // Don't include the value in the error, because it may be sensitive. + return nil, fmt.Errorf("invalid HTTP header value for header %q", k) + } + } } enumerateHeaders := func(f func(name, value string)) { @@ -2311,7 +2143,7 @@ func (cc *ClientConn) forgetStreamID(id uint32) { } // Wake up writeRequestBody via clientStream.awaitFlowControl and // wake up RoundTrip if there is a pending request. - cc.condBroadcast() + cc.cond.Broadcast() closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil if closeOnIdle && cc.streamsReserved == 0 && len(cc.streams) == 0 { @@ -2399,7 +2231,7 @@ func (rl *clientConnReadLoop) cleanup() { cs.abortStreamLocked(err) } } - cc.condBroadcast() + cc.cond.Broadcast() cc.mu.Unlock() } @@ -2434,9 +2266,10 @@ func (rl *clientConnReadLoop) run() error { cc := rl.cc gotSettings := false readIdleTimeout := cc.t.ReadIdleTimeout - var t timer + var t *time.Timer if readIdleTimeout != 0 { - t = cc.afterFunc(readIdleTimeout, cc.healthCheck) + t = time.AfterFunc(readIdleTimeout, cc.healthCheck) + defer t.Stop() } for { f, err := cc.fr.ReadFrame() @@ -2851,7 +2684,7 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error { }) return nil } - if !cs.pastHeaders { + if !cs.firstByte { cc.logf("protocol error: received DATA before a HEADERS frame") rl.endStreamError(cs, StreamError{ StreamID: f.StreamID, @@ -3034,7 +2867,7 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { for _, cs := range cc.streams { cs.flow.add(delta) } - cc.condBroadcast() + cc.cond.Broadcast() cc.initialWindowSize = s.Val case SettingHeaderTableSize: @@ -3078,18 +2911,9 @@ func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { fl = &cs.flow } if !fl.add(int32(f.Increment)) { - // For stream, the sender sends RST_STREAM with an error code of FLOW_CONTROL_ERROR - if cs != nil { - rl.endStreamError(cs, StreamError{ - StreamID: f.StreamID, - Code: ErrCodeFlowControl, - }) - return nil - } - return ConnectionError(ErrCodeFlowControl) } - cc.condBroadcast() + cc.cond.Broadcast() return nil } @@ -3131,38 +2955,24 @@ func (cc *ClientConn) Ping(ctx context.Context) error { } cc.mu.Unlock() } - var pingError error - errc := make(chan struct{}) - cc.goRun(func() { + errc := make(chan error, 1) + go func() { cc.wmu.Lock() defer cc.wmu.Unlock() - if pingError = cc.fr.WritePing(false, p); pingError != nil { - close(errc) + if err := cc.fr.WritePing(false, p); err != nil { + errc <- err return } - if pingError = cc.bw.Flush(); pingError != nil { - close(errc) + if err := cc.bw.Flush(); err != nil { + errc <- err return } - }) - if cc.syncHooks != nil { - cc.syncHooks.blockUntil(func() bool { - select { - case <-c: - case <-errc: - case <-ctx.Done(): - case <-cc.readerDone: - default: - return false - } - return true - }) - } + }() select { case <-c: return nil - case <-errc: - return pingError + case err := <-errc: + return err case <-ctx.Done(): return ctx.Err() case <-cc.readerDone: @@ -3331,17 +3141,9 @@ func (rt 
noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, err } func (t *Transport) idleConnTimeout() time.Duration { - // to keep things backwards compatible, we use non-zero values of - // IdleConnTimeout, followed by using the IdleConnTimeout on the underlying - // http1 transport, followed by 0 - if t.IdleConnTimeout != 0 { - return t.IdleConnTimeout - } - if t.t1 != nil { return t.t1.IdleConnTimeout } - return 0 } @@ -3399,34 +3201,3 @@ func traceFirstResponseByte(trace *httptrace.ClientTrace) { trace.GotFirstResponseByte() } } - -func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { - return trace != nil && trace.WroteHeaderField != nil -} - -func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) { - if trace != nil && trace.WroteHeaderField != nil { - trace.WroteHeaderField(k, []string{v}) - } -} - -func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { - if trace != nil { - return trace.Got1xxResponse - } - return nil -} - -// dialTLSWithContext uses tls.Dialer, added in Go 1.15, to open a TLS -// connection. -func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) { - dialer := &tls.Dialer{ - Config: cfg, - } - cn, err := dialer.DialContext(ctx, network, addr) - if err != nil { - return nil, err - } - tlsCn := cn.(*tls.Conn) // DialContext comment promises this will always succeed - return tlsCn, nil -} diff --git a/vendor/golang.org/x/net/icmp/helper_posix.go b/vendor/golang.org/x/net/icmp/helper_posix.go index f625483f..6c3ebfae 100644 --- a/vendor/golang.org/x/net/icmp/helper_posix.go +++ b/vendor/golang.org/x/net/icmp/helper_posix.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || windows +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows package icmp diff --git a/vendor/golang.org/x/net/icmp/listen_posix.go b/vendor/golang.org/x/net/icmp/listen_posix.go index b7cb15b7..6aea8047 100644 --- a/vendor/golang.org/x/net/icmp/listen_posix.go +++ b/vendor/golang.org/x/net/icmp/listen_posix.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || windows +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows package icmp diff --git a/vendor/golang.org/x/net/icmp/listen_stub.go b/vendor/golang.org/x/net/icmp/listen_stub.go index 7b76be1c..1acfb74b 100644 --- a/vendor/golang.org/x/net/icmp/listen_stub.go +++ b/vendor/golang.org/x/net/icmp/listen_stub.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows package icmp diff --git a/vendor/golang.org/x/net/idna/go118.go b/vendor/golang.org/x/net/idna/go118.go index 712f1ad8..c5c4338d 100644 --- a/vendor/golang.org/x/net/idna/go118.go +++ b/vendor/golang.org/x/net/idna/go118.go @@ -5,6 +5,7 @@ // license that can be found in the LICENSE file. 
//go:build go1.18 +// +build go1.18 package idna diff --git a/vendor/golang.org/x/net/idna/idna10.0.0.go b/vendor/golang.org/x/net/idna/idna10.0.0.go index 7b371788..64ccf85f 100644 --- a/vendor/golang.org/x/net/idna/idna10.0.0.go +++ b/vendor/golang.org/x/net/idna/idna10.0.0.go @@ -5,6 +5,7 @@ // license that can be found in the LICENSE file. //go:build go1.10 +// +build go1.10 // Package idna implements IDNA2008 using the compatibility processing // defined by UTS (Unicode Technical Standard) #46, which defines a standard to diff --git a/vendor/golang.org/x/net/idna/idna9.0.0.go b/vendor/golang.org/x/net/idna/idna9.0.0.go index cc6a892a..ee1698ce 100644 --- a/vendor/golang.org/x/net/idna/idna9.0.0.go +++ b/vendor/golang.org/x/net/idna/idna9.0.0.go @@ -5,6 +5,7 @@ // license that can be found in the LICENSE file. //go:build !go1.10 +// +build !go1.10 // Package idna implements IDNA2008 using the compatibility processing // defined by UTS (Unicode Technical Standard) #46, which defines a standard to diff --git a/vendor/golang.org/x/net/idna/pre_go118.go b/vendor/golang.org/x/net/idna/pre_go118.go index 40e74bb3..3aaccab1 100644 --- a/vendor/golang.org/x/net/idna/pre_go118.go +++ b/vendor/golang.org/x/net/idna/pre_go118.go @@ -5,6 +5,7 @@ // license that can be found in the LICENSE file. //go:build !go1.18 +// +build !go1.18 package idna diff --git a/vendor/golang.org/x/net/idna/tables10.0.0.go b/vendor/golang.org/x/net/idna/tables10.0.0.go index c6c2bf10..d1d62ef4 100644 --- a/vendor/golang.org/x/net/idna/tables10.0.0.go +++ b/vendor/golang.org/x/net/idna/tables10.0.0.go @@ -1,6 +1,7 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.10 && !go1.13 +// +build go1.10,!go1.13 package idna diff --git a/vendor/golang.org/x/net/idna/tables11.0.0.go b/vendor/golang.org/x/net/idna/tables11.0.0.go index 76789393..167efba7 100644 --- a/vendor/golang.org/x/net/idna/tables11.0.0.go +++ b/vendor/golang.org/x/net/idna/tables11.0.0.go @@ -1,6 +1,7 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.13 && !go1.14 +// +build go1.13,!go1.14 package idna diff --git a/vendor/golang.org/x/net/idna/tables12.0.0.go b/vendor/golang.org/x/net/idna/tables12.0.0.go index 0600cd2a..ab40f7bc 100644 --- a/vendor/golang.org/x/net/idna/tables12.0.0.go +++ b/vendor/golang.org/x/net/idna/tables12.0.0.go @@ -1,6 +1,7 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.14 && !go1.16 +// +build go1.14,!go1.16 package idna diff --git a/vendor/golang.org/x/net/idna/tables13.0.0.go b/vendor/golang.org/x/net/idna/tables13.0.0.go index 2fb768ef..66701ead 100644 --- a/vendor/golang.org/x/net/idna/tables13.0.0.go +++ b/vendor/golang.org/x/net/idna/tables13.0.0.go @@ -1,6 +1,7 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.16 && !go1.21 +// +build go1.16,!go1.21 package idna diff --git a/vendor/golang.org/x/net/idna/tables15.0.0.go b/vendor/golang.org/x/net/idna/tables15.0.0.go index 5ff05fe1..40033778 100644 --- a/vendor/golang.org/x/net/idna/tables15.0.0.go +++ b/vendor/golang.org/x/net/idna/tables15.0.0.go @@ -1,6 +1,7 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. 
//go:build go1.21 +// +build go1.21 package idna diff --git a/vendor/golang.org/x/net/idna/tables9.0.0.go b/vendor/golang.org/x/net/idna/tables9.0.0.go index 0f25e84c..4074b533 100644 --- a/vendor/golang.org/x/net/idna/tables9.0.0.go +++ b/vendor/golang.org/x/net/idna/tables9.0.0.go @@ -1,6 +1,7 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build !go1.10 +// +build !go1.10 package idna diff --git a/vendor/golang.org/x/net/idna/trie12.0.0.go b/vendor/golang.org/x/net/idna/trie12.0.0.go index 8a75b966..bb63f904 100644 --- a/vendor/golang.org/x/net/idna/trie12.0.0.go +++ b/vendor/golang.org/x/net/idna/trie12.0.0.go @@ -5,6 +5,7 @@ // license that can be found in the LICENSE file. //go:build !go1.16 +// +build !go1.16 package idna diff --git a/vendor/golang.org/x/net/idna/trie13.0.0.go b/vendor/golang.org/x/net/idna/trie13.0.0.go index fa45bb90..7d68a8dc 100644 --- a/vendor/golang.org/x/net/idna/trie13.0.0.go +++ b/vendor/golang.org/x/net/idna/trie13.0.0.go @@ -5,6 +5,7 @@ // license that can be found in the LICENSE file. //go:build go1.16 +// +build go1.16 package idna diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr.go b/vendor/golang.org/x/net/internal/socket/cmsghdr.go index 33a5bf59..4bdaaaf1 100644 --- a/vendor/golang.org/x/net/internal/socket/cmsghdr.go +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package socket diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr_bsd.go b/vendor/golang.org/x/net/internal/socket/cmsghdr_bsd.go index 68f438c8..0d30e0a0 100644 --- a/vendor/golang.org/x/net/internal/socket/cmsghdr_bsd.go +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr_bsd.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || netbsd || openbsd +// +build aix darwin dragonfly freebsd netbsd openbsd package socket diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_32bit.go b/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_32bit.go index 058ea8de..4936e8a6 100644 --- a/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_32bit.go +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_32bit.go @@ -3,6 +3,8 @@ // license that can be found in the LICENSE file. //go:build (arm || mips || mipsle || 386 || ppc) && linux +// +build arm mips mipsle 386 ppc +// +build linux package socket diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_64bit.go b/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_64bit.go index 3ca0d3a0..f6877f98 100644 --- a/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_64bit.go +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_64bit.go @@ -3,6 +3,8 @@ // license that can be found in the LICENSE file. 
//go:build (arm64 || amd64 || loong64 || ppc64 || ppc64le || mips64 || mips64le || riscv64 || s390x) && linux +// +build arm64 amd64 loong64 ppc64 ppc64le mips64 mips64le riscv64 s390x +// +build linux package socket diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr_solaris_64bit.go b/vendor/golang.org/x/net/internal/socket/cmsghdr_solaris_64bit.go index 6d0e426c..d3dbe1b8 100644 --- a/vendor/golang.org/x/net/internal/socket/cmsghdr_solaris_64bit.go +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr_solaris_64bit.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build amd64 && solaris +// +build amd64,solaris package socket diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr_stub.go b/vendor/golang.org/x/net/internal/socket/cmsghdr_stub.go index 7ca9cb7e..1d9f2ed6 100644 --- a/vendor/golang.org/x/net/internal/socket/cmsghdr_stub.go +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr_stub.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !zos +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!zos package socket diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr_unix.go b/vendor/golang.org/x/net/internal/socket/cmsghdr_unix.go index 0211f225..19d46789 100644 --- a/vendor/golang.org/x/net/internal/socket/cmsghdr_unix.go +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr_unix.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package socket diff --git a/vendor/golang.org/x/net/internal/socket/complete_dontwait.go b/vendor/golang.org/x/net/internal/socket/complete_dontwait.go index 2038f290..5b1d50ae 100644 --- a/vendor/golang.org/x/net/internal/socket/complete_dontwait.go +++ b/vendor/golang.org/x/net/internal/socket/complete_dontwait.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris +// +build darwin dragonfly freebsd linux netbsd openbsd solaris package socket diff --git a/vendor/golang.org/x/net/internal/socket/complete_nodontwait.go b/vendor/golang.org/x/net/internal/socket/complete_nodontwait.go index 70e6f448..be634095 100644 --- a/vendor/golang.org/x/net/internal/socket/complete_nodontwait.go +++ b/vendor/golang.org/x/net/internal/socket/complete_nodontwait.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build aix || windows || zos +// +build aix windows zos package socket diff --git a/vendor/golang.org/x/net/internal/socket/empty.s b/vendor/golang.org/x/net/internal/socket/empty.s index 49d79791..90ab4ca3 100644 --- a/vendor/golang.org/x/net/internal/socket/empty.s +++ b/vendor/golang.org/x/net/internal/socket/empty.s @@ -3,5 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin && go1.12 +// +build darwin,go1.12 // This exists solely so we can linkname in symbols from syscall. diff --git a/vendor/golang.org/x/net/internal/socket/error_unix.go b/vendor/golang.org/x/net/internal/socket/error_unix.go index 7a5cc5c4..78f41290 100644 --- a/vendor/golang.org/x/net/internal/socket/error_unix.go +++ b/vendor/golang.org/x/net/internal/socket/error_unix.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. 
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package socket diff --git a/vendor/golang.org/x/net/internal/socket/iovec_32bit.go b/vendor/golang.org/x/net/internal/socket/iovec_32bit.go index 340e53fb..2b8fbb3f 100644 --- a/vendor/golang.org/x/net/internal/socket/iovec_32bit.go +++ b/vendor/golang.org/x/net/internal/socket/iovec_32bit.go @@ -3,6 +3,8 @@ // license that can be found in the LICENSE file. //go:build (arm || mips || mipsle || 386 || ppc) && (darwin || dragonfly || freebsd || linux || netbsd || openbsd) +// +build arm mips mipsle 386 ppc +// +build darwin dragonfly freebsd linux netbsd openbsd package socket diff --git a/vendor/golang.org/x/net/internal/socket/iovec_64bit.go b/vendor/golang.org/x/net/internal/socket/iovec_64bit.go index 26470c19..2e94e96f 100644 --- a/vendor/golang.org/x/net/internal/socket/iovec_64bit.go +++ b/vendor/golang.org/x/net/internal/socket/iovec_64bit.go @@ -3,6 +3,8 @@ // license that can be found in the LICENSE file. //go:build (arm64 || amd64 || loong64 || ppc64 || ppc64le || mips64 || mips64le || riscv64 || s390x) && (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || zos) +// +build arm64 amd64 loong64 ppc64 ppc64le mips64 mips64le riscv64 s390x +// +build aix darwin dragonfly freebsd linux netbsd openbsd zos package socket diff --git a/vendor/golang.org/x/net/internal/socket/iovec_solaris_64bit.go b/vendor/golang.org/x/net/internal/socket/iovec_solaris_64bit.go index 8859ce10..f7da2bc4 100644 --- a/vendor/golang.org/x/net/internal/socket/iovec_solaris_64bit.go +++ b/vendor/golang.org/x/net/internal/socket/iovec_solaris_64bit.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build amd64 && solaris +// +build amd64,solaris package socket diff --git a/vendor/golang.org/x/net/internal/socket/iovec_stub.go b/vendor/golang.org/x/net/internal/socket/iovec_stub.go index da886b03..14caf524 100644 --- a/vendor/golang.org/x/net/internal/socket/iovec_stub.go +++ b/vendor/golang.org/x/net/internal/socket/iovec_stub.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !zos +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!zos package socket diff --git a/vendor/golang.org/x/net/internal/socket/mmsghdr_stub.go b/vendor/golang.org/x/net/internal/socket/mmsghdr_stub.go index 4825b21e..113e773c 100644 --- a/vendor/golang.org/x/net/internal/socket/mmsghdr_stub.go +++ b/vendor/golang.org/x/net/internal/socket/mmsghdr_stub.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build !aix && !linux && !netbsd +// +build !aix,!linux,!netbsd package socket diff --git a/vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go b/vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go index 311fd2c7..41883c53 100644 --- a/vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go +++ b/vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. 
//go:build aix || linux || netbsd +// +build aix linux netbsd package socket diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_bsd.go b/vendor/golang.org/x/net/internal/socket/msghdr_bsd.go index ebff4f6e..25f6847f 100644 --- a/vendor/golang.org/x/net/internal/socket/msghdr_bsd.go +++ b/vendor/golang.org/x/net/internal/socket/msghdr_bsd.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || netbsd || openbsd +// +build aix darwin dragonfly freebsd netbsd openbsd package socket diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_bsdvar.go b/vendor/golang.org/x/net/internal/socket/msghdr_bsdvar.go index 62e6fe86..5b8e00f1 100644 --- a/vendor/golang.org/x/net/internal/socket/msghdr_bsdvar.go +++ b/vendor/golang.org/x/net/internal/socket/msghdr_bsdvar.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || netbsd +// +build aix darwin dragonfly freebsd netbsd package socket diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_linux_32bit.go b/vendor/golang.org/x/net/internal/socket/msghdr_linux_32bit.go index 3dd07250..b4658fba 100644 --- a/vendor/golang.org/x/net/internal/socket/msghdr_linux_32bit.go +++ b/vendor/golang.org/x/net/internal/socket/msghdr_linux_32bit.go @@ -3,6 +3,8 @@ // license that can be found in the LICENSE file. //go:build (arm || mips || mipsle || 386 || ppc) && linux +// +build arm mips mipsle 386 ppc +// +build linux package socket diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go b/vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go index 5af9ddd6..42411aff 100644 --- a/vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go +++ b/vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go @@ -3,6 +3,8 @@ // license that can be found in the LICENSE file. //go:build (arm64 || amd64 || loong64 || ppc64 || ppc64le || mips64 || mips64le || riscv64 || s390x) && linux +// +build arm64 amd64 loong64 ppc64 ppc64le mips64 mips64le riscv64 s390x +// +build linux package socket diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_solaris_64bit.go b/vendor/golang.org/x/net/internal/socket/msghdr_solaris_64bit.go index e212b50f..3098f5d7 100644 --- a/vendor/golang.org/x/net/internal/socket/msghdr_solaris_64bit.go +++ b/vendor/golang.org/x/net/internal/socket/msghdr_solaris_64bit.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build amd64 && solaris +// +build amd64,solaris package socket diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_stub.go b/vendor/golang.org/x/net/internal/socket/msghdr_stub.go index e8767764..eb79151f 100644 --- a/vendor/golang.org/x/net/internal/socket/msghdr_stub.go +++ b/vendor/golang.org/x/net/internal/socket/msghdr_stub.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !zos +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!zos package socket diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_zos_s390x.go b/vendor/golang.org/x/net/internal/socket/msghdr_zos_s390x.go index 529db68e..324e9ee7 100644 --- a/vendor/golang.org/x/net/internal/socket/msghdr_zos_s390x.go +++ b/vendor/golang.org/x/net/internal/socket/msghdr_zos_s390x.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. 
//go:build s390x && zos +// +build s390x,zos package socket diff --git a/vendor/golang.org/x/net/internal/socket/norace.go b/vendor/golang.org/x/net/internal/socket/norace.go index 8af30ecf..de0ad420 100644 --- a/vendor/golang.org/x/net/internal/socket/norace.go +++ b/vendor/golang.org/x/net/internal/socket/norace.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build !race +// +build !race package socket diff --git a/vendor/golang.org/x/net/internal/socket/race.go b/vendor/golang.org/x/net/internal/socket/race.go index 9afa9580..f0a28a62 100644 --- a/vendor/golang.org/x/net/internal/socket/race.go +++ b/vendor/golang.org/x/net/internal/socket/race.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build race +// +build race package socket diff --git a/vendor/golang.org/x/net/internal/socket/rawconn_mmsg.go b/vendor/golang.org/x/net/internal/socket/rawconn_mmsg.go index 04313907..8f79b38f 100644 --- a/vendor/golang.org/x/net/internal/socket/rawconn_mmsg.go +++ b/vendor/golang.org/x/net/internal/socket/rawconn_mmsg.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build linux +// +build linux package socket diff --git a/vendor/golang.org/x/net/internal/socket/rawconn_msg.go b/vendor/golang.org/x/net/internal/socket/rawconn_msg.go index 7c0d7410..f7d0b0d2 100644 --- a/vendor/golang.org/x/net/internal/socket/rawconn_msg.go +++ b/vendor/golang.org/x/net/internal/socket/rawconn_msg.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || windows || zos +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows zos package socket diff --git a/vendor/golang.org/x/net/internal/socket/rawconn_nommsg.go b/vendor/golang.org/x/net/internal/socket/rawconn_nommsg.go index e363fb5a..02f32855 100644 --- a/vendor/golang.org/x/net/internal/socket/rawconn_nommsg.go +++ b/vendor/golang.org/x/net/internal/socket/rawconn_nommsg.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build !linux +// +build !linux package socket diff --git a/vendor/golang.org/x/net/internal/socket/rawconn_nomsg.go b/vendor/golang.org/x/net/internal/socket/rawconn_nomsg.go index ff7a8baf..dd785877 100644 --- a/vendor/golang.org/x/net/internal/socket/rawconn_nomsg.go +++ b/vendor/golang.org/x/net/internal/socket/rawconn_nomsg.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos package socket diff --git a/vendor/golang.org/x/net/internal/socket/sys_bsd.go b/vendor/golang.org/x/net/internal/socket/sys_bsd.go index e7664d48..b258879d 100644 --- a/vendor/golang.org/x/net/internal/socket/sys_bsd.go +++ b/vendor/golang.org/x/net/internal/socket/sys_bsd.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. 
//go:build aix || darwin || dragonfly || freebsd || openbsd || solaris +// +build aix darwin dragonfly freebsd openbsd solaris package socket diff --git a/vendor/golang.org/x/net/internal/socket/sys_const_unix.go b/vendor/golang.org/x/net/internal/socket/sys_const_unix.go index d7627f87..5d99f237 100644 --- a/vendor/golang.org/x/net/internal/socket/sys_const_unix.go +++ b/vendor/golang.org/x/net/internal/socket/sys_const_unix.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package socket diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux.go b/vendor/golang.org/x/net/internal/socket/sys_linux.go index 08d49107..76f5b8ae 100644 --- a/vendor/golang.org/x/net/internal/socket/sys_linux.go +++ b/vendor/golang.org/x/net/internal/socket/sys_linux.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build linux && !s390x && !386 +// +build linux,!s390x,!386 package socket diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_loong64.go b/vendor/golang.org/x/net/internal/socket/sys_linux_loong64.go index 1d182470..af964e61 100644 --- a/vendor/golang.org/x/net/internal/socket/sys_linux_loong64.go +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_loong64.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build loong64 +// +build loong64 package socket diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_riscv64.go b/vendor/golang.org/x/net/internal/socket/sys_linux_riscv64.go index 0e407d12..5b128fbb 100644 --- a/vendor/golang.org/x/net/internal/socket/sys_linux_riscv64.go +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_riscv64.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build riscv64 +// +build riscv64 package socket diff --git a/vendor/golang.org/x/net/internal/socket/sys_posix.go b/vendor/golang.org/x/net/internal/socket/sys_posix.go index 58d86548..42b8f234 100644 --- a/vendor/golang.org/x/net/internal/socket/sys_posix.go +++ b/vendor/golang.org/x/net/internal/socket/sys_posix.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || windows || zos +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows zos package socket diff --git a/vendor/golang.org/x/net/internal/socket/sys_stub.go b/vendor/golang.org/x/net/internal/socket/sys_stub.go index 2e5b473c..7cfb349c 100644 --- a/vendor/golang.org/x/net/internal/socket/sys_stub.go +++ b/vendor/golang.org/x/net/internal/socket/sys_stub.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos package socket diff --git a/vendor/golang.org/x/net/internal/socket/sys_unix.go b/vendor/golang.org/x/net/internal/socket/sys_unix.go index 93058db5..de823932 100644 --- a/vendor/golang.org/x/net/internal/socket/sys_unix.go +++ b/vendor/golang.org/x/net/internal/socket/sys_unix.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. 
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package socket diff --git a/vendor/golang.org/x/net/internal/socket/zsys_aix_ppc64.go b/vendor/golang.org/x/net/internal/socket/zsys_aix_ppc64.go index 45bab004..00691bd5 100644 --- a/vendor/golang.org/x/net/internal/socket/zsys_aix_ppc64.go +++ b/vendor/golang.org/x/net/internal/socket/zsys_aix_ppc64.go @@ -3,6 +3,7 @@ // Added for go1.11 compatibility //go:build aix +// +build aix package socket diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_loong64.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_loong64.go index b6fc15a1..6a94fec2 100644 --- a/vendor/golang.org/x/net/internal/socket/zsys_linux_loong64.go +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_loong64.go @@ -2,6 +2,7 @@ // cgo -godefs defs_linux.go //go:build loong64 +// +build loong64 package socket diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_riscv64.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_riscv64.go index e67fc3cb..c066272d 100644 --- a/vendor/golang.org/x/net/internal/socket/zsys_linux_riscv64.go +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_riscv64.go @@ -2,6 +2,7 @@ // cgo -godefs defs_linux.go //go:build riscv64 +// +build riscv64 package socket diff --git a/vendor/golang.org/x/net/ipv4/control_bsd.go b/vendor/golang.org/x/net/ipv4/control_bsd.go index c88da8cb..b7385dfd 100644 --- a/vendor/golang.org/x/net/ipv4/control_bsd.go +++ b/vendor/golang.org/x/net/ipv4/control_bsd.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || netbsd || openbsd +// +build aix darwin dragonfly freebsd netbsd openbsd package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/control_pktinfo.go b/vendor/golang.org/x/net/ipv4/control_pktinfo.go index 14ae2dae..0e748dbd 100644 --- a/vendor/golang.org/x/net/ipv4/control_pktinfo.go +++ b/vendor/golang.org/x/net/ipv4/control_pktinfo.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build darwin || linux || solaris +// +build darwin linux solaris package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/control_stub.go b/vendor/golang.org/x/net/ipv4/control_stub.go index 3ba66116..f27322c3 100644 --- a/vendor/golang.org/x/net/ipv4/control_stub.go +++ b/vendor/golang.org/x/net/ipv4/control_stub.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/control_unix.go b/vendor/golang.org/x/net/ipv4/control_unix.go index 2e765548..2413e02f 100644 --- a/vendor/golang.org/x/net/ipv4/control_unix.go +++ b/vendor/golang.org/x/net/ipv4/control_unix.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/icmp_stub.go b/vendor/golang.org/x/net/ipv4/icmp_stub.go index c2c4ce7f..cd4ee6e1 100644 --- a/vendor/golang.org/x/net/ipv4/icmp_stub.go +++ b/vendor/golang.org/x/net/ipv4/icmp_stub.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. 
//go:build !linux +// +build !linux package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/payload_cmsg.go b/vendor/golang.org/x/net/ipv4/payload_cmsg.go index 91c685e8..1bb370e2 100644 --- a/vendor/golang.org/x/net/ipv4/payload_cmsg.go +++ b/vendor/golang.org/x/net/ipv4/payload_cmsg.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/payload_nocmsg.go b/vendor/golang.org/x/net/ipv4/payload_nocmsg.go index 2afd4b50..53f0794e 100644 --- a/vendor/golang.org/x/net/ipv4/payload_nocmsg.go +++ b/vendor/golang.org/x/net/ipv4/payload_nocmsg.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !zos +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!zos package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/sockopt_posix.go b/vendor/golang.org/x/net/ipv4/sockopt_posix.go index 82e2c378..eb07c1c0 100644 --- a/vendor/golang.org/x/net/ipv4/sockopt_posix.go +++ b/vendor/golang.org/x/net/ipv4/sockopt_posix.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || windows || zos +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows zos package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/sockopt_stub.go b/vendor/golang.org/x/net/ipv4/sockopt_stub.go index 840108bf..cf036893 100644 --- a/vendor/golang.org/x/net/ipv4/sockopt_stub.go +++ b/vendor/golang.org/x/net/ipv4/sockopt_stub.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/sys_aix.go b/vendor/golang.org/x/net/ipv4/sys_aix.go index 9244a68a..02730cdf 100644 --- a/vendor/golang.org/x/net/ipv4/sys_aix.go +++ b/vendor/golang.org/x/net/ipv4/sys_aix.go @@ -4,6 +4,7 @@ // Added for go1.11 compatibility //go:build aix +// +build aix package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/sys_asmreq.go b/vendor/golang.org/x/net/ipv4/sys_asmreq.go index 645f254c..22322b38 100644 --- a/vendor/golang.org/x/net/ipv4/sys_asmreq.go +++ b/vendor/golang.org/x/net/ipv4/sys_asmreq.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || netbsd || openbsd || solaris || windows +// +build aix darwin dragonfly freebsd netbsd openbsd solaris windows package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/sys_asmreq_stub.go b/vendor/golang.org/x/net/ipv4/sys_asmreq_stub.go index 48cfb6db..fde64014 100644 --- a/vendor/golang.org/x/net/ipv4/sys_asmreq_stub.go +++ b/vendor/golang.org/x/net/ipv4/sys_asmreq_stub.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. 
//go:build !aix && !darwin && !dragonfly && !freebsd && !netbsd && !openbsd && !solaris && !windows +// +build !aix,!darwin,!dragonfly,!freebsd,!netbsd,!openbsd,!solaris,!windows package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/sys_asmreqn.go b/vendor/golang.org/x/net/ipv4/sys_asmreqn.go index 0b27b632..54eb9901 100644 --- a/vendor/golang.org/x/net/ipv4/sys_asmreqn.go +++ b/vendor/golang.org/x/net/ipv4/sys_asmreqn.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build darwin || freebsd || linux +// +build darwin freebsd linux package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/sys_asmreqn_stub.go b/vendor/golang.org/x/net/ipv4/sys_asmreqn_stub.go index 303a5e2e..dcb15f25 100644 --- a/vendor/golang.org/x/net/ipv4/sys_asmreqn_stub.go +++ b/vendor/golang.org/x/net/ipv4/sys_asmreqn_stub.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build !darwin && !freebsd && !linux +// +build !darwin,!freebsd,!linux package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/sys_bpf.go b/vendor/golang.org/x/net/ipv4/sys_bpf.go index 1b4780df..fb11e324 100644 --- a/vendor/golang.org/x/net/ipv4/sys_bpf.go +++ b/vendor/golang.org/x/net/ipv4/sys_bpf.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build linux +// +build linux package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/sys_bpf_stub.go b/vendor/golang.org/x/net/ipv4/sys_bpf_stub.go index b1f779b4..fc53a0d3 100644 --- a/vendor/golang.org/x/net/ipv4/sys_bpf_stub.go +++ b/vendor/golang.org/x/net/ipv4/sys_bpf_stub.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build !linux +// +build !linux package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/sys_bsd.go b/vendor/golang.org/x/net/ipv4/sys_bsd.go index b7b032d2..e191b2f1 100644 --- a/vendor/golang.org/x/net/ipv4/sys_bsd.go +++ b/vendor/golang.org/x/net/ipv4/sys_bsd.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build netbsd || openbsd +// +build netbsd openbsd package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/sys_ssmreq.go b/vendor/golang.org/x/net/ipv4/sys_ssmreq.go index a295e15e..6a4e7abf 100644 --- a/vendor/golang.org/x/net/ipv4/sys_ssmreq.go +++ b/vendor/golang.org/x/net/ipv4/sys_ssmreq.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build darwin || freebsd || linux || solaris +// +build darwin freebsd linux solaris package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/sys_ssmreq_stub.go b/vendor/golang.org/x/net/ipv4/sys_ssmreq_stub.go index 74bd454e..157159fd 100644 --- a/vendor/golang.org/x/net/ipv4/sys_ssmreq_stub.go +++ b/vendor/golang.org/x/net/ipv4/sys_ssmreq_stub.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build !darwin && !freebsd && !linux && !solaris +// +build !darwin,!freebsd,!linux,!solaris package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/sys_stub.go b/vendor/golang.org/x/net/ipv4/sys_stub.go index 20af4074..d5508516 100644 --- a/vendor/golang.org/x/net/ipv4/sys_stub.go +++ b/vendor/golang.org/x/net/ipv4/sys_stub.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. 
//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/zsys_aix_ppc64.go b/vendor/golang.org/x/net/ipv4/zsys_aix_ppc64.go index dd454025..b7f2d6e5 100644 --- a/vendor/golang.org/x/net/ipv4/zsys_aix_ppc64.go +++ b/vendor/golang.org/x/net/ipv4/zsys_aix_ppc64.go @@ -3,6 +3,7 @@ // Added for go1.11 compatibility //go:build aix +// +build aix package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_loong64.go b/vendor/golang.org/x/net/ipv4/zsys_linux_loong64.go index 54f9e139..e15c22c7 100644 --- a/vendor/golang.org/x/net/ipv4/zsys_linux_loong64.go +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_loong64.go @@ -2,6 +2,7 @@ // cgo -godefs defs_linux.go //go:build loong64 +// +build loong64 package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_riscv64.go b/vendor/golang.org/x/net/ipv4/zsys_linux_riscv64.go index 78374a52..e2edebdb 100644 --- a/vendor/golang.org/x/net/ipv4/zsys_linux_riscv64.go +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_riscv64.go @@ -2,6 +2,7 @@ // cgo -godefs defs_linux.go //go:build riscv64 +// +build riscv64 package ipv4 diff --git a/vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go b/vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go index a8f04e7b..2733ddbe 100644 --- a/vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go +++ b/vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build darwin +// +build darwin package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go b/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go index 51fbbb1f..9c90844a 100644 --- a/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go +++ b/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/control_stub.go b/vendor/golang.org/x/net/ipv6/control_stub.go index eb28ce75..b7e8643f 100644 --- a/vendor/golang.org/x/net/ipv6/control_stub.go +++ b/vendor/golang.org/x/net/ipv6/control_stub.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/control_unix.go b/vendor/golang.org/x/net/ipv6/control_unix.go index 9c73b864..63e475db 100644 --- a/vendor/golang.org/x/net/ipv6/control_unix.go +++ b/vendor/golang.org/x/net/ipv6/control_unix.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/icmp_bsd.go b/vendor/golang.org/x/net/ipv6/icmp_bsd.go index 2814534a..120bf877 100644 --- a/vendor/golang.org/x/net/ipv6/icmp_bsd.go +++ b/vendor/golang.org/x/net/ipv6/icmp_bsd.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. 
//go:build aix || darwin || dragonfly || freebsd || netbsd || openbsd +// +build aix darwin dragonfly freebsd netbsd openbsd package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/icmp_stub.go b/vendor/golang.org/x/net/ipv6/icmp_stub.go index c92c9b51..d60136a9 100644 --- a/vendor/golang.org/x/net/ipv6/icmp_stub.go +++ b/vendor/golang.org/x/net/ipv6/icmp_stub.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/payload_cmsg.go b/vendor/golang.org/x/net/ipv6/payload_cmsg.go index be04e4d6..b0692e43 100644 --- a/vendor/golang.org/x/net/ipv6/payload_cmsg.go +++ b/vendor/golang.org/x/net/ipv6/payload_cmsg.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/payload_nocmsg.go b/vendor/golang.org/x/net/ipv6/payload_nocmsg.go index 29b9ccf6..cd0ff508 100644 --- a/vendor/golang.org/x/net/ipv6/payload_nocmsg.go +++ b/vendor/golang.org/x/net/ipv6/payload_nocmsg.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !zos +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!zos package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/sockopt_posix.go b/vendor/golang.org/x/net/ipv6/sockopt_posix.go index 34dfed58..37c62871 100644 --- a/vendor/golang.org/x/net/ipv6/sockopt_posix.go +++ b/vendor/golang.org/x/net/ipv6/sockopt_posix.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || windows || zos +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows zos package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/sockopt_stub.go b/vendor/golang.org/x/net/ipv6/sockopt_stub.go index a09c3aaf..32fd8664 100644 --- a/vendor/golang.org/x/net/ipv6/sockopt_stub.go +++ b/vendor/golang.org/x/net/ipv6/sockopt_stub.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/sys_aix.go b/vendor/golang.org/x/net/ipv6/sys_aix.go index 93c8efc4..a47182af 100644 --- a/vendor/golang.org/x/net/ipv6/sys_aix.go +++ b/vendor/golang.org/x/net/ipv6/sys_aix.go @@ -4,6 +4,7 @@ // Added for go1.11 compatibility //go:build aix +// +build aix package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/sys_asmreq.go b/vendor/golang.org/x/net/ipv6/sys_asmreq.go index 5c9cb444..6ff9950d 100644 --- a/vendor/golang.org/x/net/ipv6/sys_asmreq.go +++ b/vendor/golang.org/x/net/ipv6/sys_asmreq.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. 
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || windows +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/sys_asmreq_stub.go b/vendor/golang.org/x/net/ipv6/sys_asmreq_stub.go index dc704946..485290cb 100644 --- a/vendor/golang.org/x/net/ipv6/sys_asmreq_stub.go +++ b/vendor/golang.org/x/net/ipv6/sys_asmreq_stub.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/sys_bpf.go b/vendor/golang.org/x/net/ipv6/sys_bpf.go index e39f75f4..b5661fb8 100644 --- a/vendor/golang.org/x/net/ipv6/sys_bpf.go +++ b/vendor/golang.org/x/net/ipv6/sys_bpf.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build linux +// +build linux package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/sys_bpf_stub.go b/vendor/golang.org/x/net/ipv6/sys_bpf_stub.go index 8532a8f5..cb006618 100644 --- a/vendor/golang.org/x/net/ipv6/sys_bpf_stub.go +++ b/vendor/golang.org/x/net/ipv6/sys_bpf_stub.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build !linux +// +build !linux package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/sys_bsd.go b/vendor/golang.org/x/net/ipv6/sys_bsd.go index 9f3bc2af..bde41a6c 100644 --- a/vendor/golang.org/x/net/ipv6/sys_bsd.go +++ b/vendor/golang.org/x/net/ipv6/sys_bsd.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build dragonfly || netbsd || openbsd +// +build dragonfly netbsd openbsd package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/sys_ssmreq.go b/vendor/golang.org/x/net/ipv6/sys_ssmreq.go index b40f5c68..023488a4 100644 --- a/vendor/golang.org/x/net/ipv6/sys_ssmreq.go +++ b/vendor/golang.org/x/net/ipv6/sys_ssmreq.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || freebsd || linux || solaris || zos +// +build aix darwin freebsd linux solaris zos package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go b/vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go index 6526aad5..acdf2e5c 100644 --- a/vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go +++ b/vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build !aix && !darwin && !freebsd && !linux && !solaris && !zos +// +build !aix,!darwin,!freebsd,!linux,!solaris,!zos package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/sys_stub.go b/vendor/golang.org/x/net/ipv6/sys_stub.go index 76602c34..5807bba3 100644 --- a/vendor/golang.org/x/net/ipv6/sys_stub.go +++ b/vendor/golang.org/x/net/ipv6/sys_stub.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. 
//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/zsys_aix_ppc64.go b/vendor/golang.org/x/net/ipv6/zsys_aix_ppc64.go index 668716df..f604b0f3 100644 --- a/vendor/golang.org/x/net/ipv6/zsys_aix_ppc64.go +++ b/vendor/golang.org/x/net/ipv6/zsys_aix_ppc64.go @@ -3,6 +3,7 @@ // Added for go1.11 compatibility //go:build aix +// +build aix package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_loong64.go b/vendor/golang.org/x/net/ipv6/zsys_linux_loong64.go index 6a53284d..598fbfa0 100644 --- a/vendor/golang.org/x/net/ipv6/zsys_linux_loong64.go +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_loong64.go @@ -2,6 +2,7 @@ // cgo -godefs defs_linux.go //go:build loong64 +// +build loong64 package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_riscv64.go b/vendor/golang.org/x/net/ipv6/zsys_linux_riscv64.go index 13b34720..d4f78e40 100644 --- a/vendor/golang.org/x/net/ipv6/zsys_linux_riscv64.go +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_riscv64.go @@ -2,6 +2,7 @@ // cgo -godefs defs_linux.go //go:build riscv64 +// +build riscv64 package ipv6 diff --git a/vendor/golang.org/x/sync/LICENSE b/vendor/golang.org/x/sync/LICENSE index 2a7cf70d..6a66aea5 100644 --- a/vendor/golang.org/x/sync/LICENSE +++ b/vendor/golang.org/x/sync/LICENSE @@ -1,4 +1,4 @@ -Copyright 2009 The Go Authors. +Copyright (c) 2009 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google LLC nor the names of its + * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go deleted file mode 100644 index 948a3ee6..00000000 --- a/vendor/golang.org/x/sync/errgroup/errgroup.go +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package errgroup provides synchronization, error propagation, and Context -// cancelation for groups of goroutines working on subtasks of a common task. -// -// [errgroup.Group] is related to [sync.WaitGroup] but adds handling of tasks -// returning errors. -package errgroup - -import ( - "context" - "fmt" - "sync" -) - -type token struct{} - -// A Group is a collection of goroutines working on subtasks that are part of -// the same overall task. -// -// A zero Group is valid, has no limit on the number of active goroutines, -// and does not cancel on error. -type Group struct { - cancel func(error) - - wg sync.WaitGroup - - sem chan token - - errOnce sync.Once - err error -} - -func (g *Group) done() { - if g.sem != nil { - <-g.sem - } - g.wg.Done() -} - -// WithContext returns a new Group and an associated Context derived from ctx. 
-// -// The derived Context is canceled the first time a function passed to Go -// returns a non-nil error or the first time Wait returns, whichever occurs -// first. -func WithContext(ctx context.Context) (*Group, context.Context) { - ctx, cancel := withCancelCause(ctx) - return &Group{cancel: cancel}, ctx -} - -// Wait blocks until all function calls from the Go method have returned, then -// returns the first non-nil error (if any) from them. -func (g *Group) Wait() error { - g.wg.Wait() - if g.cancel != nil { - g.cancel(g.err) - } - return g.err -} - -// Go calls the given function in a new goroutine. -// It blocks until the new goroutine can be added without the number of -// active goroutines in the group exceeding the configured limit. -// -// The first call to return a non-nil error cancels the group's context, if the -// group was created by calling WithContext. The error will be returned by Wait. -func (g *Group) Go(f func() error) { - if g.sem != nil { - g.sem <- token{} - } - - g.wg.Add(1) - go func() { - defer g.done() - - if err := f(); err != nil { - g.errOnce.Do(func() { - g.err = err - if g.cancel != nil { - g.cancel(g.err) - } - }) - } - }() -} - -// TryGo calls the given function in a new goroutine only if the number of -// active goroutines in the group is currently below the configured limit. -// -// The return value reports whether the goroutine was started. -func (g *Group) TryGo(f func() error) bool { - if g.sem != nil { - select { - case g.sem <- token{}: - // Note: this allows barging iff channels in general allow barging. - default: - return false - } - } - - g.wg.Add(1) - go func() { - defer g.done() - - if err := f(); err != nil { - g.errOnce.Do(func() { - g.err = err - if g.cancel != nil { - g.cancel(g.err) - } - }) - } - }() - return true -} - -// SetLimit limits the number of active goroutines in this group to at most n. -// A negative value indicates no limit. -// -// Any subsequent call to the Go method will block until it can add an active -// goroutine without exceeding the configured limit. -// -// The limit must not be modified while any goroutines in the group are active. -func (g *Group) SetLimit(n int) { - if n < 0 { - g.sem = nil - return - } - if len(g.sem) != 0 { - panic(fmt.Errorf("errgroup: modify limit while %v goroutines in the group are still active", len(g.sem))) - } - g.sem = make(chan token, n) -} diff --git a/vendor/golang.org/x/sync/errgroup/go120.go b/vendor/golang.org/x/sync/errgroup/go120.go deleted file mode 100644 index f93c740b..00000000 --- a/vendor/golang.org/x/sync/errgroup/go120.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.20 - -package errgroup - -import "context" - -func withCancelCause(parent context.Context) (context.Context, func(error)) { - return context.WithCancelCause(parent) -} diff --git a/vendor/golang.org/x/sync/errgroup/pre_go120.go b/vendor/golang.org/x/sync/errgroup/pre_go120.go deleted file mode 100644 index 88ce3343..00000000 --- a/vendor/golang.org/x/sync/errgroup/pre_go120.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !go1.20 - -package errgroup - -import "context" - -func withCancelCause(parent context.Context) (context.Context, func(error)) { - ctx, cancel := context.WithCancel(parent) - return ctx, func(error) { cancel() } -} diff --git a/vendor/golang.org/x/sync/semaphore/semaphore.go b/vendor/golang.org/x/sync/semaphore/semaphore.go index b618162a..30f632c5 100644 --- a/vendor/golang.org/x/sync/semaphore/semaphore.go +++ b/vendor/golang.org/x/sync/semaphore/semaphore.go @@ -35,25 +35,11 @@ type Weighted struct { // Acquire acquires the semaphore with a weight of n, blocking until resources // are available or ctx is done. On success, returns nil. On failure, returns // ctx.Err() and leaves the semaphore unchanged. +// +// If ctx is already done, Acquire may still succeed without blocking. func (s *Weighted) Acquire(ctx context.Context, n int64) error { - done := ctx.Done() - s.mu.Lock() - select { - case <-done: - // ctx becoming done has "happened before" acquiring the semaphore, - // whether it became done before the call began or while we were - // waiting for the mutex. We prefer to fail even if we could acquire - // the mutex without blocking. - s.mu.Unlock() - return ctx.Err() - default: - } if s.size-s.cur >= n && s.waiters.Len() == 0 { - // Since we hold s.mu and haven't synchronized since checking done, if - // ctx becomes done before we return here, it becoming done must have - // "happened concurrently" with this call - it cannot "happen before" - // we return in this branch. So, we're ok to always acquire here. s.cur += n s.mu.Unlock() return nil @@ -62,7 +48,7 @@ func (s *Weighted) Acquire(ctx context.Context, n int64) error { if n > s.size { // Don't make other Acquire calls block on one that's doomed to fail. s.mu.Unlock() - <-done + <-ctx.Done() return ctx.Err() } @@ -72,14 +58,14 @@ func (s *Weighted) Acquire(ctx context.Context, n int64) error { s.mu.Unlock() select { - case <-done: + case <-ctx.Done(): + err := ctx.Err() s.mu.Lock() select { case <-ready: - // Acquired the semaphore after we were canceled. - // Pretend we didn't and put the tokens back. - s.cur -= n - s.notifyWaiters() + // Acquired the semaphore after we were canceled. Rather than trying to + // fix up the queue, just pretend we didn't notice the cancelation. + err = nil default: isFront := s.waiters.Front() == elem s.waiters.Remove(elem) @@ -89,19 +75,9 @@ func (s *Weighted) Acquire(ctx context.Context, n int64) error { } } s.mu.Unlock() - return ctx.Err() + return err case <-ready: - // Acquired the semaphore. Check that ctx isn't already done. - // We check the done channel instead of calling ctx.Err because we - // already have the channel, and ctx.Err is O(n) with the nesting - // depth of ctx. - select { - case <-done: - s.Release(n) - return ctx.Err() - default: - } return nil } } diff --git a/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s b/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s index 269e173c..db9171c2 100644 --- a/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s +++ b/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. 
//go:build gc +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go index 8fa707aa..4756ad5f 100644 --- a/vendor/golang.org/x/sys/cpu/cpu.go +++ b/vendor/golang.org/x/sys/cpu/cpu.go @@ -103,7 +103,6 @@ var ARM64 struct { HasASIMDDP bool // Advanced SIMD double precision instruction set HasSHA512 bool // SHA512 hardware implementation HasSVE bool // Scalable Vector Extensions - HasSVE2 bool // Scalable Vector Extensions 2 HasASIMDFHM bool // Advanced SIMD multiplication FP16 to FP32 _ CacheLinePad } diff --git a/vendor/golang.org/x/sys/cpu/cpu_aix.go b/vendor/golang.org/x/sys/cpu/cpu_aix.go index 9bf0c32e..8aaeef54 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_aix.go +++ b/vendor/golang.org/x/sys/cpu/cpu_aix.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build aix +// +build aix package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_arm64.go index 0e27a21e..f3eb993b 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.go @@ -28,7 +28,6 @@ func initOptions() { {Name: "sm3", Feature: &ARM64.HasSM3}, {Name: "sm4", Feature: &ARM64.HasSM4}, {Name: "sve", Feature: &ARM64.HasSVE}, - {Name: "sve2", Feature: &ARM64.HasSVE2}, {Name: "crc32", Feature: &ARM64.HasCRC32}, {Name: "atomics", Feature: &ARM64.HasATOMICS}, {Name: "asimdhp", Feature: &ARM64.HasASIMDHP}, @@ -165,15 +164,6 @@ func parseARM64SystemRegisters(isar0, isar1, pfr0 uint64) { switch extractBits(pfr0, 32, 35) { case 1: ARM64.HasSVE = true - - parseARM64SVERegister(getzfr0()) - } -} - -func parseARM64SVERegister(zfr0 uint64) { - switch extractBits(zfr0, 0, 3) { - case 1: - ARM64.HasSVE2 = true } } diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.s b/vendor/golang.org/x/sys/cpu/cpu_arm64.s index 22cc9984..c61f95a0 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_arm64.s +++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.s @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build gc +// +build gc #include "textflag.h" @@ -29,11 +30,3 @@ TEXT ·getpfr0(SB),NOSPLIT,$0-8 WORD $0xd5380400 MOVD R0, ret+0(FP) RET - -// func getzfr0() uint64 -TEXT ·getzfr0(SB),NOSPLIT,$0-8 - // get SVE Feature Register 0 into x0 - // mrs x0, ID_AA64ZFR0_EL1 = d5380480 - WORD $0xd5380480 - MOVD R0, ret+0(FP) - RET diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go index 6ac6e1ef..ccf542a7 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go @@ -3,10 +3,10 @@ // license that can be found in the LICENSE file. //go:build gc +// +build gc package cpu func getisar0() uint64 func getisar1() uint64 func getpfr0() uint64 -func getzfr0() uint64 diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go index c8ae6ddc..0af2f248 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build gc +// +build gc package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go index 910728fb..fa7cdb9b 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go @@ -3,6 +3,8 @@ // license that can be found in the LICENSE file. 
//go:build (386 || amd64 || amd64p32) && gc +// +build 386 amd64 amd64p32 +// +build gc package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go index 7f194678..2aff3189 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build gccgo +// +build gccgo package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go index 9526d2ce..4bfbda61 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build gccgo +// +build gccgo package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c index 3f73a05d..6cc73109 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c @@ -3,6 +3,8 @@ // license that can be found in the LICENSE file. //go:build (386 || amd64 || amd64p32) && gccgo +// +build 386 amd64 amd64p32 +// +build gccgo #include #include diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go index 99c60fe9..863d415a 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go @@ -3,6 +3,8 @@ // license that can be found in the LICENSE file. //go:build (386 || amd64 || amd64p32) && gccgo +// +build 386 amd64 amd64p32 +// +build gccgo package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux.go b/vendor/golang.org/x/sys/cpu/cpu_linux.go index 743eb543..159a686f 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_linux.go +++ b/vendor/golang.org/x/sys/cpu/cpu_linux.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build !386 && !amd64 && !amd64p32 && !arm64 +// +build !386,!amd64,!amd64p32,!arm64 package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go index 3d386d0f..a968b80f 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go @@ -35,8 +35,6 @@ const ( hwcap_SHA512 = 1 << 21 hwcap_SVE = 1 << 22 hwcap_ASIMDFHM = 1 << 23 - - hwcap2_SVE2 = 1 << 1 ) // linuxKernelCanEmulateCPUID reports whether we're running @@ -106,9 +104,6 @@ func doinit() { ARM64.HasSHA512 = isSet(hwCap, hwcap_SHA512) ARM64.HasSVE = isSet(hwCap, hwcap_SVE) ARM64.HasASIMDFHM = isSet(hwCap, hwcap_ASIMDFHM) - - // HWCAP2 feature bits - ARM64.HasSVE2 = isSet(hwCap2, hwcap2_SVE2) } func isSet(hwc uint, value uint) bool { diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go index 4686c1d5..6000db4c 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go @@ -3,6 +3,8 @@ // license that can be found in the LICENSE file. //go:build linux && (mips64 || mips64le) +// +build linux +// +build mips64 mips64le package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go index cd63e733..f4992b1a 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. 
//go:build linux && !arm && !arm64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !s390x +// +build linux,!arm,!arm64,!mips64,!mips64le,!ppc64,!ppc64le,!s390x package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go index 197188e6..021356d6 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go @@ -3,6 +3,8 @@ // license that can be found in the LICENSE file. //go:build linux && (ppc64 || ppc64le) +// +build linux +// +build ppc64 ppc64le package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_loong64.go b/vendor/golang.org/x/sys/cpu/cpu_loong64.go index 55863585..0f57b05b 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_loong64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_loong64.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build loong64 +// +build loong64 package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_mips64x.go index fedb00cc..f4063c66 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_mips64x.go +++ b/vendor/golang.org/x/sys/cpu/cpu_mips64x.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build mips64 || mips64le +// +build mips64 mips64le package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_mipsx.go b/vendor/golang.org/x/sys/cpu/cpu_mipsx.go index ffb4ec7e..07c4e36d 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_mipsx.go +++ b/vendor/golang.org/x/sys/cpu/cpu_mipsx.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build mips || mipsle +// +build mips mipsle package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_arm.go b/vendor/golang.org/x/sys/cpu/cpu_other_arm.go index e9ecf2a4..d7b4fb4c 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_other_arm.go +++ b/vendor/golang.org/x/sys/cpu/cpu_other_arm.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build !linux && arm +// +build !linux,arm package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go index 5341e7f8..f3cde129 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build !linux && !netbsd && !openbsd && arm64 +// +build !linux,!netbsd,!openbsd,arm64 package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go index 5f8f2419..0dafe964 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go +++ b/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go @@ -3,6 +3,8 @@ // license that can be found in the LICENSE file. //go:build !linux && (mips64 || mips64le) +// +build !linux +// +build mips64 mips64le package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go index 89608fba..060d46b6 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go +++ b/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go @@ -3,6 +3,9 @@ // license that can be found in the LICENSE file. 
//go:build !aix && !linux && (ppc64 || ppc64le) +// +build !aix +// +build !linux +// +build ppc64 ppc64le package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go index 5ab87808..dd10eb79 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build !linux && riscv64 +// +build !linux,riscv64 package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go index c14f12b1..4e8acd16 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go +++ b/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build ppc64 || ppc64le +// +build ppc64 ppc64le package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go index 7f0c79c0..ff7da60e 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_riscv64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build riscv64 +// +build riscv64 package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_s390x.s b/vendor/golang.org/x/sys/cpu/cpu_s390x.s index 1fb4b701..96f81e20 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_s390x.s +++ b/vendor/golang.org/x/sys/cpu/cpu_s390x.s @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build gc +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/cpu/cpu_wasm.go b/vendor/golang.org/x/sys/cpu/cpu_wasm.go index 384787ea..7747d888 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_wasm.go +++ b/vendor/golang.org/x/sys/cpu/cpu_wasm.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build wasm +// +build wasm package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.go b/vendor/golang.org/x/sys/cpu/cpu_x86.go index c29f5e4c..2dcde828 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_x86.go +++ b/vendor/golang.org/x/sys/cpu/cpu_x86.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build 386 || amd64 || amd64p32 +// +build 386 amd64 amd64p32 package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.s b/vendor/golang.org/x/sys/cpu/cpu_x86.s index 7d7ba33e..39acab2f 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_x86.s +++ b/vendor/golang.org/x/sys/cpu/cpu_x86.s @@ -3,6 +3,8 @@ // license that can be found in the LICENSE file. //go:build (386 || amd64 || amd64p32) && gc +// +build 386 amd64 amd64p32 +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/cpu/endian_big.go b/vendor/golang.org/x/sys/cpu/endian_big.go index 7fe04b0a..93ce03a3 100644 --- a/vendor/golang.org/x/sys/cpu/endian_big.go +++ b/vendor/golang.org/x/sys/cpu/endian_big.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build armbe || arm64be || m68k || mips || mips64 || mips64p32 || ppc || ppc64 || s390 || s390x || shbe || sparc || sparc64 +// +build armbe arm64be m68k mips mips64 mips64p32 ppc ppc64 s390 s390x shbe sparc sparc64 package cpu diff --git a/vendor/golang.org/x/sys/cpu/endian_little.go b/vendor/golang.org/x/sys/cpu/endian_little.go index 48eccc4c..55db853e 100644 --- a/vendor/golang.org/x/sys/cpu/endian_little.go +++ b/vendor/golang.org/x/sys/cpu/endian_little.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. 
//go:build 386 || amd64 || amd64p32 || alpha || arm || arm64 || loong64 || mipsle || mips64le || mips64p32le || nios2 || ppc64le || riscv || riscv64 || sh || wasm +// +build 386 amd64 amd64p32 alpha arm arm64 loong64 mipsle mips64le mips64p32le nios2 ppc64le riscv riscv64 sh wasm package cpu diff --git a/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go b/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go index 4cd64c70..d87bd6b3 100644 --- a/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go +++ b/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build linux && arm64 +// +build linux,arm64 package cpu diff --git a/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go b/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go index 4c9788ea..b975ea2a 100644 --- a/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go +++ b/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build go1.21 +// +build go1.21 package cpu diff --git a/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go b/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go index 1b9ccb09..96134157 100644 --- a/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go +++ b/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go @@ -9,6 +9,7 @@ // gccgo's libgo and thus must not used a CGo method. //go:build aix && gccgo +// +build aix,gccgo package cpu diff --git a/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go b/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go index e8b6cdbe..904be42f 100644 --- a/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go +++ b/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go @@ -7,6 +7,7 @@ // (See golang.org/issue/32102) //go:build aix && ppc64 && gc +// +build aix,ppc64,gc package cpu diff --git a/vendor/golang.org/x/sys/execabs/execabs_go118.go b/vendor/golang.org/x/sys/execabs/execabs_go118.go index 5627d70e..2000064a 100644 --- a/vendor/golang.org/x/sys/execabs/execabs_go118.go +++ b/vendor/golang.org/x/sys/execabs/execabs_go118.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build !go1.19 +// +build !go1.19 package execabs diff --git a/vendor/golang.org/x/sys/execabs/execabs_go119.go b/vendor/golang.org/x/sys/execabs/execabs_go119.go index d60ab1b4..f364b341 100644 --- a/vendor/golang.org/x/sys/execabs/execabs_go119.go +++ b/vendor/golang.org/x/sys/execabs/execabs_go119.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build go1.19 +// +build go1.19 package execabs diff --git a/vendor/golang.org/x/sys/unix/aliases.go b/vendor/golang.org/x/sys/unix/aliases.go index b0e41985..abc89c10 100644 --- a/vendor/golang.org/x/sys/unix/aliases.go +++ b/vendor/golang.org/x/sys/unix/aliases.go @@ -2,7 +2,9 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +//go:build (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos) && go1.9 +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos +// +build go1.9 package unix diff --git a/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s b/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s index 269e173c..db9171c2 100644 --- a/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s +++ b/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. 
//go:build gc +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_386.s b/vendor/golang.org/x/sys/unix/asm_bsd_386.s index a4fcef0e..e0fcd9b3 100644 --- a/vendor/golang.org/x/sys/unix/asm_bsd_386.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_386.s @@ -3,6 +3,8 @@ // license that can be found in the LICENSE file. //go:build (freebsd || netbsd || openbsd) && gc +// +build freebsd netbsd openbsd +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s b/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s index 1e63615c..2b99c349 100644 --- a/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s @@ -3,6 +3,8 @@ // license that can be found in the LICENSE file. //go:build (darwin || dragonfly || freebsd || netbsd || openbsd) && gc +// +build darwin dragonfly freebsd netbsd openbsd +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_arm.s b/vendor/golang.org/x/sys/unix/asm_bsd_arm.s index 6496c310..d702d4ad 100644 --- a/vendor/golang.org/x/sys/unix/asm_bsd_arm.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_arm.s @@ -3,6 +3,8 @@ // license that can be found in the LICENSE file. //go:build (freebsd || netbsd || openbsd) && gc +// +build freebsd netbsd openbsd +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s b/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s index 4fd1f54d..fe36a739 100644 --- a/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s @@ -3,6 +3,8 @@ // license that can be found in the LICENSE file. //go:build (darwin || freebsd || netbsd || openbsd) && gc +// +build darwin freebsd netbsd openbsd +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s b/vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s index 42f7eb9e..e5b9a848 100644 --- a/vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s @@ -3,6 +3,8 @@ // license that can be found in the LICENSE file. //go:build (darwin || freebsd || netbsd || openbsd) && gc +// +build darwin freebsd netbsd openbsd +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s b/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s index f8902667..d560019e 100644 --- a/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s @@ -3,6 +3,8 @@ // license that can be found in the LICENSE file. //go:build (darwin || freebsd || netbsd || openbsd) && gc +// +build darwin freebsd netbsd openbsd +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_386.s b/vendor/golang.org/x/sys/unix/asm_linux_386.s index 3b473487..8fd101d0 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_386.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_386.s @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build gc +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_amd64.s b/vendor/golang.org/x/sys/unix/asm_linux_amd64.s index 67e29f31..7ed38e43 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_amd64.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_amd64.s @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. 
//go:build gc +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_arm.s b/vendor/golang.org/x/sys/unix/asm_linux_arm.s index d6ae269c..8ef1d514 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_arm.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_arm.s @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build gc +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_arm64.s b/vendor/golang.org/x/sys/unix/asm_linux_arm64.s index 01e5e253..98ae0276 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_arm64.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_arm64.s @@ -3,6 +3,9 @@ // license that can be found in the LICENSE file. //go:build linux && arm64 && gc +// +build linux +// +build arm64 +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_loong64.s b/vendor/golang.org/x/sys/unix/asm_linux_loong64.s index 2abf12f6..56535728 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_loong64.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_loong64.s @@ -3,6 +3,9 @@ // license that can be found in the LICENSE file. //go:build linux && loong64 && gc +// +build linux +// +build loong64 +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s b/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s index f84bae71..21231d2c 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s @@ -3,6 +3,9 @@ // license that can be found in the LICENSE file. //go:build linux && (mips64 || mips64le) && gc +// +build linux +// +build mips64 mips64le +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s b/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s index f08f6280..6783b26c 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s @@ -3,6 +3,9 @@ // license that can be found in the LICENSE file. //go:build linux && (mips || mipsle) && gc +// +build linux +// +build mips mipsle +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s b/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s index bdfc024d..19d49893 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s @@ -3,6 +3,9 @@ // license that can be found in the LICENSE file. //go:build linux && (ppc64 || ppc64le) && gc +// +build linux +// +build ppc64 ppc64le +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s b/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s index 2e8c9961..e42eb81d 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s @@ -3,6 +3,8 @@ // license that can be found in the LICENSE file. //go:build riscv64 && gc +// +build riscv64 +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_s390x.s b/vendor/golang.org/x/sys/unix/asm_linux_s390x.s index 2c394b11..c46aab33 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_s390x.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_s390x.s @@ -3,6 +3,9 @@ // license that can be found in the LICENSE file. 
//go:build linux && s390x && gc +// +build linux +// +build s390x +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s b/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s index fab586a2..5e7a1169 100644 --- a/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s +++ b/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build gc +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s b/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s index f949ec54..f8c5394c 100644 --- a/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s +++ b/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build gc +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_zos_s390x.s b/vendor/golang.org/x/sys/unix/asm_zos_s390x.s index 813dfad7..3b54e185 100644 --- a/vendor/golang.org/x/sys/unix/asm_zos_s390x.s +++ b/vendor/golang.org/x/sys/unix/asm_zos_s390x.s @@ -3,17 +3,18 @@ // license that can be found in the LICENSE file. //go:build zos && s390x && gc +// +build zos +// +build s390x +// +build gc #include "textflag.h" #define PSALAA 1208(R0) #define GTAB64(x) 80(x) #define LCA64(x) 88(x) -#define SAVSTACK_ASYNC(x) 336(x) // in the LCA #define CAA(x) 8(x) -#define CEECAATHDID(x) 976(x) // in the CAA -#define EDCHPXV(x) 1016(x) // in the CAA -#define GOCB(x) 1104(x) // in the CAA +#define EDCHPXV(x) 1016(x) // in the CAA +#define SAVSTACK_ASYNC(x) 336(x) // in the LCA // SS_*, where x=SAVSTACK_ASYNC #define SS_LE(x) 0(x) @@ -21,362 +22,405 @@ #define SS_ERRNO(x) 16(x) #define SS_ERRNOJR(x) 20(x) -// Function Descriptor Offsets -#define __errno 0x156*16 -#define __err2ad 0x16C*16 +#define LE_CALL BYTE $0x0D; BYTE $0x76; // BL R7, R6 -// Call Instructions -#define LE_CALL BYTE $0x0D; BYTE $0x76 // BL R7, R6 -#define SVC_LOAD BYTE $0x0A; BYTE $0x08 // SVC 08 LOAD -#define SVC_DELETE BYTE $0x0A; BYTE $0x09 // SVC 09 DELETE - -DATA zosLibVec<>(SB)/8, $0 -GLOBL zosLibVec<>(SB), NOPTR, $8 - -TEXT ·initZosLibVec(SB), NOSPLIT|NOFRAME, $0-0 - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 - MOVD CAA(R8), R8 - MOVD EDCHPXV(R8), R8 - MOVD R8, zosLibVec<>(SB) - RET - -TEXT ·GetZosLibVec(SB), NOSPLIT|NOFRAME, $0-0 - MOVD zosLibVec<>(SB), R8 - MOVD R8, ret+0(FP) - RET - -TEXT ·clearErrno(SB), NOSPLIT, $0-0 - BL addrerrno<>(SB) - MOVD $0, 0(R3) +TEXT ·clearErrno(SB),NOSPLIT,$0-0 + BL addrerrno<>(SB) + MOVD $0, 0(R3) RET // Returns the address of errno in R3. -TEXT addrerrno<>(SB), NOSPLIT|NOFRAME, $0-0 +TEXT addrerrno<>(SB),NOSPLIT|NOFRAME,$0-0 // Get library control area (LCA). - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 // Get __errno FuncDesc. - MOVD CAA(R8), R9 - MOVD EDCHPXV(R9), R9 - ADD $(__errno), R9 - LMG 0(R9), R5, R6 + MOVD CAA(R8), R9 + MOVD EDCHPXV(R9), R9 + ADD $(0x156*16), R9 + LMG 0(R9), R5, R6 // Switch to saved LE stack. - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD 0(R9), R4 - MOVD $0, 0(R9) + MOVD SAVSTACK_ASYNC(R8), R9 + MOVD 0(R9), R4 + MOVD $0, 0(R9) // Call __errno function. LE_CALL NOPH // Switch back to Go stack. - XOR R0, R0 // Restore R0 to $0. - MOVD R4, 0(R9) // Save stack pointer. + XOR R0, R0 // Restore R0 to $0. + MOVD R4, 0(R9) // Save stack pointer. 
RET -// func svcCall(fnptr unsafe.Pointer, argv *unsafe.Pointer, dsa *uint64) -TEXT ·svcCall(SB), NOSPLIT, $0 - BL runtime·save_g(SB) // Save g and stack pointer - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD R15, 0(R9) +TEXT ·syscall_syscall(SB),NOSPLIT,$0-56 + BL runtime·entersyscall(SB) + MOVD a1+8(FP), R1 + MOVD a2+16(FP), R2 + MOVD a3+24(FP), R3 - MOVD argv+8(FP), R1 // Move function arguments into registers - MOVD dsa+16(FP), g - MOVD fnptr+0(FP), R15 + // Get library control area (LCA). + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 - BYTE $0x0D // Branch to function - BYTE $0xEF + // Get function. + MOVD CAA(R8), R9 + MOVD EDCHPXV(R9), R9 + MOVD trap+0(FP), R5 + SLD $4, R5 + ADD R5, R9 + LMG 0(R9), R5, R6 - BL runtime·load_g(SB) // Restore g and stack pointer - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD 0(R9), R15 + // Restore LE stack. + MOVD SAVSTACK_ASYNC(R8), R9 + MOVD 0(R9), R4 + MOVD $0, 0(R9) + // Call function. + LE_CALL + NOPH + XOR R0, R0 // Restore R0 to $0. + MOVD R4, 0(R9) // Save stack pointer. + + MOVD R3, r1+32(FP) + MOVD R0, r2+40(FP) + MOVD R0, err+48(FP) + MOVW R3, R4 + CMP R4, $-1 + BNE done + BL addrerrno<>(SB) + MOVWZ 0(R3), R3 + MOVD R3, err+48(FP) +done: + BL runtime·exitsyscall(SB) RET -// func svcLoad(name *byte) unsafe.Pointer -TEXT ·svcLoad(SB), NOSPLIT, $0 - MOVD R15, R2 // Save go stack pointer - MOVD name+0(FP), R0 // Move SVC args into registers - MOVD $0x80000000, R1 - MOVD $0, R15 - SVC_LOAD - MOVW R15, R3 // Save return code from SVC - MOVD R2, R15 // Restore go stack pointer - CMP R3, $0 // Check SVC return code - BNE error - - MOVD $-2, R3 // Reset last bit of entry point to zero - AND R0, R3 - MOVD R3, ret+8(FP) // Return entry point returned by SVC - CMP R0, R3 // Check if last bit of entry point was set - BNE done - - MOVD R15, R2 // Save go stack pointer - MOVD $0, R15 // Move SVC args into registers (entry point still in r0 from SVC 08) - SVC_DELETE - MOVD R2, R15 // Restore go stack pointer +TEXT ·syscall_rawsyscall(SB),NOSPLIT,$0-56 + MOVD a1+8(FP), R1 + MOVD a2+16(FP), R2 + MOVD a3+24(FP), R3 -error: - MOVD $0, ret+8(FP) // Return 0 on failure + // Get library control area (LCA). + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + + // Get function. + MOVD CAA(R8), R9 + MOVD EDCHPXV(R9), R9 + MOVD trap+0(FP), R5 + SLD $4, R5 + ADD R5, R9 + LMG 0(R9), R5, R6 + // Restore LE stack. + MOVD SAVSTACK_ASYNC(R8), R9 + MOVD 0(R9), R4 + MOVD $0, 0(R9) + + // Call function. + LE_CALL + NOPH + XOR R0, R0 // Restore R0 to $0. + MOVD R4, 0(R9) // Save stack pointer. + + MOVD R3, r1+32(FP) + MOVD R0, r2+40(FP) + MOVD R0, err+48(FP) + MOVW R3, R4 + CMP R4, $-1 + BNE done + BL addrerrno<>(SB) + MOVWZ 0(R3), R3 + MOVD R3, err+48(FP) done: - XOR R0, R0 // Reset r0 to 0 RET -// func svcUnload(name *byte, fnptr unsafe.Pointer) int64 -TEXT ·svcUnload(SB), NOSPLIT, $0 - MOVD R15, R2 // Save go stack pointer - MOVD name+0(FP), R0 // Move SVC args into registers - MOVD fnptr+8(FP), R15 - SVC_DELETE - XOR R0, R0 // Reset r0 to 0 - MOVD R15, R1 // Save SVC return code - MOVD R2, R15 // Restore go stack pointer - MOVD R1, ret+16(FP) // Return SVC return code - RET +TEXT ·syscall_syscall6(SB),NOSPLIT,$0-80 + BL runtime·entersyscall(SB) + MOVD a1+8(FP), R1 + MOVD a2+16(FP), R2 + MOVD a3+24(FP), R3 -// func gettid() uint64 -TEXT ·gettid(SB), NOSPLIT, $0 // Get library control area (LCA). 
- MOVW PSALAA, R8 - MOVD LCA64(R8), R8 + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 - // Get CEECAATHDID - MOVD CAA(R8), R9 - MOVD CEECAATHDID(R9), R9 - MOVD R9, ret+0(FP) + // Get function. + MOVD CAA(R8), R9 + MOVD EDCHPXV(R9), R9 + MOVD trap+0(FP), R5 + SLD $4, R5 + ADD R5, R9 + LMG 0(R9), R5, R6 + // Restore LE stack. + MOVD SAVSTACK_ASYNC(R8), R9 + MOVD 0(R9), R4 + MOVD $0, 0(R9) + + // Fill in parameter list. + MOVD a4+32(FP), R12 + MOVD R12, (2176+24)(R4) + MOVD a5+40(FP), R12 + MOVD R12, (2176+32)(R4) + MOVD a6+48(FP), R12 + MOVD R12, (2176+40)(R4) + + // Call function. + LE_CALL + NOPH + XOR R0, R0 // Restore R0 to $0. + MOVD R4, 0(R9) // Save stack pointer. + + MOVD R3, r1+56(FP) + MOVD R0, r2+64(FP) + MOVD R0, err+72(FP) + MOVW R3, R4 + CMP R4, $-1 + BNE done + BL addrerrno<>(SB) + MOVWZ 0(R3), R3 + MOVD R3, err+72(FP) +done: + BL runtime·exitsyscall(SB) RET -// -// Call LE function, if the return is -1 -// errno and errno2 is retrieved -// -TEXT ·CallLeFuncWithErr(SB), NOSPLIT, $0 - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 - MOVD CAA(R8), R9 - MOVD g, GOCB(R9) +TEXT ·syscall_rawsyscall6(SB),NOSPLIT,$0-80 + MOVD a1+8(FP), R1 + MOVD a2+16(FP), R2 + MOVD a3+24(FP), R3 + + // Get library control area (LCA). + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + + // Get function. + MOVD CAA(R8), R9 + MOVD EDCHPXV(R9), R9 + MOVD trap+0(FP), R5 + SLD $4, R5 + ADD R5, R9 + LMG 0(R9), R5, R6 // Restore LE stack. - MOVD SAVSTACK_ASYNC(R8), R9 // R9-> LE stack frame saving address - MOVD 0(R9), R4 // R4-> restore previously saved stack frame pointer - - MOVD parms_base+8(FP), R7 // R7 -> argument array - MOVD parms_len+16(FP), R8 // R8 number of arguments - - // arg 1 ---> R1 - CMP R8, $0 - BEQ docall - SUB $1, R8 - MOVD 0(R7), R1 - - // arg 2 ---> R2 - CMP R8, $0 - BEQ docall - SUB $1, R8 - ADD $8, R7 - MOVD 0(R7), R2 - - // arg 3 --> R3 - CMP R8, $0 - BEQ docall - SUB $1, R8 - ADD $8, R7 - MOVD 0(R7), R3 - - CMP R8, $0 - BEQ docall - MOVD $2176+16, R6 // starting LE stack address-8 to store 4th argument - -repeat: - ADD $8, R7 - MOVD 0(R7), R0 // advance arg pointer by 8 byte - ADD $8, R6 // advance LE argument address by 8 byte - MOVD R0, (R4)(R6*1) // copy argument from go-slice to le-frame - SUB $1, R8 - CMP R8, $0 - BNE repeat - -docall: - MOVD funcdesc+0(FP), R8 // R8-> function descriptor - LMG 0(R8), R5, R6 - MOVD $0, 0(R9) // R9 address of SAVSTACK_ASYNC - LE_CALL // balr R7, R6 (return #1) - NOPH - MOVD R3, ret+32(FP) - CMP R3, $-1 // compare result to -1 - BNE done - - // retrieve errno and errno2 - MOVD zosLibVec<>(SB), R8 - ADD $(__errno), R8 - LMG 0(R8), R5, R6 - LE_CALL // balr R7, R6 __errno (return #3) - NOPH - MOVWZ 0(R3), R3 - MOVD R3, err+48(FP) - MOVD zosLibVec<>(SB), R8 - ADD $(__err2ad), R8 - LMG 0(R8), R5, R6 - LE_CALL // balr R7, R6 __err2ad (return #2) + MOVD SAVSTACK_ASYNC(R8), R9 + MOVD 0(R9), R4 + MOVD $0, 0(R9) + + // Fill in parameter list. + MOVD a4+32(FP), R12 + MOVD R12, (2176+24)(R4) + MOVD a5+40(FP), R12 + MOVD R12, (2176+32)(R4) + MOVD a6+48(FP), R12 + MOVD R12, (2176+40)(R4) + + // Call function. + LE_CALL NOPH - MOVW (R3), R2 // retrieve errno2 - MOVD R2, errno2+40(FP) // store in return area - + XOR R0, R0 // Restore R0 to $0. + MOVD R4, 0(R9) // Save stack pointer. + + MOVD R3, r1+56(FP) + MOVD R0, r2+64(FP) + MOVD R0, err+72(FP) + MOVW R3, R4 + CMP R4, $-1 + BNE done + BL ·rrno<>(SB) + MOVWZ 0(R3), R3 + MOVD R3, err+72(FP) done: - MOVD R4, 0(R9) // Save stack pointer. 
RET -// -// Call LE function, if the return is 0 -// errno and errno2 is retrieved -// -TEXT ·CallLeFuncWithPtrReturn(SB), NOSPLIT, $0 - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 - MOVD CAA(R8), R9 - MOVD g, GOCB(R9) +TEXT ·syscall_syscall9(SB),NOSPLIT,$0 + BL runtime·entersyscall(SB) + MOVD a1+8(FP), R1 + MOVD a2+16(FP), R2 + MOVD a3+24(FP), R3 + + // Get library control area (LCA). + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + + // Get function. + MOVD CAA(R8), R9 + MOVD EDCHPXV(R9), R9 + MOVD trap+0(FP), R5 + SLD $4, R5 + ADD R5, R9 + LMG 0(R9), R5, R6 // Restore LE stack. - MOVD SAVSTACK_ASYNC(R8), R9 // R9-> LE stack frame saving address - MOVD 0(R9), R4 // R4-> restore previously saved stack frame pointer - - MOVD parms_base+8(FP), R7 // R7 -> argument array - MOVD parms_len+16(FP), R8 // R8 number of arguments - - // arg 1 ---> R1 - CMP R8, $0 - BEQ docall - SUB $1, R8 - MOVD 0(R7), R1 - - // arg 2 ---> R2 - CMP R8, $0 - BEQ docall - SUB $1, R8 - ADD $8, R7 - MOVD 0(R7), R2 - - // arg 3 --> R3 - CMP R8, $0 - BEQ docall - SUB $1, R8 - ADD $8, R7 - MOVD 0(R7), R3 - - CMP R8, $0 - BEQ docall - MOVD $2176+16, R6 // starting LE stack address-8 to store 4th argument - -repeat: - ADD $8, R7 - MOVD 0(R7), R0 // advance arg pointer by 8 byte - ADD $8, R6 // advance LE argument address by 8 byte - MOVD R0, (R4)(R6*1) // copy argument from go-slice to le-frame - SUB $1, R8 - CMP R8, $0 - BNE repeat - -docall: - MOVD funcdesc+0(FP), R8 // R8-> function descriptor - LMG 0(R8), R5, R6 - MOVD $0, 0(R9) // R9 address of SAVSTACK_ASYNC - LE_CALL // balr R7, R6 (return #1) - NOPH - MOVD R3, ret+32(FP) - CMP R3, $0 // compare result to 0 - BNE done - - // retrieve errno and errno2 - MOVD zosLibVec<>(SB), R8 - ADD $(__errno), R8 - LMG 0(R8), R5, R6 - LE_CALL // balr R7, R6 __errno (return #3) + MOVD SAVSTACK_ASYNC(R8), R9 + MOVD 0(R9), R4 + MOVD $0, 0(R9) + + // Fill in parameter list. + MOVD a4+32(FP), R12 + MOVD R12, (2176+24)(R4) + MOVD a5+40(FP), R12 + MOVD R12, (2176+32)(R4) + MOVD a6+48(FP), R12 + MOVD R12, (2176+40)(R4) + MOVD a7+56(FP), R12 + MOVD R12, (2176+48)(R4) + MOVD a8+64(FP), R12 + MOVD R12, (2176+56)(R4) + MOVD a9+72(FP), R12 + MOVD R12, (2176+64)(R4) + + // Call function. + LE_CALL NOPH - MOVWZ 0(R3), R3 - MOVD R3, err+48(FP) - MOVD zosLibVec<>(SB), R8 - ADD $(__err2ad), R8 - LMG 0(R8), R5, R6 - LE_CALL // balr R7, R6 __err2ad (return #2) + XOR R0, R0 // Restore R0 to $0. + MOVD R4, 0(R9) // Save stack pointer. + + MOVD R3, r1+80(FP) + MOVD R0, r2+88(FP) + MOVD R0, err+96(FP) + MOVW R3, R4 + CMP R4, $-1 + BNE done + BL addrerrno<>(SB) + MOVWZ 0(R3), R3 + MOVD R3, err+96(FP) +done: + BL runtime·exitsyscall(SB) + RET + +TEXT ·syscall_rawsyscall9(SB),NOSPLIT,$0 + MOVD a1+8(FP), R1 + MOVD a2+16(FP), R2 + MOVD a3+24(FP), R3 + + // Get library control area (LCA). + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + + // Get function. + MOVD CAA(R8), R9 + MOVD EDCHPXV(R9), R9 + MOVD trap+0(FP), R5 + SLD $4, R5 + ADD R5, R9 + LMG 0(R9), R5, R6 + + // Restore LE stack. + MOVD SAVSTACK_ASYNC(R8), R9 + MOVD 0(R9), R4 + MOVD $0, 0(R9) + + // Fill in parameter list. + MOVD a4+32(FP), R12 + MOVD R12, (2176+24)(R4) + MOVD a5+40(FP), R12 + MOVD R12, (2176+32)(R4) + MOVD a6+48(FP), R12 + MOVD R12, (2176+40)(R4) + MOVD a7+56(FP), R12 + MOVD R12, (2176+48)(R4) + MOVD a8+64(FP), R12 + MOVD R12, (2176+56)(R4) + MOVD a9+72(FP), R12 + MOVD R12, (2176+64)(R4) + + // Call function. 
+ LE_CALL NOPH - MOVW (R3), R2 // retrieve errno2 - MOVD R2, errno2+40(FP) // store in return area - XOR R2, R2 - MOVWZ R2, (R3) // clear errno2 + XOR R0, R0 // Restore R0 to $0. + MOVD R4, 0(R9) // Save stack pointer. + + MOVD R3, r1+80(FP) + MOVD R0, r2+88(FP) + MOVD R0, err+96(FP) + MOVW R3, R4 + CMP R4, $-1 + BNE done + BL addrerrno<>(SB) + MOVWZ 0(R3), R3 + MOVD R3, err+96(FP) +done: + RET + +// func svcCall(fnptr unsafe.Pointer, argv *unsafe.Pointer, dsa *uint64) +TEXT ·svcCall(SB),NOSPLIT,$0 + BL runtime·save_g(SB) // Save g and stack pointer + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + MOVD SAVSTACK_ASYNC(R8), R9 + MOVD R15, 0(R9) + + MOVD argv+8(FP), R1 // Move function arguments into registers + MOVD dsa+16(FP), g + MOVD fnptr+0(FP), R15 + + BYTE $0x0D // Branch to function + BYTE $0xEF + + BL runtime·load_g(SB) // Restore g and stack pointer + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + MOVD SAVSTACK_ASYNC(R8), R9 + MOVD 0(R9), R15 + RET + +// func svcLoad(name *byte) unsafe.Pointer +TEXT ·svcLoad(SB),NOSPLIT,$0 + MOVD R15, R2 // Save go stack pointer + MOVD name+0(FP), R0 // Move SVC args into registers + MOVD $0x80000000, R1 + MOVD $0, R15 + BYTE $0x0A // SVC 08 LOAD + BYTE $0x08 + MOVW R15, R3 // Save return code from SVC + MOVD R2, R15 // Restore go stack pointer + CMP R3, $0 // Check SVC return code + BNE error + + MOVD $-2, R3 // Reset last bit of entry point to zero + AND R0, R3 + MOVD R3, addr+8(FP) // Return entry point returned by SVC + CMP R0, R3 // Check if last bit of entry point was set + BNE done + + MOVD R15, R2 // Save go stack pointer + MOVD $0, R15 // Move SVC args into registers (entry point still in r0 from SVC 08) + BYTE $0x0A // SVC 09 DELETE + BYTE $0x09 + MOVD R2, R15 // Restore go stack pointer + +error: + MOVD $0, addr+8(FP) // Return 0 on failure done: - MOVD R4, 0(R9) // Save stack pointer. 
+ XOR R0, R0 // Reset r0 to 0 RET -// -// function to test if a pointer can be safely dereferenced (content read) -// return 0 for succces -// -TEXT ·ptrtest(SB), NOSPLIT, $0-16 - MOVD arg+0(FP), R10 // test pointer in R10 - - // set up R2 to point to CEECAADMC - BYTE $0xE3; BYTE $0x20; BYTE $0x04; BYTE $0xB8; BYTE $0x00; BYTE $0x17 // llgt 2,1208 - BYTE $0xB9; BYTE $0x17; BYTE $0x00; BYTE $0x22 // llgtr 2,2 - BYTE $0xA5; BYTE $0x26; BYTE $0x7F; BYTE $0xFF // nilh 2,32767 - BYTE $0xE3; BYTE $0x22; BYTE $0x00; BYTE $0x58; BYTE $0x00; BYTE $0x04 // lg 2,88(2) - BYTE $0xE3; BYTE $0x22; BYTE $0x00; BYTE $0x08; BYTE $0x00; BYTE $0x04 // lg 2,8(2) - BYTE $0x41; BYTE $0x22; BYTE $0x03; BYTE $0x68 // la 2,872(2) - - // set up R5 to point to the "shunt" path which set 1 to R3 (failure) - BYTE $0xB9; BYTE $0x82; BYTE $0x00; BYTE $0x33 // xgr 3,3 - BYTE $0xA7; BYTE $0x55; BYTE $0x00; BYTE $0x04 // bras 5,lbl1 - BYTE $0xA7; BYTE $0x39; BYTE $0x00; BYTE $0x01 // lghi 3,1 - - // if r3 is not zero (failed) then branch to finish - BYTE $0xB9; BYTE $0x02; BYTE $0x00; BYTE $0x33 // lbl1 ltgr 3,3 - BYTE $0xA7; BYTE $0x74; BYTE $0x00; BYTE $0x08 // brc b'0111',lbl2 - - // stomic store shunt address in R5 into CEECAADMC - BYTE $0xE3; BYTE $0x52; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x24 // stg 5,0(2) - - // now try reading from the test pointer in R10, if it fails it branches to the "lghi" instruction above - BYTE $0xE3; BYTE $0x9A; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x04 // lg 9,0(10) - - // finish here, restore 0 into CEECAADMC - BYTE $0xB9; BYTE $0x82; BYTE $0x00; BYTE $0x99 // lbl2 xgr 9,9 - BYTE $0xE3; BYTE $0x92; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x24 // stg 9,0(2) - MOVD R3, ret+8(FP) // result in R3 +// func svcUnload(name *byte, fnptr unsafe.Pointer) int64 +TEXT ·svcUnload(SB),NOSPLIT,$0 + MOVD R15, R2 // Save go stack pointer + MOVD name+0(FP), R0 // Move SVC args into registers + MOVD addr+8(FP), R15 + BYTE $0x0A // SVC 09 + BYTE $0x09 + XOR R0, R0 // Reset r0 to 0 + MOVD R15, R1 // Save SVC return code + MOVD R2, R15 // Restore go stack pointer + MOVD R1, rc+0(FP) // Return SVC return code RET -// -// function to test if a untptr can be loaded from a pointer -// return 1: the 8-byte content -// 2: 0 for success, 1 for failure -// -// func safeload(ptr uintptr) ( value uintptr, error uintptr) -TEXT ·safeload(SB), NOSPLIT, $0-24 - MOVD ptr+0(FP), R10 // test pointer in R10 - MOVD $0x0, R6 - BYTE $0xE3; BYTE $0x20; BYTE $0x04; BYTE $0xB8; BYTE $0x00; BYTE $0x17 // llgt 2,1208 - BYTE $0xB9; BYTE $0x17; BYTE $0x00; BYTE $0x22 // llgtr 2,2 - BYTE $0xA5; BYTE $0x26; BYTE $0x7F; BYTE $0xFF // nilh 2,32767 - BYTE $0xE3; BYTE $0x22; BYTE $0x00; BYTE $0x58; BYTE $0x00; BYTE $0x04 // lg 2,88(2) - BYTE $0xE3; BYTE $0x22; BYTE $0x00; BYTE $0x08; BYTE $0x00; BYTE $0x04 // lg 2,8(2) - BYTE $0x41; BYTE $0x22; BYTE $0x03; BYTE $0x68 // la 2,872(2) - BYTE $0xB9; BYTE $0x82; BYTE $0x00; BYTE $0x33 // xgr 3,3 - BYTE $0xA7; BYTE $0x55; BYTE $0x00; BYTE $0x04 // bras 5,lbl1 - BYTE $0xA7; BYTE $0x39; BYTE $0x00; BYTE $0x01 // lghi 3,1 - BYTE $0xB9; BYTE $0x02; BYTE $0x00; BYTE $0x33 // lbl1 ltgr 3,3 - BYTE $0xA7; BYTE $0x74; BYTE $0x00; BYTE $0x08 // brc b'0111',lbl2 - BYTE $0xE3; BYTE $0x52; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x24 // stg 5,0(2) - BYTE $0xE3; BYTE $0x6A; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x04 // lg 6,0(10) - BYTE $0xB9; BYTE $0x82; BYTE $0x00; BYTE $0x99 // lbl2 xgr 9,9 - BYTE $0xE3; BYTE $0x92; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x24 // stg 9,0(2) - MOVD R6, 
value+8(FP) // result in R6 - MOVD R3, error+16(FP) // error in R3 +// func gettid() uint64 +TEXT ·gettid(SB), NOSPLIT, $0 + // Get library control area (LCA). + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + + // Get CEECAATHDID + MOVD CAA(R8), R9 + MOVD 0x3D0(R9), R9 + MOVD R9, ret+0(FP) + RET diff --git a/vendor/golang.org/x/sys/unix/bpxsvc_zos.go b/vendor/golang.org/x/sys/unix/bpxsvc_zos.go deleted file mode 100644 index 39d647d8..00000000 --- a/vendor/golang.org/x/sys/unix/bpxsvc_zos.go +++ /dev/null @@ -1,657 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build zos - -package unix - -import ( - "bytes" - "fmt" - "unsafe" -) - -//go:noescape -func bpxcall(plist []unsafe.Pointer, bpx_offset int64) - -//go:noescape -func A2e([]byte) - -//go:noescape -func E2a([]byte) - -const ( - BPX4STA = 192 // stat - BPX4FST = 104 // fstat - BPX4LST = 132 // lstat - BPX4OPN = 156 // open - BPX4CLO = 72 // close - BPX4CHR = 500 // chattr - BPX4FCR = 504 // fchattr - BPX4LCR = 1180 // lchattr - BPX4CTW = 492 // cond_timed_wait - BPX4GTH = 1056 // __getthent - BPX4PTQ = 412 // pthread_quiesc - BPX4PTR = 320 // ptrace -) - -const ( - //options - //byte1 - BPX_OPNFHIGH = 0x80 - //byte2 - BPX_OPNFEXEC = 0x80 - //byte3 - BPX_O_NOLARGEFILE = 0x08 - BPX_O_LARGEFILE = 0x04 - BPX_O_ASYNCSIG = 0x02 - BPX_O_SYNC = 0x01 - //byte4 - BPX_O_CREXCL = 0xc0 - BPX_O_CREAT = 0x80 - BPX_O_EXCL = 0x40 - BPX_O_NOCTTY = 0x20 - BPX_O_TRUNC = 0x10 - BPX_O_APPEND = 0x08 - BPX_O_NONBLOCK = 0x04 - BPX_FNDELAY = 0x04 - BPX_O_RDWR = 0x03 - BPX_O_RDONLY = 0x02 - BPX_O_WRONLY = 0x01 - BPX_O_ACCMODE = 0x03 - BPX_O_GETFL = 0x0f - - //mode - // byte1 (file type) - BPX_FT_DIR = 1 - BPX_FT_CHARSPEC = 2 - BPX_FT_REGFILE = 3 - BPX_FT_FIFO = 4 - BPX_FT_SYMLINK = 5 - BPX_FT_SOCKET = 6 - //byte3 - BPX_S_ISUID = 0x08 - BPX_S_ISGID = 0x04 - BPX_S_ISVTX = 0x02 - BPX_S_IRWXU1 = 0x01 - BPX_S_IRUSR = 0x01 - //byte4 - BPX_S_IRWXU2 = 0xc0 - BPX_S_IWUSR = 0x80 - BPX_S_IXUSR = 0x40 - BPX_S_IRWXG = 0x38 - BPX_S_IRGRP = 0x20 - BPX_S_IWGRP = 0x10 - BPX_S_IXGRP = 0x08 - BPX_S_IRWXOX = 0x07 - BPX_S_IROTH = 0x04 - BPX_S_IWOTH = 0x02 - BPX_S_IXOTH = 0x01 - - CW_INTRPT = 1 - CW_CONDVAR = 32 - CW_TIMEOUT = 64 - - PGTHA_NEXT = 2 - PGTHA_CURRENT = 1 - PGTHA_FIRST = 0 - PGTHA_LAST = 3 - PGTHA_PROCESS = 0x80 - PGTHA_CONTTY = 0x40 - PGTHA_PATH = 0x20 - PGTHA_COMMAND = 0x10 - PGTHA_FILEDATA = 0x08 - PGTHA_THREAD = 0x04 - PGTHA_PTAG = 0x02 - PGTHA_COMMANDLONG = 0x01 - PGTHA_THREADFAST = 0x80 - PGTHA_FILEPATH = 0x40 - PGTHA_THDSIGMASK = 0x20 - // thread quiece mode - QUIESCE_TERM int32 = 1 - QUIESCE_FORCE int32 = 2 - QUIESCE_QUERY int32 = 3 - QUIESCE_FREEZE int32 = 4 - QUIESCE_UNFREEZE int32 = 5 - FREEZE_THIS_THREAD int32 = 6 - FREEZE_EXIT int32 = 8 - QUIESCE_SRB int32 = 9 -) - -type Pgtha struct { - Pid uint32 // 0 - Tid0 uint32 // 4 - Tid1 uint32 - Accesspid byte // C - Accesstid byte // D - Accessasid uint16 // E - Loginname [8]byte // 10 - Flag1 byte // 18 - Flag1b2 byte // 19 -} - -type Bpxystat_t struct { // DSECT BPXYSTAT - St_id [4]uint8 // 0 - St_length uint16 // 0x4 - St_version uint16 // 0x6 - St_mode uint32 // 0x8 - St_ino uint32 // 0xc - St_dev uint32 // 0x10 - St_nlink uint32 // 0x14 - St_uid uint32 // 0x18 - St_gid uint32 // 0x1c - St_size uint64 // 0x20 - St_atime uint32 // 0x28 - St_mtime uint32 // 0x2c - St_ctime uint32 // 0x30 - St_rdev uint32 // 0x34 - St_auditoraudit uint32 // 0x38 - St_useraudit uint32 // 0x3c - St_blksize 
uint32 // 0x40 - St_createtime uint32 // 0x44 - St_auditid [4]uint32 // 0x48 - St_res01 uint32 // 0x58 - Ft_ccsid uint16 // 0x5c - Ft_flags uint16 // 0x5e - St_res01a [2]uint32 // 0x60 - St_res02 uint32 // 0x68 - St_blocks uint32 // 0x6c - St_opaque [3]uint8 // 0x70 - St_visible uint8 // 0x73 - St_reftime uint32 // 0x74 - St_fid uint64 // 0x78 - St_filefmt uint8 // 0x80 - St_fspflag2 uint8 // 0x81 - St_res03 [2]uint8 // 0x82 - St_ctimemsec uint32 // 0x84 - St_seclabel [8]uint8 // 0x88 - St_res04 [4]uint8 // 0x90 - // end of version 1 - _ uint32 // 0x94 - St_atime64 uint64 // 0x98 - St_mtime64 uint64 // 0xa0 - St_ctime64 uint64 // 0xa8 - St_createtime64 uint64 // 0xb0 - St_reftime64 uint64 // 0xb8 - _ uint64 // 0xc0 - St_res05 [16]uint8 // 0xc8 - // end of version 2 -} - -type BpxFilestatus struct { - Oflag1 byte - Oflag2 byte - Oflag3 byte - Oflag4 byte -} - -type BpxMode struct { - Ftype byte - Mode1 byte - Mode2 byte - Mode3 byte -} - -// Thr attribute structure for extended attributes -type Bpxyatt_t struct { // DSECT BPXYATT - Att_id [4]uint8 - Att_version uint16 - Att_res01 [2]uint8 - Att_setflags1 uint8 - Att_setflags2 uint8 - Att_setflags3 uint8 - Att_setflags4 uint8 - Att_mode uint32 - Att_uid uint32 - Att_gid uint32 - Att_opaquemask [3]uint8 - Att_visblmaskres uint8 - Att_opaque [3]uint8 - Att_visibleres uint8 - Att_size_h uint32 - Att_size_l uint32 - Att_atime uint32 - Att_mtime uint32 - Att_auditoraudit uint32 - Att_useraudit uint32 - Att_ctime uint32 - Att_reftime uint32 - // end of version 1 - Att_filefmt uint8 - Att_res02 [3]uint8 - Att_filetag uint32 - Att_res03 [8]uint8 - // end of version 2 - Att_atime64 uint64 - Att_mtime64 uint64 - Att_ctime64 uint64 - Att_reftime64 uint64 - Att_seclabel [8]uint8 - Att_ver3res02 [8]uint8 - // end of version 3 -} - -func BpxOpen(name string, options *BpxFilestatus, mode *BpxMode) (rv int32, rc int32, rn int32) { - if len(name) < 1024 { - var namebuf [1024]byte - sz := int32(copy(namebuf[:], name)) - A2e(namebuf[:sz]) - var parms [7]unsafe.Pointer - parms[0] = unsafe.Pointer(&sz) - parms[1] = unsafe.Pointer(&namebuf[0]) - parms[2] = unsafe.Pointer(options) - parms[3] = unsafe.Pointer(mode) - parms[4] = unsafe.Pointer(&rv) - parms[5] = unsafe.Pointer(&rc) - parms[6] = unsafe.Pointer(&rn) - bpxcall(parms[:], BPX4OPN) - return rv, rc, rn - } - return -1, -1, -1 -} - -func BpxClose(fd int32) (rv int32, rc int32, rn int32) { - var parms [4]unsafe.Pointer - parms[0] = unsafe.Pointer(&fd) - parms[1] = unsafe.Pointer(&rv) - parms[2] = unsafe.Pointer(&rc) - parms[3] = unsafe.Pointer(&rn) - bpxcall(parms[:], BPX4CLO) - return rv, rc, rn -} - -func BpxFileFStat(fd int32, st *Bpxystat_t) (rv int32, rc int32, rn int32) { - st.St_id = [4]uint8{0xe2, 0xe3, 0xc1, 0xe3} - st.St_version = 2 - stat_sz := uint32(unsafe.Sizeof(*st)) - var parms [6]unsafe.Pointer - parms[0] = unsafe.Pointer(&fd) - parms[1] = unsafe.Pointer(&stat_sz) - parms[2] = unsafe.Pointer(st) - parms[3] = unsafe.Pointer(&rv) - parms[4] = unsafe.Pointer(&rc) - parms[5] = unsafe.Pointer(&rn) - bpxcall(parms[:], BPX4FST) - return rv, rc, rn -} - -func BpxFileStat(name string, st *Bpxystat_t) (rv int32, rc int32, rn int32) { - if len(name) < 1024 { - var namebuf [1024]byte - sz := int32(copy(namebuf[:], name)) - A2e(namebuf[:sz]) - st.St_id = [4]uint8{0xe2, 0xe3, 0xc1, 0xe3} - st.St_version = 2 - stat_sz := uint32(unsafe.Sizeof(*st)) - var parms [7]unsafe.Pointer - parms[0] = unsafe.Pointer(&sz) - parms[1] = unsafe.Pointer(&namebuf[0]) - parms[2] = unsafe.Pointer(&stat_sz) - parms[3] = 
unsafe.Pointer(st) - parms[4] = unsafe.Pointer(&rv) - parms[5] = unsafe.Pointer(&rc) - parms[6] = unsafe.Pointer(&rn) - bpxcall(parms[:], BPX4STA) - return rv, rc, rn - } - return -1, -1, -1 -} - -func BpxFileLStat(name string, st *Bpxystat_t) (rv int32, rc int32, rn int32) { - if len(name) < 1024 { - var namebuf [1024]byte - sz := int32(copy(namebuf[:], name)) - A2e(namebuf[:sz]) - st.St_id = [4]uint8{0xe2, 0xe3, 0xc1, 0xe3} - st.St_version = 2 - stat_sz := uint32(unsafe.Sizeof(*st)) - var parms [7]unsafe.Pointer - parms[0] = unsafe.Pointer(&sz) - parms[1] = unsafe.Pointer(&namebuf[0]) - parms[2] = unsafe.Pointer(&stat_sz) - parms[3] = unsafe.Pointer(st) - parms[4] = unsafe.Pointer(&rv) - parms[5] = unsafe.Pointer(&rc) - parms[6] = unsafe.Pointer(&rn) - bpxcall(parms[:], BPX4LST) - return rv, rc, rn - } - return -1, -1, -1 -} - -func BpxChattr(path string, attr *Bpxyatt_t) (rv int32, rc int32, rn int32) { - if len(path) >= 1024 { - return -1, -1, -1 - } - var namebuf [1024]byte - sz := int32(copy(namebuf[:], path)) - A2e(namebuf[:sz]) - attr_sz := uint32(unsafe.Sizeof(*attr)) - var parms [7]unsafe.Pointer - parms[0] = unsafe.Pointer(&sz) - parms[1] = unsafe.Pointer(&namebuf[0]) - parms[2] = unsafe.Pointer(&attr_sz) - parms[3] = unsafe.Pointer(attr) - parms[4] = unsafe.Pointer(&rv) - parms[5] = unsafe.Pointer(&rc) - parms[6] = unsafe.Pointer(&rn) - bpxcall(parms[:], BPX4CHR) - return rv, rc, rn -} - -func BpxLchattr(path string, attr *Bpxyatt_t) (rv int32, rc int32, rn int32) { - if len(path) >= 1024 { - return -1, -1, -1 - } - var namebuf [1024]byte - sz := int32(copy(namebuf[:], path)) - A2e(namebuf[:sz]) - attr_sz := uint32(unsafe.Sizeof(*attr)) - var parms [7]unsafe.Pointer - parms[0] = unsafe.Pointer(&sz) - parms[1] = unsafe.Pointer(&namebuf[0]) - parms[2] = unsafe.Pointer(&attr_sz) - parms[3] = unsafe.Pointer(attr) - parms[4] = unsafe.Pointer(&rv) - parms[5] = unsafe.Pointer(&rc) - parms[6] = unsafe.Pointer(&rn) - bpxcall(parms[:], BPX4LCR) - return rv, rc, rn -} - -func BpxFchattr(fd int32, attr *Bpxyatt_t) (rv int32, rc int32, rn int32) { - attr_sz := uint32(unsafe.Sizeof(*attr)) - var parms [6]unsafe.Pointer - parms[0] = unsafe.Pointer(&fd) - parms[1] = unsafe.Pointer(&attr_sz) - parms[2] = unsafe.Pointer(attr) - parms[3] = unsafe.Pointer(&rv) - parms[4] = unsafe.Pointer(&rc) - parms[5] = unsafe.Pointer(&rn) - bpxcall(parms[:], BPX4FCR) - return rv, rc, rn -} - -func BpxCondTimedWait(sec uint32, nsec uint32, events uint32, secrem *uint32, nsecrem *uint32) (rv int32, rc int32, rn int32) { - var parms [8]unsafe.Pointer - parms[0] = unsafe.Pointer(&sec) - parms[1] = unsafe.Pointer(&nsec) - parms[2] = unsafe.Pointer(&events) - parms[3] = unsafe.Pointer(secrem) - parms[4] = unsafe.Pointer(nsecrem) - parms[5] = unsafe.Pointer(&rv) - parms[6] = unsafe.Pointer(&rc) - parms[7] = unsafe.Pointer(&rn) - bpxcall(parms[:], BPX4CTW) - return rv, rc, rn -} -func BpxGetthent(in *Pgtha, outlen *uint32, out unsafe.Pointer) (rv int32, rc int32, rn int32) { - var parms [7]unsafe.Pointer - inlen := uint32(26) // nothing else will work. 
Go says Pgtha is 28-byte because of alignment, but Pgtha is "packed" and must be 26-byte - parms[0] = unsafe.Pointer(&inlen) - parms[1] = unsafe.Pointer(&in) - parms[2] = unsafe.Pointer(outlen) - parms[3] = unsafe.Pointer(&out) - parms[4] = unsafe.Pointer(&rv) - parms[5] = unsafe.Pointer(&rc) - parms[6] = unsafe.Pointer(&rn) - bpxcall(parms[:], BPX4GTH) - return rv, rc, rn -} -func ZosJobname() (jobname string, err error) { - var pgtha Pgtha - pgtha.Pid = uint32(Getpid()) - pgtha.Accesspid = PGTHA_CURRENT - pgtha.Flag1 = PGTHA_PROCESS - var out [256]byte - var outlen uint32 - outlen = 256 - rv, rc, rn := BpxGetthent(&pgtha, &outlen, unsafe.Pointer(&out[0])) - if rv == 0 { - gthc := []byte{0x87, 0xa3, 0x88, 0x83} // 'gthc' in ebcdic - ix := bytes.Index(out[:], gthc) - if ix == -1 { - err = fmt.Errorf("BPX4GTH: gthc return data not found") - return - } - jn := out[ix+80 : ix+88] // we didn't declare Pgthc, but jobname is 8-byte at offset 80 - E2a(jn) - jobname = string(bytes.TrimRight(jn, " ")) - - } else { - err = fmt.Errorf("BPX4GTH: rc=%d errno=%d reason=code=0x%x", rv, rc, rn) - } - return -} -func Bpx4ptq(code int32, data string) (rv int32, rc int32, rn int32) { - var userdata [8]byte - var parms [5]unsafe.Pointer - copy(userdata[:], data+" ") - A2e(userdata[:]) - parms[0] = unsafe.Pointer(&code) - parms[1] = unsafe.Pointer(&userdata[0]) - parms[2] = unsafe.Pointer(&rv) - parms[3] = unsafe.Pointer(&rc) - parms[4] = unsafe.Pointer(&rn) - bpxcall(parms[:], BPX4PTQ) - return rv, rc, rn -} - -const ( - PT_TRACE_ME = 0 // Debug this process - PT_READ_I = 1 // Read a full word - PT_READ_D = 2 // Read a full word - PT_READ_U = 3 // Read control info - PT_WRITE_I = 4 //Write a full word - PT_WRITE_D = 5 //Write a full word - PT_CONTINUE = 7 //Continue the process - PT_KILL = 8 //Terminate the process - PT_READ_GPR = 11 // Read GPR, CR, PSW - PT_READ_FPR = 12 // Read FPR - PT_READ_VR = 13 // Read VR - PT_WRITE_GPR = 14 // Write GPR, CR, PSW - PT_WRITE_FPR = 15 // Write FPR - PT_WRITE_VR = 16 // Write VR - PT_READ_BLOCK = 17 // Read storage - PT_WRITE_BLOCK = 19 // Write storage - PT_READ_GPRH = 20 // Read GPRH - PT_WRITE_GPRH = 21 // Write GPRH - PT_REGHSET = 22 // Read all GPRHs - PT_ATTACH = 30 // Attach to a process - PT_DETACH = 31 // Detach from a process - PT_REGSET = 32 // Read all GPRs - PT_REATTACH = 33 // Reattach to a process - PT_LDINFO = 34 // Read loader info - PT_MULTI = 35 // Multi process mode - PT_LD64INFO = 36 // RMODE64 Info Area - PT_BLOCKREQ = 40 // Block request - PT_THREAD_INFO = 60 // Read thread info - PT_THREAD_MODIFY = 61 - PT_THREAD_READ_FOCUS = 62 - PT_THREAD_WRITE_FOCUS = 63 - PT_THREAD_HOLD = 64 - PT_THREAD_SIGNAL = 65 - PT_EXPLAIN = 66 - PT_EVENTS = 67 - PT_THREAD_INFO_EXTENDED = 68 - PT_REATTACH2 = 71 - PT_CAPTURE = 72 - PT_UNCAPTURE = 73 - PT_GET_THREAD_TCB = 74 - PT_GET_ALET = 75 - PT_SWAPIN = 76 - PT_EXTENDED_EVENT = 98 - PT_RECOVER = 99 // Debug a program check - PT_GPR0 = 0 // General purpose register 0 - PT_GPR1 = 1 // General purpose register 1 - PT_GPR2 = 2 // General purpose register 2 - PT_GPR3 = 3 // General purpose register 3 - PT_GPR4 = 4 // General purpose register 4 - PT_GPR5 = 5 // General purpose register 5 - PT_GPR6 = 6 // General purpose register 6 - PT_GPR7 = 7 // General purpose register 7 - PT_GPR8 = 8 // General purpose register 8 - PT_GPR9 = 9 // General purpose register 9 - PT_GPR10 = 10 // General purpose register 10 - PT_GPR11 = 11 // General purpose register 11 - PT_GPR12 = 12 // General purpose register 12 - PT_GPR13 = 13 // General 
purpose register 13 - PT_GPR14 = 14 // General purpose register 14 - PT_GPR15 = 15 // General purpose register 15 - PT_FPR0 = 16 // Floating point register 0 - PT_FPR1 = 17 // Floating point register 1 - PT_FPR2 = 18 // Floating point register 2 - PT_FPR3 = 19 // Floating point register 3 - PT_FPR4 = 20 // Floating point register 4 - PT_FPR5 = 21 // Floating point register 5 - PT_FPR6 = 22 // Floating point register 6 - PT_FPR7 = 23 // Floating point register 7 - PT_FPR8 = 24 // Floating point register 8 - PT_FPR9 = 25 // Floating point register 9 - PT_FPR10 = 26 // Floating point register 10 - PT_FPR11 = 27 // Floating point register 11 - PT_FPR12 = 28 // Floating point register 12 - PT_FPR13 = 29 // Floating point register 13 - PT_FPR14 = 30 // Floating point register 14 - PT_FPR15 = 31 // Floating point register 15 - PT_FPC = 32 // Floating point control register - PT_PSW = 40 // PSW - PT_PSW0 = 40 // Left half of the PSW - PT_PSW1 = 41 // Right half of the PSW - PT_CR0 = 42 // Control register 0 - PT_CR1 = 43 // Control register 1 - PT_CR2 = 44 // Control register 2 - PT_CR3 = 45 // Control register 3 - PT_CR4 = 46 // Control register 4 - PT_CR5 = 47 // Control register 5 - PT_CR6 = 48 // Control register 6 - PT_CR7 = 49 // Control register 7 - PT_CR8 = 50 // Control register 8 - PT_CR9 = 51 // Control register 9 - PT_CR10 = 52 // Control register 10 - PT_CR11 = 53 // Control register 11 - PT_CR12 = 54 // Control register 12 - PT_CR13 = 55 // Control register 13 - PT_CR14 = 56 // Control register 14 - PT_CR15 = 57 // Control register 15 - PT_GPRH0 = 58 // GP High register 0 - PT_GPRH1 = 59 // GP High register 1 - PT_GPRH2 = 60 // GP High register 2 - PT_GPRH3 = 61 // GP High register 3 - PT_GPRH4 = 62 // GP High register 4 - PT_GPRH5 = 63 // GP High register 5 - PT_GPRH6 = 64 // GP High register 6 - PT_GPRH7 = 65 // GP High register 7 - PT_GPRH8 = 66 // GP High register 8 - PT_GPRH9 = 67 // GP High register 9 - PT_GPRH10 = 68 // GP High register 10 - PT_GPRH11 = 69 // GP High register 11 - PT_GPRH12 = 70 // GP High register 12 - PT_GPRH13 = 71 // GP High register 13 - PT_GPRH14 = 72 // GP High register 14 - PT_GPRH15 = 73 // GP High register 15 - PT_VR0 = 74 // Vector register 0 - PT_VR1 = 75 // Vector register 1 - PT_VR2 = 76 // Vector register 2 - PT_VR3 = 77 // Vector register 3 - PT_VR4 = 78 // Vector register 4 - PT_VR5 = 79 // Vector register 5 - PT_VR6 = 80 // Vector register 6 - PT_VR7 = 81 // Vector register 7 - PT_VR8 = 82 // Vector register 8 - PT_VR9 = 83 // Vector register 9 - PT_VR10 = 84 // Vector register 10 - PT_VR11 = 85 // Vector register 11 - PT_VR12 = 86 // Vector register 12 - PT_VR13 = 87 // Vector register 13 - PT_VR14 = 88 // Vector register 14 - PT_VR15 = 89 // Vector register 15 - PT_VR16 = 90 // Vector register 16 - PT_VR17 = 91 // Vector register 17 - PT_VR18 = 92 // Vector register 18 - PT_VR19 = 93 // Vector register 19 - PT_VR20 = 94 // Vector register 20 - PT_VR21 = 95 // Vector register 21 - PT_VR22 = 96 // Vector register 22 - PT_VR23 = 97 // Vector register 23 - PT_VR24 = 98 // Vector register 24 - PT_VR25 = 99 // Vector register 25 - PT_VR26 = 100 // Vector register 26 - PT_VR27 = 101 // Vector register 27 - PT_VR28 = 102 // Vector register 28 - PT_VR29 = 103 // Vector register 29 - PT_VR30 = 104 // Vector register 30 - PT_VR31 = 105 // Vector register 31 - PT_PSWG = 106 // PSWG - PT_PSWG0 = 106 // Bytes 0-3 - PT_PSWG1 = 107 // Bytes 4-7 - PT_PSWG2 = 108 // Bytes 8-11 (IA high word) - PT_PSWG3 = 109 // Bytes 12-15 (IA low word) -) - -func 
Bpx4ptr(request int32, pid int32, addr unsafe.Pointer, data unsafe.Pointer, buffer unsafe.Pointer) (rv int32, rc int32, rn int32) { - var parms [8]unsafe.Pointer - parms[0] = unsafe.Pointer(&request) - parms[1] = unsafe.Pointer(&pid) - parms[2] = unsafe.Pointer(&addr) - parms[3] = unsafe.Pointer(&data) - parms[4] = unsafe.Pointer(&buffer) - parms[5] = unsafe.Pointer(&rv) - parms[6] = unsafe.Pointer(&rc) - parms[7] = unsafe.Pointer(&rn) - bpxcall(parms[:], BPX4PTR) - return rv, rc, rn -} - -func copyU8(val uint8, dest []uint8) int { - if len(dest) < 1 { - return 0 - } - dest[0] = val - return 1 -} - -func copyU8Arr(src, dest []uint8) int { - if len(dest) < len(src) { - return 0 - } - for i, v := range src { - dest[i] = v - } - return len(src) -} - -func copyU16(val uint16, dest []uint16) int { - if len(dest) < 1 { - return 0 - } - dest[0] = val - return 1 -} - -func copyU32(val uint32, dest []uint32) int { - if len(dest) < 1 { - return 0 - } - dest[0] = val - return 1 -} - -func copyU32Arr(src, dest []uint32) int { - if len(dest) < len(src) { - return 0 - } - for i, v := range src { - dest[i] = v - } - return len(src) -} - -func copyU64(val uint64, dest []uint64) int { - if len(dest) < 1 { - return 0 - } - dest[0] = val - return 1 -} diff --git a/vendor/golang.org/x/sys/unix/bpxsvc_zos.s b/vendor/golang.org/x/sys/unix/bpxsvc_zos.s deleted file mode 100644 index 4bd4a179..00000000 --- a/vendor/golang.org/x/sys/unix/bpxsvc_zos.s +++ /dev/null @@ -1,192 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -#include "go_asm.h" -#include "textflag.h" - -// function to call USS assembly language services -// -// doc: https://www.ibm.com/support/knowledgecenter/en/SSLTBW_3.1.0/com.ibm.zos.v3r1.bpxb100/bit64env.htm -// -// arg1 unsafe.Pointer array that ressembles an OS PLIST -// -// arg2 function offset as in -// doc: https://www.ibm.com/support/knowledgecenter/en/SSLTBW_3.1.0/com.ibm.zos.v3r1.bpxb100/bpx2cr_List_of_offsets.htm -// -// func bpxcall(plist []unsafe.Pointer, bpx_offset int64) - -TEXT ·bpxcall(SB), NOSPLIT|NOFRAME, $0 - MOVD plist_base+0(FP), R1 // r1 points to plist - MOVD bpx_offset+24(FP), R2 // r2 offset to BPX vector table - MOVD R14, R7 // save r14 - MOVD R15, R8 // save r15 - MOVWZ 16(R0), R9 - MOVWZ 544(R9), R9 - MOVWZ 24(R9), R9 // call vector in r9 - ADD R2, R9 // add offset to vector table - MOVWZ (R9), R9 // r9 points to entry point - BYTE $0x0D // BL R14,R9 --> basr r14,r9 - BYTE $0xE9 // clobbers 0,1,14,15 - MOVD R8, R15 // restore 15 - JMP R7 // return via saved return address - -// func A2e(arr [] byte) -// code page conversion from 819 to 1047 -TEXT ·A2e(SB), NOSPLIT|NOFRAME, $0 - MOVD arg_base+0(FP), R2 // pointer to arry of characters - MOVD arg_len+8(FP), R3 // count - XOR R0, R0 - XOR R1, R1 - BYTE $0xA7; BYTE $0x15; BYTE $0x00; BYTE $0x82 // BRAS 1,(2+(256/2)) - - // ASCII -> EBCDIC conversion table: - BYTE $0x00; BYTE $0x01; BYTE $0x02; BYTE $0x03 - BYTE $0x37; BYTE $0x2d; BYTE $0x2e; BYTE $0x2f - BYTE $0x16; BYTE $0x05; BYTE $0x15; BYTE $0x0b - BYTE $0x0c; BYTE $0x0d; BYTE $0x0e; BYTE $0x0f - BYTE $0x10; BYTE $0x11; BYTE $0x12; BYTE $0x13 - BYTE $0x3c; BYTE $0x3d; BYTE $0x32; BYTE $0x26 - BYTE $0x18; BYTE $0x19; BYTE $0x3f; BYTE $0x27 - BYTE $0x1c; BYTE $0x1d; BYTE $0x1e; BYTE $0x1f - BYTE $0x40; BYTE $0x5a; BYTE $0x7f; BYTE $0x7b - BYTE $0x5b; BYTE $0x6c; BYTE $0x50; BYTE $0x7d - BYTE $0x4d; BYTE $0x5d; BYTE $0x5c; BYTE $0x4e - BYTE $0x6b; 
BYTE $0x60; BYTE $0x4b; BYTE $0x61 - BYTE $0xf0; BYTE $0xf1; BYTE $0xf2; BYTE $0xf3 - BYTE $0xf4; BYTE $0xf5; BYTE $0xf6; BYTE $0xf7 - BYTE $0xf8; BYTE $0xf9; BYTE $0x7a; BYTE $0x5e - BYTE $0x4c; BYTE $0x7e; BYTE $0x6e; BYTE $0x6f - BYTE $0x7c; BYTE $0xc1; BYTE $0xc2; BYTE $0xc3 - BYTE $0xc4; BYTE $0xc5; BYTE $0xc6; BYTE $0xc7 - BYTE $0xc8; BYTE $0xc9; BYTE $0xd1; BYTE $0xd2 - BYTE $0xd3; BYTE $0xd4; BYTE $0xd5; BYTE $0xd6 - BYTE $0xd7; BYTE $0xd8; BYTE $0xd9; BYTE $0xe2 - BYTE $0xe3; BYTE $0xe4; BYTE $0xe5; BYTE $0xe6 - BYTE $0xe7; BYTE $0xe8; BYTE $0xe9; BYTE $0xad - BYTE $0xe0; BYTE $0xbd; BYTE $0x5f; BYTE $0x6d - BYTE $0x79; BYTE $0x81; BYTE $0x82; BYTE $0x83 - BYTE $0x84; BYTE $0x85; BYTE $0x86; BYTE $0x87 - BYTE $0x88; BYTE $0x89; BYTE $0x91; BYTE $0x92 - BYTE $0x93; BYTE $0x94; BYTE $0x95; BYTE $0x96 - BYTE $0x97; BYTE $0x98; BYTE $0x99; BYTE $0xa2 - BYTE $0xa3; BYTE $0xa4; BYTE $0xa5; BYTE $0xa6 - BYTE $0xa7; BYTE $0xa8; BYTE $0xa9; BYTE $0xc0 - BYTE $0x4f; BYTE $0xd0; BYTE $0xa1; BYTE $0x07 - BYTE $0x20; BYTE $0x21; BYTE $0x22; BYTE $0x23 - BYTE $0x24; BYTE $0x25; BYTE $0x06; BYTE $0x17 - BYTE $0x28; BYTE $0x29; BYTE $0x2a; BYTE $0x2b - BYTE $0x2c; BYTE $0x09; BYTE $0x0a; BYTE $0x1b - BYTE $0x30; BYTE $0x31; BYTE $0x1a; BYTE $0x33 - BYTE $0x34; BYTE $0x35; BYTE $0x36; BYTE $0x08 - BYTE $0x38; BYTE $0x39; BYTE $0x3a; BYTE $0x3b - BYTE $0x04; BYTE $0x14; BYTE $0x3e; BYTE $0xff - BYTE $0x41; BYTE $0xaa; BYTE $0x4a; BYTE $0xb1 - BYTE $0x9f; BYTE $0xb2; BYTE $0x6a; BYTE $0xb5 - BYTE $0xbb; BYTE $0xb4; BYTE $0x9a; BYTE $0x8a - BYTE $0xb0; BYTE $0xca; BYTE $0xaf; BYTE $0xbc - BYTE $0x90; BYTE $0x8f; BYTE $0xea; BYTE $0xfa - BYTE $0xbe; BYTE $0xa0; BYTE $0xb6; BYTE $0xb3 - BYTE $0x9d; BYTE $0xda; BYTE $0x9b; BYTE $0x8b - BYTE $0xb7; BYTE $0xb8; BYTE $0xb9; BYTE $0xab - BYTE $0x64; BYTE $0x65; BYTE $0x62; BYTE $0x66 - BYTE $0x63; BYTE $0x67; BYTE $0x9e; BYTE $0x68 - BYTE $0x74; BYTE $0x71; BYTE $0x72; BYTE $0x73 - BYTE $0x78; BYTE $0x75; BYTE $0x76; BYTE $0x77 - BYTE $0xac; BYTE $0x69; BYTE $0xed; BYTE $0xee - BYTE $0xeb; BYTE $0xef; BYTE $0xec; BYTE $0xbf - BYTE $0x80; BYTE $0xfd; BYTE $0xfe; BYTE $0xfb - BYTE $0xfc; BYTE $0xba; BYTE $0xae; BYTE $0x59 - BYTE $0x44; BYTE $0x45; BYTE $0x42; BYTE $0x46 - BYTE $0x43; BYTE $0x47; BYTE $0x9c; BYTE $0x48 - BYTE $0x54; BYTE $0x51; BYTE $0x52; BYTE $0x53 - BYTE $0x58; BYTE $0x55; BYTE $0x56; BYTE $0x57 - BYTE $0x8c; BYTE $0x49; BYTE $0xcd; BYTE $0xce - BYTE $0xcb; BYTE $0xcf; BYTE $0xcc; BYTE $0xe1 - BYTE $0x70; BYTE $0xdd; BYTE $0xde; BYTE $0xdb - BYTE $0xdc; BYTE $0x8d; BYTE $0x8e; BYTE $0xdf - -retry: - WORD $0xB9931022 // TROO 2,2,b'0001' - BVS retry - RET - -// func e2a(arr [] byte) -// code page conversion from 1047 to 819 -TEXT ·E2a(SB), NOSPLIT|NOFRAME, $0 - MOVD arg_base+0(FP), R2 // pointer to arry of characters - MOVD arg_len+8(FP), R3 // count - XOR R0, R0 - XOR R1, R1 - BYTE $0xA7; BYTE $0x15; BYTE $0x00; BYTE $0x82 // BRAS 1,(2+(256/2)) - - // EBCDIC -> ASCII conversion table: - BYTE $0x00; BYTE $0x01; BYTE $0x02; BYTE $0x03 - BYTE $0x9c; BYTE $0x09; BYTE $0x86; BYTE $0x7f - BYTE $0x97; BYTE $0x8d; BYTE $0x8e; BYTE $0x0b - BYTE $0x0c; BYTE $0x0d; BYTE $0x0e; BYTE $0x0f - BYTE $0x10; BYTE $0x11; BYTE $0x12; BYTE $0x13 - BYTE $0x9d; BYTE $0x0a; BYTE $0x08; BYTE $0x87 - BYTE $0x18; BYTE $0x19; BYTE $0x92; BYTE $0x8f - BYTE $0x1c; BYTE $0x1d; BYTE $0x1e; BYTE $0x1f - BYTE $0x80; BYTE $0x81; BYTE $0x82; BYTE $0x83 - BYTE $0x84; BYTE $0x85; BYTE $0x17; BYTE $0x1b - BYTE $0x88; BYTE $0x89; BYTE $0x8a; BYTE $0x8b - BYTE $0x8c; BYTE $0x05; 
BYTE $0x06; BYTE $0x07 - BYTE $0x90; BYTE $0x91; BYTE $0x16; BYTE $0x93 - BYTE $0x94; BYTE $0x95; BYTE $0x96; BYTE $0x04 - BYTE $0x98; BYTE $0x99; BYTE $0x9a; BYTE $0x9b - BYTE $0x14; BYTE $0x15; BYTE $0x9e; BYTE $0x1a - BYTE $0x20; BYTE $0xa0; BYTE $0xe2; BYTE $0xe4 - BYTE $0xe0; BYTE $0xe1; BYTE $0xe3; BYTE $0xe5 - BYTE $0xe7; BYTE $0xf1; BYTE $0xa2; BYTE $0x2e - BYTE $0x3c; BYTE $0x28; BYTE $0x2b; BYTE $0x7c - BYTE $0x26; BYTE $0xe9; BYTE $0xea; BYTE $0xeb - BYTE $0xe8; BYTE $0xed; BYTE $0xee; BYTE $0xef - BYTE $0xec; BYTE $0xdf; BYTE $0x21; BYTE $0x24 - BYTE $0x2a; BYTE $0x29; BYTE $0x3b; BYTE $0x5e - BYTE $0x2d; BYTE $0x2f; BYTE $0xc2; BYTE $0xc4 - BYTE $0xc0; BYTE $0xc1; BYTE $0xc3; BYTE $0xc5 - BYTE $0xc7; BYTE $0xd1; BYTE $0xa6; BYTE $0x2c - BYTE $0x25; BYTE $0x5f; BYTE $0x3e; BYTE $0x3f - BYTE $0xf8; BYTE $0xc9; BYTE $0xca; BYTE $0xcb - BYTE $0xc8; BYTE $0xcd; BYTE $0xce; BYTE $0xcf - BYTE $0xcc; BYTE $0x60; BYTE $0x3a; BYTE $0x23 - BYTE $0x40; BYTE $0x27; BYTE $0x3d; BYTE $0x22 - BYTE $0xd8; BYTE $0x61; BYTE $0x62; BYTE $0x63 - BYTE $0x64; BYTE $0x65; BYTE $0x66; BYTE $0x67 - BYTE $0x68; BYTE $0x69; BYTE $0xab; BYTE $0xbb - BYTE $0xf0; BYTE $0xfd; BYTE $0xfe; BYTE $0xb1 - BYTE $0xb0; BYTE $0x6a; BYTE $0x6b; BYTE $0x6c - BYTE $0x6d; BYTE $0x6e; BYTE $0x6f; BYTE $0x70 - BYTE $0x71; BYTE $0x72; BYTE $0xaa; BYTE $0xba - BYTE $0xe6; BYTE $0xb8; BYTE $0xc6; BYTE $0xa4 - BYTE $0xb5; BYTE $0x7e; BYTE $0x73; BYTE $0x74 - BYTE $0x75; BYTE $0x76; BYTE $0x77; BYTE $0x78 - BYTE $0x79; BYTE $0x7a; BYTE $0xa1; BYTE $0xbf - BYTE $0xd0; BYTE $0x5b; BYTE $0xde; BYTE $0xae - BYTE $0xac; BYTE $0xa3; BYTE $0xa5; BYTE $0xb7 - BYTE $0xa9; BYTE $0xa7; BYTE $0xb6; BYTE $0xbc - BYTE $0xbd; BYTE $0xbe; BYTE $0xdd; BYTE $0xa8 - BYTE $0xaf; BYTE $0x5d; BYTE $0xb4; BYTE $0xd7 - BYTE $0x7b; BYTE $0x41; BYTE $0x42; BYTE $0x43 - BYTE $0x44; BYTE $0x45; BYTE $0x46; BYTE $0x47 - BYTE $0x48; BYTE $0x49; BYTE $0xad; BYTE $0xf4 - BYTE $0xf6; BYTE $0xf2; BYTE $0xf3; BYTE $0xf5 - BYTE $0x7d; BYTE $0x4a; BYTE $0x4b; BYTE $0x4c - BYTE $0x4d; BYTE $0x4e; BYTE $0x4f; BYTE $0x50 - BYTE $0x51; BYTE $0x52; BYTE $0xb9; BYTE $0xfb - BYTE $0xfc; BYTE $0xf9; BYTE $0xfa; BYTE $0xff - BYTE $0x5c; BYTE $0xf7; BYTE $0x53; BYTE $0x54 - BYTE $0x55; BYTE $0x56; BYTE $0x57; BYTE $0x58 - BYTE $0x59; BYTE $0x5a; BYTE $0xb2; BYTE $0xd4 - BYTE $0xd6; BYTE $0xd2; BYTE $0xd3; BYTE $0xd5 - BYTE $0x30; BYTE $0x31; BYTE $0x32; BYTE $0x33 - BYTE $0x34; BYTE $0x35; BYTE $0x36; BYTE $0x37 - BYTE $0x38; BYTE $0x39; BYTE $0xb3; BYTE $0xdb - BYTE $0xdc; BYTE $0xd9; BYTE $0xda; BYTE $0x9f - -retry: - WORD $0xB9931022 // TROO 2,2,b'0001' - BVS retry - RET diff --git a/vendor/golang.org/x/sys/unix/cap_freebsd.go b/vendor/golang.org/x/sys/unix/cap_freebsd.go index a0865789..0b7c6adb 100644 --- a/vendor/golang.org/x/sys/unix/cap_freebsd.go +++ b/vendor/golang.org/x/sys/unix/cap_freebsd.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build freebsd +// +build freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/constants.go b/vendor/golang.org/x/sys/unix/constants.go index 6fb7cb77..394a3965 100644 --- a/vendor/golang.org/x/sys/unix/constants.go +++ b/vendor/golang.org/x/sys/unix/constants.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. 
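The deleted bpxsvc_zos.go and bpxsvc_zos.s pair wrapped raw z/OS BPX services: each Go helper copies string arguments into a fixed 1 KiB buffer, converts them to EBCDIC with A2e, builds a parameter list, and hands it to bpxcall, returning the service's return value, return code and reason code as an (rv, rc, rn) triple. A hedged sketch of how callers used those wrappers; it only builds on z/OS against an x/sys revision that still ships them:

    //go:build zos

    package main

    import (
        "fmt"

        "golang.org/x/sys/unix"
    )

    func main() {
        // Open read-only; the wrapper copies the path into a 1 KiB buffer and
        // converts it to EBCDIC before invoking BPX4OPN.
        var opts unix.BpxFilestatus
        opts.Oflag4 = unix.BPX_O_RDONLY
        var mode unix.BpxMode

        rv, rc, rn := unix.BpxOpen("/etc/profile", &opts, &mode)
        if rv < 0 {
            fmt.Printf("BPX4OPN failed: rc=%d reason=0x%x\n", rc, rn)
            return
        }
        defer unix.BpxClose(rv)

        var st unix.Bpxystat_t
        if frv, frc, frn := unix.BpxFileFStat(rv, &st); frv == 0 {
            fmt.Println("size:", st.St_size)
        } else {
            fmt.Printf("BPX4FST failed: rc=%d reason=0x%x\n", frc, frn)
        }
    }
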
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package unix diff --git a/vendor/golang.org/x/sys/unix/dev_aix_ppc.go b/vendor/golang.org/x/sys/unix/dev_aix_ppc.go index d7851346..65a99850 100644 --- a/vendor/golang.org/x/sys/unix/dev_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/dev_aix_ppc.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build aix && ppc +// +build aix,ppc // Functions to access/create device major and minor numbers matching the // encoding used by AIX. diff --git a/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go b/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go index 623a5e69..8fc08ad0 100644 --- a/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build aix && ppc64 +// +build aix,ppc64 // Functions to access/create device major and minor numbers matching the // encoding used AIX. diff --git a/vendor/golang.org/x/sys/unix/dev_zos.go b/vendor/golang.org/x/sys/unix/dev_zos.go index bb6a64fe..a388e59a 100644 --- a/vendor/golang.org/x/sys/unix/dev_zos.go +++ b/vendor/golang.org/x/sys/unix/dev_zos.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build zos && s390x +// +build zos,s390x // Functions to access/create device major and minor numbers matching the // encoding used by z/OS. diff --git a/vendor/golang.org/x/sys/unix/dirent.go b/vendor/golang.org/x/sys/unix/dirent.go index 1ebf1178..2499f977 100644 --- a/vendor/golang.org/x/sys/unix/dirent.go +++ b/vendor/golang.org/x/sys/unix/dirent.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package unix diff --git a/vendor/golang.org/x/sys/unix/endian_big.go b/vendor/golang.org/x/sys/unix/endian_big.go index 1095fd31..a5202655 100644 --- a/vendor/golang.org/x/sys/unix/endian_big.go +++ b/vendor/golang.org/x/sys/unix/endian_big.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. // //go:build armbe || arm64be || m68k || mips || mips64 || mips64p32 || ppc || ppc64 || s390 || s390x || shbe || sparc || sparc64 +// +build armbe arm64be m68k mips mips64 mips64p32 ppc ppc64 s390 s390x shbe sparc sparc64 package unix diff --git a/vendor/golang.org/x/sys/unix/endian_little.go b/vendor/golang.org/x/sys/unix/endian_little.go index b9f0e277..b0f2bc4a 100644 --- a/vendor/golang.org/x/sys/unix/endian_little.go +++ b/vendor/golang.org/x/sys/unix/endian_little.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. // //go:build 386 || amd64 || amd64p32 || alpha || arm || arm64 || loong64 || mipsle || mips64le || mips64p32le || nios2 || ppc64le || riscv || riscv64 || sh +// +build 386 amd64 amd64p32 alpha arm arm64 loong64 mipsle mips64le mips64p32le nios2 ppc64le riscv riscv64 sh package unix diff --git a/vendor/golang.org/x/sys/unix/env_unix.go b/vendor/golang.org/x/sys/unix/env_unix.go index a96da71f..29ccc4d1 100644 --- a/vendor/golang.org/x/sys/unix/env_unix.go +++ b/vendor/golang.org/x/sys/unix/env_unix.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. 
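A pattern repeated throughout these hunks is the re-addition of the legacy "// +build" constraint beneath each "//go:build" line; the vendored golang.org/x/sys is apparently being rolled back to a revision that still carries both forms so that pre-Go-1.17 toolchains can evaluate the constraints. For reference, a minimal file carrying the paired constraints (a generic sketch, not taken from this patch) looks like this:

    //go:build aix || darwin || linux
    // +build aix darwin linux

    // Package demo builds only on the platforms named in the constraints
    // above; the two lines state the same rule in the Go 1.17+ syntax and
    // in the legacy syntax that older toolchains understand.
    package demo
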
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos // Unix environment variables. diff --git a/vendor/golang.org/x/sys/unix/epoll_zos.go b/vendor/golang.org/x/sys/unix/epoll_zos.go new file mode 100644 index 00000000..cedaf7e0 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/epoll_zos.go @@ -0,0 +1,221 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build zos && s390x +// +build zos,s390x + +package unix + +import ( + "sync" +) + +// This file simulates epoll on z/OS using poll. + +// Analogous to epoll_event on Linux. +// TODO(neeilan): Pad is because the Linux kernel expects a 96-bit struct. We never pass this to the kernel; remove? +type EpollEvent struct { + Events uint32 + Fd int32 + Pad int32 +} + +const ( + EPOLLERR = 0x8 + EPOLLHUP = 0x10 + EPOLLIN = 0x1 + EPOLLMSG = 0x400 + EPOLLOUT = 0x4 + EPOLLPRI = 0x2 + EPOLLRDBAND = 0x80 + EPOLLRDNORM = 0x40 + EPOLLWRBAND = 0x200 + EPOLLWRNORM = 0x100 + EPOLL_CTL_ADD = 0x1 + EPOLL_CTL_DEL = 0x2 + EPOLL_CTL_MOD = 0x3 + // The following constants are part of the epoll API, but represent + // currently unsupported functionality on z/OS. + // EPOLL_CLOEXEC = 0x80000 + // EPOLLET = 0x80000000 + // EPOLLONESHOT = 0x40000000 + // EPOLLRDHUP = 0x2000 // Typically used with edge-triggered notis + // EPOLLEXCLUSIVE = 0x10000000 // Exclusive wake-up mode + // EPOLLWAKEUP = 0x20000000 // Relies on Linux's BLOCK_SUSPEND capability +) + +// TODO(neeilan): We can eliminate these epToPoll / pToEpoll calls by using identical mask values for POLL/EPOLL +// constants where possible The lower 16 bits of epoll events (uint32) can fit any system poll event (int16). + +// epToPollEvt converts epoll event field to poll equivalent. +// In epoll, Events is a 32-bit field, while poll uses 16 bits. +func epToPollEvt(events uint32) int16 { + var ep2p = map[uint32]int16{ + EPOLLIN: POLLIN, + EPOLLOUT: POLLOUT, + EPOLLHUP: POLLHUP, + EPOLLPRI: POLLPRI, + EPOLLERR: POLLERR, + } + + var pollEvts int16 = 0 + for epEvt, pEvt := range ep2p { + if (events & epEvt) != 0 { + pollEvts |= pEvt + } + } + + return pollEvts +} + +// pToEpollEvt converts 16 bit poll event bitfields to 32-bit epoll event fields. +func pToEpollEvt(revents int16) uint32 { + var p2ep = map[int16]uint32{ + POLLIN: EPOLLIN, + POLLOUT: EPOLLOUT, + POLLHUP: EPOLLHUP, + POLLPRI: EPOLLPRI, + POLLERR: EPOLLERR, + } + + var epollEvts uint32 = 0 + for pEvt, epEvt := range p2ep { + if (revents & pEvt) != 0 { + epollEvts |= epEvt + } + } + + return epollEvts +} + +// Per-process epoll implementation. +type epollImpl struct { + mu sync.Mutex + epfd2ep map[int]*eventPoll + nextEpfd int +} + +// eventPoll holds a set of file descriptors being watched by the process. A process can have multiple epoll instances. +// On Linux, this is an in-kernel data structure accessed through a fd. +type eventPoll struct { + mu sync.Mutex + fds map[int]*EpollEvent +} + +// epoll impl for this process. 
+var impl epollImpl = epollImpl{ + epfd2ep: make(map[int]*eventPoll), + nextEpfd: 0, +} + +func (e *epollImpl) epollcreate(size int) (epfd int, err error) { + e.mu.Lock() + defer e.mu.Unlock() + epfd = e.nextEpfd + e.nextEpfd++ + + e.epfd2ep[epfd] = &eventPoll{ + fds: make(map[int]*EpollEvent), + } + return epfd, nil +} + +func (e *epollImpl) epollcreate1(flag int) (fd int, err error) { + return e.epollcreate(4) +} + +func (e *epollImpl) epollctl(epfd int, op int, fd int, event *EpollEvent) (err error) { + e.mu.Lock() + defer e.mu.Unlock() + + ep, ok := e.epfd2ep[epfd] + if !ok { + + return EBADF + } + + switch op { + case EPOLL_CTL_ADD: + // TODO(neeilan): When we make epfds and fds disjoint, detect epoll + // loops here (instances watching each other) and return ELOOP. + if _, ok := ep.fds[fd]; ok { + return EEXIST + } + ep.fds[fd] = event + case EPOLL_CTL_MOD: + if _, ok := ep.fds[fd]; !ok { + return ENOENT + } + ep.fds[fd] = event + case EPOLL_CTL_DEL: + if _, ok := ep.fds[fd]; !ok { + return ENOENT + } + delete(ep.fds, fd) + + } + return nil +} + +// Must be called while holding ep.mu +func (ep *eventPoll) getFds() []int { + fds := make([]int, len(ep.fds)) + for fd := range ep.fds { + fds = append(fds, fd) + } + return fds +} + +func (e *epollImpl) epollwait(epfd int, events []EpollEvent, msec int) (n int, err error) { + e.mu.Lock() // in [rare] case of concurrent epollcreate + epollwait + ep, ok := e.epfd2ep[epfd] + + if !ok { + e.mu.Unlock() + return 0, EBADF + } + + pollfds := make([]PollFd, 4) + for fd, epollevt := range ep.fds { + pollfds = append(pollfds, PollFd{Fd: int32(fd), Events: epToPollEvt(epollevt.Events)}) + } + e.mu.Unlock() + + n, err = Poll(pollfds, msec) + if err != nil { + return n, err + } + + i := 0 + for _, pFd := range pollfds { + if pFd.Revents != 0 { + events[i] = EpollEvent{Fd: pFd.Fd, Events: pToEpollEvt(pFd.Revents)} + i++ + } + + if i == n { + break + } + } + + return n, nil +} + +func EpollCreate(size int) (fd int, err error) { + return impl.epollcreate(size) +} + +func EpollCreate1(flag int) (fd int, err error) { + return impl.epollcreate1(flag) +} + +func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { + return impl.epollctl(epfd, op, fd, event) +} + +// Because EpollWait mutates events, the caller is expected to coordinate +// concurrent access if calling with the same epfd from multiple goroutines. +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + return impl.epollwait(epfd, events, msec) +} diff --git a/vendor/golang.org/x/sys/unix/fcntl.go b/vendor/golang.org/x/sys/unix/fcntl.go index 6200876f..e9b99125 100644 --- a/vendor/golang.org/x/sys/unix/fcntl.go +++ b/vendor/golang.org/x/sys/unix/fcntl.go @@ -2,7 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build dragonfly || freebsd || linux || netbsd +//go:build dragonfly || freebsd || linux || netbsd || openbsd +// +build dragonfly freebsd linux netbsd openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go b/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go index 13b4acd5..29d44808 100644 --- a/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go +++ b/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. 
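The restored epoll_zos.go emulates the Linux epoll API on top of poll(2): a per-process table maps each synthetic epoll descriptor to its watched fds, and EpollWait translates epoll event bits to poll bits and back via epToPollEvt and pToEpollEvt. A rough usage sketch against that API; since the names match the Linux variants, the same code also builds on Linux:

    //go:build linux || (zos && s390x)

    package main

    import (
        "fmt"

        "golang.org/x/sys/unix"
    )

    // waitReadable blocks until fd is readable or msec milliseconds pass,
    // using the epoll-style API that the z/OS shim emulates over poll(2).
    func waitReadable(fd int, msec int) (bool, error) {
        epfd, err := unix.EpollCreate1(0)
        if err != nil {
            return false, err
        }
        // On Linux the epoll fd should eventually be closed with unix.Close;
        // the z/OS shim keeps its state in-process and has nothing to close.

        ev := unix.EpollEvent{Events: unix.EPOLLIN, Fd: int32(fd)}
        if err := unix.EpollCtl(epfd, unix.EPOLL_CTL_ADD, fd, &ev); err != nil {
            return false, err
        }

        events := make([]unix.EpollEvent, 1)
        n, err := unix.EpollWait(epfd, events, msec)
        if err != nil {
            return false, err
        }
        return n > 0 && events[0].Events&unix.EPOLLIN != 0, nil
    }

    func main() {
        ready, err := waitReadable(0, 1000) // fd 0 (stdin), 1 second timeout
        fmt.Println("readable:", ready, "err:", err)
    }
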
//go:build (linux && 386) || (linux && arm) || (linux && mips) || (linux && mipsle) || (linux && ppc) +// +build linux,386 linux,arm linux,mips linux,mipsle linux,ppc package unix diff --git a/vendor/golang.org/x/sys/unix/fdset.go b/vendor/golang.org/x/sys/unix/fdset.go index 9e83d18c..a8068f94 100644 --- a/vendor/golang.org/x/sys/unix/fdset.go +++ b/vendor/golang.org/x/sys/unix/fdset.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package unix diff --git a/vendor/golang.org/x/sys/unix/fstatfs_zos.go b/vendor/golang.org/x/sys/unix/fstatfs_zos.go new file mode 100644 index 00000000..e377cc9f --- /dev/null +++ b/vendor/golang.org/x/sys/unix/fstatfs_zos.go @@ -0,0 +1,164 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build zos && s390x +// +build zos,s390x + +package unix + +import ( + "unsafe" +) + +// This file simulates fstatfs on z/OS using fstatvfs and w_getmntent. + +func Fstatfs(fd int, stat *Statfs_t) (err error) { + var stat_v Statvfs_t + err = Fstatvfs(fd, &stat_v) + if err == nil { + // populate stat + stat.Type = 0 + stat.Bsize = stat_v.Bsize + stat.Blocks = stat_v.Blocks + stat.Bfree = stat_v.Bfree + stat.Bavail = stat_v.Bavail + stat.Files = stat_v.Files + stat.Ffree = stat_v.Ffree + stat.Fsid = stat_v.Fsid + stat.Namelen = stat_v.Namemax + stat.Frsize = stat_v.Frsize + stat.Flags = stat_v.Flag + for passn := 0; passn < 5; passn++ { + switch passn { + case 0: + err = tryGetmntent64(stat) + break + case 1: + err = tryGetmntent128(stat) + break + case 2: + err = tryGetmntent256(stat) + break + case 3: + err = tryGetmntent512(stat) + break + case 4: + err = tryGetmntent1024(stat) + break + default: + break + } + //proceed to return if: err is nil (found), err is nonnil but not ERANGE (another error occurred) + if err == nil || err != nil && err != ERANGE { + break + } + } + } + return err +} + +func tryGetmntent64(stat *Statfs_t) (err error) { + var mnt_ent_buffer struct { + header W_Mnth + filesys_info [64]W_Mntent + } + var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer)) + fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size) + if err != nil { + return err + } + err = ERANGE //return ERANGE if no match is found in this batch + for i := 0; i < fs_count; i++ { + if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) { + stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0]) + err = nil + break + } + } + return err +} + +func tryGetmntent128(stat *Statfs_t) (err error) { + var mnt_ent_buffer struct { + header W_Mnth + filesys_info [128]W_Mntent + } + var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer)) + fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size) + if err != nil { + return err + } + err = ERANGE //return ERANGE if no match is found in this batch + for i := 0; i < fs_count; i++ { + if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) { + stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0]) + err = nil + break + } + } + return err +} + +func tryGetmntent256(stat *Statfs_t) (err error) { + var mnt_ent_buffer struct { + header W_Mnth + filesys_info [256]W_Mntent + } + var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer)) + fs_count, err := 
W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size) + if err != nil { + return err + } + err = ERANGE //return ERANGE if no match is found in this batch + for i := 0; i < fs_count; i++ { + if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) { + stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0]) + err = nil + break + } + } + return err +} + +func tryGetmntent512(stat *Statfs_t) (err error) { + var mnt_ent_buffer struct { + header W_Mnth + filesys_info [512]W_Mntent + } + var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer)) + fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size) + if err != nil { + return err + } + err = ERANGE //return ERANGE if no match is found in this batch + for i := 0; i < fs_count; i++ { + if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) { + stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0]) + err = nil + break + } + } + return err +} + +func tryGetmntent1024(stat *Statfs_t) (err error) { + var mnt_ent_buffer struct { + header W_Mnth + filesys_info [1024]W_Mntent + } + var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer)) + fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size) + if err != nil { + return err + } + err = ERANGE //return ERANGE if no match is found in this batch + for i := 0; i < fs_count; i++ { + if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) { + stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0]) + err = nil + break + } + } + return err +} diff --git a/vendor/golang.org/x/sys/unix/gccgo.go b/vendor/golang.org/x/sys/unix/gccgo.go index aca5721d..b06f52d7 100644 --- a/vendor/golang.org/x/sys/unix/gccgo.go +++ b/vendor/golang.org/x/sys/unix/gccgo.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build gccgo && !aix && !hurd +// +build gccgo,!aix,!hurd package unix diff --git a/vendor/golang.org/x/sys/unix/gccgo_c.c b/vendor/golang.org/x/sys/unix/gccgo_c.c index d468b7b4..f98a1c54 100644 --- a/vendor/golang.org/x/sys/unix/gccgo_c.c +++ b/vendor/golang.org/x/sys/unix/gccgo_c.c @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build gccgo && !aix && !hurd +// +build gccgo,!aix,!hurd #include #include diff --git a/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go b/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go index 972d61bd..e60e49a3 100644 --- a/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build gccgo && linux && amd64 +// +build gccgo,linux,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/ifreq_linux.go b/vendor/golang.org/x/sys/unix/ifreq_linux.go index 848840ae..15721a51 100644 --- a/vendor/golang.org/x/sys/unix/ifreq_linux.go +++ b/vendor/golang.org/x/sys/unix/ifreq_linux.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. 
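The restored fstatfs_zos.go copies what Fstatvfs can provide and then derives the filesystem type by scanning mount entries, calling W_Getmntent with progressively larger fixed-size buffers (64, 128, 256, 512, 1024 entries) and treating ERANGE as "not in this batch, grow and retry". The same retry-with-a-larger-buffer shape, reduced to a self-contained sketch around a hypothetical lookup function:

    package main

    import (
        "errors"
        "fmt"
    )

    // errRange mirrors the shim's use of ERANGE: the target was not among
    // the entries that fit in the current buffer, so try a bigger one.
    var errRange = errors.New("result does not fit in this buffer")

    // lookup is a stand-in for a tryGetmntentN-style call: it searches at
    // most size entries and reports errRange when the target was not seen.
    func lookup(target string, size int) (string, error) {
        table := []string{"alpha", "beta", "gamma", "delta"}
        limit := size
        if limit > len(table) {
            limit = len(table)
        }
        for _, name := range table[:limit] {
            if name == target {
                return name, nil
            }
        }
        return "", errRange
    }

    func find(target string) (string, error) {
        var err error
        for _, size := range []int{1, 2, 4, 8, 16} { // growing buffer sizes
            var got string
            got, err = lookup(target, size)
            if err == nil {
                return got, nil
            }
            if !errors.Is(err, errRange) {
                return "", err // a real error, not just "too small"
            }
        }
        return "", err // still errRange after the largest size
    }

    func main() {
        got, err := find("delta")
        fmt.Println(got, err)
    }
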
//go:build linux +// +build linux package unix diff --git a/vendor/golang.org/x/sys/unix/ioctl_linux.go b/vendor/golang.org/x/sys/unix/ioctl_linux.go index dbe680ea..0d12c085 100644 --- a/vendor/golang.org/x/sys/unix/ioctl_linux.go +++ b/vendor/golang.org/x/sys/unix/ioctl_linux.go @@ -231,8 +231,3 @@ func IoctlLoopGetStatus64(fd int) (*LoopInfo64, error) { func IoctlLoopSetStatus64(fd int, value *LoopInfo64) error { return ioctlPtr(fd, LOOP_SET_STATUS64, unsafe.Pointer(value)) } - -// IoctlLoopConfigure configures all loop device parameters in a single step -func IoctlLoopConfigure(fd int, value *LoopConfig) error { - return ioctlPtr(fd, LOOP_CONFIGURE, unsafe.Pointer(value)) -} diff --git a/vendor/golang.org/x/sys/unix/ioctl_signed.go b/vendor/golang.org/x/sys/unix/ioctl_signed.go index 5b0759bd..7def9580 100644 --- a/vendor/golang.org/x/sys/unix/ioctl_signed.go +++ b/vendor/golang.org/x/sys/unix/ioctl_signed.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build aix || solaris +// +build aix solaris package unix diff --git a/vendor/golang.org/x/sys/unix/ioctl_unsigned.go b/vendor/golang.org/x/sys/unix/ioctl_unsigned.go index 20f470b9..649913d1 100644 --- a/vendor/golang.org/x/sys/unix/ioctl_unsigned.go +++ b/vendor/golang.org/x/sys/unix/ioctl_unsigned.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build darwin || dragonfly || freebsd || hurd || linux || netbsd || openbsd +// +build darwin dragonfly freebsd hurd linux netbsd openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ioctl_zos.go b/vendor/golang.org/x/sys/unix/ioctl_zos.go index c8b2a750..cdc21bf7 100644 --- a/vendor/golang.org/x/sys/unix/ioctl_zos.go +++ b/vendor/golang.org/x/sys/unix/ioctl_zos.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build zos && s390x +// +build zos,s390x package unix diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index fdcaa974..47fa6a7e 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -248,7 +248,6 @@ struct ltchars { #include #include #include -#include #include #include #include @@ -284,6 +283,10 @@ struct ltchars { #include #endif +#ifndef MSG_FASTOPEN +#define MSG_FASTOPEN 0x20000000 +#endif + #ifndef PTRACE_GETREGS #define PTRACE_GETREGS 0xc #endif @@ -292,6 +295,14 @@ struct ltchars { #define PTRACE_SETREGS 0xd #endif +#ifndef SOL_NETLINK +#define SOL_NETLINK 270 +#endif + +#ifndef SOL_SMC +#define SOL_SMC 286 +#endif + #ifdef SOL_BLUETOOTH // SPARC includes this in /usr/include/sparc64-linux-gnu/bits/socket.h // but it is already in bluetooth_linux.go @@ -308,23 +319,10 @@ struct ltchars { #undef TIPC_WAIT_FOREVER #define TIPC_WAIT_FOREVER 0xffffffff -// Copied from linux/netfilter/nf_nat.h -// Including linux/netfilter/nf_nat.h here causes conflicts between linux/in.h -// and netinet/in.h. 
-#define NF_NAT_RANGE_MAP_IPS (1 << 0) -#define NF_NAT_RANGE_PROTO_SPECIFIED (1 << 1) -#define NF_NAT_RANGE_PROTO_RANDOM (1 << 2) -#define NF_NAT_RANGE_PERSISTENT (1 << 3) -#define NF_NAT_RANGE_PROTO_RANDOM_FULLY (1 << 4) -#define NF_NAT_RANGE_PROTO_OFFSET (1 << 5) -#define NF_NAT_RANGE_NETMAP (1 << 6) -#define NF_NAT_RANGE_PROTO_RANDOM_ALL \ - (NF_NAT_RANGE_PROTO_RANDOM | NF_NAT_RANGE_PROTO_RANDOM_FULLY) -#define NF_NAT_RANGE_MASK \ - (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED | \ - NF_NAT_RANGE_PROTO_RANDOM | NF_NAT_RANGE_PERSISTENT | \ - NF_NAT_RANGE_PROTO_RANDOM_FULLY | NF_NAT_RANGE_PROTO_OFFSET | \ - NF_NAT_RANGE_NETMAP) +// Copied from linux/l2tp.h +// Including linux/l2tp.h here causes conflicts between linux/in.h +// and netinet/in.h included via net/route.h above. +#define IPPROTO_L2TP 115 // Copied from linux/hid.h. // Keep in sync with the size of the referenced fields. @@ -521,7 +519,6 @@ ccflags="$@" $2 ~ /^LOCK_(SH|EX|NB|UN)$/ || $2 ~ /^LO_(KEY|NAME)_SIZE$/ || $2 ~ /^LOOP_(CLR|CTL|GET|SET)_/ || - $2 == "LOOP_CONFIGURE" || $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|TCP|MCAST|EVFILT|NOTE|SHUT|PROT|MAP|MREMAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR|LOCAL|TCPOPT|UDP)_/ || $2 ~ /^NFC_(GENL|PROTO|COMM|RF|SE|DIRECTION|LLCP|SOCKPROTO)_/ || $2 ~ /^NFC_.*_(MAX)?SIZE$/ || @@ -563,7 +560,7 @@ ccflags="$@" $2 ~ /^RLIMIT_(AS|CORE|CPU|DATA|FSIZE|LOCKS|MEMLOCK|MSGQUEUE|NICE|NOFILE|NPROC|RSS|RTPRIO|RTTIME|SIGPENDING|STACK)|RLIM_INFINITY/ || $2 ~ /^PRIO_(PROCESS|PGRP|USER)/ || $2 ~ /^CLONE_[A-Z_]+/ || - $2 !~ /^(BPF_TIMEVAL|BPF_FIB_LOOKUP_[A-Z]+|BPF_F_LINK)$/ && + $2 !~ /^(BPF_TIMEVAL|BPF_FIB_LOOKUP_[A-Z]+)$/ && $2 ~ /^(BPF|DLT)_/ || $2 ~ /^AUDIT_/ || $2 ~ /^(CLOCK|TIMER)_/ || @@ -584,7 +581,7 @@ ccflags="$@" $2 ~ /^KEY_(SPEC|REQKEY_DEFL)_/ || $2 ~ /^KEYCTL_/ || $2 ~ /^PERF_/ || - $2 ~ /^SECCOMP_/ || + $2 ~ /^SECCOMP_MODE_/ || $2 ~ /^SEEK_/ || $2 ~ /^SCHED_/ || $2 ~ /^SPLICE_/ || @@ -605,9 +602,6 @@ ccflags="$@" $2 ~ /^FSOPT_/ || $2 ~ /^WDIO[CFS]_/ || $2 ~ /^NFN/ || - $2 !~ /^NFT_META_IIFTYPE/ && - $2 ~ /^NFT_/ || - $2 ~ /^NF_NAT_/ || $2 ~ /^XDP_/ || $2 ~ /^RWF_/ || $2 ~ /^(HDIO|WIN|SMART)_/ || @@ -669,6 +663,7 @@ echo '// mkerrors.sh' "$@" echo '// Code generated by the command above; see README.md. DO NOT EDIT.' echo echo "//go:build ${GOARCH} && ${GOOS}" +echo "// +build ${GOARCH},${GOOS}" echo go tool cgo -godefs -- "$@" _const.go >_error.out cat _error.out | grep -vf _error.grep | grep -vf _signal.grep diff --git a/vendor/golang.org/x/sys/unix/mmap_nomremap.go b/vendor/golang.org/x/sys/unix/mmap_nomremap.go index 7f602ffd..ca051363 100644 --- a/vendor/golang.org/x/sys/unix/mmap_nomremap.go +++ b/vendor/golang.org/x/sys/unix/mmap_nomremap.go @@ -2,7 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build aix || darwin || dragonfly || freebsd || openbsd || solaris || zos +//go:build aix || darwin || dragonfly || freebsd || openbsd || solaris +// +build aix darwin dragonfly freebsd openbsd solaris package unix diff --git a/vendor/golang.org/x/sys/unix/mremap.go b/vendor/golang.org/x/sys/unix/mremap.go index fd45fe52..fa93d0aa 100644 --- a/vendor/golang.org/x/sys/unix/mremap.go +++ b/vendor/golang.org/x/sys/unix/mremap.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. 
//go:build linux || netbsd +// +build linux netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/pagesize_unix.go b/vendor/golang.org/x/sys/unix/pagesize_unix.go index 0482408d..53f1b4c5 100644 --- a/vendor/golang.org/x/sys/unix/pagesize_unix.go +++ b/vendor/golang.org/x/sys/unix/pagesize_unix.go @@ -2,7 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris // For Unix, get the pagesize from the runtime. diff --git a/vendor/golang.org/x/sys/unix/pledge_openbsd.go b/vendor/golang.org/x/sys/unix/pledge_openbsd.go index 6a09af53..eb48294b 100644 --- a/vendor/golang.org/x/sys/unix/pledge_openbsd.go +++ b/vendor/golang.org/x/sys/unix/pledge_openbsd.go @@ -8,31 +8,54 @@ import ( "errors" "fmt" "strconv" + "syscall" + "unsafe" ) // Pledge implements the pledge syscall. // -// This changes both the promises and execpromises; use PledgePromises or -// PledgeExecpromises to only change the promises or execpromises -// respectively. +// The pledge syscall does not accept execpromises on OpenBSD releases +// before 6.3. +// +// execpromises must be empty when Pledge is called on OpenBSD +// releases predating 6.3, otherwise an error will be returned. // // For more information see pledge(2). func Pledge(promises, execpromises string) error { - if err := pledgeAvailable(); err != nil { + maj, min, err := majmin() + if err != nil { return err } - pptr, err := BytePtrFromString(promises) + err = pledgeAvailable(maj, min, execpromises) if err != nil { return err } - exptr, err := BytePtrFromString(execpromises) + pptr, err := syscall.BytePtrFromString(promises) if err != nil { return err } - return pledge(pptr, exptr) + // This variable will hold either a nil unsafe.Pointer or + // an unsafe.Pointer to a string (execpromises). + var expr unsafe.Pointer + + // If we're running on OpenBSD > 6.2, pass execpromises to the syscall. + if maj > 6 || (maj == 6 && min > 2) { + exptr, err := syscall.BytePtrFromString(execpromises) + if err != nil { + return err + } + expr = unsafe.Pointer(exptr) + } + + _, _, e := syscall.Syscall(SYS_PLEDGE, uintptr(unsafe.Pointer(pptr)), uintptr(expr), 0) + if e != 0 { + return e + } + + return nil } // PledgePromises implements the pledge syscall. @@ -41,16 +64,30 @@ func Pledge(promises, execpromises string) error { // // For more information see pledge(2). func PledgePromises(promises string) error { - if err := pledgeAvailable(); err != nil { + maj, min, err := majmin() + if err != nil { + return err + } + + err = pledgeAvailable(maj, min, "") + if err != nil { return err } - pptr, err := BytePtrFromString(promises) + // This variable holds the execpromises and is always nil. + var expr unsafe.Pointer + + pptr, err := syscall.BytePtrFromString(promises) if err != nil { return err } - return pledge(pptr, nil) + _, _, e := syscall.Syscall(SYS_PLEDGE, uintptr(unsafe.Pointer(pptr)), uintptr(expr), 0) + if e != 0 { + return e + } + + return nil } // PledgeExecpromises implements the pledge syscall. @@ -59,16 +96,30 @@ func PledgePromises(promises string) error { // // For more information see pledge(2). 
func PledgeExecpromises(execpromises string) error { - if err := pledgeAvailable(); err != nil { + maj, min, err := majmin() + if err != nil { return err } - exptr, err := BytePtrFromString(execpromises) + err = pledgeAvailable(maj, min, execpromises) if err != nil { return err } - return pledge(nil, exptr) + // This variable holds the promises and is always nil. + var pptr unsafe.Pointer + + exptr, err := syscall.BytePtrFromString(execpromises) + if err != nil { + return err + } + + _, _, e := syscall.Syscall(SYS_PLEDGE, uintptr(pptr), uintptr(unsafe.Pointer(exptr)), 0) + if e != 0 { + return e + } + + return nil } // majmin returns major and minor version number for an OpenBSD system. @@ -96,15 +147,16 @@ func majmin() (major int, minor int, err error) { // pledgeAvailable checks for availability of the pledge(2) syscall // based on the running OpenBSD version. -func pledgeAvailable() error { - maj, min, err := majmin() - if err != nil { - return err +func pledgeAvailable(maj, min int, execpromises string) error { + // If OpenBSD <= 5.9, pledge is not available. + if (maj == 5 && min != 9) || maj < 5 { + return fmt.Errorf("pledge syscall is not available on OpenBSD %d.%d", maj, min) } - // Require OpenBSD 6.4 as a minimum. - if maj < 6 || (maj == 6 && min <= 3) { - return fmt.Errorf("cannot call Pledge on OpenBSD %d.%d", maj, min) + // If OpenBSD <= 6.2 and execpromises is not empty, + // return an error - execpromises is not available before 6.3 + if (maj < 6 || (maj == 6 && min <= 2)) && execpromises != "" { + return fmt.Errorf("cannot use execpromises on OpenBSD %d.%d", maj, min) } return nil diff --git a/vendor/golang.org/x/sys/unix/ptrace_darwin.go b/vendor/golang.org/x/sys/unix/ptrace_darwin.go index 3f0975f3..463c3eff 100644 --- a/vendor/golang.org/x/sys/unix/ptrace_darwin.go +++ b/vendor/golang.org/x/sys/unix/ptrace_darwin.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build darwin && !ios +// +build darwin,!ios package unix diff --git a/vendor/golang.org/x/sys/unix/ptrace_ios.go b/vendor/golang.org/x/sys/unix/ptrace_ios.go index a4d35db5..ed0509a0 100644 --- a/vendor/golang.org/x/sys/unix/ptrace_ios.go +++ b/vendor/golang.org/x/sys/unix/ptrace_ios.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build ios +// +build ios package unix diff --git a/vendor/golang.org/x/sys/unix/race.go b/vendor/golang.org/x/sys/unix/race.go index 714d2aae..6f6c5fec 100644 --- a/vendor/golang.org/x/sys/unix/race.go +++ b/vendor/golang.org/x/sys/unix/race.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build (darwin && race) || (linux && race) || (freebsd && race) +// +build darwin,race linux,race freebsd,race package unix diff --git a/vendor/golang.org/x/sys/unix/race0.go b/vendor/golang.org/x/sys/unix/race0.go index 4a9f6634..706e1322 100644 --- a/vendor/golang.org/x/sys/unix/race0.go +++ b/vendor/golang.org/x/sys/unix/race0.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. 
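The reverted pledge_openbsd.go gates features by OS version before issuing the syscall: pledge(2) itself needs OpenBSD 5.9 or newer, and a non-empty execpromises argument needs 6.3 or newer, otherwise the wrapper returns an error up front. A rough usage sketch of the wrapper as it exists in this pinned revision:

    //go:build openbsd

    package main

    import (
        "log"

        "golang.org/x/sys/unix"
    )

    func main() {
        // Restrict the process to stdio and read-only filesystem access. On
        // OpenBSD 6.2 and older the second argument must be empty, otherwise
        // this wrapper refuses the call before reaching the kernel.
        if err := unix.Pledge("stdio rpath", ""); err != nil {
            log.Fatalf("pledge: %v", err)
        }

        // Promises can only be narrowed afterwards, never widened.
        if err := unix.PledgePromises("stdio"); err != nil {
            log.Fatalf("pledge promises: %v", err)
        }

        log.Println("running with stdio-only promises")
    }
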
//go:build aix || (darwin && !race) || (linux && !race) || (freebsd && !race) || netbsd || openbsd || solaris || dragonfly || zos +// +build aix darwin,!race linux,!race freebsd,!race netbsd openbsd solaris dragonfly zos package unix diff --git a/vendor/golang.org/x/sys/unix/readdirent_getdents.go b/vendor/golang.org/x/sys/unix/readdirent_getdents.go index dbd2b6cc..4d625756 100644 --- a/vendor/golang.org/x/sys/unix/readdirent_getdents.go +++ b/vendor/golang.org/x/sys/unix/readdirent_getdents.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build aix || dragonfly || freebsd || linux || netbsd || openbsd +// +build aix dragonfly freebsd linux netbsd openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go b/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go index b903c006..2a4ba47c 100644 --- a/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go +++ b/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go @@ -2,7 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build darwin || zos +//go:build darwin +// +build darwin package unix diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_unix.go b/vendor/golang.org/x/sys/unix/sockcmsg_unix.go index c3a62dbb..3865943f 100644 --- a/vendor/golang.org/x/sys/unix/sockcmsg_unix.go +++ b/vendor/golang.org/x/sys/unix/sockcmsg_unix.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos // Socket control messages diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go b/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go index 4a1eab37..0840fe4a 100644 --- a/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go +++ b/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || freebsd || linux || netbsd || openbsd || solaris || zos +// +build aix darwin freebsd linux netbsd openbsd solaris zos package unix diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_zos.go b/vendor/golang.org/x/sys/unix/sockcmsg_zos.go deleted file mode 100644 index 3e53dbc0..00000000 --- a/vendor/golang.org/x/sys/unix/sockcmsg_zos.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Socket control messages - -package unix - -import "unsafe" - -// UnixCredentials encodes credentials into a socket control message -// for sending to another process. This can be used for -// authentication. -func UnixCredentials(ucred *Ucred) []byte { - b := make([]byte, CmsgSpace(SizeofUcred)) - h := (*Cmsghdr)(unsafe.Pointer(&b[0])) - h.Level = SOL_SOCKET - h.Type = SCM_CREDENTIALS - h.SetLen(CmsgLen(SizeofUcred)) - *(*Ucred)(h.data(0)) = *ucred - return b -} - -// ParseUnixCredentials decodes a socket control message that contains -// credentials in a Ucred structure. To receive such a message, the -// SO_PASSCRED option must be enabled on the socket. 
-func ParseUnixCredentials(m *SocketControlMessage) (*Ucred, error) { - if m.Header.Level != SOL_SOCKET { - return nil, EINVAL - } - if m.Header.Type != SCM_CREDENTIALS { - return nil, EINVAL - } - ucred := *(*Ucred)(unsafe.Pointer(&m.Data[0])) - return &ucred, nil -} - -// PktInfo4 encodes Inet4Pktinfo into a socket control message of type IP_PKTINFO. -func PktInfo4(info *Inet4Pktinfo) []byte { - b := make([]byte, CmsgSpace(SizeofInet4Pktinfo)) - h := (*Cmsghdr)(unsafe.Pointer(&b[0])) - h.Level = SOL_IP - h.Type = IP_PKTINFO - h.SetLen(CmsgLen(SizeofInet4Pktinfo)) - *(*Inet4Pktinfo)(h.data(0)) = *info - return b -} - -// PktInfo6 encodes Inet6Pktinfo into a socket control message of type IPV6_PKTINFO. -func PktInfo6(info *Inet6Pktinfo) []byte { - b := make([]byte, CmsgSpace(SizeofInet6Pktinfo)) - h := (*Cmsghdr)(unsafe.Pointer(&b[0])) - h.Level = SOL_IPV6 - h.Type = IPV6_PKTINFO - h.SetLen(CmsgLen(SizeofInet6Pktinfo)) - *(*Inet6Pktinfo)(h.data(0)) = *info - return b -} diff --git a/vendor/golang.org/x/sys/unix/symaddr_zos_s390x.s b/vendor/golang.org/x/sys/unix/symaddr_zos_s390x.s deleted file mode 100644 index 3c4f33cb..00000000 --- a/vendor/golang.org/x/sys/unix/symaddr_zos_s390x.s +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build zos && s390x && gc - -#include "textflag.h" - -// provide the address of function variable to be fixed up. - -TEXT ·getPipe2Addr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·Pipe2(SB), R8 - MOVD R8, ret+0(FP) - RET - -TEXT ·get_FlockAddr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·Flock(SB), R8 - MOVD R8, ret+0(FP) - RET - -TEXT ·get_GetxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·Getxattr(SB), R8 - MOVD R8, ret+0(FP) - RET - -TEXT ·get_NanosleepAddr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·Nanosleep(SB), R8 - MOVD R8, ret+0(FP) - RET - -TEXT ·get_SetxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·Setxattr(SB), R8 - MOVD R8, ret+0(FP) - RET - -TEXT ·get_Wait4Addr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·Wait4(SB), R8 - MOVD R8, ret+0(FP) - RET - -TEXT ·get_MountAddr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·Mount(SB), R8 - MOVD R8, ret+0(FP) - RET - -TEXT ·get_UnmountAddr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·Unmount(SB), R8 - MOVD R8, ret+0(FP) - RET - -TEXT ·get_UtimesNanoAtAddr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·UtimesNanoAt(SB), R8 - MOVD R8, ret+0(FP) - RET - -TEXT ·get_UtimesNanoAddr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·UtimesNano(SB), R8 - MOVD R8, ret+0(FP) - RET - -TEXT ·get_MkfifoatAddr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·Mkfifoat(SB), R8 - MOVD R8, ret+0(FP) - RET - -TEXT ·get_ChtagAddr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·Chtag(SB), R8 - MOVD R8, ret+0(FP) - RET - -TEXT ·get_ReadlinkatAddr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·Readlinkat(SB), R8 - MOVD R8, ret+0(FP) - RET - diff --git a/vendor/golang.org/x/sys/unix/syscall.go b/vendor/golang.org/x/sys/unix/syscall.go index 5ea74da9..63e8c838 100644 --- a/vendor/golang.org/x/sys/unix/syscall.go +++ b/vendor/golang.org/x/sys/unix/syscall.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos // Package unix contains an interface to the low-level operating system // primitives. 
OS details vary depending on the underlying system, and diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go index 67ce6cef..e94e6cda 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build aix +// +build aix // Aix system calls. // This file is compiled as ordinary Go code, @@ -106,8 +107,7 @@ func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) { if n > 0 { sl += _Socklen(n) + 1 } - if sa.raw.Path[0] == '@' || (sa.raw.Path[0] == 0 && sl > 3) { - // Check sl > 3 so we don't change unnamed socket behavior. + if sa.raw.Path[0] == '@' { sa.raw.Path[0] = 0 // Don't count trailing NUL for abstract address. sl-- diff --git a/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go index 1fdaa476..f2871fa9 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build aix && ppc +// +build aix,ppc package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go index c87f9a9f..75718ec0 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build aix && ppc64 +// +build aix,ppc64 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go index a00c3e54..4217de51 100644 --- a/vendor/golang.org/x/sys/unix/syscall_bsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build darwin || dragonfly || freebsd || netbsd || openbsd +// +build darwin dragonfly freebsd netbsd openbsd // BSD system call wrappers shared by *BSD based systems // including OS X (Darwin) and FreeBSD. Like the other @@ -316,7 +317,7 @@ func GetsockoptString(fd, level, opt int) (string, error) { if err != nil { return "", err } - return ByteSliceToString(buf[:vallen]), nil + return string(buf[:vallen-1]), nil } //sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go index 0eaecf5f..b37310ce 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build amd64 && darwin +// +build amd64,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go index f36c6707..d51ec996 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. 
//go:build arm64 && darwin +// +build arm64,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go b/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go index 2f0fa76e..53c96641 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go @@ -2,7 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build darwin +//go:build darwin && go1.12 +// +build darwin,go1.12 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go index 14bab6b2..4e2d3212 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build amd64 && dragonfly +// +build amd64,dragonfly package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go index 2b57e0f7..64d1bb4d 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -13,7 +13,6 @@ package unix import ( - "errors" "sync" "unsafe" ) @@ -170,26 +169,25 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { func Uname(uname *Utsname) error { mib := []_C_int{CTL_KERN, KERN_OSTYPE} n := unsafe.Sizeof(uname.Sysname) - // Suppress ENOMEM errors to be compatible with the C library __xuname() implementation. - if err := sysctl(mib, &uname.Sysname[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { + if err := sysctl(mib, &uname.Sysname[0], &n, nil, 0); err != nil { return err } mib = []_C_int{CTL_KERN, KERN_HOSTNAME} n = unsafe.Sizeof(uname.Nodename) - if err := sysctl(mib, &uname.Nodename[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { + if err := sysctl(mib, &uname.Nodename[0], &n, nil, 0); err != nil { return err } mib = []_C_int{CTL_KERN, KERN_OSRELEASE} n = unsafe.Sizeof(uname.Release) - if err := sysctl(mib, &uname.Release[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { + if err := sysctl(mib, &uname.Release[0], &n, nil, 0); err != nil { return err } mib = []_C_int{CTL_KERN, KERN_VERSION} n = unsafe.Sizeof(uname.Version) - if err := sysctl(mib, &uname.Version[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { + if err := sysctl(mib, &uname.Version[0], &n, nil, 0); err != nil { return err } @@ -207,7 +205,7 @@ func Uname(uname *Utsname) error { mib = []_C_int{CTL_HW, HW_MACHINE} n = unsafe.Sizeof(uname.Machine) - if err := sysctl(mib, &uname.Machine[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { + if err := sysctl(mib, &uname.Machine[0], &n, nil, 0); err != nil { return err } diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go index 3967bca7..b8da5100 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build 386 && freebsd +// +build 386,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go index eff19ada..47155c48 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. 
//go:build amd64 && freebsd +// +build amd64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go index 4f24b517..08932093 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build arm && freebsd +// +build arm,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go index ac30759e..d151a0d0 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build arm64 && freebsd +// +build arm64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go index aab725ca..d5cd64b3 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build riscv64 && freebsd +// +build riscv64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_hurd.go b/vendor/golang.org/x/sys/unix/syscall_hurd.go index ba46651f..381fd467 100644 --- a/vendor/golang.org/x/sys/unix/syscall_hurd.go +++ b/vendor/golang.org/x/sys/unix/syscall_hurd.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build hurd +// +build hurd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_hurd_386.go b/vendor/golang.org/x/sys/unix/syscall_hurd_386.go index df89f9e6..7cf54a3e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_hurd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_hurd_386.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build 386 && hurd +// +build 386,hurd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_illumos.go b/vendor/golang.org/x/sys/unix/syscall_illumos.go index a863f705..87db5a6a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_illumos.go +++ b/vendor/golang.org/x/sys/unix/syscall_illumos.go @@ -5,6 +5,7 @@ // illumos system calls not present on Solaris. //go:build amd64 && illumos +// +build amd64,illumos package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 5682e262..fb4e5022 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -61,23 +61,15 @@ func FanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname string) ( } //sys fchmodat(dirfd int, path string, mode uint32) (err error) -//sys fchmodat2(dirfd int, path string, mode uint32, flags int) (err error) - -func Fchmodat(dirfd int, path string, mode uint32, flags int) error { - // Linux fchmodat doesn't support the flags parameter, but fchmodat2 does. - // Try fchmodat2 if flags are specified. - if flags != 0 { - err := fchmodat2(dirfd, path, mode, flags) - if err == ENOSYS { - // fchmodat2 isn't available. If the flags are known to be valid, - // return EOPNOTSUPP to indicate that fchmodat doesn't support them. 
- if flags&^(AT_SYMLINK_NOFOLLOW|AT_EMPTY_PATH) != 0 { - return EINVAL - } else if flags&(AT_SYMLINK_NOFOLLOW|AT_EMPTY_PATH) != 0 { - return EOPNOTSUPP - } - } - return err + +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + // Linux fchmodat doesn't support the flags parameter. Mimick glibc's behavior + // and check the flags. Otherwise the mode would be applied to the symlink + // destination which is not what the user expects. + if flags&^AT_SYMLINK_NOFOLLOW != 0 { + return EINVAL + } else if flags&AT_SYMLINK_NOFOLLOW != 0 { + return EOPNOTSUPP } return fchmodat(dirfd, path, mode) } @@ -425,8 +417,7 @@ func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) { if n > 0 { sl += _Socklen(n) + 1 } - if sa.raw.Path[0] == '@' || (sa.raw.Path[0] == 0 && sl > 3) { - // Check sl > 3 so we don't change unnamed socket behavior. + if sa.raw.Path[0] == '@' { sa.raw.Path[0] = 0 // Don't count trailing NUL for abstract address. sl-- @@ -1310,7 +1301,7 @@ func GetsockoptString(fd, level, opt int) (string, error) { return "", err } } - return ByteSliceToString(buf[:vallen]), nil + return string(buf[:vallen-1]), nil } func GetsockoptTpacketStats(fd, level, opt int) (*TpacketStats, error) { @@ -1849,105 +1840,6 @@ func Dup2(oldfd, newfd int) error { //sys Fsmount(fd int, flags int, mountAttrs int) (fsfd int, err error) //sys Fsopen(fsName string, flags int) (fd int, err error) //sys Fspick(dirfd int, pathName string, flags int) (fd int, err error) - -//sys fsconfig(fd int, cmd uint, key *byte, value *byte, aux int) (err error) - -func fsconfigCommon(fd int, cmd uint, key string, value *byte, aux int) (err error) { - var keyp *byte - if keyp, err = BytePtrFromString(key); err != nil { - return - } - return fsconfig(fd, cmd, keyp, value, aux) -} - -// FsconfigSetFlag is equivalent to fsconfig(2) called -// with cmd == FSCONFIG_SET_FLAG. -// -// fd is the filesystem context to act upon. -// key the parameter key to set. -func FsconfigSetFlag(fd int, key string) (err error) { - return fsconfigCommon(fd, FSCONFIG_SET_FLAG, key, nil, 0) -} - -// FsconfigSetString is equivalent to fsconfig(2) called -// with cmd == FSCONFIG_SET_STRING. -// -// fd is the filesystem context to act upon. -// key the parameter key to set. -// value is the parameter value to set. -func FsconfigSetString(fd int, key string, value string) (err error) { - var valuep *byte - if valuep, err = BytePtrFromString(value); err != nil { - return - } - return fsconfigCommon(fd, FSCONFIG_SET_STRING, key, valuep, 0) -} - -// FsconfigSetBinary is equivalent to fsconfig(2) called -// with cmd == FSCONFIG_SET_BINARY. -// -// fd is the filesystem context to act upon. -// key the parameter key to set. -// value is the parameter value to set. -func FsconfigSetBinary(fd int, key string, value []byte) (err error) { - if len(value) == 0 { - return EINVAL - } - return fsconfigCommon(fd, FSCONFIG_SET_BINARY, key, &value[0], len(value)) -} - -// FsconfigSetPath is equivalent to fsconfig(2) called -// with cmd == FSCONFIG_SET_PATH. -// -// fd is the filesystem context to act upon. -// key the parameter key to set. -// path is a non-empty path for specified key. -// atfd is a file descriptor at which to start lookup from or AT_FDCWD. 
-func FsconfigSetPath(fd int, key string, path string, atfd int) (err error) { - var valuep *byte - if valuep, err = BytePtrFromString(path); err != nil { - return - } - return fsconfigCommon(fd, FSCONFIG_SET_PATH, key, valuep, atfd) -} - -// FsconfigSetPathEmpty is equivalent to fsconfig(2) called -// with cmd == FSCONFIG_SET_PATH_EMPTY. The same as -// FconfigSetPath but with AT_PATH_EMPTY implied. -func FsconfigSetPathEmpty(fd int, key string, path string, atfd int) (err error) { - var valuep *byte - if valuep, err = BytePtrFromString(path); err != nil { - return - } - return fsconfigCommon(fd, FSCONFIG_SET_PATH_EMPTY, key, valuep, atfd) -} - -// FsconfigSetFd is equivalent to fsconfig(2) called -// with cmd == FSCONFIG_SET_FD. -// -// fd is the filesystem context to act upon. -// key the parameter key to set. -// value is a file descriptor to be assigned to specified key. -func FsconfigSetFd(fd int, key string, value int) (err error) { - return fsconfigCommon(fd, FSCONFIG_SET_FD, key, nil, value) -} - -// FsconfigCreate is equivalent to fsconfig(2) called -// with cmd == FSCONFIG_CMD_CREATE. -// -// fd is the filesystem context to act upon. -func FsconfigCreate(fd int) (err error) { - return fsconfig(fd, FSCONFIG_CMD_CREATE, nil, nil, 0) -} - -// FsconfigReconfigure is equivalent to fsconfig(2) called -// with cmd == FSCONFIG_CMD_RECONFIGURE. -// -// fd is the filesystem context to act upon. -func FsconfigReconfigure(fd int) (err error) { - return fsconfig(fd, FSCONFIG_CMD_RECONFIGURE, nil, nil, 0) -} - //sys Getdents(fd int, buf []byte) (n int, err error) = SYS_GETDENTS64 //sysnb Getpgid(pid int) (pgid int, err error) @@ -2590,5 +2482,3 @@ func SchedGetAttr(pid int, flags uint) (*SchedAttr, error) { } return attr, nil } - -//sys Cachestat(fd uint, crange *CachestatRange, cstat *Cachestat_t, flags uint) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_386.go index 506dafa7..c7d9945e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_386.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build 386 && linux +// +build 386,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_alarm.go b/vendor/golang.org/x/sys/unix/syscall_linux_alarm.go index 38d55641..08086ac6 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_alarm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_alarm.go @@ -3,6 +3,8 @@ // license that can be found in the LICENSE file. //go:build linux && (386 || amd64 || mips || mipsle || mips64 || mipsle || ppc64 || ppc64le || ppc || s390x || sparc64) +// +build linux +// +build 386 amd64 mips mipsle mips64 mipsle ppc64 ppc64le ppc s390x sparc64 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go index d557cf8d..70601ce3 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build amd64 && linux +// +build amd64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go index facdb83b..8b0f0f3a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. 
//go:build amd64 && linux && gc +// +build amd64,linux,gc package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go index cd2dd797..da298641 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build arm && linux +// +build arm,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go index cf2ee6c7..f5266689 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build arm64 && linux +// +build arm64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gc.go b/vendor/golang.org/x/sys/unix/syscall_linux_gc.go index ffc4c2b6..2b1168d7 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gc.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gc.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build linux && gc +// +build linux,gc package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go index 9ebfdcf4..9843fb48 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build linux && gc && 386 +// +build linux,gc,386 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go index 5f2b57c4..a6008fcc 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build arm && gc && linux +// +build arm,gc,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go index d1a3ad82..7740af24 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build linux && gccgo && 386 +// +build linux,gccgo,386 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go index f2f67423..e16a1229 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build linux && gccgo && arm +// +build linux,gccgo,arm package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go index 3d0e9845..f6ab02ec 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. 
//go:build loong64 && linux +// +build loong64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go index 70963a95..93fe59d2 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go @@ -3,6 +3,8 @@ // license that can be found in the LICENSE file. //go:build linux && (mips64 || mips64le) +// +build linux +// +build mips64 mips64le package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go index c218ebd2..aae7f0ff 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go @@ -3,6 +3,8 @@ // license that can be found in the LICENSE file. //go:build linux && (mips || mipsle) +// +build linux +// +build mips mipsle package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go index e6c48500..66eff19a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build linux && ppc +// +build linux,ppc package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go index 7286a9aa..806aa257 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go @@ -3,6 +3,8 @@ // license that can be found in the LICENSE file. //go:build linux && (ppc64 || ppc64le) +// +build linux +// +build ppc64 ppc64le package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go index 6f5a2889..5e6ceee1 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build riscv64 && linux +// +build riscv64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go index 66f31210..2f89e8f5 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build s390x && linux +// +build s390x,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go index 11d1f169..7ca064ae 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build sparc64 && linux +// +build sparc64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go index 7a5eb574..5199d282 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. 
//go:build 386 && netbsd +// +build 386,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go index 62d8957a..70a9c52e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build amd64 && netbsd +// +build amd64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go index ce6a0688..3eb5942f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build arm && netbsd +// +build arm,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go index d46d689d..fc6ccfd8 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build arm64 && netbsd +// +build arm64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go index b25343c7..6f34479b 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -137,13 +137,18 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e } func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { - var bufptr *Statfs_t + var _p0 unsafe.Pointer var bufsize uintptr if len(buf) > 0 { - bufptr = &buf[0] + _p0 = unsafe.Pointer(&buf[0]) bufsize = unsafe.Sizeof(Statfs_t{}) * uintptr(len(buf)) } - return getfsstat(bufptr, bufsize, flags) + r0, _, e1 := Syscall(SYS_GETFSSTAT, uintptr(_p0), bufsize, uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = e1 + } + return } //sysnb getresuid(ruid *_C_int, euid *_C_int, suid *_C_int) @@ -166,20 +171,6 @@ func Getresgid() (rgid, egid, sgid int) { //sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL -//sys fcntl(fd int, cmd int, arg int) (n int, err error) -//sys fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) = SYS_FCNTL - -// FcntlInt performs a fcntl syscall on fd with the provided command and argument. -func FcntlInt(fd uintptr, cmd, arg int) (int, error) { - return fcntl(int(fd), cmd, arg) -} - -// FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW command. 
-func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error { - _, err := fcntlPtr(int(fd), cmd, unsafe.Pointer(lk)) - return err -} - //sys ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) func Ppoll(fds []PollFd, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { @@ -335,7 +326,4 @@ func Uname(uname *Utsname) error { //sys write(fd int, p []byte) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) //sys munmap(addr uintptr, length uintptr) (err error) -//sys getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) //sys utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) -//sys pledge(promises *byte, execpromises *byte) (err error) -//sys unveil(path *byte, flags *byte) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go index 9ddc89f4..6baabcdc 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build 386 && openbsd +// +build 386,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go index 70a3c96e..bab25360 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build amd64 && openbsd +// +build amd64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go index 265caa87..8eed3c4d 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build arm && openbsd +// +build arm,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go index ac4fda17..483dde99 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build arm64 && openbsd +// +build arm64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go index 0a451e6d..04aa43f4 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build openbsd +// +build openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_ppc64.go index 30a308cb..c2796139 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_ppc64.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. 
//go:build ppc64 && openbsd +// +build ppc64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_riscv64.go index ea954330..23199a7f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_riscv64.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build riscv64 && openbsd +// +build riscv64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index 21974af0..b99cfa13 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -128,8 +128,7 @@ func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) { if n > 0 { sl += _Socklen(n) + 1 } - if sa.raw.Path[0] == '@' || (sa.raw.Path[0] == 0 && sl > 3) { - // Check sl > 3 so we don't change unnamed socket behavior. + if sa.raw.Path[0] == '@' { sa.raw.Path[0] = 0 // Don't count trailing NUL for abstract address. sl-- @@ -158,7 +157,7 @@ func GetsockoptString(fd, level, opt int) (string, error) { if err != nil { return "", err } - return ByteSliceToString(buf[:vallen]), nil + return string(buf[:vallen-1]), nil } const ImplementsGetwd = true diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go index e02d8cea..0bd25ef8 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build amd64 && solaris +// +build amd64,solaris package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go index 77081de8..f6eda270 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_unix_gc.go b/vendor/golang.org/x/sys/unix/syscall_unix_gc.go index 05c95bcc..b6919ca5 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix_gc.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix_gc.go @@ -3,6 +3,8 @@ // license that can be found in the LICENSE file. //go:build (darwin || dragonfly || freebsd || (linux && !ppc64 && !ppc64le) || netbsd || openbsd || solaris) && gc +// +build darwin dragonfly freebsd linux,!ppc64,!ppc64le netbsd openbsd solaris +// +build gc package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go index 23f39b7a..f6f707ac 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go @@ -3,6 +3,9 @@ // license that can be found in the LICENSE file. //go:build linux && (ppc64le || ppc64) && gc +// +build linux +// +build ppc64le ppc64 +// +build gc package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go index 312ae6ac..4596d041 100644 --- a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go @@ -3,22 +3,13 @@ // license that can be found in the LICENSE file. 
//go:build zos && s390x - -// Many of the following syscalls are not available on all versions of z/OS. -// Some missing calls have legacy implementations/simulations but others -// will be missing completely. To achieve consistent failing behaviour on -// legacy systems, we first test the function pointer via a safeloading -// mechanism to see if the function exists on a given system. Then execution -// is branched to either continue the function call, or return an error. +// +build zos,s390x package unix import ( "bytes" "fmt" - "os" - "reflect" - "regexp" "runtime" "sort" "strings" @@ -27,205 +18,17 @@ import ( "unsafe" ) -//go:noescape -func initZosLibVec() - -//go:noescape -func GetZosLibVec() uintptr - -func init() { - initZosLibVec() - r0, _, _ := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS_____GETENV_A<<4, uintptr(unsafe.Pointer(&([]byte("__ZOS_XSYSTRACE\x00"))[0]))) - if r0 != 0 { - n, _, _ := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS___ATOI_A<<4, r0) - ZosTraceLevel = int(n) - r0, _, _ := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS_____GETENV_A<<4, uintptr(unsafe.Pointer(&([]byte("__ZOS_XSYSTRACEFD\x00"))[0]))) - if r0 != 0 { - fd, _, _ := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS___ATOI_A<<4, r0) - f := os.NewFile(fd, "zostracefile") - if f != nil { - ZosTracefile = f - } - } - - } -} - -//go:noescape -func CallLeFuncWithErr(funcdesc uintptr, parms ...uintptr) (ret, errno2 uintptr, err Errno) - -//go:noescape -func CallLeFuncWithPtrReturn(funcdesc uintptr, parms ...uintptr) (ret, errno2 uintptr, err Errno) - -// ------------------------------- -// pointer validity test -// good pointer returns 0 -// bad pointer returns 1 -// -//go:nosplit -func ptrtest(uintptr) uint64 - -// Load memory at ptr location with error handling if the location is invalid -// -//go:noescape -func safeload(ptr uintptr) (value uintptr, error uintptr) - const ( - entrypointLocationOffset = 8 // From function descriptor - - xplinkEyecatcher = 0x00c300c500c500f1 // ".C.E.E.1" - eyecatcherOffset = 16 // From function entrypoint (negative) - ppa1LocationOffset = 8 // From function entrypoint (negative) - - nameLenOffset = 0x14 // From PPA1 start - nameOffset = 0x16 // From PPA1 start + O_CLOEXEC = 0 // Dummy value (not supported). 
+ AF_LOCAL = AF_UNIX // AF_LOCAL is an alias for AF_UNIX ) -func getPpaOffset(funcptr uintptr) int64 { - entrypoint, err := safeload(funcptr + entrypointLocationOffset) - if err != 0 { - return -1 - } - - // XPLink functions have ".C.E.E.1" as the first 8 bytes (EBCDIC) - val, err := safeload(entrypoint - eyecatcherOffset) - if err != 0 { - return -1 - } - if val != xplinkEyecatcher { - return -1 - } - - ppaoff, err := safeload(entrypoint - ppa1LocationOffset) - if err != 0 { - return -1 - } - - ppaoff >>= 32 - return int64(ppaoff) -} - -//------------------------------- -// function descriptor pointer validity test -// good pointer returns 0 -// bad pointer returns 1 - -// TODO: currently mksyscall_zos_s390x.go generate empty string for funcName -// have correct funcName pass to the funcptrtest function -func funcptrtest(funcptr uintptr, funcName string) uint64 { - entrypoint, err := safeload(funcptr + entrypointLocationOffset) - if err != 0 { - return 1 - } - - ppaoff := getPpaOffset(funcptr) - if ppaoff == -1 { - return 1 - } - - // PPA1 offset value is from the start of the entire function block, not the entrypoint - ppa1 := (entrypoint - eyecatcherOffset) + uintptr(ppaoff) - - nameLen, err := safeload(ppa1 + nameLenOffset) - if err != 0 { - return 1 - } - - nameLen >>= 48 - if nameLen > 128 { - return 1 - } - - // no function name input to argument end here - if funcName == "" { - return 0 - } - - var funcname [128]byte - for i := 0; i < int(nameLen); i += 8 { - v, err := safeload(ppa1 + nameOffset + uintptr(i)) - if err != 0 { - return 1 - } - funcname[i] = byte(v >> 56) - funcname[i+1] = byte(v >> 48) - funcname[i+2] = byte(v >> 40) - funcname[i+3] = byte(v >> 32) - funcname[i+4] = byte(v >> 24) - funcname[i+5] = byte(v >> 16) - funcname[i+6] = byte(v >> 8) - funcname[i+7] = byte(v) - } - - runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___E2A_L<<4, // __e2a_l - []uintptr{uintptr(unsafe.Pointer(&funcname[0])), nameLen}) - - name := string(funcname[:nameLen]) - if name != funcName { - return 1 - } - - return 0 -} - -// For detection of capabilities on a system. -// Is function descriptor f a valid function? 
-func isValidLeFunc(f uintptr) error { - ret := funcptrtest(f, "") - if ret != 0 { - return fmt.Errorf("Bad pointer, not an LE function ") - } - return nil -} - -// Retrieve function name from descriptor -func getLeFuncName(f uintptr) (string, error) { - // assume it has been checked, only check ppa1 validity here - entry := ((*[2]uintptr)(unsafe.Pointer(f)))[1] - preamp := ((*[4]uint32)(unsafe.Pointer(entry - eyecatcherOffset))) - - offsetPpa1 := preamp[2] - if offsetPpa1 > 0x0ffff { - return "", fmt.Errorf("PPA1 offset seems too big 0x%x\n", offsetPpa1) - } - - ppa1 := uintptr(unsafe.Pointer(preamp)) + uintptr(offsetPpa1) - res := ptrtest(ppa1) - if res != 0 { - return "", fmt.Errorf("PPA1 address not valid") - } - - size := *(*uint16)(unsafe.Pointer(ppa1 + nameLenOffset)) - if size > 128 { - return "", fmt.Errorf("Function name seems too long, length=%d\n", size) - } - - var name [128]byte - funcname := (*[128]byte)(unsafe.Pointer(ppa1 + nameOffset)) - copy(name[0:size], funcname[0:size]) - - runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___E2A_L<<4, // __e2a_l - []uintptr{uintptr(unsafe.Pointer(&name[0])), uintptr(size)}) - - return string(name[:size]), nil -} - -// Check z/OS version -func zosLeVersion() (version, release uint32) { - p1 := (*(*uintptr)(unsafe.Pointer(uintptr(1208)))) >> 32 - p1 = *(*uintptr)(unsafe.Pointer(uintptr(p1 + 88))) - p1 = *(*uintptr)(unsafe.Pointer(uintptr(p1 + 8))) - p1 = *(*uintptr)(unsafe.Pointer(uintptr(p1 + 984))) - vrm := *(*uint32)(unsafe.Pointer(p1 + 80)) - version = (vrm & 0x00ff0000) >> 16 - release = (vrm & 0x0000ff00) >> 8 - return -} - -// returns a zos C FILE * for stdio fd 0, 1, 2 -func ZosStdioFilep(fd int32) uintptr { - return uintptr(*(*uint64)(unsafe.Pointer(uintptr(*(*uint64)(unsafe.Pointer(uintptr(*(*uint64)(unsafe.Pointer(uintptr(uint64(*(*uint32)(unsafe.Pointer(uintptr(1208)))) + 80))) + uint64((fd+2)<<3)))))))) -} +func syscall_syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) +func syscall_rawsyscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) +func syscall_syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) +func syscall_rawsyscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) +func syscall_syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err Errno) +func syscall_rawsyscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err Errno) func copyStat(stat *Stat_t, statLE *Stat_LE_t) { stat.Dev = uint64(statLE.Dev) @@ -263,21 +66,6 @@ func (d *Dirent) NameString() string { } } -func DecodeData(dest []byte, sz int, val uint64) { - for i := 0; i < sz; i++ { - dest[sz-1-i] = byte((val >> (uint64(i * 8))) & 0xff) - } -} - -func EncodeData(data []byte) uint64 { - var value uint64 - sz := len(data) - for i := 0; i < sz; i++ { - value |= uint64(data[i]) << uint64(((sz - i - 1) * 8)) - } - return value -} - func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) { if sa.Port < 0 || sa.Port > 0xFFFF { return nil, 0, EINVAL @@ -287,9 +75,7 @@ func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) { p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) p[0] = byte(sa.Port >> 8) p[1] = byte(sa.Port) - for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } + sa.raw.Addr = sa.Addr return unsafe.Pointer(&sa.raw), _Socklen(sa.raw.Len), nil } @@ -303,9 +89,7 @@ func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, _Socklen, error) { p[0] = byte(sa.Port >> 8) p[1] = byte(sa.Port) sa.raw.Scope_id = sa.ZoneId - 
for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } + sa.raw.Addr = sa.Addr return unsafe.Pointer(&sa.raw), _Socklen(sa.raw.Len), nil } @@ -363,9 +147,7 @@ func anyToSockaddr(_ int, rsa *RawSockaddrAny) (Sockaddr, error) { sa := new(SockaddrInet4) p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } + sa.Addr = pp.Addr return sa, nil case AF_INET6: @@ -374,9 +156,7 @@ func anyToSockaddr(_ int, rsa *RawSockaddrAny) (Sockaddr, error) { p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) sa.ZoneId = pp.Scope_id - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } + sa.Addr = pp.Addr return sa, nil } return nil, EAFNOSUPPORT @@ -398,43 +178,6 @@ func Accept(fd int) (nfd int, sa Sockaddr, err error) { return } -func Accept4(fd int, flags int) (nfd int, sa Sockaddr, err error) { - var rsa RawSockaddrAny - var len _Socklen = SizeofSockaddrAny - nfd, err = accept4(fd, &rsa, &len, flags) - if err != nil { - return - } - if len > SizeofSockaddrAny { - panic("RawSockaddrAny too small") - } - // TODO(neeilan): Remove 0 in call - sa, err = anyToSockaddr(0, &rsa) - if err != nil { - Close(nfd) - nfd = 0 - } - return -} - -func Ctermid() (tty string, err error) { - var termdev [1025]byte - runtime.EnterSyscall() - r0, err2, err1 := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS___CTERMID_A<<4, uintptr(unsafe.Pointer(&termdev[0]))) - runtime.ExitSyscall() - if r0 == 0 { - return "", fmt.Errorf("%s (errno2=0x%x)\n", err1.Error(), err2) - } - s := string(termdev[:]) - idx := strings.Index(s, string(rune(0))) - if idx == -1 { - tty = s - } else { - tty = s[:idx] - } - return -} - func (iov *Iovec) SetLen(length int) { iov.Len = uint64(length) } @@ -448,16 +191,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { } //sys fcntl(fd int, cmd int, arg int) (val int, err error) -//sys Flistxattr(fd int, dest []byte) (sz int, err error) = SYS___FLISTXATTR_A -//sys Fremovexattr(fd int, attr string) (err error) = SYS___FREMOVEXATTR_A //sys read(fd int, p []byte) (n int, err error) //sys write(fd int, p []byte) (n int, err error) -//sys Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) = SYS___FGETXATTR_A -//sys Fsetxattr(fd int, attr string, data []byte, flag int) (err error) = SYS___FSETXATTR_A - //sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) = SYS___ACCEPT_A -//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) = SYS___ACCEPT4_A //sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) = SYS___BIND_A //sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) = SYS___CONNECT_A //sysnb getgroups(n int, list *_Gid_t) (nn int, err error) @@ -468,7 +205,6 @@ func (cmsg *Cmsghdr) SetLen(length int) { //sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) //sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) = SYS___GETPEERNAME_A //sysnb getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) = SYS___GETSOCKNAME_A -//sys Removexattr(path string, attr string) (err error) = SYS___REMOVEXATTR_A //sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) = SYS___RECVFROM_A //sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) = SYS___SENDTO_A //sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) = SYS___RECVMSG_A @@ -477,10 
+213,6 @@ func (cmsg *Cmsghdr) SetLen(length int) { //sys munmap(addr uintptr, length uintptr) (err error) = SYS_MUNMAP //sys ioctl(fd int, req int, arg uintptr) (err error) = SYS_IOCTL //sys ioctlPtr(fd int, req int, arg unsafe.Pointer) (err error) = SYS_IOCTL -//sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error) = SYS_SHMAT -//sys shmctl(id int, cmd int, buf *SysvShmDesc) (result int, err error) = SYS_SHMCTL64 -//sys shmdt(addr uintptr) (err error) = SYS_SHMDT -//sys shmget(key int, size int, flag int) (id int, err error) = SYS_SHMGET //sys Access(path string, mode uint32) (err error) = SYS___ACCESS_A //sys Chdir(path string) (err error) = SYS___CHDIR_A @@ -489,31 +221,14 @@ func (cmsg *Cmsghdr) SetLen(length int) { //sys Creat(path string, mode uint32) (fd int, err error) = SYS___CREAT_A //sys Dup(oldfd int) (fd int, err error) //sys Dup2(oldfd int, newfd int) (err error) -//sys Dup3(oldfd int, newfd int, flags int) (err error) = SYS_DUP3 -//sys Dirfd(dirp uintptr) (fd int, err error) = SYS_DIRFD -//sys EpollCreate(size int) (fd int, err error) = SYS_EPOLL_CREATE -//sys EpollCreate1(flags int) (fd int, err error) = SYS_EPOLL_CREATE1 -//sys EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) = SYS_EPOLL_CTL -//sys EpollPwait(epfd int, events []EpollEvent, msec int, sigmask *int) (n int, err error) = SYS_EPOLL_PWAIT -//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) = SYS_EPOLL_WAIT //sys Errno2() (er2 int) = SYS___ERRNO2 -//sys Eventfd(initval uint, flags int) (fd int, err error) = SYS_EVENTFD +//sys Err2ad() (eadd *int) = SYS___ERR2AD //sys Exit(code int) -//sys Faccessat(dirfd int, path string, mode uint32, flags int) (err error) = SYS___FACCESSAT_A - -func Faccessat2(dirfd int, path string, mode uint32, flags int) (err error) { - return Faccessat(dirfd, path, mode, flags) -} - //sys Fchdir(fd int) (err error) //sys Fchmod(fd int, mode uint32) (err error) -//sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) = SYS___FCHMODAT_A //sys Fchown(fd int, uid int, gid int) (err error) -//sys Fchownat(fd int, path string, uid int, gid int, flags int) (err error) = SYS___FCHOWNAT_A //sys FcntlInt(fd uintptr, cmd int, arg int) (retval int, err error) = SYS_FCNTL -//sys Fdatasync(fd int) (err error) = SYS_FDATASYNC //sys fstat(fd int, stat *Stat_LE_t) (err error) -//sys fstatat(dirfd int, path string, stat *Stat_LE_t, flags int) (err error) = SYS___FSTATAT_A func Fstat(fd int, stat *Stat_t) (err error) { var statLE Stat_LE_t @@ -522,208 +237,28 @@ func Fstat(fd int, stat *Stat_t) (err error) { return } -func Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) { - var statLE Stat_LE_t - err = fstatat(dirfd, path, &statLE, flags) - copyStat(stat, &statLE) - return -} - -func impl_Getxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___GETXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest))) - sz = int(r0) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -//go:nosplit -func get_GetxattrAddr() *(func(path string, attr string, dest []byte) (sz int, err error)) - -var Getxattr = 
enter_Getxattr - -func enter_Getxattr(path string, attr string, dest []byte) (sz int, err error) { - funcref := get_GetxattrAddr() - if validGetxattr() { - *funcref = impl_Getxattr - } else { - *funcref = error_Getxattr - } - return (*funcref)(path, attr, dest) -} - -func error_Getxattr(path string, attr string, dest []byte) (sz int, err error) { - return -1, ENOSYS -} - -func validGetxattr() bool { - if funcptrtest(GetZosLibVec()+SYS___GETXATTR_A<<4, "") == 0 { - if name, err := getLeFuncName(GetZosLibVec() + SYS___GETXATTR_A<<4); err == nil { - return name == "__getxattr_a" - } - } - return false -} - -//sys Lgetxattr(link string, attr string, dest []byte) (sz int, err error) = SYS___LGETXATTR_A -//sys Lsetxattr(path string, attr string, data []byte, flags int) (err error) = SYS___LSETXATTR_A - -func impl_Setxattr(path string, attr string, data []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 { - _p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___SETXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags)) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -//go:nosplit -func get_SetxattrAddr() *(func(path string, attr string, data []byte, flags int) (err error)) - -var Setxattr = enter_Setxattr - -func enter_Setxattr(path string, attr string, data []byte, flags int) (err error) { - funcref := get_SetxattrAddr() - if validSetxattr() { - *funcref = impl_Setxattr - } else { - *funcref = error_Setxattr - } - return (*funcref)(path, attr, data, flags) -} - -func error_Setxattr(path string, attr string, data []byte, flags int) (err error) { - return ENOSYS -} - -func validSetxattr() bool { - if funcptrtest(GetZosLibVec()+SYS___SETXATTR_A<<4, "") == 0 { - if name, err := getLeFuncName(GetZosLibVec() + SYS___SETXATTR_A<<4); err == nil { - return name == "__setxattr_a" - } - } - return false -} - -//sys Fstatfs(fd int, buf *Statfs_t) (err error) = SYS_FSTATFS //sys Fstatvfs(fd int, stat *Statvfs_t) (err error) = SYS_FSTATVFS //sys Fsync(fd int) (err error) -//sys Futimes(fd int, tv []Timeval) (err error) = SYS_FUTIMES -//sys Futimesat(dirfd int, path string, tv []Timeval) (err error) = SYS___FUTIMESAT_A //sys Ftruncate(fd int, length int64) (err error) -//sys Getrandom(buf []byte, flags int) (n int, err error) = SYS_GETRANDOM -//sys InotifyInit() (fd int, err error) = SYS_INOTIFY_INIT -//sys InotifyInit1(flags int) (fd int, err error) = SYS_INOTIFY_INIT1 -//sys InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) = SYS___INOTIFY_ADD_WATCH_A -//sys InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) = SYS_INOTIFY_RM_WATCH -//sys Listxattr(path string, dest []byte) (sz int, err error) = SYS___LISTXATTR_A -//sys Llistxattr(path string, dest []byte) (sz int, err error) = SYS___LLISTXATTR_A -//sys Lremovexattr(path string, attr string) (err error) = SYS___LREMOVEXATTR_A -//sys Lutimes(path string, tv []Timeval) (err error) = SYS___LUTIMES_A +//sys Getpagesize() (pgsize int) = SYS_GETPAGESIZE //sys Mprotect(b []byte, prot int) (err error) = SYS_MPROTECT //sys Msync(b []byte, flags int) (err error) = SYS_MSYNC -//sys Console2(cmsg *ConsMsg2, modstr *byte, concmd *uint32) (err error) = SYS___CONSOLE2 - -// Pipe2 
begin - -//go:nosplit -func getPipe2Addr() *(func([]int, int) error) - -var Pipe2 = pipe2Enter - -func pipe2Enter(p []int, flags int) (err error) { - if funcptrtest(GetZosLibVec()+SYS_PIPE2<<4, "") == 0 { - *getPipe2Addr() = pipe2Impl - } else { - *getPipe2Addr() = pipe2Error - } - return (*getPipe2Addr())(p, flags) -} - -func pipe2Impl(p []int, flags int) (err error) { - var pp [2]_C_int - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_PIPE2<<4, uintptr(unsafe.Pointer(&pp[0])), uintptr(flags)) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } else { - p[0] = int(pp[0]) - p[1] = int(pp[1]) - } - return -} -func pipe2Error(p []int, flags int) (err error) { - return fmt.Errorf("Pipe2 is not available on this system") -} - -// Pipe2 end - //sys Poll(fds []PollFd, timeout int) (n int, err error) = SYS_POLL - -func Readdir(dir uintptr) (dirent *Dirent, err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___READDIR_A<<4, uintptr(dir)) - runtime.ExitSyscall() - dirent = (*Dirent)(unsafe.Pointer(r0)) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -//sys Readdir_r(dirp uintptr, entry *direntLE, result **direntLE) (err error) = SYS___READDIR_R_A -//sys Statfs(path string, buf *Statfs_t) (err error) = SYS___STATFS_A -//sys Syncfs(fd int) (err error) = SYS_SYNCFS //sys Times(tms *Tms) (ticks uintptr, err error) = SYS_TIMES //sys W_Getmntent(buff *byte, size int) (lastsys int, err error) = SYS_W_GETMNTENT //sys W_Getmntent_A(buff *byte, size int) (lastsys int, err error) = SYS___W_GETMNTENT_A //sys mount_LE(path string, filesystem string, fstype string, mtm uint32, parmlen int32, parm string) (err error) = SYS___MOUNT_A -//sys unmount_LE(filesystem string, mtm int) (err error) = SYS___UMOUNT_A +//sys unmount(filesystem string, mtm int) (err error) = SYS___UMOUNT_A //sys Chroot(path string) (err error) = SYS___CHROOT_A //sys Select(nmsgsfds int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (ret int, err error) = SYS_SELECT -//sysnb Uname(buf *Utsname) (err error) = SYS_____OSNAME_A -//sys Unshare(flags int) (err error) = SYS_UNSHARE +//sysnb Uname(buf *Utsname) (err error) = SYS___UNAME_A func Ptsname(fd int) (name string, err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS___PTSNAME_A<<4, uintptr(fd)) - runtime.ExitSyscall() - if r0 == 0 { - err = errnoErr2(e1, e2) - } else { - name = u2s(unsafe.Pointer(r0)) + r0, _, e1 := syscall_syscall(SYS___PTSNAME_A, uintptr(fd), 0, 0) + name = u2s(unsafe.Pointer(r0)) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -738,19 +273,13 @@ func u2s(cstr unsafe.Pointer) string { } func Close(fd int) (err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_CLOSE<<4, uintptr(fd)) - runtime.ExitSyscall() + _, _, e1 := syscall_syscall(SYS_CLOSE, uintptr(fd), 0, 0) for i := 0; e1 == EAGAIN && i < 10; i++ { - runtime.EnterSyscall() - CallLeFuncWithErr(GetZosLibVec()+SYS_USLEEP<<4, uintptr(10)) - runtime.ExitSyscall() - runtime.EnterSyscall() - r0, e2, e1 = CallLeFuncWithErr(GetZosLibVec()+SYS_CLOSE<<4, uintptr(fd)) - runtime.ExitSyscall() + _, _, _ = syscall_syscall(SYS_USLEEP, uintptr(10), 0, 0) + _, _, e1 = syscall_syscall(SYS_CLOSE, uintptr(fd), 0, 0) } - if r0 != 0 { - err = errnoErr2(e1, e2) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -760,15 +289,9 @@ func Madvise(b []byte, advice int) (err error) { return } -func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { - return 
mapper.Mmap(fd, offset, length, prot, flags) -} - -func Munmap(b []byte) (err error) { - return mapper.Munmap(b) -} - //sys Gethostname(buf []byte) (err error) = SYS___GETHOSTNAME_A +//sysnb Getegid() (egid int) +//sysnb Geteuid() (uid int) //sysnb Getgid() (gid int) //sysnb Getpid() (pid int) //sysnb Getpgid(pid int) (pgid int, err error) = SYS_GETPGID @@ -795,14 +318,11 @@ func Getrusage(who int, rusage *Rusage) (err error) { return } -//sys Getegid() (egid int) = SYS_GETEGID -//sys Geteuid() (euid int) = SYS_GETEUID //sysnb Getsid(pid int) (sid int, err error) = SYS_GETSID //sysnb Getuid() (uid int) //sysnb Kill(pid int, sig Signal) (err error) //sys Lchown(path string, uid int, gid int) (err error) = SYS___LCHOWN_A //sys Link(path string, link string) (err error) = SYS___LINK_A -//sys Linkat(oldDirFd int, oldPath string, newDirFd int, newPath string, flags int) (err error) = SYS___LINKAT_A //sys Listen(s int, n int) (err error) //sys lstat(path string, stat *Stat_LE_t) (err error) = SYS___LSTAT_A @@ -813,150 +333,15 @@ func Lstat(path string, stat *Stat_t) (err error) { return } -// for checking symlinks begins with $VERSION/ $SYSNAME/ $SYSSYMR/ $SYSSYMA/ -func isSpecialPath(path []byte) (v bool) { - var special = [4][8]byte{ - [8]byte{'V', 'E', 'R', 'S', 'I', 'O', 'N', '/'}, - [8]byte{'S', 'Y', 'S', 'N', 'A', 'M', 'E', '/'}, - [8]byte{'S', 'Y', 'S', 'S', 'Y', 'M', 'R', '/'}, - [8]byte{'S', 'Y', 'S', 'S', 'Y', 'M', 'A', '/'}} - - var i, j int - for i = 0; i < len(special); i++ { - for j = 0; j < len(special[i]); j++ { - if path[j] != special[i][j] { - break - } - } - if j == len(special[i]) { - return true - } - } - return false -} - -func realpath(srcpath string, abspath []byte) (pathlen int, errno int) { - var source [1024]byte - copy(source[:], srcpath) - source[len(srcpath)] = 0 - ret := runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___REALPATH_A<<4, //__realpath_a() - []uintptr{uintptr(unsafe.Pointer(&source[0])), - uintptr(unsafe.Pointer(&abspath[0]))}) - if ret != 0 { - index := bytes.IndexByte(abspath[:], byte(0)) - if index != -1 { - return index, 0 - } - } else { - errptr := (*int)(unsafe.Pointer(runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___ERRNO<<4, []uintptr{}))) //__errno() - return 0, *errptr - } - return 0, 245 // EBADDATA 245 -} - -func Readlink(path string, buf []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(buf) > 0 { - _p1 = unsafe.Pointer(&buf[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - n = int(runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___READLINK_A<<4, - []uintptr{uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))})) - runtime.KeepAlive(unsafe.Pointer(_p0)) - if n == -1 { - value := *(*int32)(unsafe.Pointer(runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___ERRNO<<4, []uintptr{}))) - err = errnoErr(Errno(value)) - } else { - if buf[0] == '$' { - if isSpecialPath(buf[1:9]) { - cnt, err1 := realpath(path, buf) - if err1 == 0 { - n = cnt - } - } - } - } - return -} - -func impl_Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(buf) > 0 { - _p1 = unsafe.Pointer(&buf[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___READLINKAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) - 
runtime.ExitSyscall() - n = int(r0) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - return n, err - } else { - if buf[0] == '$' { - if isSpecialPath(buf[1:9]) { - cnt, err1 := realpath(path, buf) - if err1 == 0 { - n = cnt - } - } - } - } - return -} - -//go:nosplit -func get_ReadlinkatAddr() *(func(dirfd int, path string, buf []byte) (n int, err error)) - -var Readlinkat = enter_Readlinkat - -func enter_Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { - funcref := get_ReadlinkatAddr() - if funcptrtest(GetZosLibVec()+SYS___READLINKAT_A<<4, "") == 0 { - *funcref = impl_Readlinkat - } else { - *funcref = error_Readlinkat - } - return (*funcref)(dirfd, path, buf) -} - -func error_Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { - n = -1 - err = ENOSYS - return -} - //sys Mkdir(path string, mode uint32) (err error) = SYS___MKDIR_A -//sys Mkdirat(dirfd int, path string, mode uint32) (err error) = SYS___MKDIRAT_A //sys Mkfifo(path string, mode uint32) (err error) = SYS___MKFIFO_A //sys Mknod(path string, mode uint32, dev int) (err error) = SYS___MKNOD_A -//sys Mknodat(dirfd int, path string, mode uint32, dev int) (err error) = SYS___MKNODAT_A -//sys PivotRoot(newroot string, oldroot string) (err error) = SYS___PIVOT_ROOT_A //sys Pread(fd int, p []byte, offset int64) (n int, err error) //sys Pwrite(fd int, p []byte, offset int64) (n int, err error) -//sys Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) = SYS___PRCTL_A -//sysnb Prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) = SYS_PRLIMIT +//sys Readlink(path string, buf []byte) (n int, err error) = SYS___READLINK_A //sys Rename(from string, to string) (err error) = SYS___RENAME_A -//sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) = SYS___RENAMEAT_A -//sys Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) = SYS___RENAMEAT2_A //sys Rmdir(path string) (err error) = SYS___RMDIR_A //sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK -//sys Setegid(egid int) (err error) = SYS_SETEGID -//sys Seteuid(euid int) (err error) = SYS_SETEUID -//sys Sethostname(p []byte) (err error) = SYS___SETHOSTNAME_A -//sys Setns(fd int, nstype int) (err error) = SYS_SETNS //sys Setpriority(which int, who int, prio int) (err error) //sysnb Setpgid(pid int, pgid int) (err error) = SYS_SETPGID //sysnb Setrlimit(resource int, lim *Rlimit) (err error) @@ -976,57 +361,32 @@ func Stat(path string, sta *Stat_t) (err error) { } //sys Symlink(path string, link string) (err error) = SYS___SYMLINK_A -//sys Symlinkat(oldPath string, dirfd int, newPath string) (err error) = SYS___SYMLINKAT_A //sys Sync() = SYS_SYNC //sys Truncate(path string, length int64) (err error) = SYS___TRUNCATE_A //sys Tcgetattr(fildes int, termptr *Termios) (err error) = SYS_TCGETATTR //sys Tcsetattr(fildes int, when int, termptr *Termios) (err error) = SYS_TCSETATTR //sys Umask(mask int) (oldmask int) //sys Unlink(path string) (err error) = SYS___UNLINK_A -//sys Unlinkat(dirfd int, path string, flags int) (err error) = SYS___UNLINKAT_A //sys Utime(path string, utim *Utimbuf) (err error) = SYS___UTIME_A //sys open(path string, mode int, perm uint32) (fd int, err error) = SYS___OPEN_A func Open(path string, mode int, perm uint32) (fd int, err error) { - if mode&O_ACCMODE == 0 { - mode |= O_RDONLY - } return open(path, mode, perm) } -//sys openat(dirfd int, path string, flags int, mode uint32) (fd int, 
err error) = SYS___OPENAT_A - -func Openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { - if flags&O_ACCMODE == 0 { - flags |= O_RDONLY +func Mkfifoat(dirfd int, path string, mode uint32) (err error) { + wd, err := Getwd() + if err != nil { + return err } - return openat(dirfd, path, flags, mode) -} -//sys openat2(dirfd int, path string, open_how *OpenHow, size int) (fd int, err error) = SYS___OPENAT2_A - -func Openat2(dirfd int, path string, how *OpenHow) (fd int, err error) { - if how.Flags&O_ACCMODE == 0 { - how.Flags |= O_RDONLY + if err := Fchdir(dirfd); err != nil { + return err } - return openat2(dirfd, path, how, SizeofOpenHow) -} + defer Chdir(wd) -func ZosFdToPath(dirfd int) (path string, err error) { - var buffer [1024]byte - runtime.EnterSyscall() - ret, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_W_IOCTL<<4, uintptr(dirfd), 17, 1024, uintptr(unsafe.Pointer(&buffer[0]))) - runtime.ExitSyscall() - if ret == 0 { - zb := bytes.IndexByte(buffer[:], 0) - if zb == -1 { - zb = len(buffer) - } - CallLeFuncWithErr(GetZosLibVec()+SYS___E2A_L<<4, uintptr(unsafe.Pointer(&buffer[0])), uintptr(zb)) - return string(buffer[:zb]), nil - } - return "", errnoErr2(e1, e2) + return Mkfifo(path, mode) } //sys remove(path string) (err error) @@ -1044,12 +404,10 @@ func Getcwd(buf []byte) (n int, err error) { } else { p = unsafe.Pointer(&_zero) } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS___GETCWD_A<<4, uintptr(p), uintptr(len(buf))) - runtime.ExitSyscall() + _, _, e := syscall_syscall(SYS___GETCWD_A, uintptr(p), uintptr(len(buf)), 0) n = clen(buf) + 1 - if r0 == 0 { - err = errnoErr2(e1, e2) + if e != 0 { + err = errnoErr(e) } return } @@ -1163,41 +521,9 @@ func (w WaitStatus) StopSignal() Signal { func (w WaitStatus) TrapCause() int { return -1 } -//sys waitid(idType int, id int, info *Siginfo, options int) (err error) - -func Waitid(idType int, id int, info *Siginfo, options int, rusage *Rusage) (err error) { - return waitid(idType, id, info, options) -} - //sys waitpid(pid int, wstatus *_C_int, options int) (wpid int, err error) -func impl_Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_WAIT4<<4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage))) - runtime.ExitSyscall() - wpid = int(r0) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -//go:nosplit -func get_Wait4Addr() *(func(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error)) - -var Wait4 = enter_Wait4 - -func enter_Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) { - funcref := get_Wait4Addr() - if funcptrtest(GetZosLibVec()+SYS_WAIT4<<4, "") == 0 { - *funcref = impl_Wait4 - } else { - *funcref = legacyWait4 - } - return (*funcref)(pid, wstatus, options, rusage) -} - -func legacyWait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) { +func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) { // TODO(mundaym): z/OS doesn't have wait4. I don't think getrusage does what we want. // At the moment rusage will not be touched. 
var status _C_int @@ -1246,62 +572,23 @@ func Pipe(p []int) (err error) { } var pp [2]_C_int err = pipe(&pp) - p[0] = int(pp[0]) - p[1] = int(pp[1]) + if err == nil { + p[0] = int(pp[0]) + p[1] = int(pp[1]) + } return } //sys utimes(path string, timeval *[2]Timeval) (err error) = SYS___UTIMES_A func Utimes(path string, tv []Timeval) (err error) { - if tv == nil { - return utimes(path, nil) - } if len(tv) != 2 { return EINVAL } return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) } -//sys utimensat(dirfd int, path string, ts *[2]Timespec, flags int) (err error) = SYS___UTIMENSAT_A - -func validUtimensat() bool { - if funcptrtest(GetZosLibVec()+SYS___UTIMENSAT_A<<4, "") == 0 { - if name, err := getLeFuncName(GetZosLibVec() + SYS___UTIMENSAT_A<<4); err == nil { - return name == "__utimensat_a" - } - } - return false -} - -// Begin UtimesNano - -//go:nosplit -func get_UtimesNanoAddr() *(func(path string, ts []Timespec) (err error)) - -var UtimesNano = enter_UtimesNano - -func enter_UtimesNano(path string, ts []Timespec) (err error) { - funcref := get_UtimesNanoAddr() - if validUtimensat() { - *funcref = utimesNanoImpl - } else { - *funcref = legacyUtimesNano - } - return (*funcref)(path, ts) -} - -func utimesNanoImpl(path string, ts []Timespec) (err error) { - if ts == nil { - return utimensat(AT_FDCWD, path, nil, 0) - } - if len(ts) != 2 { - return EINVAL - } - return utimensat(AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0) -} - -func legacyUtimesNano(path string, ts []Timespec) (err error) { +func UtimesNano(path string, ts []Timespec) error { if len(ts) != 2 { return EINVAL } @@ -1314,70 +601,6 @@ func legacyUtimesNano(path string, ts []Timespec) (err error) { return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) } -// End UtimesNano - -// Begin UtimesNanoAt - -//go:nosplit -func get_UtimesNanoAtAddr() *(func(dirfd int, path string, ts []Timespec, flags int) (err error)) - -var UtimesNanoAt = enter_UtimesNanoAt - -func enter_UtimesNanoAt(dirfd int, path string, ts []Timespec, flags int) (err error) { - funcref := get_UtimesNanoAtAddr() - if validUtimensat() { - *funcref = utimesNanoAtImpl - } else { - *funcref = legacyUtimesNanoAt - } - return (*funcref)(dirfd, path, ts, flags) -} - -func utimesNanoAtImpl(dirfd int, path string, ts []Timespec, flags int) (err error) { - if ts == nil { - return utimensat(dirfd, path, nil, flags) - } - if len(ts) != 2 { - return EINVAL - } - return utimensat(dirfd, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), flags) -} - -func legacyUtimesNanoAt(dirfd int, path string, ts []Timespec, flags int) (err error) { - if path[0] != '/' { - dirPath, err := ZosFdToPath(dirfd) - if err != nil { - return err - } - path = dirPath + "/" + path - } - if flags == AT_SYMLINK_NOFOLLOW { - if len(ts) != 2 { - return EINVAL - } - - if ts[0].Nsec >= 5e8 { - ts[0].Sec++ - } - ts[0].Nsec = 0 - if ts[1].Nsec >= 5e8 { - ts[1].Sec++ - } - ts[1].Nsec = 0 - - // Not as efficient as it could be because Timespec and - // Timeval have different types in the different OSes - tv := []Timeval{ - NsecToTimeval(TimespecToNsec(ts[0])), - NsecToTimeval(TimespecToNsec(ts[1])), - } - return Lutimes(path, tv) - } - return UtimesNano(path, ts) -} - -// End UtimesNanoAt - func Getsockname(fd int) (sa Sockaddr, err error) { var rsa RawSockaddrAny var len _Socklen = SizeofSockaddrAny @@ -1882,7 +1105,7 @@ func GetsockoptString(fd, level, opt int) (string, error) { return "", err } - return ByteSliceToString(buf[:vallen]), nil + return string(buf[:vallen-1]), nil } func Recvmsg(fd 
int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) { @@ -1969,41 +1192,62 @@ func Opendir(name string) (uintptr, error) { if err != nil { return 0, err } - err = nil - runtime.EnterSyscall() - dir, e2, e1 := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS___OPENDIR_A<<4, uintptr(unsafe.Pointer(p))) - runtime.ExitSyscall() - runtime.KeepAlive(unsafe.Pointer(p)) - if dir == 0 { - err = errnoErr2(e1, e2) + dir, _, e := syscall_syscall(SYS___OPENDIR_A, uintptr(unsafe.Pointer(p)), 0, 0) + runtime.KeepAlive(unsafe.Pointer(p)) + if e != 0 { + err = errnoErr(e) + } + return dir, err +} + +// clearsyscall.Errno resets the errno value to 0. +func clearErrno() + +func Readdir(dir uintptr) (*Dirent, error) { + var ent Dirent + var res uintptr + // __readdir_r_a returns errno at the end of the directory stream, rather than 0. + // Therefore to avoid false positives we clear errno before calling it. + + // TODO(neeilan): Commented this out to get sys/unix compiling on z/OS. Uncomment and fix. Error: "undefined: clearsyscall" + //clearsyscall.Errno() // TODO(mundaym): check pre-emption rules. + + e, _, _ := syscall_syscall(SYS___READDIR_R_A, dir, uintptr(unsafe.Pointer(&ent)), uintptr(unsafe.Pointer(&res))) + var err error + if e != 0 { + err = errnoErr(Errno(e)) + } + if res == 0 { + return nil, err } - return dir, err + return &ent, err } -// clearsyscall.Errno resets the errno value to 0. -func clearErrno() +func readdir_r(dirp uintptr, entry *direntLE, result **direntLE) (err error) { + r0, _, e1 := syscall_syscall(SYS___READDIR_R_A, dirp, uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result))) + if int64(r0) == -1 { + err = errnoErr(Errno(e1)) + } + return +} func Closedir(dir uintptr) error { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_CLOSEDIR<<4, dir) - runtime.ExitSyscall() - if r0 != 0 { - return errnoErr2(e1, e2) + _, _, e := syscall_syscall(SYS_CLOSEDIR, dir, 0, 0) + if e != 0 { + return errnoErr(e) } return nil } func Seekdir(dir uintptr, pos int) { - runtime.EnterSyscall() - CallLeFuncWithErr(GetZosLibVec()+SYS_SEEKDIR<<4, dir, uintptr(pos)) - runtime.ExitSyscall() + _, _, _ = syscall_syscall(SYS_SEEKDIR, dir, uintptr(pos), 0) } func Telldir(dir uintptr) (int, error) { - p, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_TELLDIR<<4, dir) + p, _, e := syscall_syscall(SYS_TELLDIR, dir, 0, 0) pos := int(p) - if int64(p) == -1 { - return pos, errnoErr2(e1, e2) + if pos == -1 { + return pos, errnoErr(e) } return pos, nil } @@ -2018,55 +1262,19 @@ func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error { *(*int64)(unsafe.Pointer(&flock[4])) = lk.Start *(*int64)(unsafe.Pointer(&flock[12])) = lk.Len *(*int32)(unsafe.Pointer(&flock[20])) = lk.Pid - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCNTL<<4, fd, uintptr(cmd), uintptr(unsafe.Pointer(&flock))) - runtime.ExitSyscall() + _, _, errno := syscall_syscall(SYS_FCNTL, fd, uintptr(cmd), uintptr(unsafe.Pointer(&flock))) lk.Type = *(*int16)(unsafe.Pointer(&flock[0])) lk.Whence = *(*int16)(unsafe.Pointer(&flock[2])) lk.Start = *(*int64)(unsafe.Pointer(&flock[4])) lk.Len = *(*int64)(unsafe.Pointer(&flock[12])) lk.Pid = *(*int32)(unsafe.Pointer(&flock[20])) - if r0 == 0 { + if errno == 0 { return nil } - return errnoErr2(e1, e2) -} - -func impl_Flock(fd int, how int) (err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FLOCK<<4, uintptr(fd), uintptr(how)) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = 
errnoErr2(e1, e2) - } - return -} - -//go:nosplit -func get_FlockAddr() *(func(fd int, how int) (err error)) - -var Flock = enter_Flock - -func validFlock(fp uintptr) bool { - if funcptrtest(GetZosLibVec()+SYS_FLOCK<<4, "") == 0 { - if name, err := getLeFuncName(GetZosLibVec() + SYS_FLOCK<<4); err == nil { - return name == "flock" - } - } - return false -} - -func enter_Flock(fd int, how int) (err error) { - funcref := get_FlockAddr() - if validFlock(GetZosLibVec() + SYS_FLOCK<<4) { - *funcref = impl_Flock - } else { - *funcref = legacyFlock - } - return (*funcref)(fd, how) + return errno } -func legacyFlock(fd int, how int) error { +func Flock(fd int, how int) error { var flock_type int16 var fcntl_cmd int @@ -2100,51 +1308,41 @@ func legacyFlock(fd int, how int) error { } func Mlock(b []byte) (err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MLOCKALL<<4, _BPX_NONSWAP) - runtime.ExitSyscall() - if r0 != 0 { - err = errnoErr2(e1, e2) + _, _, e1 := syscall_syscall(SYS___MLOCKALL, _BPX_NONSWAP, 0, 0) + if e1 != 0 { + err = errnoErr(e1) } return } func Mlock2(b []byte, flags int) (err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MLOCKALL<<4, _BPX_NONSWAP) - runtime.ExitSyscall() - if r0 != 0 { - err = errnoErr2(e1, e2) + _, _, e1 := syscall_syscall(SYS___MLOCKALL, _BPX_NONSWAP, 0, 0) + if e1 != 0 { + err = errnoErr(e1) } return } func Mlockall(flags int) (err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MLOCKALL<<4, _BPX_NONSWAP) - runtime.ExitSyscall() - if r0 != 0 { - err = errnoErr2(e1, e2) + _, _, e1 := syscall_syscall(SYS___MLOCKALL, _BPX_NONSWAP, 0, 0) + if e1 != 0 { + err = errnoErr(e1) } return } func Munlock(b []byte) (err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MLOCKALL<<4, _BPX_SWAP) - runtime.ExitSyscall() - if r0 != 0 { - err = errnoErr2(e1, e2) + _, _, e1 := syscall_syscall(SYS___MLOCKALL, _BPX_SWAP, 0, 0) + if e1 != 0 { + err = errnoErr(e1) } return } func Munlockall() (err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MLOCKALL<<4, _BPX_SWAP) - runtime.ExitSyscall() - if r0 != 0 { - err = errnoErr2(e1, e2) + _, _, e1 := syscall_syscall(SYS___MLOCKALL, _BPX_SWAP, 0, 0) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -2175,104 +1373,15 @@ func ClockGettime(clockid int32, ts *Timespec) error { return nil } -// Chtag - -//go:nosplit -func get_ChtagAddr() *(func(path string, ccsid uint64, textbit uint64) error) - -var Chtag = enter_Chtag - -func enter_Chtag(path string, ccsid uint64, textbit uint64) error { - funcref := get_ChtagAddr() - if validSetxattr() { - *funcref = impl_Chtag - } else { - *funcref = legacy_Chtag - } - return (*funcref)(path, ccsid, textbit) -} - -func legacy_Chtag(path string, ccsid uint64, textbit uint64) error { - tag := ccsid<<16 | textbit<<15 - var tag_buff [8]byte - DecodeData(tag_buff[:], 8, tag) - return Setxattr(path, "filetag", tag_buff[:], XATTR_REPLACE) -} - -func impl_Chtag(path string, ccsid uint64, textbit uint64) error { - tag := ccsid<<16 | textbit<<15 - var tag_buff [4]byte - DecodeData(tag_buff[:], 4, tag) - return Setxattr(path, "system.filetag", tag_buff[:], XATTR_REPLACE) -} - -// End of Chtag - -// Nanosleep - -//go:nosplit -func get_NanosleepAddr() *(func(time *Timespec, leftover *Timespec) error) - -var Nanosleep = enter_Nanosleep - -func enter_Nanosleep(time *Timespec, leftover *Timespec) error { - funcref := 
get_NanosleepAddr() - if funcptrtest(GetZosLibVec()+SYS_NANOSLEEP<<4, "") == 0 { - *funcref = impl_Nanosleep - } else { - *funcref = legacyNanosleep - } - return (*funcref)(time, leftover) -} - -func impl_Nanosleep(time *Timespec, leftover *Timespec) error { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_NANOSLEEP<<4, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover))) - runtime.ExitSyscall() - if int64(r0) == -1 { - return errnoErr2(e1, e2) - } - return nil -} - -func legacyNanosleep(time *Timespec, leftover *Timespec) error { - t0 := runtime.Nanotime1() - var secrem uint32 - var nsecrem uint32 - total := time.Sec*1000000000 + time.Nsec - elapsed := runtime.Nanotime1() - t0 - var rv int32 - var rc int32 - var err error - // repeatedly sleep for 1 second until less than 1 second left - for total-elapsed > 1000000000 { - rv, rc, _ = BpxCondTimedWait(uint32(1), uint32(0), uint32(CW_CONDVAR), &secrem, &nsecrem) - if rv != 0 && rc != 112 { // 112 is EAGAIN - if leftover != nil && rc == 120 { // 120 is EINTR - leftover.Sec = int64(secrem) - leftover.Nsec = int64(nsecrem) - } - err = Errno(rc) - return err - } - elapsed = runtime.Nanotime1() - t0 - } - // sleep the remainder - if total > elapsed { - rv, rc, _ = BpxCondTimedWait(uint32(0), uint32(total-elapsed), uint32(CW_CONDVAR), &secrem, &nsecrem) - } - if leftover != nil && rc == 120 { - leftover.Sec = int64(secrem) - leftover.Nsec = int64(nsecrem) - } - if rv != 0 && rc != 112 { - err = Errno(rc) +func Statfs(path string, stat *Statfs_t) (err error) { + fd, err := open(path, O_RDONLY, 0) + defer Close(fd) + if err != nil { + return err } - return err + return Fstatfs(fd, stat) } -// End of Nanosleep - var ( Stdin = 0 Stdout = 1 @@ -2287,9 +1396,6 @@ var ( errENOENT error = syscall.ENOENT ) -var ZosTraceLevel int -var ZosTracefile *os.File - var ( signalNameMapOnce sync.Once signalNameMap map[string]syscall.Signal @@ -2311,56 +1417,6 @@ func errnoErr(e Errno) error { return e } -var reg *regexp.Regexp - -// enhanced with zos specific errno2 -func errnoErr2(e Errno, e2 uintptr) error { - switch e { - case 0: - return nil - case EAGAIN: - return errEAGAIN - /* - Allow the retrieval of errno2 for EINVAL and ENOENT on zos - case EINVAL: - return errEINVAL - case ENOENT: - return errENOENT - */ - } - if ZosTraceLevel > 0 { - var name string - if reg == nil { - reg = regexp.MustCompile("(^unix\\.[^/]+$|.*\\/unix\\.[^/]+$)") - } - i := 1 - pc, file, line, ok := runtime.Caller(i) - if ok { - name = runtime.FuncForPC(pc).Name() - } - for ok && reg.MatchString(runtime.FuncForPC(pc).Name()) { - i += 1 - pc, file, line, ok = runtime.Caller(i) - } - if ok { - if ZosTracefile == nil { - ZosConsolePrintf("From %s:%d\n", file, line) - ZosConsolePrintf("%s: %s (errno2=0x%x)\n", name, e.Error(), e2) - } else { - fmt.Fprintf(ZosTracefile, "From %s:%d\n", file, line) - fmt.Fprintf(ZosTracefile, "%s: %s (errno2=0x%x)\n", name, e.Error(), e2) - } - } else { - if ZosTracefile == nil { - ZosConsolePrintf("%s (errno2=0x%x)\n", e.Error(), e2) - } else { - fmt.Fprintf(ZosTracefile, "%s (errno2=0x%x)\n", e.Error(), e2) - } - } - } - return e -} - // ErrnoName returns the error name for error number e. func ErrnoName(e Errno) string { i := sort.Search(len(errorList), func(i int) bool { @@ -2419,9 +1475,6 @@ func (m *mmapper) Mmap(fd int, offset int64, length int, prot int, flags int) (d return nil, EINVAL } - // Set __MAP_64 by default - flags |= __MAP_64 - // Map the requested memory. 
addr, errno := m.mmap(0, uintptr(length), prot, flags, fd, offset) if errno != nil { @@ -2726,170 +1779,83 @@ func Exec(argv0 string, argv []string, envv []string) error { return syscall.Exec(argv0, argv, envv) } -func Getag(path string) (ccsid uint16, flag uint16, err error) { - var val [8]byte - sz, err := Getxattr(path, "ccsid", val[:]) - if err != nil { - return - } - ccsid = uint16(EncodeData(val[0:sz])) - sz, err = Getxattr(path, "flags", val[:]) - if err != nil { - return - } - flag = uint16(EncodeData(val[0:sz]) >> 15) - return -} - -// Mount begin -func impl_Mount(source string, target string, fstype string, flags uintptr, data string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(source) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(target) - if err != nil { - return - } - var _p2 *byte - _p2, err = BytePtrFromString(fstype) - if err != nil { - return - } - var _p3 *byte - _p3, err = BytePtrFromString(data) - if err != nil { - return - } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MOUNT1_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(_p3))) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -//go:nosplit -func get_MountAddr() *(func(source string, target string, fstype string, flags uintptr, data string) (err error)) - -var Mount = enter_Mount - -func enter_Mount(source string, target string, fstype string, flags uintptr, data string) (err error) { - funcref := get_MountAddr() - if validMount() { - *funcref = impl_Mount - } else { - *funcref = legacyMount - } - return (*funcref)(source, target, fstype, flags, data) -} - -func legacyMount(source string, target string, fstype string, flags uintptr, data string) (err error) { +func Mount(source string, target string, fstype string, flags uintptr, data string) (err error) { if needspace := 8 - len(fstype); needspace <= 0 { - fstype = fstype[0:8] + fstype = fstype[:8] } else { - fstype += " "[0:needspace] + fstype += " "[:needspace] } return mount_LE(target, source, fstype, uint32(flags), int32(len(data)), data) } -func validMount() bool { - if funcptrtest(GetZosLibVec()+SYS___MOUNT1_A<<4, "") == 0 { - if name, err := getLeFuncName(GetZosLibVec() + SYS___MOUNT1_A<<4); err == nil { - return name == "__mount1_a" - } - } - return false -} - -// Mount end - -// Unmount begin -func impl_Unmount(target string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(target) - if err != nil { - return - } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___UMOUNT2_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -//go:nosplit -func get_UnmountAddr() *(func(target string, flags int) (err error)) - -var Unmount = enter_Unmount - -func enter_Unmount(target string, flags int) (err error) { - funcref := get_UnmountAddr() - if funcptrtest(GetZosLibVec()+SYS___UMOUNT2_A<<4, "") == 0 { - *funcref = impl_Unmount - } else { - *funcref = legacyUnmount - } - return (*funcref)(target, flags) -} - -func legacyUnmount(name string, mtm int) (err error) { +func Unmount(name string, mtm int) (err error) { // mountpoint is always a full path and starts with a '/' // check if input string is not a mountpoint but a filesystem name if name[0] != '/' { - return unmount_LE(name, mtm) + return unmount(name, mtm) } // treat 
name as mountpoint b2s := func(arr []byte) string { - var str string - for i := 0; i < len(arr); i++ { - if arr[i] == 0 { - str = string(arr[:i]) - break - } + nulli := bytes.IndexByte(arr, 0) + if nulli == -1 { + return string(arr) + } else { + return string(arr[:nulli]) } - return str } var buffer struct { header W_Mnth fsinfo [64]W_Mntent } - fs_count, err := W_Getmntent_A((*byte)(unsafe.Pointer(&buffer)), int(unsafe.Sizeof(buffer))) - if err == nil { - err = EINVAL - for i := 0; i < fs_count; i++ { - if b2s(buffer.fsinfo[i].Mountpoint[:]) == name { - err = unmount_LE(b2s(buffer.fsinfo[i].Fsname[:]), mtm) - break - } + fsCount, err := W_Getmntent_A((*byte)(unsafe.Pointer(&buffer)), int(unsafe.Sizeof(buffer))) + if err != nil { + return err + } + if fsCount == 0 { + return EINVAL + } + for i := 0; i < fsCount; i++ { + if b2s(buffer.fsinfo[i].Mountpoint[:]) == name { + err = unmount(b2s(buffer.fsinfo[i].Fsname[:]), mtm) + break } - } else if fs_count == 0 { - err = EINVAL } return err } -// Unmount end - -func direntIno(buf []byte) (uint64, bool) { - return readInt(buf, unsafe.Offsetof(Dirent{}.Ino), unsafe.Sizeof(Dirent{}.Ino)) -} - -func direntReclen(buf []byte) (uint64, bool) { - return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen)) -} - -func direntNamlen(buf []byte) (uint64, bool) { - reclen, ok := direntReclen(buf) - if !ok { - return 0, false +func fdToPath(dirfd int) (path string, err error) { + var buffer [1024]byte + // w_ctrl() + ret := runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS_W_IOCTL<<4, + []uintptr{uintptr(dirfd), 17, 1024, uintptr(unsafe.Pointer(&buffer[0]))}) + if ret == 0 { + zb := bytes.IndexByte(buffer[:], 0) + if zb == -1 { + zb = len(buffer) + } + // __e2a_l() + runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___E2A_L<<4, + []uintptr{uintptr(unsafe.Pointer(&buffer[0])), uintptr(zb)}) + return string(buffer[:zb]), nil + } + // __errno() + errno := int(*(*int32)(unsafe.Pointer(runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___ERRNO<<4, + []uintptr{})))) + // __errno2() + errno2 := int(runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___ERRNO2<<4, + []uintptr{})) + // strerror_r() + ret = runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS_STRERROR_R<<4, + []uintptr{uintptr(errno), uintptr(unsafe.Pointer(&buffer[0])), 1024}) + if ret == 0 { + zb := bytes.IndexByte(buffer[:], 0) + if zb == -1 { + zb = len(buffer) + } + return "", fmt.Errorf("%s (errno2=0x%x)", buffer[:zb], errno2) + } else { + return "", fmt.Errorf("fdToPath errno %d (errno2=0x%x)", errno, errno2) } - return reclen - uint64(unsafe.Offsetof(Dirent{}.Name)), true } func direntLeToDirentUnix(dirent *direntLE, dir uintptr, path string) (Dirent, error) { @@ -2931,7 +1897,7 @@ func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { } // Get path from fd to avoid unavailable call (fdopendir) - path, err := ZosFdToPath(fd) + path, err := fdToPath(fd) if err != nil { return 0, err } @@ -2945,7 +1911,7 @@ func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { for { var entryLE direntLE var entrypLE *direntLE - e := Readdir_r(d, &entryLE, &entrypLE) + e := readdir_r(d, &entryLE, &entrypLE) if e != nil { return n, e } @@ -2991,127 +1957,23 @@ func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { return n, nil } -func Err2ad() (eadd *int) { - r0, _, _ := CallLeFuncWithErr(GetZosLibVec() + SYS___ERR2AD<<4) - eadd = (*int)(unsafe.Pointer(r0)) - return -} - -func ZosConsolePrintf(format string, v ...interface{}) (int, 
error) { - type __cmsg struct { - _ uint16 - _ [2]uint8 - __msg_length uint32 - __msg uintptr - _ [4]uint8 - } - msg := fmt.Sprintf(format, v...) - strptr := unsafe.Pointer((*reflect.StringHeader)(unsafe.Pointer(&msg)).Data) - len := (*reflect.StringHeader)(unsafe.Pointer(&msg)).Len - cmsg := __cmsg{__msg_length: uint32(len), __msg: uintptr(strptr)} - cmd := uint32(0) - runtime.EnterSyscall() - rc, err2, err1 := CallLeFuncWithErr(GetZosLibVec()+SYS_____CONSOLE_A<<4, uintptr(unsafe.Pointer(&cmsg)), 0, uintptr(unsafe.Pointer(&cmd))) - runtime.ExitSyscall() - if rc != 0 { - return 0, fmt.Errorf("%s (errno2=0x%x)\n", err1.Error(), err2) - } - return 0, nil -} -func ZosStringToEbcdicBytes(str string, nullterm bool) (ebcdicBytes []byte) { - if nullterm { - ebcdicBytes = []byte(str + "\x00") - } else { - ebcdicBytes = []byte(str) - } - A2e(ebcdicBytes) - return -} -func ZosEbcdicBytesToString(b []byte, trimRight bool) (str string) { - res := make([]byte, len(b)) - copy(res, b) - E2a(res) - if trimRight { - str = string(bytes.TrimRight(res, " \x00")) - } else { - str = string(res) - } - return -} - -func fdToPath(dirfd int) (path string, err error) { - var buffer [1024]byte - // w_ctrl() - ret := runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS_W_IOCTL<<4, - []uintptr{uintptr(dirfd), 17, 1024, uintptr(unsafe.Pointer(&buffer[0]))}) - if ret == 0 { - zb := bytes.IndexByte(buffer[:], 0) - if zb == -1 { - zb = len(buffer) - } - // __e2a_l() - runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___E2A_L<<4, - []uintptr{uintptr(unsafe.Pointer(&buffer[0])), uintptr(zb)}) - return string(buffer[:zb]), nil - } - // __errno() - errno := int(*(*int32)(unsafe.Pointer(runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___ERRNO<<4, - []uintptr{})))) - // __errno2() - errno2 := int(runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___ERRNO2<<4, - []uintptr{})) - // strerror_r() - ret = runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS_STRERROR_R<<4, - []uintptr{uintptr(errno), uintptr(unsafe.Pointer(&buffer[0])), 1024}) - if ret == 0 { - zb := bytes.IndexByte(buffer[:], 0) - if zb == -1 { - zb = len(buffer) - } - return "", fmt.Errorf("%s (errno2=0x%x)", buffer[:zb], errno2) - } else { - return "", fmt.Errorf("fdToPath errno %d (errno2=0x%x)", errno, errno2) - } +func ReadDirent(fd int, buf []byte) (n int, err error) { + var base = (*uintptr)(unsafe.Pointer(new(uint64))) + return Getdirentries(fd, buf, base) } -func impl_Mkfifoat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MKFIFOAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return +func direntIno(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(Dirent{}.Ino), unsafe.Sizeof(Dirent{}.Ino)) } -//go:nosplit -func get_MkfifoatAddr() *(func(dirfd int, path string, mode uint32) (err error)) - -var Mkfifoat = enter_Mkfifoat - -func enter_Mkfifoat(dirfd int, path string, mode uint32) (err error) { - funcref := get_MkfifoatAddr() - if funcptrtest(GetZosLibVec()+SYS___MKFIFOAT_A<<4, "") == 0 { - *funcref = impl_Mkfifoat - } else { - *funcref = legacy_Mkfifoat - } - return (*funcref)(dirfd, path, mode) +func direntReclen(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen)) } -func legacy_Mkfifoat(dirfd int, path string, mode uint32) 
(err error) { - dirname, err := ZosFdToPath(dirfd) - if err != nil { - return err +func direntNamlen(buf []byte) (uint64, bool) { + reclen, ok := direntReclen(buf) + if !ok { + return 0, false } - return Mkfifo(dirname+"/"+path, mode) + return reclen - uint64(unsafe.Offsetof(Dirent{}.Name)), true } - -//sys Posix_openpt(oflag int) (fd int, err error) = SYS_POSIX_OPENPT -//sys Grantpt(fildes int) (rc int, err error) = SYS_GRANTPT -//sys Unlockpt(fildes int) (rc int, err error) = SYS_UNLOCKPT diff --git a/vendor/golang.org/x/sys/unix/sysvshm_linux.go b/vendor/golang.org/x/sys/unix/sysvshm_linux.go index 4fcd38de..2c3a4437 100644 --- a/vendor/golang.org/x/sys/unix/sysvshm_linux.go +++ b/vendor/golang.org/x/sys/unix/sysvshm_linux.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build linux +// +build linux package unix diff --git a/vendor/golang.org/x/sys/unix/sysvshm_unix.go b/vendor/golang.org/x/sys/unix/sysvshm_unix.go index 672d6b0a..5bb41d17 100644 --- a/vendor/golang.org/x/sys/unix/sysvshm_unix.go +++ b/vendor/golang.org/x/sys/unix/sysvshm_unix.go @@ -2,7 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build (darwin && !ios) || linux || zos +//go:build (darwin && !ios) || linux +// +build darwin,!ios linux package unix diff --git a/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go b/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go index 8b7977a2..71bddefd 100644 --- a/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go +++ b/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go @@ -2,7 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build (darwin && !ios) || zos +//go:build darwin && !ios +// +build darwin,!ios package unix diff --git a/vendor/golang.org/x/sys/unix/timestruct.go b/vendor/golang.org/x/sys/unix/timestruct.go index 7997b190..616b1b28 100644 --- a/vendor/golang.org/x/sys/unix/timestruct.go +++ b/vendor/golang.org/x/sys/unix/timestruct.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package unix diff --git a/vendor/golang.org/x/sys/unix/unveil_openbsd.go b/vendor/golang.org/x/sys/unix/unveil_openbsd.go index cb7e598c..168d5ae7 100644 --- a/vendor/golang.org/x/sys/unix/unveil_openbsd.go +++ b/vendor/golang.org/x/sys/unix/unveil_openbsd.go @@ -4,48 +4,39 @@ package unix -import "fmt" +import ( + "syscall" + "unsafe" +) // Unveil implements the unveil syscall. // For more information see unveil(2). // Note that the special case of blocking further // unveil calls is handled by UnveilBlock. func Unveil(path string, flags string) error { - if err := supportsUnveil(); err != nil { - return err - } - pathPtr, err := BytePtrFromString(path) + pathPtr, err := syscall.BytePtrFromString(path) if err != nil { return err } - flagsPtr, err := BytePtrFromString(flags) + flagsPtr, err := syscall.BytePtrFromString(flags) if err != nil { return err } - return unveil(pathPtr, flagsPtr) + _, _, e := syscall.Syscall(SYS_UNVEIL, uintptr(unsafe.Pointer(pathPtr)), uintptr(unsafe.Pointer(flagsPtr)), 0) + if e != 0 { + return e + } + return nil } // UnveilBlock blocks future unveil calls. // For more information see unveil(2). func UnveilBlock() error { - if err := supportsUnveil(); err != nil { - return err + // Both pointers must be nil. 
+ var pathUnsafe, flagsUnsafe unsafe.Pointer + _, _, e := syscall.Syscall(SYS_UNVEIL, uintptr(pathUnsafe), uintptr(flagsUnsafe), 0) + if e != 0 { + return e } - return unveil(nil, nil) -} - -// supportsUnveil checks for availability of the unveil(2) system call based -// on the running OpenBSD version. -func supportsUnveil() error { - maj, min, err := majmin() - if err != nil { - return err - } - - // unveil is not available before 6.4 - if maj < 6 || (maj == 6 && min <= 3) { - return fmt.Errorf("cannot call Unveil on OpenBSD %d.%d", maj, min) - } - return nil } diff --git a/vendor/golang.org/x/sys/unix/xattr_bsd.go b/vendor/golang.org/x/sys/unix/xattr_bsd.go index e1687939..f5f8e9f3 100644 --- a/vendor/golang.org/x/sys/unix/xattr_bsd.go +++ b/vendor/golang.org/x/sys/unix/xattr_bsd.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build freebsd || netbsd +// +build freebsd netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go index 2fb219d7..ca9799b7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc && aix +// +build ppc,aix // Created by cgo -godefs - DO NOT EDIT // cgo -godefs -- -maix32 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go index b0e6f5c8..200c8c26 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && aix +// +build ppc64,aix // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -maix64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go index e40fa852..14300762 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && darwin +// +build amd64,darwin // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go index bb02aa6c..ab044a74 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && darwin +// +build arm64,darwin // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go index c0e0f869..17bba0e4 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && dragonfly +// +build amd64,dragonfly // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go index 6c692390..f8c2c513 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && freebsd +// +build 386,freebsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m32 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go index dd9163f8..96310c3b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && freebsd +// +build amd64,freebsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go index 493a2a79..777b69de 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && freebsd +// +build arm,freebsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go index 8b437b30..c557ac2d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && freebsd +// +build arm64,freebsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go index 67c02dd5..341b4d96 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && freebsd +// +build riscv64,freebsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 93a38a97..f9c7f479 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -1,6 +1,7 @@ // Code generated by mkmerge; DO NOT EDIT. 
//go:build linux +// +build linux package unix @@ -480,18 +481,14 @@ const ( BPF_FROM_BE = 0x8 BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 - BPF_F_AFTER = 0x10 BPF_F_ALLOW_MULTI = 0x2 BPF_F_ALLOW_OVERRIDE = 0x1 BPF_F_ANY_ALIGNMENT = 0x2 - BPF_F_BEFORE = 0x8 - BPF_F_ID = 0x20 - BPF_F_NETFILTER_IP_DEFRAG = 0x1 + BPF_F_KPROBE_MULTI_RETURN = 0x1 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_REPLACE = 0x4 BPF_F_SLEEPABLE = 0x10 BPF_F_STRICT_ALIGNMENT = 0x1 - BPF_F_TEST_REG_INVARIANTS = 0x80 BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TEST_RUN_ON_CPU = 0x1 BPF_F_TEST_STATE_FREQ = 0x8 @@ -524,7 +521,6 @@ const ( BPF_MAJOR_VERSION = 0x1 BPF_MAXINSNS = 0x1000 BPF_MEM = 0x60 - BPF_MEMSX = 0x80 BPF_MEMWORDS = 0x10 BPF_MINOR_VERSION = 0x1 BPF_MISC = 0x7 @@ -780,8 +776,6 @@ const ( DEVLINK_GENL_MCGRP_CONFIG_NAME = "config" DEVLINK_GENL_NAME = "devlink" DEVLINK_GENL_VERSION = 0x1 - DEVLINK_PORT_FN_CAP_IPSEC_CRYPTO = 0x4 - DEVLINK_PORT_FN_CAP_IPSEC_PACKET = 0x8 DEVLINK_PORT_FN_CAP_MIGRATABLE = 0x2 DEVLINK_PORT_FN_CAP_ROCE = 0x1 DEVLINK_SB_THRESHOLD_TO_ALPHA_MAX = 0x14 @@ -1698,14 +1692,12 @@ const ( KEXEC_ARCH_S390 = 0x160000 KEXEC_ARCH_SH = 0x2a0000 KEXEC_ARCH_X86_64 = 0x3e0000 - KEXEC_FILE_DEBUG = 0x8 KEXEC_FILE_NO_INITRAMFS = 0x4 KEXEC_FILE_ON_CRASH = 0x2 KEXEC_FILE_UNLOAD = 0x1 KEXEC_ON_CRASH = 0x1 KEXEC_PRESERVE_CONTEXT = 0x2 KEXEC_SEGMENT_MAX = 0x10 - KEXEC_UPDATE_ELFCOREHDR = 0x4 KEYCTL_ASSUME_AUTHORITY = 0x10 KEYCTL_CAPABILITIES = 0x1f KEYCTL_CAPS0_BIG_KEY = 0x10 @@ -1787,8 +1779,6 @@ const ( LANDLOCK_ACCESS_FS_REMOVE_FILE = 0x20 LANDLOCK_ACCESS_FS_TRUNCATE = 0x4000 LANDLOCK_ACCESS_FS_WRITE_FILE = 0x2 - LANDLOCK_ACCESS_NET_BIND_TCP = 0x1 - LANDLOCK_ACCESS_NET_CONNECT_TCP = 0x2 LANDLOCK_CREATE_RULESET_VERSION = 0x1 LINUX_REBOOT_CMD_CAD_OFF = 0x0 LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef @@ -1805,7 +1795,6 @@ const ( LOCK_SH = 0x1 LOCK_UN = 0x8 LOOP_CLR_FD = 0x4c01 - LOOP_CONFIGURE = 0x4c0a LOOP_CTL_ADD = 0x4c80 LOOP_CTL_GET_FREE = 0x4c82 LOOP_CTL_REMOVE = 0x4c81 @@ -1900,7 +1889,6 @@ const ( MNT_DETACH = 0x2 MNT_EXPIRE = 0x4 MNT_FORCE = 0x1 - MNT_ID_REQ_SIZE_VER0 = 0x18 MODULE_INIT_COMPRESSED_FILE = 0x4 MODULE_INIT_IGNORE_MODVERSIONS = 0x1 MODULE_INIT_IGNORE_VERMAGIC = 0x2 @@ -2132,60 +2120,6 @@ const ( NFNL_SUBSYS_QUEUE = 0x3 NFNL_SUBSYS_ULOG = 0x4 NFS_SUPER_MAGIC = 0x6969 - NFT_CHAIN_FLAGS = 0x7 - NFT_CHAIN_MAXNAMELEN = 0x100 - NFT_CT_MAX = 0x17 - NFT_DATA_RESERVED_MASK = 0xffffff00 - NFT_DATA_VALUE_MAXLEN = 0x40 - NFT_EXTHDR_OP_MAX = 0x4 - NFT_FIB_RESULT_MAX = 0x3 - NFT_INNER_MASK = 0xf - NFT_LOGLEVEL_MAX = 0x8 - NFT_NAME_MAXLEN = 0x100 - NFT_NG_MAX = 0x1 - NFT_OBJECT_CONNLIMIT = 0x5 - NFT_OBJECT_COUNTER = 0x1 - NFT_OBJECT_CT_EXPECT = 0x9 - NFT_OBJECT_CT_HELPER = 0x3 - NFT_OBJECT_CT_TIMEOUT = 0x7 - NFT_OBJECT_LIMIT = 0x4 - NFT_OBJECT_MAX = 0xa - NFT_OBJECT_QUOTA = 0x2 - NFT_OBJECT_SECMARK = 0x8 - NFT_OBJECT_SYNPROXY = 0xa - NFT_OBJECT_TUNNEL = 0x6 - NFT_OBJECT_UNSPEC = 0x0 - NFT_OBJ_MAXNAMELEN = 0x100 - NFT_OSF_MAXGENRELEN = 0x10 - NFT_QUEUE_FLAG_BYPASS = 0x1 - NFT_QUEUE_FLAG_CPU_FANOUT = 0x2 - NFT_QUEUE_FLAG_MASK = 0x3 - NFT_REG32_COUNT = 0x10 - NFT_REG32_SIZE = 0x4 - NFT_REG_MAX = 0x4 - NFT_REG_SIZE = 0x10 - NFT_REJECT_ICMPX_MAX = 0x3 - NFT_RT_MAX = 0x4 - NFT_SECMARK_CTX_MAXLEN = 0x100 - NFT_SET_MAXNAMELEN = 0x100 - NFT_SOCKET_MAX = 0x3 - NFT_TABLE_F_MASK = 0x3 - NFT_TABLE_MAXNAMELEN = 0x100 - NFT_TRACETYPE_MAX = 0x3 - NFT_TUNNEL_F_MASK = 0x7 - NFT_TUNNEL_MAX = 0x1 - NFT_TUNNEL_MODE_MAX = 0x2 - NFT_USERDATA_MAXLEN = 0x100 - NFT_XFRM_KEY_MAX = 0x6 - NF_NAT_RANGE_MAP_IPS = 0x1 - NF_NAT_RANGE_MASK = 0x7f - 
NF_NAT_RANGE_NETMAP = 0x40 - NF_NAT_RANGE_PERSISTENT = 0x8 - NF_NAT_RANGE_PROTO_OFFSET = 0x20 - NF_NAT_RANGE_PROTO_RANDOM = 0x4 - NF_NAT_RANGE_PROTO_RANDOM_ALL = 0x14 - NF_NAT_RANGE_PROTO_RANDOM_FULLY = 0x10 - NF_NAT_RANGE_PROTO_SPECIFIED = 0x2 NILFS_SUPER_MAGIC = 0x3434 NL0 = 0x0 NL1 = 0x100 @@ -2305,7 +2239,6 @@ const ( PERF_AUX_FLAG_PARTIAL = 0x4 PERF_AUX_FLAG_PMU_FORMAT_TYPE_MASK = 0xff00 PERF_AUX_FLAG_TRUNCATED = 0x1 - PERF_BRANCH_ENTRY_INFO_BITS_MAX = 0x21 PERF_BR_ARM64_DEBUG_DATA = 0x7 PERF_BR_ARM64_DEBUG_EXIT = 0x5 PERF_BR_ARM64_DEBUG_HALT = 0x4 @@ -2342,7 +2275,6 @@ const ( PERF_MEM_LVLNUM_PMEM = 0xe PERF_MEM_LVLNUM_RAM = 0xd PERF_MEM_LVLNUM_SHIFT = 0x21 - PERF_MEM_LVLNUM_UNC = 0x8 PERF_MEM_LVL_HIT = 0x2 PERF_MEM_LVL_IO = 0x1000 PERF_MEM_LVL_L1 = 0x8 @@ -2471,7 +2403,6 @@ const ( PR_MCE_KILL_GET = 0x22 PR_MCE_KILL_LATE = 0x0 PR_MCE_KILL_SET = 0x1 - PR_MDWE_NO_INHERIT = 0x2 PR_MDWE_REFUSE_EXEC_GAIN = 0x1 PR_MPX_DISABLE_MANAGEMENT = 0x2c PR_MPX_ENABLE_MANAGEMENT = 0x2b @@ -2676,9 +2607,8 @@ const ( RTAX_FEATURES = 0xc RTAX_FEATURE_ALLFRAG = 0x8 RTAX_FEATURE_ECN = 0x1 - RTAX_FEATURE_MASK = 0x1f + RTAX_FEATURE_MASK = 0xf RTAX_FEATURE_SACK = 0x2 - RTAX_FEATURE_TCP_USEC_TS = 0x10 RTAX_FEATURE_TIMESTAMP = 0x4 RTAX_HOPLIMIT = 0xa RTAX_INITCWND = 0xb @@ -2921,38 +2851,9 @@ const ( SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x1d SC_LOG_FLUSH = 0x100000 - SECCOMP_ADDFD_FLAG_SEND = 0x2 - SECCOMP_ADDFD_FLAG_SETFD = 0x1 - SECCOMP_FILTER_FLAG_LOG = 0x2 - SECCOMP_FILTER_FLAG_NEW_LISTENER = 0x8 - SECCOMP_FILTER_FLAG_SPEC_ALLOW = 0x4 - SECCOMP_FILTER_FLAG_TSYNC = 0x1 - SECCOMP_FILTER_FLAG_TSYNC_ESRCH = 0x10 - SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV = 0x20 - SECCOMP_GET_ACTION_AVAIL = 0x2 - SECCOMP_GET_NOTIF_SIZES = 0x3 - SECCOMP_IOCTL_NOTIF_RECV = 0xc0502100 - SECCOMP_IOCTL_NOTIF_SEND = 0xc0182101 - SECCOMP_IOC_MAGIC = '!' 
SECCOMP_MODE_DISABLED = 0x0 SECCOMP_MODE_FILTER = 0x2 SECCOMP_MODE_STRICT = 0x1 - SECCOMP_RET_ACTION = 0x7fff0000 - SECCOMP_RET_ACTION_FULL = 0xffff0000 - SECCOMP_RET_ALLOW = 0x7fff0000 - SECCOMP_RET_DATA = 0xffff - SECCOMP_RET_ERRNO = 0x50000 - SECCOMP_RET_KILL = 0x0 - SECCOMP_RET_KILL_PROCESS = 0x80000000 - SECCOMP_RET_KILL_THREAD = 0x0 - SECCOMP_RET_LOG = 0x7ffc0000 - SECCOMP_RET_TRACE = 0x7ff00000 - SECCOMP_RET_TRAP = 0x30000 - SECCOMP_RET_USER_NOTIF = 0x7fc00000 - SECCOMP_SET_MODE_FILTER = 0x1 - SECCOMP_SET_MODE_STRICT = 0x0 - SECCOMP_USER_NOTIF_FD_SYNC_WAKE_UP = 0x1 - SECCOMP_USER_NOTIF_FLAG_CONTINUE = 0x1 SECRETMEM_MAGIC = 0x5345434d SECURITYFS_MAGIC = 0x73636673 SEEK_CUR = 0x1 @@ -3112,7 +3013,6 @@ const ( SOL_TIPC = 0x10f SOL_TLS = 0x11a SOL_UDP = 0x11 - SOL_VSOCK = 0x11f SOL_X25 = 0x106 SOL_XDP = 0x11b SOMAXCONN = 0x1000 @@ -3172,7 +3072,6 @@ const ( STATX_GID = 0x10 STATX_INO = 0x100 STATX_MNT_ID = 0x1000 - STATX_MNT_ID_UNIQUE = 0x4000 STATX_MODE = 0x2 STATX_MTIME = 0x40 STATX_NLINK = 0x4 @@ -3562,24 +3461,18 @@ const ( XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 - XDP_PKT_CONTD = 0x1 XDP_RING_NEED_WAKEUP = 0x1 XDP_RX_RING = 0x2 XDP_SHARED_UMEM = 0x1 XDP_STATISTICS = 0x7 - XDP_TXMD_FLAGS_CHECKSUM = 0x2 - XDP_TXMD_FLAGS_TIMESTAMP = 0x1 - XDP_TX_METADATA = 0x2 XDP_TX_RING = 0x3 XDP_UMEM_COMPLETION_RING = 0x6 XDP_UMEM_FILL_RING = 0x5 XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000 XDP_UMEM_PGOFF_FILL_RING = 0x100000000 XDP_UMEM_REG = 0x4 - XDP_UMEM_TX_SW_CSUM = 0x2 XDP_UMEM_UNALIGNED_CHUNK_FLAG = 0x1 XDP_USE_NEED_WAKEUP = 0x8 - XDP_USE_SG = 0x10 XDP_ZEROCOPY = 0x4 XENFS_SUPER_MAGIC = 0xabba1974 XFS_SUPER_MAGIC = 0x58465342 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 42ff8c3c..30aee00a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && linux +// +build 386,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/386/include -m32 _const.go @@ -281,9 +282,6 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 - SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 - SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 - SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index dca43600..8ebfa512 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && linux +// +build amd64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -Wall -Werror -static -I/tmp/amd64/include -m64 _const.go @@ -282,9 +283,6 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 - SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 - SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 - SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 5cca668a..271a21cd 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && linux +// +build arm,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/arm/include _const.go @@ -288,9 +289,6 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 - SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 - SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 - SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index d8cae6d1..910c330a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && linux +// +build arm64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/arm64/include -fsigned-char _const.go @@ -278,9 +279,6 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 - SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 - SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 - SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 28e39afd..a640798c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build loong64 && linux +// +build loong64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/loong64/include _const.go @@ -118,7 +119,6 @@ const ( IXOFF = 0x1000 IXON = 0x400 LASX_CTX_MAGIC = 0x41535801 - LBT_CTX_MAGIC = 0x42540001 LSX_CTX_MAGIC = 0x53580001 MAP_ANON = 0x20 MAP_ANONYMOUS = 0x20 @@ -275,9 +275,6 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 - SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 - SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 - SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index cd66e92c..0d5925d3 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips && linux +// +build mips,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -Wall -Werror -static -I/tmp/mips/include _const.go @@ -281,9 +282,6 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 - SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 - SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 - SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x80 SIOCATMARK = 0x40047307 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index c1595eba..d72a00e0 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64 && linux +// +build mips64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/mips64/include _const.go @@ -281,9 +282,6 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 - SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 - SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 - SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x80 SIOCATMARK = 0x40047307 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index ee9456b0..02ba129f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64le && linux +// +build mips64le,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/mips64le/include _const.go @@ -281,9 +282,6 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 - SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 - SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 - SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x80 SIOCATMARK = 0x40047307 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 8cfca81e..8daa6dd9 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mipsle && linux +// +build mipsle,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/mipsle/include _const.go @@ -281,9 +282,6 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 - SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 - SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 - SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x80 SIOCATMARK = 0x40047307 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 60b0deb3..63c8fa2f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc && linux +// +build ppc,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -Wall -Werror -static -I/tmp/ppc/include _const.go @@ -336,9 +337,6 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 - SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 - SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 - SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index f90aa728..930799ec 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && linux +// +build ppc64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/ppc64/include _const.go @@ -340,9 +341,6 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 - SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 - SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 - SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index ba9e0150..8605a7dd 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64le && linux +// +build ppc64le,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/ppc64le/include _const.go @@ -340,9 +341,6 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 - SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 - SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 - SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 07cdfd6e..95a016f1 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && linux +// +build riscv64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/riscv64/include _const.go @@ -227,9 +228,6 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff - PTRACE_GETFDPIC = 0x21 - PTRACE_GETFDPIC_EXEC = 0x0 - PTRACE_GETFDPIC_INTERP = 0x1 RLIMIT_AS = 0x9 RLIMIT_MEMLOCK = 0x8 RLIMIT_NOFILE = 0x7 @@ -272,9 +270,6 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 - SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 - SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 - SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 2f1dd214..1ae0108f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build s390x && linux +// +build s390x,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -Wall -Werror -static -I/tmp/s390x/include -fsigned-char _const.go @@ -344,9 +345,6 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 - SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 - SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 - SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index f40519d9..1bb7c633 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build sparc64 && linux +// +build sparc64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/sparc64/include _const.go @@ -335,9 +336,6 @@ const ( SCM_TIMESTAMPNS = 0x21 SCM_TXTIME = 0x3f SCM_WIFI_STATUS = 0x25 - SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 - SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 - SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x400000 SFD_NONBLOCK = 0x4000 SF_FP = 0x38 diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go index 130085df..72f7420d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && netbsd +// +build 386,netbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m32 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go index 84769a1a..8d4eb0c0 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && netbsd +// +build amd64,netbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go index 602ded00..9eef9749 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && netbsd +// +build arm,netbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -marm _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go index efc0406e..3b62ba19 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && netbsd +// +build arm64,netbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go index 5a6500f8..af20e474 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build 386 && openbsd +// +build 386,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m32 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go index a5aeeb97..6015fcb2 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && openbsd +// +build amd64,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go index 0e9748a7..8d44955e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && openbsd +// +build arm,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go index 4f4449ab..ae16fe75 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && openbsd +// +build arm64,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go index 76a363f0..03d90fe3 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64 && openbsd +// +build mips64,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_ppc64.go index 43ca0cdf..8e2c51b1 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_ppc64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && openbsd +// +build ppc64,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_riscv64.go index b1b8bb20..13d40303 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_riscv64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && openbsd +// +build riscv64,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go index d2ddd317..1afee6a0 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build amd64 && solaris +// +build amd64,solaris // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go index da08b2ab..fc7d0506 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build zos && s390x +// +build zos,s390x // Hand edited based on zerrors_linux_s390x.go // TODO: auto-generate. @@ -10,99 +11,41 @@ package unix const ( - BRKINT = 0x0001 - CLOCAL = 0x1 - CLOCK_MONOTONIC = 0x1 - CLOCK_PROCESS_CPUTIME_ID = 0x2 - CLOCK_REALTIME = 0x0 - CLOCK_THREAD_CPUTIME_ID = 0x3 - CLONE_NEWIPC = 0x08000000 - CLONE_NEWNET = 0x40000000 - CLONE_NEWNS = 0x00020000 - CLONE_NEWPID = 0x20000000 - CLONE_NEWUTS = 0x04000000 - CLONE_PARENT = 0x00008000 - CS8 = 0x0030 - CSIZE = 0x0030 - ECHO = 0x00000008 - ECHONL = 0x00000001 - EFD_SEMAPHORE = 0x00002000 - EFD_CLOEXEC = 0x00001000 - EFD_NONBLOCK = 0x00000004 - EPOLL_CLOEXEC = 0x00001000 - EPOLL_CTL_ADD = 0 - EPOLL_CTL_MOD = 1 - EPOLL_CTL_DEL = 2 - EPOLLRDNORM = 0x0001 - EPOLLRDBAND = 0x0002 - EPOLLIN = 0x0003 - EPOLLOUT = 0x0004 - EPOLLWRBAND = 0x0008 - EPOLLPRI = 0x0010 - EPOLLERR = 0x0020 - EPOLLHUP = 0x0040 - EPOLLEXCLUSIVE = 0x20000000 - EPOLLONESHOT = 0x40000000 - FD_CLOEXEC = 0x01 - FD_CLOFORK = 0x02 - FD_SETSIZE = 0x800 - FNDELAY = 0x04 - F_CLOSFD = 9 - F_CONTROL_CVT = 13 - F_DUPFD = 0 - F_DUPFD2 = 8 - F_GETFD = 1 - F_GETFL = 259 - F_GETLK = 5 - F_GETOWN = 10 - F_OK = 0x0 - F_RDLCK = 1 - F_SETFD = 2 - F_SETFL = 4 - F_SETLK = 6 - F_SETLKW = 7 - F_SETOWN = 11 - F_SETTAG = 12 - F_UNLCK = 3 - F_WRLCK = 2 - FSTYPE_ZFS = 0xe9 //"Z" - FSTYPE_HFS = 0xc8 //"H" - FSTYPE_NFS = 0xd5 //"N" - FSTYPE_TFS = 0xe3 //"T" - FSTYPE_AUTOMOUNT = 0xc1 //"A" - GRND_NONBLOCK = 1 - GRND_RANDOM = 2 - HUPCL = 0x0100 // Hang up on last close - IN_CLOEXEC = 0x00001000 - IN_NONBLOCK = 0x00000004 - IN_ACCESS = 0x00000001 - IN_MODIFY = 0x00000002 - IN_ATTRIB = 0x00000004 - IN_CLOSE_WRITE = 0x00000008 - IN_CLOSE_NOWRITE = 0x00000010 - IN_OPEN = 0x00000020 - IN_MOVED_FROM = 0x00000040 - IN_MOVED_TO = 0x00000080 - IN_CREATE = 0x00000100 - IN_DELETE = 0x00000200 - IN_DELETE_SELF = 0x00000400 - IN_MOVE_SELF = 0x00000800 - IN_UNMOUNT = 0x00002000 - IN_Q_OVERFLOW = 0x00004000 - IN_IGNORED = 0x00008000 - IN_CLOSE = (IN_CLOSE_WRITE | IN_CLOSE_NOWRITE) - IN_MOVE = (IN_MOVED_FROM | IN_MOVED_TO) - IN_ALL_EVENTS = (IN_ACCESS | IN_MODIFY | IN_ATTRIB | - IN_CLOSE | IN_OPEN | IN_MOVE | - IN_CREATE | IN_DELETE | IN_DELETE_SELF | - IN_MOVE_SELF) - IN_ONLYDIR = 0x01000000 - IN_DONT_FOLLOW = 0x02000000 - IN_EXCL_UNLINK = 0x04000000 - IN_MASK_CREATE = 0x10000000 - IN_MASK_ADD = 0x20000000 - IN_ISDIR = 0x40000000 - IN_ONESHOT = 0x80000000 + BRKINT = 0x0001 + CLOCK_MONOTONIC = 0x1 + CLOCK_PROCESS_CPUTIME_ID = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_THREAD_CPUTIME_ID = 0x3 + CS8 = 0x0030 + CSIZE = 0x0030 + ECHO = 0x00000008 + ECHONL = 0x00000001 + FD_CLOEXEC = 0x01 + FD_CLOFORK = 0x02 + FNDELAY = 0x04 + F_CLOSFD = 9 + F_CONTROL_CVT = 13 + F_DUPFD = 0 + F_DUPFD2 = 8 + F_GETFD = 1 + F_GETFL = 259 + F_GETLK = 5 + F_GETOWN = 10 + F_OK = 0x0 + F_RDLCK = 1 + F_SETFD = 2 + F_SETFL = 4 + F_SETLK = 6 + F_SETLKW = 7 + F_SETOWN = 11 + F_SETTAG = 12 + F_UNLCK = 3 + F_WRLCK = 2 + FSTYPE_ZFS = 0xe9 //"Z" + FSTYPE_HFS = 0xc8 //"H" + FSTYPE_NFS = 0xd5 //"N" + FSTYPE_TFS = 0xe3 //"T" + FSTYPE_AUTOMOUNT = 0xc1 //"A" IP6F_MORE_FRAG = 0x0001 
IP6F_OFF_MASK = 0xfff8 IP6F_RESERVED_MASK = 0x0006 @@ -210,18 +153,10 @@ const ( IP_PKTINFO = 101 IP_RECVPKTINFO = 102 IP_TOS = 2 - IP_TTL = 14 + IP_TTL = 3 IP_UNBLOCK_SOURCE = 11 - ICMP6_FILTER = 1 - MCAST_INCLUDE = 0 - MCAST_EXCLUDE = 1 - MCAST_JOIN_GROUP = 40 - MCAST_LEAVE_GROUP = 41 - MCAST_JOIN_SOURCE_GROUP = 42 - MCAST_LEAVE_SOURCE_GROUP = 43 - MCAST_BLOCK_SOURCE = 44 - MCAST_UNBLOCK_SOURCE = 46 ICANON = 0x0010 + ICMP6_FILTER = 0x26 ICRNL = 0x0002 IEXTEN = 0x0020 IGNBRK = 0x0004 @@ -231,10 +166,10 @@ const ( ISTRIP = 0x0080 IXON = 0x0200 IXOFF = 0x0100 - LOCK_SH = 0x1 - LOCK_EX = 0x2 - LOCK_NB = 0x4 - LOCK_UN = 0x8 + LOCK_SH = 0x1 // Not exist on zOS + LOCK_EX = 0x2 // Not exist on zOS + LOCK_NB = 0x4 // Not exist on zOS + LOCK_UN = 0x8 // Not exist on zOS POLLIN = 0x0003 POLLOUT = 0x0004 POLLPRI = 0x0010 @@ -248,29 +183,15 @@ const ( MAP_PRIVATE = 0x1 // changes are private MAP_SHARED = 0x2 // changes are shared MAP_FIXED = 0x4 // place exactly - __MAP_MEGA = 0x8 - __MAP_64 = 0x10 - MAP_ANON = 0x20 - MAP_ANONYMOUS = 0x20 + MCAST_JOIN_GROUP = 40 + MCAST_LEAVE_GROUP = 41 + MCAST_JOIN_SOURCE_GROUP = 42 + MCAST_LEAVE_SOURCE_GROUP = 43 + MCAST_BLOCK_SOURCE = 44 + MCAST_UNBLOCK_SOURCE = 45 MS_SYNC = 0x1 // msync - synchronous writes MS_ASYNC = 0x2 // asynchronous writes MS_INVALIDATE = 0x4 // invalidate mappings - MS_BIND = 0x00001000 - MS_MOVE = 0x00002000 - MS_NOSUID = 0x00000002 - MS_PRIVATE = 0x00040000 - MS_REC = 0x00004000 - MS_REMOUNT = 0x00008000 - MS_RDONLY = 0x00000001 - MS_UNBINDABLE = 0x00020000 - MNT_DETACH = 0x00000004 - ZOSDSFS_SUPER_MAGIC = 0x44534653 // zOS DSFS - NFS_SUPER_MAGIC = 0x6969 // NFS - NSFS_MAGIC = 0x6e736673 // PROCNS - PROC_SUPER_MAGIC = 0x9fa0 // proc FS - ZOSTFS_SUPER_MAGIC = 0x544653 // zOS TFS - ZOSUFS_SUPER_MAGIC = 0x554653 // zOS UFS - ZOSZFS_SUPER_MAGIC = 0x5A4653 // zOS ZFS MTM_RDONLY = 0x80000000 MTM_RDWR = 0x40000000 MTM_UMOUNT = 0x10000000 @@ -285,20 +206,13 @@ const ( MTM_REMOUNT = 0x00000100 MTM_NOSECURITY = 0x00000080 NFDBITS = 0x20 - ONLRET = 0x0020 // NL performs CR function O_ACCMODE = 0x03 O_APPEND = 0x08 O_ASYNCSIG = 0x0200 O_CREAT = 0x80 - O_DIRECT = 0x00002000 - O_NOFOLLOW = 0x00004000 - O_DIRECTORY = 0x00008000 - O_PATH = 0x00080000 - O_CLOEXEC = 0x00001000 O_EXCL = 0x40 O_GETFL = 0x0F O_LARGEFILE = 0x0400 - O_NDELAY = 0x4 O_NONBLOCK = 0x04 O_RDONLY = 0x02 O_RDWR = 0x03 @@ -335,7 +249,6 @@ const ( AF_IUCV = 17 AF_LAT = 14 AF_LINK = 18 - AF_LOCAL = AF_UNIX // AF_LOCAL is an alias for AF_UNIX AF_MAX = 30 AF_NBS = 7 AF_NDD = 23 @@ -373,33 +286,15 @@ const ( RLIMIT_AS = 5 RLIMIT_NOFILE = 6 RLIMIT_MEMLIMIT = 7 - RLIMIT_MEMLOCK = 0x8 RLIM_INFINITY = 2147483647 - SCHED_FIFO = 0x2 - SCM_CREDENTIALS = 0x2 SCM_RIGHTS = 0x01 SF_CLOSE = 0x00000002 SF_REUSE = 0x00000001 - SHM_RND = 0x2 - SHM_RDONLY = 0x1 - SHMLBA = 0x1000 - IPC_STAT = 0x3 - IPC_SET = 0x2 - IPC_RMID = 0x1 - IPC_PRIVATE = 0x0 - IPC_CREAT = 0x1000000 - __IPC_MEGA = 0x4000000 - __IPC_SHAREAS = 0x20000000 - __IPC_BELOWBAR = 0x10000000 - IPC_EXCL = 0x2000000 - __IPC_GIGA = 0x8000000 SHUT_RD = 0 SHUT_RDWR = 2 SHUT_WR = 1 - SOCK_CLOEXEC = 0x00001000 SOCK_CONN_DGRAM = 6 SOCK_DGRAM = 2 - SOCK_NONBLOCK = 0x800 SOCK_RAW = 3 SOCK_RDM = 4 SOCK_SEQPACKET = 5 @@ -484,6 +379,8 @@ const ( S_IFMST = 0x00FF0000 TCP_KEEPALIVE = 0x8 TCP_NODELAY = 0x1 + TCP_INFO = 0xb + TCP_USER_TIMEOUT = 0x1 TIOCGWINSZ = 0x4008a368 TIOCSWINSZ = 0x8008a367 TIOCSBRK = 0x2000a77b @@ -531,10 +428,7 @@ const ( VSUSP = 9 VTIME = 10 WCONTINUED = 0x4 - WEXITED = 0x8 WNOHANG = 0x1 - WNOWAIT = 0x20 - WSTOPPED = 0x10 WUNTRACED = 
0x2 _BPX_SWAP = 1 _BPX_NONSWAP = 2 @@ -559,28 +453,8 @@ const ( MADV_FREE = 15 // for Linux compatibility -- no zos semantics MADV_WIPEONFORK = 16 // for Linux compatibility -- no zos semantics MADV_KEEPONFORK = 17 // for Linux compatibility -- no zos semantics - AT_SYMLINK_FOLLOW = 0x400 - AT_SYMLINK_NOFOLLOW = 0x100 - XATTR_CREATE = 0x1 - XATTR_REPLACE = 0x2 - P_PID = 0 - P_PGID = 1 - P_ALL = 2 - PR_SET_NAME = 15 - PR_GET_NAME = 16 - PR_SET_NO_NEW_PRIVS = 38 - PR_GET_NO_NEW_PRIVS = 39 - PR_SET_DUMPABLE = 4 - PR_GET_DUMPABLE = 3 - PR_SET_PDEATHSIG = 1 - PR_GET_PDEATHSIG = 2 - PR_SET_CHILD_SUBREAPER = 36 - PR_GET_CHILD_SUBREAPER = 37 - AT_FDCWD = -100 - AT_EACCESS = 0x200 - AT_EMPTY_PATH = 0x1000 - AT_REMOVEDIR = 0x200 - RENAME_NOREPLACE = 1 << 0 + AT_SYMLINK_NOFOLLOW = 1 // for Unix compatibility -- no zos semantics + AT_FDCWD = 2 // for Unix compatibility -- no zos semantics ) const ( @@ -603,7 +477,6 @@ const ( EMLINK = Errno(125) ENAMETOOLONG = Errno(126) ENFILE = Errno(127) - ENOATTR = Errno(265) ENODEV = Errno(128) ENOENT = Errno(129) ENOEXEC = Errno(130) @@ -828,7 +701,7 @@ var errorList = [...]struct { {145, "EDC5145I", "The parameter list is too long, or the message to receive was too large for the buffer."}, {146, "EDC5146I", "Too many levels of symbolic links."}, {147, "EDC5147I", "Illegal byte sequence."}, - {148, "EDC5148I", "The named attribute or data not available."}, + {148, "", ""}, {149, "EDC5149I", "Value Overflow Error."}, {150, "EDC5150I", "UNIX System Services is not active."}, {151, "EDC5151I", "Dynamic allocation error."}, @@ -871,7 +744,6 @@ var errorList = [...]struct { {259, "EDC5259I", "A CUN_RS_NO_CONVERSION error was issued by Unicode Services."}, {260, "EDC5260I", "A CUN_RS_TABLE_NOT_ALIGNED error was issued by Unicode Services."}, {262, "EDC5262I", "An iconv() function encountered an unexpected error while using Unicode Services."}, - {265, "EDC5265I", "The named attribute not available."}, {1000, "EDC8000I", "A bad socket-call constant was found in the IUCV header."}, {1001, "EDC8001I", "An error was found in the IUCV header."}, {1002, "EDC8002I", "A socket descriptor is out of range."}, diff --git a/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go b/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go index 586317c7..97f20ca2 100644 --- a/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go +++ b/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go @@ -1,6 +1,8 @@ // Code generated by linux/mkall.go generatePtracePair("arm", "arm64"). DO NOT EDIT. //go:build linux && (arm || arm64) +// +build linux +// +build arm arm64 package unix diff --git a/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go b/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go index d7c881be..0b5f7943 100644 --- a/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go +++ b/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go @@ -1,6 +1,8 @@ // Code generated by linux/mkall.go generatePtracePair("mips", "mips64"). DO NOT EDIT. //go:build linux && (mips || mips64) +// +build linux +// +build mips mips64 package unix diff --git a/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go b/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go index 2d2de5d2..2807f7e6 100644 --- a/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go +++ b/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go @@ -1,6 +1,8 @@ // Code generated by linux/mkall.go generatePtracePair("mipsle", "mips64le"). DO NOT EDIT. 
//go:build linux && (mipsle || mips64le) +// +build linux +// +build mipsle mips64le package unix diff --git a/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go b/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go index 5adc79fb..281ea64e 100644 --- a/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go +++ b/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go @@ -1,6 +1,8 @@ // Code generated by linux/mkall.go generatePtracePair("386", "amd64"). DO NOT EDIT. //go:build linux && (386 || amd64) +// +build linux +// +build 386 amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsymaddr_zos_s390x.s b/vendor/golang.org/x/sys/unix/zsymaddr_zos_s390x.s deleted file mode 100644 index b77ff5db..00000000 --- a/vendor/golang.org/x/sys/unix/zsymaddr_zos_s390x.s +++ /dev/null @@ -1,364 +0,0 @@ -// go run mksyscall_zos_s390x.go -o_sysnum zsysnum_zos_s390x.go -o_syscall zsyscall_zos_s390x.go -i_syscall syscall_zos_s390x.go -o_asm zsymaddr_zos_s390x.s -// Code generated by the command above; see README.md. DO NOT EDIT. - -//go:build zos && s390x -#include "textflag.h" - -// provide the address of function variable to be fixed up. - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -TEXT ·get_FlistxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·Flistxattr(SB), R8 - MOVD R8, ret+0(FP) - RET - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -TEXT ·get_FremovexattrAddr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·Fremovexattr(SB), R8 - MOVD R8, ret+0(FP) - RET - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -TEXT ·get_FgetxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·Fgetxattr(SB), R8 - MOVD R8, ret+0(FP) - RET - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -TEXT ·get_FsetxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·Fsetxattr(SB), R8 - MOVD R8, ret+0(FP) - RET - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -TEXT ·get_accept4Addr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·accept4(SB), R8 - MOVD R8, ret+0(FP) - RET - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -TEXT ·get_RemovexattrAddr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·Removexattr(SB), R8 - MOVD R8, ret+0(FP) - RET - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -TEXT ·get_Dup3Addr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·Dup3(SB), R8 - MOVD R8, ret+0(FP) - RET - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -TEXT ·get_DirfdAddr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·Dirfd(SB), R8 - MOVD R8, ret+0(FP) - RET - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -TEXT ·get_EpollCreateAddr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·EpollCreate(SB), R8 - MOVD R8, ret+0(FP) - RET - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -TEXT ·get_EpollCreate1Addr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·EpollCreate1(SB), R8 - MOVD R8, ret+0(FP) - RET - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -TEXT ·get_EpollCtlAddr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·EpollCtl(SB), R8 - MOVD R8, ret+0(FP) - RET - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -TEXT ·get_EpollPwaitAddr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·EpollPwait(SB), R8 - MOVD R8, ret+0(FP) - RET - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -TEXT ·get_EpollWaitAddr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·EpollWait(SB), R8 - MOVD R8, ret+0(FP) - RET - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -TEXT ·get_EventfdAddr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD 
$·Eventfd(SB), R8 - MOVD R8, ret+0(FP) - RET - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -TEXT ·get_FaccessatAddr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·Faccessat(SB), R8 - MOVD R8, ret+0(FP) - RET - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -TEXT ·get_FchmodatAddr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·Fchmodat(SB), R8 - MOVD R8, ret+0(FP) - RET - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -TEXT ·get_FchownatAddr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·Fchownat(SB), R8 - MOVD R8, ret+0(FP) - RET - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -TEXT ·get_FdatasyncAddr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·Fdatasync(SB), R8 - MOVD R8, ret+0(FP) - RET - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -TEXT ·get_fstatatAddr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·fstatat(SB), R8 - MOVD R8, ret+0(FP) - RET - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -TEXT ·get_LgetxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·Lgetxattr(SB), R8 - MOVD R8, ret+0(FP) - RET - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -TEXT ·get_LsetxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·Lsetxattr(SB), R8 - MOVD R8, ret+0(FP) - RET - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -TEXT ·get_FstatfsAddr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·Fstatfs(SB), R8 - MOVD R8, ret+0(FP) - RET - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -TEXT ·get_FutimesAddr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·Futimes(SB), R8 - MOVD R8, ret+0(FP) - RET - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -TEXT ·get_FutimesatAddr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·Futimesat(SB), R8 - MOVD R8, ret+0(FP) - RET - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -TEXT ·get_GetrandomAddr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·Getrandom(SB), R8 - MOVD R8, ret+0(FP) - RET - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -TEXT ·get_InotifyInitAddr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·InotifyInit(SB), R8 - MOVD R8, ret+0(FP) - RET - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -TEXT ·get_InotifyInit1Addr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·InotifyInit1(SB), R8 - MOVD R8, ret+0(FP) - RET - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -TEXT ·get_InotifyAddWatchAddr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·InotifyAddWatch(SB), R8 - MOVD R8, ret+0(FP) - RET - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -TEXT ·get_InotifyRmWatchAddr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·InotifyRmWatch(SB), R8 - MOVD R8, ret+0(FP) - RET - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -TEXT ·get_ListxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·Listxattr(SB), R8 - MOVD R8, ret+0(FP) - RET - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -TEXT ·get_LlistxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·Llistxattr(SB), R8 - MOVD R8, ret+0(FP) - RET - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -TEXT ·get_LremovexattrAddr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·Lremovexattr(SB), R8 - MOVD R8, ret+0(FP) - RET - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -TEXT ·get_LutimesAddr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·Lutimes(SB), R8 - MOVD R8, ret+0(FP) - RET - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -TEXT ·get_StatfsAddr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·Statfs(SB), R8 - 
MOVD R8, ret+0(FP) - RET - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -TEXT ·get_SyncfsAddr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·Syncfs(SB), R8 - MOVD R8, ret+0(FP) - RET - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -TEXT ·get_UnshareAddr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·Unshare(SB), R8 - MOVD R8, ret+0(FP) - RET - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -TEXT ·get_LinkatAddr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·Linkat(SB), R8 - MOVD R8, ret+0(FP) - RET - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -TEXT ·get_MkdiratAddr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·Mkdirat(SB), R8 - MOVD R8, ret+0(FP) - RET - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -TEXT ·get_MknodatAddr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·Mknodat(SB), R8 - MOVD R8, ret+0(FP) - RET - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -TEXT ·get_PivotRootAddr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·PivotRoot(SB), R8 - MOVD R8, ret+0(FP) - RET - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -TEXT ·get_PrctlAddr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·Prctl(SB), R8 - MOVD R8, ret+0(FP) - RET - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -TEXT ·get_PrlimitAddr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·Prlimit(SB), R8 - MOVD R8, ret+0(FP) - RET - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -TEXT ·get_RenameatAddr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·Renameat(SB), R8 - MOVD R8, ret+0(FP) - RET - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -TEXT ·get_Renameat2Addr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·Renameat2(SB), R8 - MOVD R8, ret+0(FP) - RET - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -TEXT ·get_SethostnameAddr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·Sethostname(SB), R8 - MOVD R8, ret+0(FP) - RET - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -TEXT ·get_SetnsAddr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·Setns(SB), R8 - MOVD R8, ret+0(FP) - RET - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -TEXT ·get_SymlinkatAddr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·Symlinkat(SB), R8 - MOVD R8, ret+0(FP) - RET - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -TEXT ·get_UnlinkatAddr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·Unlinkat(SB), R8 - MOVD R8, ret+0(FP) - RET - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -TEXT ·get_openatAddr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·openat(SB), R8 - MOVD R8, ret+0(FP) - RET - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -TEXT ·get_openat2Addr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·openat2(SB), R8 - MOVD R8, ret+0(FP) - RET - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -TEXT ·get_utimensatAddr(SB), NOSPLIT|NOFRAME, $0-8 - MOVD $·utimensat(SB), R8 - MOVD R8, ret+0(FP) - RET diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go index 6ea64a3c..d1d1d233 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build aix && ppc +// +build aix,ppc package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go index 99ee4399..f99a18ad 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build aix && ppc64 +// +build aix,ppc64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go index b68a7836..c4d50ae5 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build aix && ppc64 && gc +// +build aix,ppc64,gc package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go index 0a87450b..6903d3b0 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build aix && ppc64 && gccgo +// +build aix,ppc64,gccgo package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index ccb02f24..1cad561e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build darwin && amd64 +// +build darwin,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index 1b40b997..b18edbd0 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build darwin && arm64 +// +build darwin,arm64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go index aad65fc7..0c67df64 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build dragonfly && amd64 +// +build dragonfly,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go index c0096391..e6e05d14 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build freebsd && 386 +// +build freebsd,386 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go index 7664df74..7508acca 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build freebsd && amd64 +// +build freebsd,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go index ae099182..7b56aead 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build freebsd && arm +// +build freebsd,arm package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go index 11fd5d45..cc623dca 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build freebsd && arm64 +// +build freebsd,arm64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go index c3d2d653..58184919 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build freebsd && riscv64 +// +build freebsd,riscv64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go index c698cbc0..6be25cd1 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build illumos && amd64 +// +build illumos,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 87d8612a..1ff3aec7 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -1,6 +1,7 @@ // Code generated by mkmerge; DO NOT EDIT. 
//go:build linux +// +build linux package unix @@ -37,21 +38,6 @@ func fchmodat(dirfd int, path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fchmodat2(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHMODAT2, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func ioctl(fd int, req uint, arg uintptr) (err error) { _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -906,16 +892,6 @@ func Fspick(dirfd int, pathName string, flags int) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fsconfig(fd int, cmd uint, key *byte, value *byte, aux int) (err error) { - _, _, e1 := Syscall6(SYS_FSCONFIG, uintptr(fd), uintptr(cmd), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(value)), uintptr(aux), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getdents(fd int, buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { @@ -2219,13 +2195,3 @@ func schedGetattr(pid int, attr *SchedAttr, size uint, flags uint) (err error) { } return } - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Cachestat(fd uint, crange *CachestatRange, cstat *Cachestat_t, flags uint) (err error) { - _, _, e1 := Syscall6(SYS_CACHESTAT, uintptr(fd), uintptr(unsafe.Pointer(crange)), uintptr(unsafe.Pointer(cstat)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go index 4def3e9f..07b549cc 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && 386 +// +build linux,386 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go index fef2bc8b..5f481bf8 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && amd64 +// +build linux,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go index a9fd76a8..824cd52c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && arm +// +build linux,arm package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go index 46006502..e77aecfe 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build linux && arm64 +// +build linux,arm64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go index c8987d26..806ffd1e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && loong64 +// +build linux,loong64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go index 921f4306..961a3afb 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && mips +// +build linux,mips package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go index 44f06782..ed05005e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && mips64 +// +build linux,mips64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go index e7fa0abf..d365b718 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && mips64le +// +build linux,mips64le package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go index 8c512567..c3f1b8bb 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && mipsle +// +build linux,mipsle package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go index 7392fd45..a6574cf9 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && ppc +// +build linux,ppc package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go index 41180434..f4099026 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && ppc64 +// +build linux,ppc64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go index 40c6ce7a..9dfcc299 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build linux && ppc64le +// +build linux,ppc64le package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go index 2cfe34ad..0ab4f2ed 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && riscv64 +// +build linux,riscv64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go index 61e6f070..6cde3223 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && s390x +// +build linux,s390x package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go index 834b8420..5253d65b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && sparc64 +// +build linux,sparc64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go index e91ebc14..2df3c5ba 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build netbsd && 386 +// +build netbsd,386 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go index be28babb..a60556ba 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build netbsd && amd64 +// +build netbsd,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go index fb587e82..9f788917 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build netbsd && arm +// +build netbsd,arm package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go index d576438b..82a4cb2d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build netbsd && arm64 +// +build netbsd,arm64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index 9dc42410..66b3b645 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build openbsd && 386 +// +build openbsd,386 package unix @@ -584,32 +585,6 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_fcntl_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) @@ -2238,21 +2213,6 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_getfsstat_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -2269,31 +2229,3 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr //go:cgo_import_dynamic libc_utimensat utimensat "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func pledge(promises *byte, execpromises *byte) (err error) { - _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_pledge_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_pledge pledge "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func unveil(path *byte, flags *byte) (err error) { - _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_unveil_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_unveil unveil "libc.so" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s index 41b56173..3dcacd30 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s @@ -178,11 +178,6 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $4 DATA ·libc_sysctl_trampoline_addr(SB)/4, $libc_sysctl_trampoline<>(SB) -TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_fcntl(SB) -GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $4 -DATA ·libc_fcntl_trampoline_addr(SB)/4, $libc_fcntl_trampoline<>(SB) 
- TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ppoll(SB) GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $4 @@ -673,22 +668,7 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $4 DATA ·libc_munmap_trampoline_addr(SB)/4, $libc_munmap_trampoline<>(SB) -TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_getfsstat(SB) -GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $4 -DATA ·libc_getfsstat_trampoline_addr(SB)/4, $libc_getfsstat_trampoline<>(SB) - TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $4 DATA ·libc_utimensat_trampoline_addr(SB)/4, $libc_utimensat_trampoline<>(SB) - -TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_pledge(SB) -GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $4 -DATA ·libc_pledge_trampoline_addr(SB)/4, $libc_pledge_trampoline<>(SB) - -TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_unveil(SB) -GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $4 -DATA ·libc_unveil_trampoline_addr(SB)/4, $libc_unveil_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index 0d3a0751..c5c4cc11 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build openbsd && amd64 +// +build openbsd,amd64 package unix @@ -584,32 +585,6 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_fcntl_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) @@ -2238,21 +2213,6 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_getfsstat_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -2269,31 +2229,3 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr //go:cgo_import_dynamic libc_utimensat 
utimensat "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func pledge(promises *byte, execpromises *byte) (err error) { - _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_pledge_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_pledge pledge "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func unveil(path *byte, flags *byte) (err error) { - _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_unveil_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_unveil unveil "libc.so" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s index 4019a656..2763620b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s @@ -178,11 +178,6 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) -TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_fcntl(SB) -GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $8 -DATA ·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB) - TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ppoll(SB) GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8 @@ -673,22 +668,7 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) -TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_getfsstat(SB) -GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $8 -DATA ·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB) - TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) - -TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_pledge(SB) -GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $8 -DATA ·libc_pledge_trampoline_addr(SB)/8, $libc_pledge_trampoline<>(SB) - -TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_unveil(SB) -GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $8 -DATA ·libc_unveil_trampoline_addr(SB)/8, $libc_unveil_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index c39f7776..93bfbb32 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build openbsd && arm +// +build openbsd,arm package unix @@ -584,32 +585,6 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_fcntl_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) @@ -2238,21 +2213,6 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_getfsstat_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -2269,31 +2229,3 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr //go:cgo_import_dynamic libc_utimensat utimensat "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func pledge(promises *byte, execpromises *byte) (err error) { - _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_pledge_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_pledge pledge "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func unveil(path *byte, flags *byte) (err error) { - _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_unveil_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_unveil unveil "libc.so" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s index ac4af24f..c9223140 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s @@ -178,11 +178,6 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $4 DATA ·libc_sysctl_trampoline_addr(SB)/4, $libc_sysctl_trampoline<>(SB) -TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_fcntl(SB) -GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $4 -DATA ·libc_fcntl_trampoline_addr(SB)/4, $libc_fcntl_trampoline<>(SB) 
- TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ppoll(SB) GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $4 @@ -673,22 +668,7 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $4 DATA ·libc_munmap_trampoline_addr(SB)/4, $libc_munmap_trampoline<>(SB) -TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_getfsstat(SB) -GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $4 -DATA ·libc_getfsstat_trampoline_addr(SB)/4, $libc_getfsstat_trampoline<>(SB) - TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $4 DATA ·libc_utimensat_trampoline_addr(SB)/4, $libc_utimensat_trampoline<>(SB) - -TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_pledge(SB) -GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $4 -DATA ·libc_pledge_trampoline_addr(SB)/4, $libc_pledge_trampoline<>(SB) - -TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_unveil(SB) -GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $4 -DATA ·libc_unveil_trampoline_addr(SB)/4, $libc_unveil_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go index 57571d07..a107b8fd 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build openbsd && arm64 +// +build openbsd,arm64 package unix @@ -584,32 +585,6 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_fcntl_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) @@ -2238,21 +2213,6 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_getfsstat_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -2269,31 +2229,3 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr //go:cgo_import_dynamic libc_utimensat 
utimensat "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func pledge(promises *byte, execpromises *byte) (err error) { - _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_pledge_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_pledge pledge "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func unveil(path *byte, flags *byte) (err error) { - _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_unveil_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_unveil unveil "libc.so" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s index f77d5321..a6bc32c9 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s @@ -178,11 +178,6 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) -TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_fcntl(SB) -GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $8 -DATA ·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB) - TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ppoll(SB) GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8 @@ -673,22 +668,7 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) -TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_getfsstat(SB) -GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $8 -DATA ·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB) - TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) - -TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_pledge(SB) -GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $8 -DATA ·libc_pledge_trampoline_addr(SB)/8, $libc_pledge_trampoline<>(SB) - -TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_unveil(SB) -GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $8 -DATA ·libc_unveil_trampoline_addr(SB)/8, $libc_unveil_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go index e62963e6..c427de50 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build openbsd && mips64 +// +build openbsd,mips64 package unix @@ -584,32 +585,6 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_fcntl_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) @@ -2238,21 +2213,6 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_getfsstat_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -2269,31 +2229,3 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr //go:cgo_import_dynamic libc_utimensat utimensat "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func pledge(promises *byte, execpromises *byte) (err error) { - _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_pledge_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_pledge pledge "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func unveil(path *byte, flags *byte) (err error) { - _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_unveil_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_unveil unveil "libc.so" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s index fae140b6..b4e7bcea 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s @@ -178,11 +178,6 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) -TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_fcntl(SB) -GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $8 -DATA ·libc_fcntl_trampoline_addr(SB)/8, 
$libc_fcntl_trampoline<>(SB) - TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ppoll(SB) GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8 @@ -673,22 +668,7 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) -TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_getfsstat(SB) -GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $8 -DATA ·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB) - TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) - -TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_pledge(SB) -GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $8 -DATA ·libc_pledge_trampoline_addr(SB)/8, $libc_pledge_trampoline<>(SB) - -TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_unveil(SB) -GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $8 -DATA ·libc_unveil_trampoline_addr(SB)/8, $libc_unveil_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go index 00831354..60c1a99a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build openbsd && ppc64 +// +build openbsd,ppc64 package unix @@ -584,32 +585,6 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_fcntl_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) @@ -2238,21 +2213,6 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_getfsstat_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -2269,31 +2229,3 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr 
//go:cgo_import_dynamic libc_utimensat utimensat "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func pledge(promises *byte, execpromises *byte) (err error) { - _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_pledge_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_pledge pledge "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func unveil(path *byte, flags *byte) (err error) { - _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_unveil_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_unveil unveil "libc.so" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s index 9d1e0ff0..ca3f7660 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s @@ -213,12 +213,6 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) -TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 - CALL libc_fcntl(SB) - RET -GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $8 -DATA ·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB) - TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 CALL libc_ppoll(SB) RET @@ -807,26 +801,8 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) -TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 - CALL libc_getfsstat(SB) - RET -GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $8 -DATA ·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB) - TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 CALL libc_utimensat(SB) RET GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) - -TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 - CALL libc_pledge(SB) - RET -GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $8 -DATA ·libc_pledge_trampoline_addr(SB)/8, $libc_pledge_trampoline<>(SB) - -TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 - CALL libc_unveil(SB) - RET -GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $8 -DATA ·libc_unveil_trampoline_addr(SB)/8, $libc_unveil_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go index 79029ed5..52eba360 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build openbsd && riscv64 +// +build openbsd,riscv64 package unix @@ -584,32 +585,6 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_fcntl_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) @@ -2238,21 +2213,6 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_getfsstat_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -2269,31 +2229,3 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr //go:cgo_import_dynamic libc_utimensat utimensat "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func pledge(promises *byte, execpromises *byte) (err error) { - _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_pledge_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_pledge pledge "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func unveil(path *byte, flags *byte) (err error) { - _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_unveil_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_unveil unveil "libc.so" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s index da115f9a..477a7d5b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s @@ -178,11 +178,6 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) -TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_fcntl(SB) -GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $8 -DATA ·libc_fcntl_trampoline_addr(SB)/8, 
$libc_fcntl_trampoline<>(SB) - TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ppoll(SB) GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8 @@ -673,22 +668,7 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) -TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_getfsstat(SB) -GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $8 -DATA ·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB) - TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) - -TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_pledge(SB) -GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $8 -DATA ·libc_pledge_trampoline_addr(SB)/8, $libc_pledge_trampoline<>(SB) - -TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_unveil(SB) -GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $8 -DATA ·libc_unveil_trampoline_addr(SB)/8, $libc_unveil_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index 829b87fe..b4018946 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build solaris && amd64 +// +build solaris,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go index 7ccf66b7..1d8fe1d4 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go @@ -1,105 +1,23 @@ -// go run mksyscall_zos_s390x.go -o_sysnum zsysnum_zos_s390x.go -o_syscall zsyscall_zos_s390x.go -i_syscall syscall_zos_s390x.go -o_asm zsymaddr_zos_s390x.s +// go run mksyscall.go -tags zos,s390x syscall_zos_s390x.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build zos && s390x +// +build zos,s390x package unix import ( - "runtime" - "syscall" "unsafe" ) -var _ syscall.Errno - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func fcntl(fd int, cmd int, arg int) (val int, err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCNTL<<4, uintptr(fd), uintptr(cmd), uintptr(arg)) - runtime.ExitSyscall() + r0, _, e1 := syscall_syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) val = int(r0) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func impl_Flistxattr(fd int, dest []byte) (sz int, err error) { - var _p0 unsafe.Pointer - if len(dest) > 0 { - _p0 = unsafe.Pointer(&dest[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FLISTXATTR_A<<4, uintptr(fd), uintptr(_p0), uintptr(len(dest))) - runtime.ExitSyscall() - sz = int(r0) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -//go:nosplit -func get_FlistxattrAddr() *(func(fd int, dest []byte) (sz int, err error)) - -var Flistxattr = enter_Flistxattr - -func enter_Flistxattr(fd int, dest []byte) (sz int, err error) { - funcref := get_FlistxattrAddr() - if funcptrtest(GetZosLibVec()+SYS___FLISTXATTR_A<<4, "") == 0 { - *funcref = impl_Flistxattr - } else { - *funcref = error_Flistxattr - } - return (*funcref)(fd, dest) -} - -func error_Flistxattr(fd int, dest []byte) (sz int, err error) { - sz = -1 - err = ENOSYS - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func impl_Fremovexattr(fd int, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FREMOVEXATTR_A<<4, uintptr(fd), uintptr(unsafe.Pointer(_p0))) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -//go:nosplit -func get_FremovexattrAddr() *(func(fd int, attr string) (err error)) - -var Fremovexattr = enter_Fremovexattr - -func enter_Fremovexattr(fd int, attr string) (err error) { - funcref := get_FremovexattrAddr() - if funcptrtest(GetZosLibVec()+SYS___FREMOVEXATTR_A<<4, "") == 0 { - *funcref = impl_Fremovexattr - } else { - *funcref = error_Fremovexattr + if e1 != 0 { + err = errnoErr(e1) } - return (*funcref)(fd, attr) -} - -func error_Fremovexattr(fd int, attr string) (err error) { - err = ENOSYS return } @@ -112,12 +30,10 @@ func read(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_READ<<4, uintptr(fd), uintptr(_p0), uintptr(len(p))) - runtime.ExitSyscall() + r0, _, e1 := syscall_syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -131,159 +47,31 @@ func write(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_WRITE<<4, uintptr(fd), uintptr(_p0), uintptr(len(p))) - runtime.ExitSyscall() + r0, _, e1 := syscall_syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func impl_Fgetxattr(fd int, attr string, 
dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FGETXATTR_A<<4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - runtime.ExitSyscall() - sz = int(r0) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -//go:nosplit -func get_FgetxattrAddr() *(func(fd int, attr string, dest []byte) (sz int, err error)) - -var Fgetxattr = enter_Fgetxattr - -func enter_Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { - funcref := get_FgetxattrAddr() - if funcptrtest(GetZosLibVec()+SYS___FGETXATTR_A<<4, "") == 0 { - *funcref = impl_Fgetxattr - } else { - *funcref = error_Fgetxattr - } - return (*funcref)(fd, attr, dest) -} - -func error_Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { - sz = -1 - err = ENOSYS - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func impl_Fsetxattr(fd int, attr string, data []byte, flag int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(data) > 0 { - _p1 = unsafe.Pointer(&data[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FSETXATTR_A<<4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(data)), uintptr(flag)) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -//go:nosplit -func get_FsetxattrAddr() *(func(fd int, attr string, data []byte, flag int) (err error)) - -var Fsetxattr = enter_Fsetxattr - -func enter_Fsetxattr(fd int, attr string, data []byte, flag int) (err error) { - funcref := get_FsetxattrAddr() - if funcptrtest(GetZosLibVec()+SYS___FSETXATTR_A<<4, "") == 0 { - *funcref = impl_Fsetxattr - } else { - *funcref = error_Fsetxattr + if e1 != 0 { + err = errnoErr(e1) } - return (*funcref)(fd, attr, data, flag) -} - -func error_Fsetxattr(fd int, attr string, data []byte, flag int) (err error) { - err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___ACCEPT_A<<4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - runtime.ExitSyscall() - fd = int(r0) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func impl_accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___ACCEPT4_A<<4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags)) - runtime.ExitSyscall() + r0, _, e1 := syscall_syscall(SYS___ACCEPT_A, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) fd = int(r0) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -//go:nosplit -func get_accept4Addr() *(func(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error)) - -var accept4 = enter_accept4 - -func enter_accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { - funcref := 
get_accept4Addr() - if funcptrtest(GetZosLibVec()+SYS___ACCEPT4_A<<4, "") == 0 { - *funcref = impl_accept4 - } else { - *funcref = error_accept4 + if e1 != 0 { + err = errnoErr(e1) } - return (*funcref)(s, rsa, addrlen, flags) -} - -func error_accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { - fd = -1 - err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___BIND_A<<4, uintptr(s), uintptr(addr), uintptr(addrlen)) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) + _, _, e1 := syscall_syscall(SYS___BIND_A, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -291,11 +79,9 @@ func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___CONNECT_A<<4, uintptr(s), uintptr(addr), uintptr(addrlen)) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) + _, _, e1 := syscall_syscall(SYS___CONNECT_A, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -303,10 +89,10 @@ func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getgroups(n int, list *_Gid_t) (nn int, err error) { - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETGROUPS<<4, uintptr(n), uintptr(unsafe.Pointer(list))) + r0, _, e1 := syscall_rawsyscall(SYS_GETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) nn = int(r0) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -314,9 +100,9 @@ func getgroups(n int, list *_Gid_t) (nn int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setgroups(n int, list *_Gid_t) (err error) { - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETGROUPS<<4, uintptr(n), uintptr(unsafe.Pointer(list))) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) + _, _, e1 := syscall_rawsyscall(SYS_SETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -324,11 +110,9 @@ func setgroups(n int, list *_Gid_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETSOCKOPT<<4, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen))) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) + _, _, e1 := syscall_syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -336,11 +120,9 @@ func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETSOCKOPT<<4, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen)) - 
runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) + _, _, e1 := syscall_syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -348,10 +130,10 @@ func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socket(domain int, typ int, proto int) (fd int, err error) { - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SOCKET<<4, uintptr(domain), uintptr(typ), uintptr(proto)) + r0, _, e1 := syscall_rawsyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) fd = int(r0) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -359,9 +141,9 @@ func socket(domain int, typ int, proto int) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SOCKETPAIR<<4, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd))) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) + _, _, e1 := syscall_rawsyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -369,9 +151,9 @@ func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___GETPEERNAME_A<<4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) + _, _, e1 := syscall_rawsyscall(SYS___GETPEERNAME_A, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -379,52 +161,10 @@ func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___GETSOCKNAME_A<<4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func impl_Removexattr(path string, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___REMOVEXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1))) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -//go:nosplit -func get_RemovexattrAddr() *(func(path string, attr string) (err error)) - -var Removexattr = enter_Removexattr - -func enter_Removexattr(path string, attr string) (err error) { - funcref := get_RemovexattrAddr() - if funcptrtest(GetZosLibVec()+SYS___REMOVEXATTR_A<<4, "") == 0 { - *funcref = impl_Removexattr - } else { - *funcref = error_Removexattr + _, _, e1 := syscall_rawsyscall(SYS___GETSOCKNAME_A, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) } - 
return (*funcref)(path, attr) -} - -func error_Removexattr(path string, attr string) (err error) { - err = ENOSYS return } @@ -437,12 +177,10 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl } else { _p0 = unsafe.Pointer(&_zero) } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___RECVFROM_A<<4, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) - runtime.ExitSyscall() + r0, _, e1 := syscall_syscall6(SYS___RECVFROM_A, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) n = int(r0) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -456,11 +194,9 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( } else { _p0 = unsafe.Pointer(&_zero) } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___SENDTO_A<<4, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) + _, _, e1 := syscall_syscall6(SYS___SENDTO_A, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -468,12 +204,10 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___RECVMSG_A<<4, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) - runtime.ExitSyscall() + r0, _, e1 := syscall_syscall(SYS___RECVMSG_A, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -481,12 +215,10 @@ func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___SENDMSG_A<<4, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) - runtime.ExitSyscall() + r0, _, e1 := syscall_syscall(SYS___SENDMSG_A, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -494,12 +226,10 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_MMAP<<4, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) - runtime.ExitSyscall() + r0, _, e1 := syscall_syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) ret = uintptr(r0) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -507,11 +237,9 @@ func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) ( // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func munmap(addr uintptr, length uintptr) (err error) { - 
runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_MUNMAP<<4, uintptr(addr), uintptr(length)) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) + _, _, e1 := syscall_syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -519,11 +247,9 @@ func munmap(addr uintptr, length uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ioctl(fd int, req int, arg uintptr) (err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_IOCTL<<4, uintptr(fd), uintptr(req), uintptr(arg)) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) + _, _, e1 := syscall_syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -531,62 +257,9 @@ func ioctl(fd int, req int, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ioctlPtr(fd int, req int, arg unsafe.Pointer) (err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_IOCTL<<4, uintptr(fd), uintptr(req), uintptr(arg)) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func shmat(id int, addr uintptr, flag int) (ret uintptr, err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SHMAT<<4, uintptr(id), uintptr(addr), uintptr(flag)) - runtime.ExitSyscall() - ret = uintptr(r0) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func shmctl(id int, cmd int, buf *SysvShmDesc) (result int, err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SHMCTL64<<4, uintptr(id), uintptr(cmd), uintptr(unsafe.Pointer(buf))) - runtime.ExitSyscall() - result = int(r0) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func shmdt(addr uintptr) (err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SHMDT<<4, uintptr(addr)) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func shmget(key int, size int, flag int) (id int, err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SHMGET<<4, uintptr(key), uintptr(size), uintptr(flag)) - runtime.ExitSyscall() - id = int(r0) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) + _, _, e1 := syscall_syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -599,11 +272,9 @@ func Access(path string, mode uint32) (err error) { if err != nil { return } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___ACCESS_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) + _, _, e1 := syscall_syscall(SYS___ACCESS_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -616,11 +287,9 @@ func Chdir(path string) (err error) { if err != nil { return } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___CHDIR_A<<4, uintptr(unsafe.Pointer(_p0))) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = 
errnoErr2(e1, e2) + _, _, e1 := syscall_syscall(SYS___CHDIR_A, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -633,11 +302,9 @@ func Chown(path string, uid int, gid int) (err error) { if err != nil { return } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___CHOWN_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) + _, _, e1 := syscall_syscall(SYS___CHOWN_A, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -650,11 +317,9 @@ func Chmod(path string, mode uint32) (err error) { if err != nil { return } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___CHMOD_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) + _, _, e1 := syscall_syscall(SYS___CHMOD_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -667,12 +332,10 @@ func Creat(path string, mode uint32) (fd int, err error) { if err != nil { return } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___CREAT_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - runtime.ExitSyscall() + r0, _, e1 := syscall_syscall(SYS___CREAT_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) fd = int(r0) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -680,12 +343,10 @@ func Creat(path string, mode uint32) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup(oldfd int) (fd int, err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_DUP<<4, uintptr(oldfd)) - runtime.ExitSyscall() + r0, _, e1 := syscall_syscall(SYS_DUP, uintptr(oldfd), 0, 0) fd = int(r0) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -693,2216 +354,617 @@ func Dup(oldfd int) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup2(oldfd int, newfd int) (err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_DUP2<<4, uintptr(oldfd), uintptr(newfd)) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) + _, _, e1 := syscall_syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) + if e1 != 0 { + err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func impl_Dup3(oldfd int, newfd int, flags int) (err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_DUP3<<4, uintptr(oldfd), uintptr(newfd), uintptr(flags)) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } +func Errno2() (er2 int) { + uer2, _, _ := syscall_syscall(SYS___ERRNO2, 0, 0, 0) + er2 = int(uer2) return } -//go:nosplit -func get_Dup3Addr() *(func(oldfd int, newfd int, flags int) (err error)) - -var Dup3 = enter_Dup3 +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func enter_Dup3(oldfd int, newfd int, flags int) (err error) { - funcref := get_Dup3Addr() - if funcptrtest(GetZosLibVec()+SYS_DUP3<<4, "") == 0 { - *funcref = impl_Dup3 - } else { - *funcref = error_Dup3 - } - return (*funcref)(oldfd, newfd, flags) +func Err2ad() (eadd *int) { + ueadd, _, _ := syscall_syscall(SYS___ERR2AD, 0, 0, 0) + eadd = (*int)(unsafe.Pointer(ueadd)) + return } -func error_Dup3(oldfd 
int, newfd int, flags int) (err error) { - err = ENOSYS +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + syscall_syscall(SYS_EXIT, uintptr(code), 0, 0) return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func impl_Dirfd(dirp uintptr) (fd int, err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_DIRFD<<4, uintptr(dirp)) - runtime.ExitSyscall() - fd = int(r0) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) +func Fchdir(fd int) (err error) { + _, _, e1 := syscall_syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) } return } -//go:nosplit -func get_DirfdAddr() *(func(dirp uintptr) (fd int, err error)) - -var Dirfd = enter_Dirfd +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func enter_Dirfd(dirp uintptr) (fd int, err error) { - funcref := get_DirfdAddr() - if funcptrtest(GetZosLibVec()+SYS_DIRFD<<4, "") == 0 { - *funcref = impl_Dirfd - } else { - *funcref = error_Dirfd +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := syscall_syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) } - return (*funcref)(dirp) -} - -func error_Dirfd(dirp uintptr) (fd int, err error) { - fd = -1 - err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func impl_EpollCreate(size int) (fd int, err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_EPOLL_CREATE<<4, uintptr(size)) - runtime.ExitSyscall() - fd = int(r0) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := syscall_syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) } return } -//go:nosplit -func get_EpollCreateAddr() *(func(size int) (fd int, err error)) - -var EpollCreate = enter_EpollCreate +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func enter_EpollCreate(size int) (fd int, err error) { - funcref := get_EpollCreateAddr() - if funcptrtest(GetZosLibVec()+SYS_EPOLL_CREATE<<4, "") == 0 { - *funcref = impl_EpollCreate - } else { - *funcref = error_EpollCreate +func FcntlInt(fd uintptr, cmd int, arg int) (retval int, err error) { + r0, _, e1 := syscall_syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + retval = int(r0) + if e1 != 0 { + err = errnoErr(e1) } - return (*funcref)(size) -} - -func error_EpollCreate(size int) (fd int, err error) { - fd = -1 - err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func impl_EpollCreate1(flags int) (fd int, err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_EPOLL_CREATE1<<4, uintptr(flags)) - runtime.ExitSyscall() - fd = int(r0) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) +func fstat(fd int, stat *Stat_LE_t) (err error) { + _, _, e1 := syscall_syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) } return } -//go:nosplit -func get_EpollCreate1Addr() *(func(flags int) (fd int, err error)) - -var EpollCreate1 = enter_EpollCreate1 +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func enter_EpollCreate1(flags int) (fd int, err error) { - funcref := get_EpollCreate1Addr() - if funcptrtest(GetZosLibVec()+SYS_EPOLL_CREATE1<<4, "") == 0 { - *funcref = impl_EpollCreate1 - } else { - *funcref = error_EpollCreate1 +func Fstatvfs(fd int, stat *Statvfs_t) (err error) { + _, _, e1 := 
syscall_syscall(SYS_FSTATVFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) } - return (*funcref)(flags) -} - -func error_EpollCreate1(flags int) (fd int, err error) { - fd = -1 - err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func impl_EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_EPOLL_CTL<<4, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event))) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) +func Fsync(fd int) (err error) { + _, _, e1 := syscall_syscall(SYS_FSYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) } return } -//go:nosplit -func get_EpollCtlAddr() *(func(epfd int, op int, fd int, event *EpollEvent) (err error)) - -var EpollCtl = enter_EpollCtl +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func enter_EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { - funcref := get_EpollCtlAddr() - if funcptrtest(GetZosLibVec()+SYS_EPOLL_CTL<<4, "") == 0 { - *funcref = impl_EpollCtl - } else { - *funcref = error_EpollCtl +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := syscall_syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) } - return (*funcref)(epfd, op, fd, event) + return } -func error_EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { - err = ENOSYS +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpagesize() (pgsize int) { + r0, _, _ := syscall_syscall(SYS_GETPAGESIZE, 0, 0, 0) + pgsize = int(r0) return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func impl_EpollPwait(epfd int, events []EpollEvent, msec int, sigmask *int) (n int, err error) { +func Mprotect(b []byte, prot int) (err error) { var _p0 unsafe.Pointer - if len(events) > 0 { - _p0 = unsafe.Pointer(&events[0]) + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) } else { _p0 = unsafe.Pointer(&_zero) } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_EPOLL_PWAIT<<4, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), uintptr(unsafe.Pointer(sigmask))) - runtime.ExitSyscall() - n = int(r0) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) + _, _, e1 := syscall_syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) } return } -//go:nosplit -func get_EpollPwaitAddr() *(func(epfd int, events []EpollEvent, msec int, sigmask *int) (n int, err error)) - -var EpollPwait = enter_EpollPwait +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func enter_EpollPwait(epfd int, events []EpollEvent, msec int, sigmask *int) (n int, err error) { - funcref := get_EpollPwaitAddr() - if funcptrtest(GetZosLibVec()+SYS_EPOLL_PWAIT<<4, "") == 0 { - *funcref = impl_EpollPwait +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) } else { - *funcref = error_EpollPwait + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) } - return (*funcref)(epfd, events, msec, sigmask) -} - -func error_EpollPwait(epfd int, events []EpollEvent, msec int, sigmask *int) (n int, err error) { - n = -1 - err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func impl_EpollWait(epfd 
int, events []EpollEvent, msec int) (n int, err error) { +func Poll(fds []PollFd, timeout int) (n int, err error) { var _p0 unsafe.Pointer - if len(events) > 0 { - _p0 = unsafe.Pointer(&events[0]) + if len(fds) > 0 { + _p0 = unsafe.Pointer(&fds[0]) } else { _p0 = unsafe.Pointer(&_zero) } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_EPOLL_WAIT<<4, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec)) - runtime.ExitSyscall() + r0, _, e1 := syscall_syscall(SYS_POLL, uintptr(_p0), uintptr(len(fds)), uintptr(timeout)) n = int(r0) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -//go:nosplit -func get_EpollWaitAddr() *(func(epfd int, events []EpollEvent, msec int) (n int, err error)) - -var EpollWait = enter_EpollWait - -func enter_EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { - funcref := get_EpollWaitAddr() - if funcptrtest(GetZosLibVec()+SYS_EPOLL_WAIT<<4, "") == 0 { - *funcref = impl_EpollWait - } else { - *funcref = error_EpollWait + if e1 != 0 { + err = errnoErr(e1) } - return (*funcref)(epfd, events, msec) -} - -func error_EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { - n = -1 - err = ENOSYS - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Errno2() (er2 int) { - runtime.EnterSyscall() - r0, _, _ := CallLeFuncWithErr(GetZosLibVec() + SYS___ERRNO2<<4) - runtime.ExitSyscall() - er2 = int(r0) return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func impl_Eventfd(initval uint, flags int) (fd int, err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_EVENTFD<<4, uintptr(initval), uintptr(flags)) - runtime.ExitSyscall() - fd = int(r0) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) +func Times(tms *Tms) (ticks uintptr, err error) { + r0, _, e1 := syscall_syscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0) + ticks = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) } return } -//go:nosplit -func get_EventfdAddr() *(func(initval uint, flags int) (fd int, err error)) - -var Eventfd = enter_Eventfd +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func enter_Eventfd(initval uint, flags int) (fd int, err error) { - funcref := get_EventfdAddr() - if funcptrtest(GetZosLibVec()+SYS_EVENTFD<<4, "") == 0 { - *funcref = impl_Eventfd - } else { - *funcref = error_Eventfd +func W_Getmntent(buff *byte, size int) (lastsys int, err error) { + r0, _, e1 := syscall_syscall(SYS_W_GETMNTENT, uintptr(unsafe.Pointer(buff)), uintptr(size), 0) + lastsys = int(r0) + if e1 != 0 { + err = errnoErr(e1) } - return (*funcref)(initval, flags) -} - -func error_Eventfd(initval uint, flags int) (fd int, err error) { - fd = -1 - err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Exit(code int) { - runtime.EnterSyscall() - CallLeFuncWithErr(GetZosLibVec()+SYS_EXIT<<4, uintptr(code)) - runtime.ExitSyscall() +func W_Getmntent_A(buff *byte, size int) (lastsys int, err error) { + r0, _, e1 := syscall_syscall(SYS___W_GETMNTENT_A, uintptr(unsafe.Pointer(buff)), uintptr(size), 0) + lastsys = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func impl_Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { +func mount_LE(path string, filesystem string, fstype string, mtm uint32, parmlen int32, parm string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil 
{ return } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FACCESSAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags)) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) + var _p1 *byte + _p1, err = BytePtrFromString(filesystem) + if err != nil { + return + } + var _p2 *byte + _p2, err = BytePtrFromString(fstype) + if err != nil { + return + } + var _p3 *byte + _p3, err = BytePtrFromString(parm) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(SYS___MOUNT_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(mtm), uintptr(parmlen), uintptr(unsafe.Pointer(_p3))) + if e1 != 0 { + err = errnoErr(e1) } return } -//go:nosplit -func get_FaccessatAddr() *(func(dirfd int, path string, mode uint32, flags int) (err error)) +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -var Faccessat = enter_Faccessat - -func enter_Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { - funcref := get_FaccessatAddr() - if funcptrtest(GetZosLibVec()+SYS___FACCESSAT_A<<4, "") == 0 { - *funcref = impl_Faccessat - } else { - *funcref = error_Faccessat - } - return (*funcref)(dirfd, path, mode, flags) -} - -func error_Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { - err = ENOSYS - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchdir(fd int) (err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCHDIR<<4, uintptr(fd)) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchmod(fd int, mode uint32) (err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCHMOD<<4, uintptr(fd), uintptr(mode)) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func impl_Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FCHMODAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags)) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -//go:nosplit -func get_FchmodatAddr() *(func(dirfd int, path string, mode uint32, flags int) (err error)) - -var Fchmodat = enter_Fchmodat - -func enter_Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { - funcref := get_FchmodatAddr() - if funcptrtest(GetZosLibVec()+SYS___FCHMODAT_A<<4, "") == 0 { - *funcref = impl_Fchmodat - } else { - *funcref = error_Fchmodat - } - return (*funcref)(dirfd, path, mode, flags) -} - -func error_Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { - err = ENOSYS - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchown(fd int, uid int, gid int) (err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCHOWN<<4, uintptr(fd), uintptr(uid), uintptr(gid)) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func impl_Fchownat(fd int, path string, uid int, gid int, flags int) (err error) { 
- var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FCHOWNAT_A<<4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags)) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -//go:nosplit -func get_FchownatAddr() *(func(fd int, path string, uid int, gid int, flags int) (err error)) - -var Fchownat = enter_Fchownat - -func enter_Fchownat(fd int, path string, uid int, gid int, flags int) (err error) { - funcref := get_FchownatAddr() - if funcptrtest(GetZosLibVec()+SYS___FCHOWNAT_A<<4, "") == 0 { - *funcref = impl_Fchownat - } else { - *funcref = error_Fchownat - } - return (*funcref)(fd, path, uid, gid, flags) -} - -func error_Fchownat(fd int, path string, uid int, gid int, flags int) (err error) { - err = ENOSYS - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func FcntlInt(fd uintptr, cmd int, arg int) (retval int, err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCNTL<<4, uintptr(fd), uintptr(cmd), uintptr(arg)) - runtime.ExitSyscall() - retval = int(r0) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func impl_Fdatasync(fd int) (err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FDATASYNC<<4, uintptr(fd)) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -//go:nosplit -func get_FdatasyncAddr() *(func(fd int) (err error)) - -var Fdatasync = enter_Fdatasync - -func enter_Fdatasync(fd int) (err error) { - funcref := get_FdatasyncAddr() - if funcptrtest(GetZosLibVec()+SYS_FDATASYNC<<4, "") == 0 { - *funcref = impl_Fdatasync - } else { - *funcref = error_Fdatasync - } - return (*funcref)(fd) -} - -func error_Fdatasync(fd int) (err error) { - err = ENOSYS - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fstat(fd int, stat *Stat_LE_t) (err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FSTAT<<4, uintptr(fd), uintptr(unsafe.Pointer(stat))) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func impl_fstatat(dirfd int, path string, stat *Stat_LE_t, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FSTATAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags)) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -//go:nosplit -func get_fstatatAddr() *(func(dirfd int, path string, stat *Stat_LE_t, flags int) (err error)) - -var fstatat = enter_fstatat - -func enter_fstatat(dirfd int, path string, stat *Stat_LE_t, flags int) (err error) { - funcref := get_fstatatAddr() - if funcptrtest(GetZosLibVec()+SYS___FSTATAT_A<<4, "") == 0 { - *funcref = impl_fstatat - } else { - *funcref = error_fstatat - } - return (*funcref)(dirfd, path, stat, flags) -} - -func error_fstatat(dirfd int, path string, stat *Stat_LE_t, flags int) (err error) { - err = ENOSYS - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func impl_Lgetxattr(link string, attr string, dest 
[]byte) (sz int, err error) { +func unmount(filesystem string, mtm int) (err error) { var _p0 *byte - _p0, err = BytePtrFromString(link) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) + _p0, err = BytePtrFromString(filesystem) if err != nil { return } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LGETXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest))) - runtime.ExitSyscall() - sz = int(r0) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) + _, _, e1 := syscall_syscall(SYS___UMOUNT_A, uintptr(unsafe.Pointer(_p0)), uintptr(mtm), 0) + if e1 != 0 { + err = errnoErr(e1) } return } -//go:nosplit -func get_LgetxattrAddr() *(func(link string, attr string, dest []byte) (sz int, err error)) - -var Lgetxattr = enter_Lgetxattr - -func enter_Lgetxattr(link string, attr string, dest []byte) (sz int, err error) { - funcref := get_LgetxattrAddr() - if funcptrtest(GetZosLibVec()+SYS___LGETXATTR_A<<4, "") == 0 { - *funcref = impl_Lgetxattr - } else { - *funcref = error_Lgetxattr - } - return (*funcref)(link, attr, dest) -} - -func error_Lgetxattr(link string, attr string, dest []byte) (sz int, err error) { - sz = -1 - err = ENOSYS - return -} - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func impl_Lsetxattr(path string, attr string, data []byte, flags int) (err error) { +func Chroot(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 { - _p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LSETXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags)) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -//go:nosplit -func get_LsetxattrAddr() *(func(path string, attr string, data []byte, flags int) (err error)) - -var Lsetxattr = enter_Lsetxattr - -func enter_Lsetxattr(path string, attr string, data []byte, flags int) (err error) { - funcref := get_LsetxattrAddr() - if funcptrtest(GetZosLibVec()+SYS___LSETXATTR_A<<4, "") == 0 { - *funcref = impl_Lsetxattr - } else { - *funcref = error_Lsetxattr - } - return (*funcref)(path, attr, data, flags) -} - -func error_Lsetxattr(path string, attr string, data []byte, flags int) (err error) { - err = ENOSYS - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func impl_Fstatfs(fd int, buf *Statfs_t) (err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FSTATFS<<4, uintptr(fd), uintptr(unsafe.Pointer(buf))) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -//go:nosplit -func get_FstatfsAddr() *(func(fd int, buf *Statfs_t) (err error)) - -var Fstatfs = enter_Fstatfs - -func enter_Fstatfs(fd int, buf *Statfs_t) (err error) { - funcref := get_FstatfsAddr() - if funcptrtest(GetZosLibVec()+SYS_FSTATFS<<4, "") == 0 { - *funcref = impl_Fstatfs - } else { - *funcref = error_Fstatfs - } - return (*funcref)(fd, buf) -} - -func error_Fstatfs(fd int, buf *Statfs_t) (err error) { - err = ENOSYS - return -} - -// THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fstatvfs(fd int, stat *Statvfs_t) (err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FSTATVFS<<4, uintptr(fd), uintptr(unsafe.Pointer(stat))) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) + _, _, e1 := syscall_syscall(SYS___CHROOT_A, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fsync(fd int) (err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FSYNC<<4, uintptr(fd)) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) +func Uname(buf *Utsname) (err error) { + _, _, e1 := syscall_rawsyscall(SYS___UNAME_A, uintptr(unsafe.Pointer(buf)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func impl_Futimes(fd int, tv []Timeval) (err error) { +func Gethostname(buf []byte) (err error) { var _p0 unsafe.Pointer - if len(tv) > 0 { - _p0 = unsafe.Pointer(&tv[0]) + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) } else { _p0 = unsafe.Pointer(&_zero) } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FUTIMES<<4, uintptr(fd), uintptr(_p0), uintptr(len(tv))) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -//go:nosplit -func get_FutimesAddr() *(func(fd int, tv []Timeval) (err error)) - -var Futimes = enter_Futimes - -func enter_Futimes(fd int, tv []Timeval) (err error) { - funcref := get_FutimesAddr() - if funcptrtest(GetZosLibVec()+SYS_FUTIMES<<4, "") == 0 { - *funcref = impl_Futimes - } else { - *funcref = error_Futimes + _, _, e1 := syscall_syscall(SYS___GETHOSTNAME_A, uintptr(_p0), uintptr(len(buf)), 0) + if e1 != 0 { + err = errnoErr(e1) } - return (*funcref)(fd, tv) -} - -func error_Futimes(fd int, tv []Timeval) (err error) { - err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func impl_Futimesat(dirfd int, path string, tv []Timeval) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(tv) > 0 { - _p1 = unsafe.Pointer(&tv[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FUTIMESAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(tv))) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -//go:nosplit -func get_FutimesatAddr() *(func(dirfd int, path string, tv []Timeval) (err error)) - -var Futimesat = enter_Futimesat - -func enter_Futimesat(dirfd int, path string, tv []Timeval) (err error) { - funcref := get_FutimesatAddr() - if funcptrtest(GetZosLibVec()+SYS___FUTIMESAT_A<<4, "") == 0 { - *funcref = impl_Futimesat - } else { - *funcref = error_Futimesat - } - return (*funcref)(dirfd, path, tv) -} - -func error_Futimesat(dirfd int, path string, tv []Timeval) (err error) { - err = ENOSYS +func Getegid() (egid int) { + r0, _, _ := syscall_rawsyscall(SYS_GETEGID, 0, 0, 0) + egid = int(r0) return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Ftruncate(fd int, length int64) (err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FTRUNCATE<<4, uintptr(fd), uintptr(length)) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } +func 
Geteuid() (uid int) { + r0, _, _ := syscall_rawsyscall(SYS_GETEUID, 0, 0, 0) + uid = int(r0) return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func impl_Getrandom(buf []byte, flags int) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETRANDOM<<4, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) - runtime.ExitSyscall() - n = int(r0) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -//go:nosplit -func get_GetrandomAddr() *(func(buf []byte, flags int) (n int, err error)) - -var Getrandom = enter_Getrandom - -func enter_Getrandom(buf []byte, flags int) (n int, err error) { - funcref := get_GetrandomAddr() - if funcptrtest(GetZosLibVec()+SYS_GETRANDOM<<4, "") == 0 { - *funcref = impl_Getrandom - } else { - *funcref = error_Getrandom - } - return (*funcref)(buf, flags) -} - -func error_Getrandom(buf []byte, flags int) (n int, err error) { - n = -1 - err = ENOSYS +func Getgid() (gid int) { + r0, _, _ := syscall_rawsyscall(SYS_GETGID, 0, 0, 0) + gid = int(r0) return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func impl_InotifyInit() (fd int, err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec() + SYS_INOTIFY_INIT<<4) - runtime.ExitSyscall() - fd = int(r0) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -//go:nosplit -func get_InotifyInitAddr() *(func() (fd int, err error)) - -var InotifyInit = enter_InotifyInit - -func enter_InotifyInit() (fd int, err error) { - funcref := get_InotifyInitAddr() - if funcptrtest(GetZosLibVec()+SYS_INOTIFY_INIT<<4, "") == 0 { - *funcref = impl_InotifyInit - } else { - *funcref = error_InotifyInit - } - return (*funcref)() -} - -func error_InotifyInit() (fd int, err error) { - fd = -1 - err = ENOSYS +func Getpid() (pid int) { + r0, _, _ := syscall_rawsyscall(SYS_GETPID, 0, 0, 0) + pid = int(r0) return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func impl_InotifyInit1(flags int) (fd int, err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_INOTIFY_INIT1<<4, uintptr(flags)) - runtime.ExitSyscall() - fd = int(r0) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := syscall_rawsyscall(SYS_GETPGID, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) } return } -//go:nosplit -func get_InotifyInit1Addr() *(func(flags int) (fd int, err error)) - -var InotifyInit1 = enter_InotifyInit1 - -func enter_InotifyInit1(flags int) (fd int, err error) { - funcref := get_InotifyInit1Addr() - if funcptrtest(GetZosLibVec()+SYS_INOTIFY_INIT1<<4, "") == 0 { - *funcref = impl_InotifyInit1 - } else { - *funcref = error_InotifyInit1 - } - return (*funcref)(flags) -} - -func error_InotifyInit1(flags int) (fd int, err error) { - fd = -1 - err = ENOSYS - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func impl_InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(pathname) - if err != nil { - return - } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___INOTIFY_ADD_WATCH_A<<4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask)) - runtime.ExitSyscall() - watchdesc = int(r0) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - 
return -} - -//go:nosplit -func get_InotifyAddWatchAddr() *(func(fd int, pathname string, mask uint32) (watchdesc int, err error)) - -var InotifyAddWatch = enter_InotifyAddWatch - -func enter_InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { - funcref := get_InotifyAddWatchAddr() - if funcptrtest(GetZosLibVec()+SYS___INOTIFY_ADD_WATCH_A<<4, "") == 0 { - *funcref = impl_InotifyAddWatch - } else { - *funcref = error_InotifyAddWatch - } - return (*funcref)(fd, pathname, mask) -} - -func error_InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { - watchdesc = -1 - err = ENOSYS - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func impl_InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_INOTIFY_RM_WATCH<<4, uintptr(fd), uintptr(watchdesc)) - runtime.ExitSyscall() - success = int(r0) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -//go:nosplit -func get_InotifyRmWatchAddr() *(func(fd int, watchdesc uint32) (success int, err error)) - -var InotifyRmWatch = enter_InotifyRmWatch - -func enter_InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { - funcref := get_InotifyRmWatchAddr() - if funcptrtest(GetZosLibVec()+SYS_INOTIFY_RM_WATCH<<4, "") == 0 { - *funcref = impl_InotifyRmWatch - } else { - *funcref = error_InotifyRmWatch - } - return (*funcref)(fd, watchdesc) -} - -func error_InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { - success = -1 - err = ENOSYS - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func impl_Listxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LISTXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - runtime.ExitSyscall() - sz = int(r0) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -//go:nosplit -func get_ListxattrAddr() *(func(path string, dest []byte) (sz int, err error)) - -var Listxattr = enter_Listxattr - -func enter_Listxattr(path string, dest []byte) (sz int, err error) { - funcref := get_ListxattrAddr() - if funcptrtest(GetZosLibVec()+SYS___LISTXATTR_A<<4, "") == 0 { - *funcref = impl_Listxattr - } else { - *funcref = error_Listxattr - } - return (*funcref)(path, dest) -} - -func error_Listxattr(path string, dest []byte) (sz int, err error) { - sz = -1 - err = ENOSYS - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func impl_Llistxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LLISTXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - runtime.ExitSyscall() - sz = int(r0) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -//go:nosplit -func get_LlistxattrAddr() *(func(path string, dest []byte) (sz int, err error)) - -var Llistxattr = enter_Llistxattr - -func enter_Llistxattr(path string, dest 
[]byte) (sz int, err error) { - funcref := get_LlistxattrAddr() - if funcptrtest(GetZosLibVec()+SYS___LLISTXATTR_A<<4, "") == 0 { - *funcref = impl_Llistxattr - } else { - *funcref = error_Llistxattr - } - return (*funcref)(path, dest) -} - -func error_Llistxattr(path string, dest []byte) (sz int, err error) { - sz = -1 - err = ENOSYS - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func impl_Lremovexattr(path string, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LREMOVEXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1))) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -//go:nosplit -func get_LremovexattrAddr() *(func(path string, attr string) (err error)) - -var Lremovexattr = enter_Lremovexattr - -func enter_Lremovexattr(path string, attr string) (err error) { - funcref := get_LremovexattrAddr() - if funcptrtest(GetZosLibVec()+SYS___LREMOVEXATTR_A<<4, "") == 0 { - *funcref = impl_Lremovexattr - } else { - *funcref = error_Lremovexattr - } - return (*funcref)(path, attr) -} - -func error_Lremovexattr(path string, attr string) (err error) { - err = ENOSYS - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func impl_Lutimes(path string, tv []Timeval) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(tv) > 0 { - _p1 = unsafe.Pointer(&tv[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LUTIMES_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(tv))) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -//go:nosplit -func get_LutimesAddr() *(func(path string, tv []Timeval) (err error)) - -var Lutimes = enter_Lutimes - -func enter_Lutimes(path string, tv []Timeval) (err error) { - funcref := get_LutimesAddr() - if funcptrtest(GetZosLibVec()+SYS___LUTIMES_A<<4, "") == 0 { - *funcref = impl_Lutimes - } else { - *funcref = error_Lutimes - } - return (*funcref)(path, tv) -} - -func error_Lutimes(path string, tv []Timeval) (err error) { - err = ENOSYS - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mprotect(b []byte, prot int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_MPROTECT<<4, uintptr(_p0), uintptr(len(b)), uintptr(prot)) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Msync(b []byte, flags int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_MSYNC<<4, uintptr(_p0), uintptr(len(b)), uintptr(flags)) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Console2(cmsg *ConsMsg2, modstr *byte, concmd *uint32) (err error) { - runtime.EnterSyscall() - r0, 
e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___CONSOLE2<<4, uintptr(unsafe.Pointer(cmsg)), uintptr(unsafe.Pointer(modstr)), uintptr(unsafe.Pointer(concmd))) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Poll(fds []PollFd, timeout int) (n int, err error) { - var _p0 unsafe.Pointer - if len(fds) > 0 { - _p0 = unsafe.Pointer(&fds[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_POLL<<4, uintptr(_p0), uintptr(len(fds)), uintptr(timeout)) - runtime.ExitSyscall() - n = int(r0) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Readdir_r(dirp uintptr, entry *direntLE, result **direntLE) (err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___READDIR_R_A<<4, uintptr(dirp), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result))) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func impl_Statfs(path string, buf *Statfs_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___STATFS_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf))) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -//go:nosplit -func get_StatfsAddr() *(func(path string, buf *Statfs_t) (err error)) - -var Statfs = enter_Statfs - -func enter_Statfs(path string, buf *Statfs_t) (err error) { - funcref := get_StatfsAddr() - if funcptrtest(GetZosLibVec()+SYS___STATFS_A<<4, "") == 0 { - *funcref = impl_Statfs - } else { - *funcref = error_Statfs - } - return (*funcref)(path, buf) -} - -func error_Statfs(path string, buf *Statfs_t) (err error) { - err = ENOSYS - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func impl_Syncfs(fd int) (err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SYNCFS<<4, uintptr(fd)) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -//go:nosplit -func get_SyncfsAddr() *(func(fd int) (err error)) - -var Syncfs = enter_Syncfs - -func enter_Syncfs(fd int) (err error) { - funcref := get_SyncfsAddr() - if funcptrtest(GetZosLibVec()+SYS_SYNCFS<<4, "") == 0 { - *funcref = impl_Syncfs - } else { - *funcref = error_Syncfs - } - return (*funcref)(fd) -} - -func error_Syncfs(fd int) (err error) { - err = ENOSYS - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Times(tms *Tms) (ticks uintptr, err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_TIMES<<4, uintptr(unsafe.Pointer(tms))) - runtime.ExitSyscall() - ticks = uintptr(r0) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func W_Getmntent(buff *byte, size int) (lastsys int, err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_W_GETMNTENT<<4, uintptr(unsafe.Pointer(buff)), uintptr(size)) - runtime.ExitSyscall() - lastsys = int(r0) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE 
TOP; DO NOT EDIT - -func W_Getmntent_A(buff *byte, size int) (lastsys int, err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___W_GETMNTENT_A<<4, uintptr(unsafe.Pointer(buff)), uintptr(size)) - runtime.ExitSyscall() - lastsys = int(r0) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mount_LE(path string, filesystem string, fstype string, mtm uint32, parmlen int32, parm string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(filesystem) - if err != nil { - return - } - var _p2 *byte - _p2, err = BytePtrFromString(fstype) - if err != nil { - return - } - var _p3 *byte - _p3, err = BytePtrFromString(parm) - if err != nil { - return - } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MOUNT_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(mtm), uintptr(parmlen), uintptr(unsafe.Pointer(_p3))) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func unmount_LE(filesystem string, mtm int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(filesystem) - if err != nil { - return - } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___UMOUNT_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(mtm)) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chroot(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___CHROOT_A<<4, uintptr(unsafe.Pointer(_p0))) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Select(nmsgsfds int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (ret int, err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SELECT<<4, uintptr(nmsgsfds), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout))) - runtime.ExitSyscall() - ret = int(r0) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Uname(buf *Utsname) (err error) { - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_____OSNAME_A<<4, uintptr(unsafe.Pointer(buf))) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func impl_Unshare(flags int) (err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_UNSHARE<<4, uintptr(flags)) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -//go:nosplit -func get_UnshareAddr() *(func(flags int) (err error)) - -var Unshare = enter_Unshare - -func enter_Unshare(flags int) (err error) { - funcref := get_UnshareAddr() - if funcptrtest(GetZosLibVec()+SYS_UNSHARE<<4, "") == 0 { - *funcref = impl_Unshare - } else { - *funcref = error_Unshare - } - return (*funcref)(flags) -} - -func error_Unshare(flags int) (err error) { - err = ENOSYS - return -} - -// 
THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Gethostname(buf []byte) (err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___GETHOSTNAME_A<<4, uintptr(_p0), uintptr(len(buf))) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getgid() (gid int) { - r0, _, _ := CallLeFuncWithErr(GetZosLibVec() + SYS_GETGID<<4) - gid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpid() (pid int) { - r0, _, _ := CallLeFuncWithErr(GetZosLibVec() + SYS_GETPID<<4) - pid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpgid(pid int) (pgid int, err error) { - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETPGID<<4, uintptr(pid)) - pgid = int(r0) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getppid() (pid int) { - r0, _, _ := CallLeFuncWithErr(GetZosLibVec() + SYS_GETPPID<<4) - pid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpriority(which int, who int) (prio int, err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETPRIORITY<<4, uintptr(which), uintptr(who)) - runtime.ExitSyscall() - prio = int(r0) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrlimit(resource int, rlim *Rlimit) (err error) { - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETRLIMIT<<4, uintptr(resource), uintptr(unsafe.Pointer(rlim))) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getrusage(who int, rusage *rusage_zos) (err error) { - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETRUSAGE<<4, uintptr(who), uintptr(unsafe.Pointer(rusage))) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getegid() (egid int) { - runtime.EnterSyscall() - r0, _, _ := CallLeFuncWithErr(GetZosLibVec() + SYS_GETEGID<<4) - runtime.ExitSyscall() - egid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Geteuid() (euid int) { - runtime.EnterSyscall() - r0, _, _ := CallLeFuncWithErr(GetZosLibVec() + SYS_GETEUID<<4) - runtime.ExitSyscall() - euid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getsid(pid int) (sid int, err error) { - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETSID<<4, uintptr(pid)) - sid = int(r0) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getuid() (uid int) { - r0, _, _ := CallLeFuncWithErr(GetZosLibVec() + SYS_GETUID<<4) - uid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Kill(pid int, sig Signal) (err error) { - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_KILL<<4, uintptr(pid), uintptr(sig)) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - 
return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lchown(path string, uid int, gid int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LCHOWN_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Link(path string, link string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(link) - if err != nil { - return - } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LINK_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1))) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func impl_Linkat(oldDirFd int, oldPath string, newDirFd int, newPath string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldPath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newPath) - if err != nil { - return - } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LINKAT_A<<4, uintptr(oldDirFd), uintptr(unsafe.Pointer(_p0)), uintptr(newDirFd), uintptr(unsafe.Pointer(_p1)), uintptr(flags)) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -//go:nosplit -func get_LinkatAddr() *(func(oldDirFd int, oldPath string, newDirFd int, newPath string, flags int) (err error)) - -var Linkat = enter_Linkat - -func enter_Linkat(oldDirFd int, oldPath string, newDirFd int, newPath string, flags int) (err error) { - funcref := get_LinkatAddr() - if funcptrtest(GetZosLibVec()+SYS___LINKAT_A<<4, "") == 0 { - *funcref = impl_Linkat - } else { - *funcref = error_Linkat - } - return (*funcref)(oldDirFd, oldPath, newDirFd, newPath, flags) -} - -func error_Linkat(oldDirFd int, oldPath string, newDirFd int, newPath string, flags int) (err error) { - err = ENOSYS - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Listen(s int, n int) (err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_LISTEN<<4, uintptr(s), uintptr(n)) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func lstat(path string, stat *Stat_LE_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LSTAT_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat))) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mkdir(path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MKDIR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func impl_Mkdirat(dirfd int, 
path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MKDIRAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -//go:nosplit -func get_MkdiratAddr() *(func(dirfd int, path string, mode uint32) (err error)) - -var Mkdirat = enter_Mkdirat - -func enter_Mkdirat(dirfd int, path string, mode uint32) (err error) { - funcref := get_MkdiratAddr() - if funcptrtest(GetZosLibVec()+SYS___MKDIRAT_A<<4, "") == 0 { - *funcref = impl_Mkdirat - } else { - *funcref = error_Mkdirat - } - return (*funcref)(dirfd, path, mode) -} - -func error_Mkdirat(dirfd int, path string, mode uint32) (err error) { - err = ENOSYS - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mkfifo(path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MKFIFO_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mknod(path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MKNOD_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func impl_Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MKNODAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -//go:nosplit -func get_MknodatAddr() *(func(dirfd int, path string, mode uint32, dev int) (err error)) - -var Mknodat = enter_Mknodat - -func enter_Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { - funcref := get_MknodatAddr() - if funcptrtest(GetZosLibVec()+SYS___MKNODAT_A<<4, "") == 0 { - *funcref = impl_Mknodat - } else { - *funcref = error_Mknodat - } - return (*funcref)(dirfd, path, mode, dev) -} - -func error_Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { - err = ENOSYS - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func impl_PivotRoot(newroot string, oldroot string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(newroot) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(oldroot) - if err != nil { - return - } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___PIVOT_ROOT_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1))) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -//go:nosplit -func get_PivotRootAddr() *(func(newroot string, oldroot string) (err error)) - -var PivotRoot = enter_PivotRoot - -func enter_PivotRoot(newroot string, oldroot string) (err error) { - 
funcref := get_PivotRootAddr() - if funcptrtest(GetZosLibVec()+SYS___PIVOT_ROOT_A<<4, "") == 0 { - *funcref = impl_PivotRoot - } else { - *funcref = error_PivotRoot - } - return (*funcref)(newroot, oldroot) -} - -func error_PivotRoot(newroot string, oldroot string) (err error) { - err = ENOSYS - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pread(fd int, p []byte, offset int64) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_PREAD<<4, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset)) - runtime.ExitSyscall() - n = int(r0) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } + r0, _, _ := syscall_rawsyscall(SYS_GETPPID, 0, 0, 0) + pid = int(r0) return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_PWRITE<<4, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset)) - runtime.ExitSyscall() - n = int(r0) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := syscall_syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func impl_Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___PRCTL_A<<4, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5)) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) +func Getrlimit(resource int, rlim *Rlimit) (err error) { + _, _, e1 := syscall_rawsyscall(SYS_GETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) + if e1 != 0 { + err = errnoErr(e1) } return } -//go:nosplit -func get_PrctlAddr() *(func(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error)) - -var Prctl = enter_Prctl +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func enter_Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { - funcref := get_PrctlAddr() - if funcptrtest(GetZosLibVec()+SYS___PRCTL_A<<4, "") == 0 { - *funcref = impl_Prctl - } else { - *funcref = error_Prctl +func getrusage(who int, rusage *rusage_zos) (err error) { + _, _, e1 := syscall_rawsyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) } - return (*funcref)(option, arg2, arg3, arg4, arg5) -} - -func error_Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { - err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func impl_Prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_PRLIMIT<<4, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old))) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := syscall_rawsyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = 
errnoErr(e1) } return } -//go:nosplit -func get_PrlimitAddr() *(func(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error)) - -var Prlimit = enter_Prlimit +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func enter_Prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { - funcref := get_PrlimitAddr() - if funcptrtest(GetZosLibVec()+SYS_PRLIMIT<<4, "") == 0 { - *funcref = impl_Prlimit - } else { - *funcref = error_Prlimit - } - return (*funcref)(pid, resource, newlimit, old) +func Getuid() (uid int) { + r0, _, _ := syscall_rawsyscall(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return } -func error_Prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { - err = ENOSYS +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, sig Signal) (err error) { + _, _, e1 := syscall_rawsyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0) + if e1 != 0 { + err = errnoErr(e1) + } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Rename(from string, to string) (err error) { +func Lchown(path string, uid int, gid int) (err error) { var _p0 *byte - _p0, err = BytePtrFromString(from) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(to) + _p0, err = BytePtrFromString(path) if err != nil { return } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___RENAME_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1))) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) + _, _, e1 := syscall_syscall(SYS___LCHOWN_A, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func impl_Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { +func Link(path string, link string) (err error) { var _p0 *byte - _p0, err = BytePtrFromString(oldpath) + _p0, err = BytePtrFromString(path) if err != nil { return } var _p1 *byte - _p1, err = BytePtrFromString(newpath) + _p1, err = BytePtrFromString(link) if err != nil { return } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___RENAMEAT_A<<4, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) + _, _, e1 := syscall_syscall(SYS___LINK_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) } return } -//go:nosplit -func get_RenameatAddr() *(func(olddirfd int, oldpath string, newdirfd int, newpath string) (err error)) - -var Renameat = enter_Renameat +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func enter_Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { - funcref := get_RenameatAddr() - if funcptrtest(GetZosLibVec()+SYS___RENAMEAT_A<<4, "") == 0 { - *funcref = impl_Renameat - } else { - *funcref = error_Renameat +func Listen(s int, n int) (err error) { + _, _, e1 := syscall_syscall(SYS_LISTEN, uintptr(s), uintptr(n), 0) + if e1 != 0 { + err = errnoErr(e1) } - return (*funcref)(olddirfd, oldpath, newdirfd, newpath) -} - -func error_Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { - err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func impl_Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err 
error) { +func lstat(path string, stat *Stat_LE_t) (err error) { var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) + _p0, err = BytePtrFromString(path) if err != nil { return } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___RENAMEAT2_A<<4, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags)) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -//go:nosplit -func get_Renameat2Addr() *(func(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error)) - -var Renameat2 = enter_Renameat2 - -func enter_Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { - funcref := get_Renameat2Addr() - if funcptrtest(GetZosLibVec()+SYS___RENAMEAT2_A<<4, "") == 0 { - *funcref = impl_Renameat2 - } else { - *funcref = error_Renameat2 + _, _, e1 := syscall_syscall(SYS___LSTAT_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) } - return (*funcref)(olddirfd, oldpath, newdirfd, newpath, flags) -} - -func error_Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { - err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Rmdir(path string) (err error) { +func Mkdir(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___RMDIR_A<<4, uintptr(unsafe.Pointer(_p0))) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) + _, _, e1 := syscall_syscall(SYS___MKDIR_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Seek(fd int, offset int64, whence int) (off int64, err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_LSEEK<<4, uintptr(fd), uintptr(offset), uintptr(whence)) - runtime.ExitSyscall() - off = int64(r0) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) +func Mkfifo(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS___MKFIFO_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setegid(egid int) (err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETEGID<<4, uintptr(egid)) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) +func Mknod(path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS___MKNOD_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + if e1 != 0 { + err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Seteuid(euid int) (err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETEUID<<4, uintptr(euid)) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = 
unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func impl_Sethostname(p []byte) (err error) { +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) } else { _p0 = unsafe.Pointer(&_zero) } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___SETHOSTNAME_A<<4, uintptr(_p0), uintptr(len(p))) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) + r0, _, e1 := syscall_syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) } return } -//go:nosplit -func get_SethostnameAddr() *(func(p []byte) (err error)) - -var Sethostname = enter_Sethostname +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func enter_Sethostname(p []byte) (err error) { - funcref := get_SethostnameAddr() - if funcptrtest(GetZosLibVec()+SYS___SETHOSTNAME_A<<4, "") == 0 { - *funcref = impl_Sethostname +func Readlink(path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) } else { - *funcref = error_Sethostname + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(SYS___READLINK_A, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) } - return (*funcref)(p) -} - -func error_Sethostname(p []byte) (err error) { - err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func impl_Setns(fd int, nstype int) (err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETNS<<4, uintptr(fd), uintptr(nstype)) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) +func Rename(from string, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS___RENAME_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) } return } -//go:nosplit -func get_SetnsAddr() *(func(fd int, nstype int) (err error)) - -var Setns = enter_Setns +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func enter_Setns(fd int, nstype int) (err error) { - funcref := get_SetnsAddr() - if funcptrtest(GetZosLibVec()+SYS_SETNS<<4, "") == 0 { - *funcref = impl_Setns - } else { - *funcref = error_Setns +func Rmdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS___RMDIR_A, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) } - return (*funcref)(fd, nstype) + return } -func error_Setns(fd int, nstype int) (err error) { - err = ENOSYS +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (off int64, err error) { + r0, _, e1 := syscall_syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence)) + off = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } return } // THIS FILE IS GENERATED BY THE COMMAND AT 
THE TOP; DO NOT EDIT func Setpriority(which int, who int, prio int) (err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETPRIORITY<<4, uintptr(which), uintptr(who), uintptr(prio)) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) + _, _, e1 := syscall_syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -2910,9 +972,9 @@ func Setpriority(which int, who int, prio int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpgid(pid int, pgid int) (err error) { - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETPGID<<4, uintptr(pid), uintptr(pgid)) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) + _, _, e1 := syscall_rawsyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -2920,9 +982,9 @@ func Setpgid(pid int, pgid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setrlimit(resource int, lim *Rlimit) (err error) { - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETRLIMIT<<4, uintptr(resource), uintptr(unsafe.Pointer(lim))) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) + _, _, e1 := syscall_rawsyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -2930,9 +992,9 @@ func Setrlimit(resource int, lim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setregid(rgid int, egid int) (err error) { - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETREGID<<4, uintptr(rgid), uintptr(egid)) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) + _, _, e1 := syscall_rawsyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -2940,9 +1002,9 @@ func Setregid(rgid int, egid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setreuid(ruid int, euid int) (err error) { - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETREUID<<4, uintptr(ruid), uintptr(euid)) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) + _, _, e1 := syscall_rawsyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -2950,10 +1012,10 @@ func Setreuid(ruid int, euid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setsid() (pid int, err error) { - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec() + SYS_SETSID<<4) + r0, _, e1 := syscall_rawsyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -2961,11 +1023,9 @@ func Setsid() (pid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setuid(uid int) (err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETUID<<4, uintptr(uid)) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) + _, _, e1 := syscall_syscall(SYS_SETUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -2973,11 +1033,9 @@ func Setuid(uid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setgid(uid int) (err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETGID<<4, uintptr(uid)) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) + _, _, e1 := syscall_syscall(SYS_SETGID, uintptr(uid), 0, 
0) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -2985,11 +1043,9 @@ func Setgid(uid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Shutdown(fd int, how int) (err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SHUTDOWN<<4, uintptr(fd), uintptr(how)) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) + _, _, e1 := syscall_syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -3002,11 +1058,9 @@ func stat(path string, statLE *Stat_LE_t) (err error) { if err != nil { return } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___STAT_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(statLE))) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) + _, _, e1 := syscall_syscall(SYS___STAT_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(statLE)), 0) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -3024,63 +1078,17 @@ func Symlink(path string, link string) (err error) { if err != nil { return } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___SYMLINK_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1))) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func impl_Symlinkat(oldPath string, dirfd int, newPath string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldPath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newPath) - if err != nil { - return - } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___SYMLINKAT_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(dirfd), uintptr(unsafe.Pointer(_p1))) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) + _, _, e1 := syscall_syscall(SYS___SYMLINK_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) } return } -//go:nosplit -func get_SymlinkatAddr() *(func(oldPath string, dirfd int, newPath string) (err error)) - -var Symlinkat = enter_Symlinkat - -func enter_Symlinkat(oldPath string, dirfd int, newPath string) (err error) { - funcref := get_SymlinkatAddr() - if funcptrtest(GetZosLibVec()+SYS___SYMLINKAT_A<<4, "") == 0 { - *funcref = impl_Symlinkat - } else { - *funcref = error_Symlinkat - } - return (*funcref)(oldPath, dirfd, newPath) -} - -func error_Symlinkat(oldPath string, dirfd int, newPath string) (err error) { - err = ENOSYS - return -} - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Sync() { - runtime.EnterSyscall() - CallLeFuncWithErr(GetZosLibVec() + SYS_SYNC<<4) - runtime.ExitSyscall() + syscall_syscall(SYS_SYNC, 0, 0, 0) return } @@ -3092,11 +1100,9 @@ func Truncate(path string, length int64) (err error) { if err != nil { return } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___TRUNCATE_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(length)) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) + _, _, e1 := syscall_syscall(SYS___TRUNCATE_A, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -3104,11 +1110,9 @@ func Truncate(path string, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Tcgetattr(fildes int, termptr *Termios) (err error) { - runtime.EnterSyscall() - r0, 
e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_TCGETATTR<<4, uintptr(fildes), uintptr(unsafe.Pointer(termptr))) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) + _, _, e1 := syscall_syscall(SYS_TCGETATTR, uintptr(fildes), uintptr(unsafe.Pointer(termptr)), 0) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -3116,11 +1120,9 @@ func Tcgetattr(fildes int, termptr *Termios) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Tcsetattr(fildes int, when int, termptr *Termios) (err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_TCSETATTR<<4, uintptr(fildes), uintptr(when), uintptr(unsafe.Pointer(termptr))) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) + _, _, e1 := syscall_syscall(SYS_TCSETATTR, uintptr(fildes), uintptr(when), uintptr(unsafe.Pointer(termptr))) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -3128,9 +1130,7 @@ func Tcsetattr(fildes int, when int, termptr *Termios) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Umask(mask int) (oldmask int) { - runtime.EnterSyscall() - r0, _, _ := CallLeFuncWithErr(GetZosLibVec()+SYS_UMASK<<4, uintptr(mask)) - runtime.ExitSyscall() + r0, _, _ := syscall_syscall(SYS_UMASK, uintptr(mask), 0, 0) oldmask = int(r0) return } @@ -3143,49 +1143,10 @@ func Unlink(path string) (err error) { if err != nil { return } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___UNLINK_A<<4, uintptr(unsafe.Pointer(_p0))) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func impl_Unlinkat(dirfd int, path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___UNLINKAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -//go:nosplit -func get_UnlinkatAddr() *(func(dirfd int, path string, flags int) (err error)) - -var Unlinkat = enter_Unlinkat - -func enter_Unlinkat(dirfd int, path string, flags int) (err error) { - funcref := get_UnlinkatAddr() - if funcptrtest(GetZosLibVec()+SYS___UNLINKAT_A<<4, "") == 0 { - *funcref = impl_Unlinkat - } else { - *funcref = error_Unlinkat + _, _, e1 := syscall_syscall(SYS___UNLINK_A, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) } - return (*funcref)(dirfd, path, flags) -} - -func error_Unlinkat(dirfd int, path string, flags int) (err error) { - err = ENOSYS return } @@ -3197,11 +1158,9 @@ func Utime(path string, utim *Utimbuf) (err error) { if err != nil { return } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___UTIME_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(utim))) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) + _, _, e1 := syscall_syscall(SYS___UTIME_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(utim)), 0) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -3214,91 +1173,11 @@ func open(path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___OPEN_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) - runtime.ExitSyscall() - fd = int(r0) - if int64(r0) == -1 { - err = 
errnoErr2(e1, e2) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func impl_openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___OPENAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode)) - runtime.ExitSyscall() - fd = int(r0) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -//go:nosplit -func get_openatAddr() *(func(dirfd int, path string, flags int, mode uint32) (fd int, err error)) - -var openat = enter_openat - -func enter_openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { - funcref := get_openatAddr() - if funcptrtest(GetZosLibVec()+SYS___OPENAT_A<<4, "") == 0 { - *funcref = impl_openat - } else { - *funcref = error_openat - } - return (*funcref)(dirfd, path, flags, mode) -} - -func error_openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { - fd = -1 - err = ENOSYS - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func impl_openat2(dirfd int, path string, open_how *OpenHow, size int) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___OPENAT2_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(open_how)), uintptr(size)) - runtime.ExitSyscall() + r0, _, e1 := syscall_syscall(SYS___OPEN_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) fd = int(r0) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -//go:nosplit -func get_openat2Addr() *(func(dirfd int, path string, open_how *OpenHow, size int) (fd int, err error)) - -var openat2 = enter_openat2 - -func enter_openat2(dirfd int, path string, open_how *OpenHow, size int) (fd int, err error) { - funcref := get_openat2Addr() - if funcptrtest(GetZosLibVec()+SYS___OPENAT2_A<<4, "") == 0 { - *funcref = impl_openat2 - } else { - *funcref = error_openat2 + if e1 != 0 { + err = errnoErr(e1) } - return (*funcref)(dirfd, path, open_how, size) -} - -func error_openat2(dirfd int, path string, open_how *OpenHow, size int) (fd int, err error) { - fd = -1 - err = ENOSYS return } @@ -3310,23 +1189,9 @@ func remove(path string) (err error) { if err != nil { return } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_REMOVE<<4, uintptr(unsafe.Pointer(_p0))) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func waitid(idType int, id int, info *Siginfo, options int) (err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_WAITID<<4, uintptr(idType), uintptr(id), uintptr(unsafe.Pointer(info)), uintptr(options)) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) + _, _, e1 := syscall_syscall(SYS_REMOVE, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -3334,12 +1199,10 @@ func waitid(idType int, id int, info *Siginfo, options int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func waitpid(pid int, wstatus *_C_int, options int) (wpid int, err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_WAITPID<<4, uintptr(pid), 
uintptr(unsafe.Pointer(wstatus)), uintptr(options)) - runtime.ExitSyscall() + r0, _, e1 := syscall_syscall(SYS_WAITPID, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options)) wpid = int(r0) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -3347,9 +1210,9 @@ func waitpid(pid int, wstatus *_C_int, options int) (wpid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func gettimeofday(tv *timeval_zos) (err error) { - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETTIMEOFDAY<<4, uintptr(unsafe.Pointer(tv))) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) + _, _, e1 := syscall_rawsyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -3357,9 +1220,9 @@ func gettimeofday(tv *timeval_zos) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pipe(p *[2]_C_int) (err error) { - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_PIPE<<4, uintptr(unsafe.Pointer(p))) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) + _, _, e1 := syscall_rawsyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -3372,87 +1235,20 @@ func utimes(path string, timeval *[2]Timeval) (err error) { if err != nil { return } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___UTIMES_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval))) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func impl_utimensat(dirfd int, path string, ts *[2]Timespec, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___UTIMENSAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(ts)), uintptr(flags)) - runtime.ExitSyscall() - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -//go:nosplit -func get_utimensatAddr() *(func(dirfd int, path string, ts *[2]Timespec, flags int) (err error)) - -var utimensat = enter_utimensat - -func enter_utimensat(dirfd int, path string, ts *[2]Timespec, flags int) (err error) { - funcref := get_utimensatAddr() - if funcptrtest(GetZosLibVec()+SYS___UTIMENSAT_A<<4, "") == 0 { - *funcref = impl_utimensat - } else { - *funcref = error_utimensat - } - return (*funcref)(dirfd, path, ts, flags) -} - -func error_utimensat(dirfd int, path string, ts *[2]Timespec, flags int) (err error) { - err = ENOSYS - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Posix_openpt(oflag int) (fd int, err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_POSIX_OPENPT<<4, uintptr(oflag)) - runtime.ExitSyscall() - fd = int(r0) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) + _, _, e1 := syscall_syscall(SYS___UTIMES_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Grantpt(fildes int) (rc int, err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GRANTPT<<4, uintptr(fildes)) - runtime.ExitSyscall() - rc = int(r0) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func 
Unlockpt(fildes int) (rc int, err error) { - runtime.EnterSyscall() - r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_UNLOCKPT<<4, uintptr(fildes)) - runtime.ExitSyscall() - rc = int(r0) - if int64(r0) == -1 { - err = errnoErr2(e1, e2) +func Select(nmsgsfds int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (ret int, err error) { + r0, _, e1 := syscall_syscall6(SYS_SELECT, uintptr(nmsgsfds), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) } return } diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go index 3a58ae81..55e04847 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go @@ -2,6 +2,7 @@ // Code generated by the command above; DO NOT EDIT. //go:build 386 && openbsd +// +build 386,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go index dcb7a0eb..d2243cf8 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go @@ -2,6 +2,7 @@ // Code generated by the command above; DO NOT EDIT. //go:build amd64 && openbsd +// +build amd64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go index db5a7bf1..82dc51bd 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go @@ -2,6 +2,7 @@ // Code generated by the command above; DO NOT EDIT. //go:build arm && openbsd +// +build arm,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go index 7be575a7..cbdda1a4 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go @@ -2,6 +2,7 @@ // Code generated by the command above; DO NOT EDIT. //go:build arm64 && openbsd +// +build arm64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go index d6e3174c..f55eae1a 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go @@ -2,6 +2,7 @@ // Code generated by the command above; DO NOT EDIT. //go:build mips64 && openbsd +// +build mips64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_ppc64.go index ee97157d..e4405447 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_ppc64.go @@ -2,6 +2,7 @@ // Code generated by the command above; DO NOT EDIT. //go:build ppc64 && openbsd +// +build ppc64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_riscv64.go index 35c3b91d..a0db82fc 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_riscv64.go @@ -2,6 +2,7 @@ // Code generated by the command above; DO NOT EDIT. 
//go:build riscv64 && openbsd +// +build riscv64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go index 5edda768..f8298ff9 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && darwin +// +build amd64,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go index 0dc9e8b4..5eb433bb 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && darwin +// +build arm64,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go index 308ddf3a..703675c0 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && dragonfly +// +build amd64,dragonfly package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go index 418664e3..4e0d9610 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && freebsd +// +build 386,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go index 34d0b86d..01636b83 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && freebsd +// +build amd64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go index b71cf45e..ad99bc10 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && freebsd +// +build arm,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go index e32df1c1..89dcc427 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && freebsd +// +build arm64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go index 15ad6111..ee37aaa0 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build riscv64 && freebsd +// +build riscv64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index 53aef5dc..9862853d 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && linux +// +build 386,linux package unix @@ -447,14 +448,4 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 - SYS_FCHMODAT2 = 452 - SYS_MAP_SHADOW_STACK = 453 - SYS_FUTEX_WAKE = 454 - SYS_FUTEX_WAIT = 455 - SYS_FUTEX_REQUEUE = 456 - SYS_STATMOUNT = 457 - SYS_LISTMOUNT = 458 - SYS_LSM_GET_SELF_ATTR = 459 - SYS_LSM_SET_SELF_ATTR = 460 - SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index 71d52476..8901f0f4 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && linux +// +build amd64,linux package unix @@ -369,14 +370,4 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 - SYS_FCHMODAT2 = 452 - SYS_MAP_SHADOW_STACK = 453 - SYS_FUTEX_WAKE = 454 - SYS_FUTEX_WAIT = 455 - SYS_FUTEX_REQUEUE = 456 - SYS_STATMOUNT = 457 - SYS_LISTMOUNT = 458 - SYS_LSM_GET_SELF_ATTR = 459 - SYS_LSM_SET_SELF_ATTR = 460 - SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index c7477061..6902c37e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && linux +// +build arm,linux package unix @@ -411,14 +412,4 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 - SYS_FCHMODAT2 = 452 - SYS_MAP_SHADOW_STACK = 453 - SYS_FUTEX_WAKE = 454 - SYS_FUTEX_WAIT = 455 - SYS_FUTEX_REQUEUE = 456 - SYS_STATMOUNT = 457 - SYS_LISTMOUNT = 458 - SYS_LSM_GET_SELF_ATTR = 459 - SYS_LSM_SET_SELF_ATTR = 460 - SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index f96e214f..a6d3dff8 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && linux +// +build arm64,linux package unix @@ -314,14 +315,4 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 - SYS_FCHMODAT2 = 452 - SYS_MAP_SHADOW_STACK = 453 - SYS_FUTEX_WAKE = 454 - SYS_FUTEX_WAIT = 455 - SYS_FUTEX_REQUEUE = 456 - SYS_STATMOUNT = 457 - SYS_LISTMOUNT = 458 - SYS_LSM_GET_SELF_ATTR = 459 - SYS_LSM_SET_SELF_ATTR = 460 - SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index 28425346..b18f3f71 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build loong64 && linux +// +build loong64,linux package unix @@ -308,14 +309,4 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 - SYS_FCHMODAT2 = 452 - SYS_MAP_SHADOW_STACK = 453 - SYS_FUTEX_WAKE = 454 - SYS_FUTEX_WAIT = 455 - SYS_FUTEX_REQUEUE = 456 - SYS_STATMOUNT = 457 - SYS_LISTMOUNT = 458 - SYS_LSM_GET_SELF_ATTR = 459 - SYS_LSM_SET_SELF_ATTR = 460 - SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index d0953018..0302e5e3 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips && linux +// +build mips,linux package unix @@ -431,14 +432,4 @@ const ( SYS_FUTEX_WAITV = 4449 SYS_SET_MEMPOLICY_HOME_NODE = 4450 SYS_CACHESTAT = 4451 - SYS_FCHMODAT2 = 4452 - SYS_MAP_SHADOW_STACK = 4453 - SYS_FUTEX_WAKE = 4454 - SYS_FUTEX_WAIT = 4455 - SYS_FUTEX_REQUEUE = 4456 - SYS_STATMOUNT = 4457 - SYS_LISTMOUNT = 4458 - SYS_LSM_GET_SELF_ATTR = 4459 - SYS_LSM_SET_SELF_ATTR = 4460 - SYS_LSM_LIST_MODULES = 4461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 295c7f4b..6693ba4a 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64 && linux +// +build mips64,linux package unix @@ -361,14 +362,4 @@ const ( SYS_FUTEX_WAITV = 5449 SYS_SET_MEMPOLICY_HOME_NODE = 5450 SYS_CACHESTAT = 5451 - SYS_FCHMODAT2 = 5452 - SYS_MAP_SHADOW_STACK = 5453 - SYS_FUTEX_WAKE = 5454 - SYS_FUTEX_WAIT = 5455 - SYS_FUTEX_REQUEUE = 5456 - SYS_STATMOUNT = 5457 - SYS_LISTMOUNT = 5458 - SYS_LSM_GET_SELF_ATTR = 5459 - SYS_LSM_SET_SELF_ATTR = 5460 - SYS_LSM_LIST_MODULES = 5461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index d1a9eaca..fd93f498 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64le && linux +// +build mips64le,linux package unix @@ -361,14 +362,4 @@ const ( SYS_FUTEX_WAITV = 5449 SYS_SET_MEMPOLICY_HOME_NODE = 5450 SYS_CACHESTAT = 5451 - SYS_FCHMODAT2 = 5452 - SYS_MAP_SHADOW_STACK = 5453 - SYS_FUTEX_WAKE = 5454 - SYS_FUTEX_WAIT = 5455 - SYS_FUTEX_REQUEUE = 5456 - SYS_STATMOUNT = 5457 - SYS_LISTMOUNT = 5458 - SYS_LSM_GET_SELF_ATTR = 5459 - SYS_LSM_SET_SELF_ATTR = 5460 - SYS_LSM_LIST_MODULES = 5461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index bec157c3..760ddcad 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build mipsle && linux +// +build mipsle,linux package unix @@ -431,14 +432,4 @@ const ( SYS_FUTEX_WAITV = 4449 SYS_SET_MEMPOLICY_HOME_NODE = 4450 SYS_CACHESTAT = 4451 - SYS_FCHMODAT2 = 4452 - SYS_MAP_SHADOW_STACK = 4453 - SYS_FUTEX_WAKE = 4454 - SYS_FUTEX_WAIT = 4455 - SYS_FUTEX_REQUEUE = 4456 - SYS_STATMOUNT = 4457 - SYS_LISTMOUNT = 4458 - SYS_LSM_GET_SELF_ATTR = 4459 - SYS_LSM_SET_SELF_ATTR = 4460 - SYS_LSM_LIST_MODULES = 4461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index 7ee7bdc4..cff2b255 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc && linux +// +build ppc,linux package unix @@ -438,14 +439,4 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 - SYS_FCHMODAT2 = 452 - SYS_MAP_SHADOW_STACK = 453 - SYS_FUTEX_WAKE = 454 - SYS_FUTEX_WAIT = 455 - SYS_FUTEX_REQUEUE = 456 - SYS_STATMOUNT = 457 - SYS_LISTMOUNT = 458 - SYS_LSM_GET_SELF_ATTR = 459 - SYS_LSM_SET_SELF_ATTR = 460 - SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index fad1f25b..a4b2405d 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && linux +// +build ppc64,linux package unix @@ -410,14 +411,4 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 - SYS_FCHMODAT2 = 452 - SYS_MAP_SHADOW_STACK = 453 - SYS_FUTEX_WAKE = 454 - SYS_FUTEX_WAIT = 455 - SYS_FUTEX_REQUEUE = 456 - SYS_STATMOUNT = 457 - SYS_LISTMOUNT = 458 - SYS_LSM_GET_SELF_ATTR = 459 - SYS_LSM_SET_SELF_ATTR = 460 - SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index 7d3e1635..aca54b4e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64le && linux +// +build ppc64le,linux package unix @@ -410,14 +411,4 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 - SYS_FCHMODAT2 = 452 - SYS_MAP_SHADOW_STACK = 453 - SYS_FUTEX_WAKE = 454 - SYS_FUTEX_WAIT = 455 - SYS_FUTEX_REQUEUE = 456 - SYS_STATMOUNT = 457 - SYS_LISTMOUNT = 458 - SYS_LSM_GET_SELF_ATTR = 459 - SYS_LSM_SET_SELF_ATTR = 460 - SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 0ed53ad9..9d1738d6 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build riscv64 && linux +// +build riscv64,linux package unix @@ -315,14 +316,4 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 - SYS_FCHMODAT2 = 452 - SYS_MAP_SHADOW_STACK = 453 - SYS_FUTEX_WAKE = 454 - SYS_FUTEX_WAIT = 455 - SYS_FUTEX_REQUEUE = 456 - SYS_STATMOUNT = 457 - SYS_LISTMOUNT = 458 - SYS_LSM_GET_SELF_ATTR = 459 - SYS_LSM_SET_SELF_ATTR = 460 - SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index 2fba04ad..022878dc 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build s390x && linux +// +build s390x,linux package unix @@ -376,14 +377,4 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 - SYS_FCHMODAT2 = 452 - SYS_MAP_SHADOW_STACK = 453 - SYS_FUTEX_WAKE = 454 - SYS_FUTEX_WAIT = 455 - SYS_FUTEX_REQUEUE = 456 - SYS_STATMOUNT = 457 - SYS_LISTMOUNT = 458 - SYS_LSM_GET_SELF_ATTR = 459 - SYS_LSM_SET_SELF_ATTR = 460 - SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 621d00d7..4100a761 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build sparc64 && linux +// +build sparc64,linux package unix @@ -389,14 +390,4 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 - SYS_FCHMODAT2 = 452 - SYS_MAP_SHADOW_STACK = 453 - SYS_FUTEX_WAKE = 454 - SYS_FUTEX_WAIT = 455 - SYS_FUTEX_REQUEUE = 456 - SYS_STATMOUNT = 457 - SYS_LISTMOUNT = 458 - SYS_LSM_GET_SELF_ATTR = 459 - SYS_LSM_SET_SELF_ATTR = 460 - SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go index b2aa8cd4..3a6699eb 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && netbsd +// +build 386,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go index 524a1b1c..5677cd4f 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && netbsd +// +build amd64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go index d59b943a..e784cb6d 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build arm && netbsd +// +build arm,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go index 31e771d5..bd4952ef 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go @@ -2,6 +2,7 @@ // Code generated by the command above; DO NOT EDIT. //go:build arm64 && netbsd +// +build arm64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go index 9fd77c6c..59773381 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && openbsd +// +build 386,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go index af10af28..16af2918 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && openbsd +// +build amd64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go index cc2028af..f59b18a9 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && openbsd +// +build arm,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go index c06dd441..721ef591 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && openbsd +// +build arm64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go index 9ddbf3e0..01c43a01 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64 && openbsd +// +build mips64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_ppc64.go index 19a6ee41..f258cfa2 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_ppc64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && openbsd +// +build ppc64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_riscv64.go index 05192a78..07919e0e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_riscv64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build riscv64 && openbsd +// +build riscv64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go index 5e8c263c..073daad4 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go @@ -1,2852 +1,2670 @@ -// go run mksyscall_zos_s390x.go -o_sysnum zsysnum_zos_s390x.go -o_syscall zsyscall_zos_s390x.go -i_syscall syscall_zos_s390x.go -o_asm zsymaddr_zos_s390x.s -// Code generated by the command above; see README.md. DO NOT EDIT. +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. //go:build zos && s390x +// +build zos,s390x package unix -const ( - SYS_LOG = 0x17 // 23 - SYS_COSH = 0x18 // 24 - SYS_TANH = 0x19 // 25 - SYS_EXP = 0x1A // 26 - SYS_MODF = 0x1B // 27 - SYS_LOG10 = 0x1C // 28 - SYS_FREXP = 0x1D // 29 - SYS_LDEXP = 0x1E // 30 - SYS_CEIL = 0x1F // 31 - SYS_POW = 0x20 // 32 - SYS_SQRT = 0x21 // 33 - SYS_FLOOR = 0x22 // 34 - SYS_J1 = 0x23 // 35 - SYS_FABS = 0x24 // 36 - SYS_FMOD = 0x25 // 37 - SYS_J0 = 0x26 // 38 - SYS_YN = 0x27 // 39 - SYS_JN = 0x28 // 40 - SYS_Y0 = 0x29 // 41 - SYS_Y1 = 0x2A // 42 - SYS_HYPOT = 0x2B // 43 - SYS_ERF = 0x2C // 44 - SYS_ERFC = 0x2D // 45 - SYS_GAMMA = 0x2E // 46 - SYS_ISALPHA = 0x30 // 48 - SYS_ISALNUM = 0x31 // 49 - SYS_ISLOWER = 0x32 // 50 - SYS_ISCNTRL = 0x33 // 51 - SYS_ISDIGIT = 0x34 // 52 - SYS_ISGRAPH = 0x35 // 53 - SYS_ISUPPER = 0x36 // 54 - SYS_ISPRINT = 0x37 // 55 - SYS_ISPUNCT = 0x38 // 56 - SYS_ISSPACE = 0x39 // 57 - SYS_SETLOCAL = 0x3A // 58 - SYS_SETLOCALE = 0x3A // 58 - SYS_ISXDIGIT = 0x3B // 59 - SYS_TOLOWER = 0x3C // 60 - SYS_TOUPPER = 0x3D // 61 - SYS_ASIN = 0x3E // 62 - SYS_SIN = 0x3F // 63 - SYS_COS = 0x40 // 64 - SYS_TAN = 0x41 // 65 - SYS_SINH = 0x42 // 66 - SYS_ACOS = 0x43 // 67 - SYS_ATAN = 0x44 // 68 - SYS_ATAN2 = 0x45 // 69 - SYS_FTELL = 0x46 // 70 - SYS_FGETPOS = 0x47 // 71 - SYS_FSEEK = 0x48 // 72 - SYS_FSETPOS = 0x49 // 73 - SYS_FERROR = 0x4A // 74 - SYS_REWIND = 0x4B // 75 - SYS_CLEARERR = 0x4C // 76 - SYS_FEOF = 0x4D // 77 - SYS_ATOL = 0x4E // 78 - SYS_PERROR = 0x4F // 79 - SYS_ATOF = 0x50 // 80 - SYS_ATOI = 0x51 // 81 - SYS_RAND = 0x52 // 82 - SYS_STRTOD = 0x53 // 83 - SYS_STRTOL = 0x54 // 84 - SYS_STRTOUL = 0x55 // 85 - SYS_MALLOC = 0x56 // 86 - SYS_SRAND = 0x57 // 87 - SYS_CALLOC = 0x58 // 88 - SYS_FREE = 0x59 // 89 - SYS_EXIT = 0x5A // 90 - SYS_REALLOC = 0x5B // 91 - SYS_ABORT = 0x5C // 92 - SYS___ABORT = 0x5C // 92 - SYS_ATEXIT = 0x5D // 93 - SYS_RAISE = 0x5E // 94 - SYS_SETJMP = 0x5F // 95 - SYS_LONGJMP = 0x60 // 96 - SYS_SIGNAL = 0x61 // 97 - SYS_TMPNAM = 0x62 // 98 - SYS_REMOVE = 0x63 // 99 - SYS_RENAME = 0x64 // 100 - SYS_TMPFILE = 0x65 // 101 - SYS_FREOPEN = 0x66 // 102 - SYS_FCLOSE = 0x67 // 103 - SYS_FFLUSH = 0x68 // 104 - SYS_FOPEN = 0x69 // 105 - SYS_FSCANF = 0x6A // 106 - SYS_SETBUF = 0x6B // 107 - SYS_SETVBUF = 0x6C // 108 - SYS_FPRINTF = 0x6D // 109 - SYS_SSCANF = 0x6E // 110 - SYS_PRINTF = 0x6F // 111 - SYS_SCANF = 0x70 // 112 - SYS_SPRINTF = 0x71 // 113 - SYS_FGETC = 0x72 // 114 - SYS_VFPRINTF = 0x73 // 115 - SYS_VPRINTF = 0x74 // 116 - SYS_VSPRINTF = 0x75 // 117 - SYS_GETC = 0x76 // 118 - SYS_FGETS = 0x77 // 119 - SYS_FPUTC = 0x78 // 120 - SYS_FPUTS = 0x79 // 121 - SYS_PUTCHAR = 0x7A // 122 - SYS_GETCHAR = 0x7B // 123 - SYS_GETS = 0x7C // 124 - SYS_PUTC = 0x7D // 125 - SYS_FWRITE = 0x7E // 126 - SYS_PUTS = 0x7F // 127 - SYS_UNGETC = 0x80 // 128 - 
SYS_FREAD = 0x81 // 129 - SYS_WCSTOMBS = 0x82 // 130 - SYS_MBTOWC = 0x83 // 131 - SYS_WCTOMB = 0x84 // 132 - SYS_MBSTOWCS = 0x85 // 133 - SYS_WCSCPY = 0x86 // 134 - SYS_WCSCAT = 0x87 // 135 - SYS_WCSCHR = 0x88 // 136 - SYS_WCSCMP = 0x89 // 137 - SYS_WCSNCMP = 0x8A // 138 - SYS_WCSCSPN = 0x8B // 139 - SYS_WCSLEN = 0x8C // 140 - SYS_WCSNCAT = 0x8D // 141 - SYS_WCSSPN = 0x8E // 142 - SYS_WCSNCPY = 0x8F // 143 - SYS_ABS = 0x90 // 144 - SYS_DIV = 0x91 // 145 - SYS_LABS = 0x92 // 146 - SYS_STRNCPY = 0x93 // 147 - SYS_MEMCPY = 0x94 // 148 - SYS_MEMMOVE = 0x95 // 149 - SYS_STRCPY = 0x96 // 150 - SYS_STRCMP = 0x97 // 151 - SYS_STRCAT = 0x98 // 152 - SYS_STRNCAT = 0x99 // 153 - SYS_MEMCMP = 0x9A // 154 - SYS_MEMCHR = 0x9B // 155 - SYS_STRCOLL = 0x9C // 156 - SYS_STRNCMP = 0x9D // 157 - SYS_STRXFRM = 0x9E // 158 - SYS_STRRCHR = 0x9F // 159 - SYS_STRCHR = 0xA0 // 160 - SYS_STRCSPN = 0xA1 // 161 - SYS_STRPBRK = 0xA2 // 162 - SYS_MEMSET = 0xA3 // 163 - SYS_STRSPN = 0xA4 // 164 - SYS_STRSTR = 0xA5 // 165 - SYS_STRTOK = 0xA6 // 166 - SYS_DIFFTIME = 0xA7 // 167 - SYS_STRERROR = 0xA8 // 168 - SYS_STRLEN = 0xA9 // 169 - SYS_CLOCK = 0xAA // 170 - SYS_CTIME = 0xAB // 171 - SYS_MKTIME = 0xAC // 172 - SYS_TIME = 0xAD // 173 - SYS_ASCTIME = 0xAE // 174 - SYS_MBLEN = 0xAF // 175 - SYS_GMTIME = 0xB0 // 176 - SYS_LOCALTIM = 0xB1 // 177 - SYS_LOCALTIME = 0xB1 // 177 - SYS_STRFTIME = 0xB2 // 178 - SYS___GETCB = 0xB4 // 180 - SYS_FUPDATE = 0xB5 // 181 - SYS___FUPDT = 0xB5 // 181 - SYS_CLRMEMF = 0xBD // 189 - SYS___CLRMF = 0xBD // 189 - SYS_FETCHEP = 0xBF // 191 - SYS___FTCHEP = 0xBF // 191 - SYS_FLDATA = 0xC1 // 193 - SYS___FLDATA = 0xC1 // 193 - SYS_DYNFREE = 0xC2 // 194 - SYS___DYNFRE = 0xC2 // 194 - SYS_DYNALLOC = 0xC3 // 195 - SYS___DYNALL = 0xC3 // 195 - SYS___CDUMP = 0xC4 // 196 - SYS_CSNAP = 0xC5 // 197 - SYS___CSNAP = 0xC5 // 197 - SYS_CTRACE = 0xC6 // 198 - SYS___CTRACE = 0xC6 // 198 - SYS___CTEST = 0xC7 // 199 - SYS_SETENV = 0xC8 // 200 - SYS___SETENV = 0xC8 // 200 - SYS_CLEARENV = 0xC9 // 201 - SYS___CLRENV = 0xC9 // 201 - SYS___REGCOMP_STD = 0xEA // 234 - SYS_NL_LANGINFO = 0xFC // 252 - SYS_GETSYNTX = 0xFD // 253 - SYS_ISBLANK = 0xFE // 254 - SYS___ISBLNK = 0xFE // 254 - SYS_ISWALNUM = 0xFF // 255 - SYS_ISWALPHA = 0x100 // 256 - SYS_ISWBLANK = 0x101 // 257 - SYS___ISWBLK = 0x101 // 257 - SYS_ISWCNTRL = 0x102 // 258 - SYS_ISWDIGIT = 0x103 // 259 - SYS_ISWGRAPH = 0x104 // 260 - SYS_ISWLOWER = 0x105 // 261 - SYS_ISWPRINT = 0x106 // 262 - SYS_ISWPUNCT = 0x107 // 263 - SYS_ISWSPACE = 0x108 // 264 - SYS_ISWUPPER = 0x109 // 265 - SYS_ISWXDIGI = 0x10A // 266 - SYS_ISWXDIGIT = 0x10A // 266 - SYS_WCTYPE = 0x10B // 267 - SYS_ISWCTYPE = 0x10C // 268 - SYS_TOWLOWER = 0x10D // 269 - SYS_TOWUPPER = 0x10E // 270 - SYS_MBSINIT = 0x10F // 271 - SYS_WCTOB = 0x110 // 272 - SYS_MBRLEN = 0x111 // 273 - SYS_MBRTOWC = 0x112 // 274 - SYS_MBSRTOWC = 0x113 // 275 - SYS_MBSRTOWCS = 0x113 // 275 - SYS_WCRTOMB = 0x114 // 276 - SYS_WCSRTOMB = 0x115 // 277 - SYS_WCSRTOMBS = 0x115 // 277 - SYS___CSID = 0x116 // 278 - SYS___WCSID = 0x117 // 279 - SYS_STRPTIME = 0x118 // 280 - SYS___STRPTM = 0x118 // 280 - SYS_STRFMON = 0x119 // 281 - SYS___RPMTCH = 0x11A // 282 - SYS_WCSSTR = 0x11B // 283 - SYS_WCSTOK = 0x12C // 300 - SYS_WCSTOL = 0x12D // 301 - SYS_WCSTOD = 0x12E // 302 - SYS_WCSTOUL = 0x12F // 303 - SYS_WCSCOLL = 0x130 // 304 - SYS_WCSXFRM = 0x131 // 305 - SYS_WCSWIDTH = 0x132 // 306 - SYS_WCWIDTH = 0x133 // 307 - SYS_WCSFTIME = 0x134 // 308 - SYS_SWPRINTF = 0x135 // 309 - SYS_VSWPRINT = 0x136 // 310 - SYS_VSWPRINTF = 0x136 // 310 - 
SYS_SWSCANF = 0x137 // 311 - SYS_REGCOMP = 0x138 // 312 - SYS_REGEXEC = 0x139 // 313 - SYS_REGFREE = 0x13A // 314 - SYS_REGERROR = 0x13B // 315 - SYS_FGETWC = 0x13C // 316 - SYS_FGETWS = 0x13D // 317 - SYS_FPUTWC = 0x13E // 318 - SYS_FPUTWS = 0x13F // 319 - SYS_GETWC = 0x140 // 320 - SYS_GETWCHAR = 0x141 // 321 - SYS_PUTWC = 0x142 // 322 - SYS_PUTWCHAR = 0x143 // 323 - SYS_UNGETWC = 0x144 // 324 - SYS_ICONV_OPEN = 0x145 // 325 - SYS_ICONV = 0x146 // 326 - SYS_ICONV_CLOSE = 0x147 // 327 - SYS_ISMCCOLLEL = 0x14C // 332 - SYS_STRTOCOLL = 0x14D // 333 - SYS_COLLTOSTR = 0x14E // 334 - SYS_COLLEQUIV = 0x14F // 335 - SYS_COLLRANGE = 0x150 // 336 - SYS_CCLASS = 0x151 // 337 - SYS_COLLORDER = 0x152 // 338 - SYS___DEMANGLE = 0x154 // 340 - SYS_FDOPEN = 0x155 // 341 - SYS___ERRNO = 0x156 // 342 - SYS___ERRNO2 = 0x157 // 343 - SYS___TERROR = 0x158 // 344 - SYS_MAXCOLL = 0x169 // 361 - SYS_GETMCCOLL = 0x16A // 362 - SYS_GETWMCCOLL = 0x16B // 363 - SYS___ERR2AD = 0x16C // 364 - SYS_DLLQUERYFN = 0x16D // 365 - SYS_DLLQUERYVAR = 0x16E // 366 - SYS_DLLFREE = 0x16F // 367 - SYS_DLLLOAD = 0x170 // 368 - SYS__EXIT = 0x174 // 372 - SYS_ACCESS = 0x175 // 373 - SYS_ALARM = 0x176 // 374 - SYS_CFGETISPEED = 0x177 // 375 - SYS_CFGETOSPEED = 0x178 // 376 - SYS_CFSETISPEED = 0x179 // 377 - SYS_CFSETOSPEED = 0x17A // 378 - SYS_CHDIR = 0x17B // 379 - SYS_CHMOD = 0x17C // 380 - SYS_CHOWN = 0x17D // 381 - SYS_CLOSE = 0x17E // 382 - SYS_CLOSEDIR = 0x17F // 383 - SYS_CREAT = 0x180 // 384 - SYS_CTERMID = 0x181 // 385 - SYS_DUP = 0x182 // 386 - SYS_DUP2 = 0x183 // 387 - SYS_EXECL = 0x184 // 388 - SYS_EXECLE = 0x185 // 389 - SYS_EXECLP = 0x186 // 390 - SYS_EXECV = 0x187 // 391 - SYS_EXECVE = 0x188 // 392 - SYS_EXECVP = 0x189 // 393 - SYS_FCHMOD = 0x18A // 394 - SYS_FCHOWN = 0x18B // 395 - SYS_FCNTL = 0x18C // 396 - SYS_FILENO = 0x18D // 397 - SYS_FORK = 0x18E // 398 - SYS_FPATHCONF = 0x18F // 399 - SYS_FSTAT = 0x190 // 400 - SYS_FSYNC = 0x191 // 401 - SYS_FTRUNCATE = 0x192 // 402 - SYS_GETCWD = 0x193 // 403 - SYS_GETEGID = 0x194 // 404 - SYS_GETEUID = 0x195 // 405 - SYS_GETGID = 0x196 // 406 - SYS_GETGRGID = 0x197 // 407 - SYS_GETGRNAM = 0x198 // 408 - SYS_GETGROUPS = 0x199 // 409 - SYS_GETLOGIN = 0x19A // 410 - SYS_W_GETMNTENT = 0x19B // 411 - SYS_GETPGRP = 0x19C // 412 - SYS_GETPID = 0x19D // 413 - SYS_GETPPID = 0x19E // 414 - SYS_GETPWNAM = 0x19F // 415 - SYS_GETPWUID = 0x1A0 // 416 - SYS_GETUID = 0x1A1 // 417 - SYS_W_IOCTL = 0x1A2 // 418 - SYS_ISATTY = 0x1A3 // 419 - SYS_KILL = 0x1A4 // 420 - SYS_LINK = 0x1A5 // 421 - SYS_LSEEK = 0x1A6 // 422 - SYS_LSTAT = 0x1A7 // 423 - SYS_MKDIR = 0x1A8 // 424 - SYS_MKFIFO = 0x1A9 // 425 - SYS_MKNOD = 0x1AA // 426 - SYS_MOUNT = 0x1AB // 427 - SYS_OPEN = 0x1AC // 428 - SYS_OPENDIR = 0x1AD // 429 - SYS_PATHCONF = 0x1AE // 430 - SYS_PAUSE = 0x1AF // 431 - SYS_PIPE = 0x1B0 // 432 - SYS_W_GETPSENT = 0x1B1 // 433 - SYS_READ = 0x1B2 // 434 - SYS_READDIR = 0x1B3 // 435 - SYS_READLINK = 0x1B4 // 436 - SYS_REWINDDIR = 0x1B5 // 437 - SYS_RMDIR = 0x1B6 // 438 - SYS_SETEGID = 0x1B7 // 439 - SYS_SETEUID = 0x1B8 // 440 - SYS_SETGID = 0x1B9 // 441 - SYS_SETPGID = 0x1BA // 442 - SYS_SETSID = 0x1BB // 443 - SYS_SETUID = 0x1BC // 444 - SYS_SIGACTION = 0x1BD // 445 - SYS_SIGADDSET = 0x1BE // 446 - SYS_SIGDELSET = 0x1BF // 447 - SYS_SIGEMPTYSET = 0x1C0 // 448 - SYS_SIGFILLSET = 0x1C1 // 449 - SYS_SIGISMEMBER = 0x1C2 // 450 - SYS_SIGLONGJMP = 0x1C3 // 451 - SYS_SIGPENDING = 0x1C4 // 452 - SYS_SIGPROCMASK = 0x1C5 // 453 - SYS_SIGSETJMP = 0x1C6 // 454 - SYS_SIGSUSPEND = 0x1C7 // 455 - SYS_SLEEP = 0x1C8 // 456 
- SYS_STAT = 0x1C9 // 457 - SYS_W_STATFS = 0x1CA // 458 - SYS_SYMLINK = 0x1CB // 459 - SYS_SYSCONF = 0x1CC // 460 - SYS_TCDRAIN = 0x1CD // 461 - SYS_TCFLOW = 0x1CE // 462 - SYS_TCFLUSH = 0x1CF // 463 - SYS_TCGETATTR = 0x1D0 // 464 - SYS_TCGETPGRP = 0x1D1 // 465 - SYS_TCSENDBREAK = 0x1D2 // 466 - SYS_TCSETATTR = 0x1D3 // 467 - SYS_TCSETPGRP = 0x1D4 // 468 - SYS_TIMES = 0x1D5 // 469 - SYS_TTYNAME = 0x1D6 // 470 - SYS_TZSET = 0x1D7 // 471 - SYS_UMASK = 0x1D8 // 472 - SYS_UMOUNT = 0x1D9 // 473 - SYS_UNAME = 0x1DA // 474 - SYS_UNLINK = 0x1DB // 475 - SYS_UTIME = 0x1DC // 476 - SYS_WAIT = 0x1DD // 477 - SYS_WAITPID = 0x1DE // 478 - SYS_WRITE = 0x1DF // 479 - SYS_CHAUDIT = 0x1E0 // 480 - SYS_FCHAUDIT = 0x1E1 // 481 - SYS_GETGROUPSBYNAME = 0x1E2 // 482 - SYS_SIGWAIT = 0x1E3 // 483 - SYS_PTHREAD_EXIT = 0x1E4 // 484 - SYS_PTHREAD_KILL = 0x1E5 // 485 - SYS_PTHREAD_ATTR_INIT = 0x1E6 // 486 - SYS_PTHREAD_ATTR_DESTROY = 0x1E7 // 487 - SYS_PTHREAD_ATTR_SETSTACKSIZE = 0x1E8 // 488 - SYS_PTHREAD_ATTR_GETSTACKSIZE = 0x1E9 // 489 - SYS_PTHREAD_ATTR_SETDETACHSTATE = 0x1EA // 490 - SYS_PTHREAD_ATTR_GETDETACHSTATE = 0x1EB // 491 - SYS_PTHREAD_ATTR_SETWEIGHT_NP = 0x1EC // 492 - SYS_PTHREAD_ATTR_GETWEIGHT_NP = 0x1ED // 493 - SYS_PTHREAD_CANCEL = 0x1EE // 494 - SYS_PTHREAD_CLEANUP_PUSH = 0x1EF // 495 - SYS_PTHREAD_CLEANUP_POP = 0x1F0 // 496 - SYS_PTHREAD_CONDATTR_INIT = 0x1F1 // 497 - SYS_PTHREAD_CONDATTR_DESTROY = 0x1F2 // 498 - SYS_PTHREAD_COND_INIT = 0x1F3 // 499 - SYS_PTHREAD_COND_DESTROY = 0x1F4 // 500 - SYS_PTHREAD_COND_SIGNAL = 0x1F5 // 501 - SYS_PTHREAD_COND_BROADCAST = 0x1F6 // 502 - SYS_PTHREAD_COND_WAIT = 0x1F7 // 503 - SYS_PTHREAD_COND_TIMEDWAIT = 0x1F8 // 504 - SYS_PTHREAD_CREATE = 0x1F9 // 505 - SYS_PTHREAD_DETACH = 0x1FA // 506 - SYS_PTHREAD_EQUAL = 0x1FB // 507 - SYS_PTHREAD_GETSPECIFIC = 0x1FC // 508 - SYS_PTHREAD_JOIN = 0x1FD // 509 - SYS_PTHREAD_KEY_CREATE = 0x1FE // 510 - SYS_PTHREAD_MUTEXATTR_INIT = 0x1FF // 511 - SYS_PTHREAD_MUTEXATTR_DESTROY = 0x200 // 512 - SYS_PTHREAD_MUTEXATTR_SETKIND_NP = 0x201 // 513 - SYS_PTHREAD_MUTEXATTR_GETKIND_NP = 0x202 // 514 - SYS_PTHREAD_MUTEX_INIT = 0x203 // 515 - SYS_PTHREAD_MUTEX_DESTROY = 0x204 // 516 - SYS_PTHREAD_MUTEX_LOCK = 0x205 // 517 - SYS_PTHREAD_MUTEX_TRYLOCK = 0x206 // 518 - SYS_PTHREAD_MUTEX_UNLOCK = 0x207 // 519 - SYS_PTHREAD_ONCE = 0x209 // 521 - SYS_PTHREAD_SELF = 0x20A // 522 - SYS_PTHREAD_SETINTR = 0x20B // 523 - SYS_PTHREAD_SETINTRTYPE = 0x20C // 524 - SYS_PTHREAD_SETSPECIFIC = 0x20D // 525 - SYS_PTHREAD_TESTINTR = 0x20E // 526 - SYS_PTHREAD_YIELD = 0x20F // 527 - SYS_TW_OPEN = 0x210 // 528 - SYS_TW_FCNTL = 0x211 // 529 - SYS_PTHREAD_JOIN_D4_NP = 0x212 // 530 - SYS_PTHREAD_CONDATTR_SETKIND_NP = 0x213 // 531 - SYS_PTHREAD_CONDATTR_GETKIND_NP = 0x214 // 532 - SYS_EXTLINK_NP = 0x215 // 533 - SYS___PASSWD = 0x216 // 534 - SYS_SETGROUPS = 0x217 // 535 - SYS_INITGROUPS = 0x218 // 536 - SYS_WCSPBRK = 0x23F // 575 - SYS_WCSRCHR = 0x240 // 576 - SYS_SVC99 = 0x241 // 577 - SYS___SVC99 = 0x241 // 577 - SYS_WCSWCS = 0x242 // 578 - SYS_LOCALECO = 0x243 // 579 - SYS_LOCALECONV = 0x243 // 579 - SYS___LIBREL = 0x244 // 580 - SYS_RELEASE = 0x245 // 581 - SYS___RLSE = 0x245 // 581 - SYS_FLOCATE = 0x246 // 582 - SYS___FLOCT = 0x246 // 582 - SYS_FDELREC = 0x247 // 583 - SYS___FDLREC = 0x247 // 583 - SYS_FETCH = 0x248 // 584 - SYS___FETCH = 0x248 // 584 - SYS_QSORT = 0x249 // 585 - SYS_GETENV = 0x24A // 586 - SYS_SYSTEM = 0x24B // 587 - SYS_BSEARCH = 0x24C // 588 - SYS_LDIV = 0x24D // 589 - SYS___THROW = 0x25E // 606 - SYS___RETHROW = 0x25F // 607 - 
SYS___CLEANUPCATCH = 0x260 // 608 - SYS___CATCHMATCH = 0x261 // 609 - SYS___CLEAN2UPCATCH = 0x262 // 610 - SYS_PUTENV = 0x26A // 618 - SYS___GETENV = 0x26F // 623 - SYS_GETPRIORITY = 0x270 // 624 - SYS_NICE = 0x271 // 625 - SYS_SETPRIORITY = 0x272 // 626 - SYS_GETITIMER = 0x273 // 627 - SYS_SETITIMER = 0x274 // 628 - SYS_MSGCTL = 0x275 // 629 - SYS_MSGGET = 0x276 // 630 - SYS_MSGRCV = 0x277 // 631 - SYS_MSGSND = 0x278 // 632 - SYS_MSGXRCV = 0x279 // 633 - SYS___MSGXR = 0x279 // 633 - SYS_SEMCTL = 0x27A // 634 - SYS_SEMGET = 0x27B // 635 - SYS_SEMOP = 0x27C // 636 - SYS_SHMAT = 0x27D // 637 - SYS_SHMCTL = 0x27E // 638 - SYS_SHMDT = 0x27F // 639 - SYS_SHMGET = 0x280 // 640 - SYS___GETIPC = 0x281 // 641 - SYS_SETGRENT = 0x282 // 642 - SYS_GETGRENT = 0x283 // 643 - SYS_ENDGRENT = 0x284 // 644 - SYS_SETPWENT = 0x285 // 645 - SYS_GETPWENT = 0x286 // 646 - SYS_ENDPWENT = 0x287 // 647 - SYS_BSD_SIGNAL = 0x288 // 648 - SYS_KILLPG = 0x289 // 649 - SYS_SIGALTSTACK = 0x28A // 650 - SYS_SIGHOLD = 0x28B // 651 - SYS_SIGIGNORE = 0x28C // 652 - SYS_SIGINTERRUPT = 0x28D // 653 - SYS_SIGPAUSE = 0x28E // 654 - SYS_SIGRELSE = 0x28F // 655 - SYS_SIGSET = 0x290 // 656 - SYS_SIGSTACK = 0x291 // 657 - SYS_GETRLIMIT = 0x292 // 658 - SYS_SETRLIMIT = 0x293 // 659 - SYS_GETRUSAGE = 0x294 // 660 - SYS_MMAP = 0x295 // 661 - SYS_MPROTECT = 0x296 // 662 - SYS_MSYNC = 0x297 // 663 - SYS_MUNMAP = 0x298 // 664 - SYS_CONFSTR = 0x299 // 665 - SYS_GETOPT = 0x29A // 666 - SYS_LCHOWN = 0x29B // 667 - SYS_TRUNCATE = 0x29C // 668 - SYS_GETSUBOPT = 0x29D // 669 - SYS_SETPGRP = 0x29E // 670 - SYS___GDERR = 0x29F // 671 - SYS___TZONE = 0x2A0 // 672 - SYS___DLGHT = 0x2A1 // 673 - SYS___OPARGF = 0x2A2 // 674 - SYS___OPOPTF = 0x2A3 // 675 - SYS___OPINDF = 0x2A4 // 676 - SYS___OPERRF = 0x2A5 // 677 - SYS_GETDATE = 0x2A6 // 678 - SYS_WAIT3 = 0x2A7 // 679 - SYS_WAITID = 0x2A8 // 680 - SYS___CATTRM = 0x2A9 // 681 - SYS___GDTRM = 0x2AA // 682 - SYS___RNDTRM = 0x2AB // 683 - SYS_CRYPT = 0x2AC // 684 - SYS_ENCRYPT = 0x2AD // 685 - SYS_SETKEY = 0x2AE // 686 - SYS___CNVBLK = 0x2AF // 687 - SYS___CRYTRM = 0x2B0 // 688 - SYS___ECRTRM = 0x2B1 // 689 - SYS_DRAND48 = 0x2B2 // 690 - SYS_ERAND48 = 0x2B3 // 691 - SYS_FSTATVFS = 0x2B4 // 692 - SYS_STATVFS = 0x2B5 // 693 - SYS_CATCLOSE = 0x2B6 // 694 - SYS_CATGETS = 0x2B7 // 695 - SYS_CATOPEN = 0x2B8 // 696 - SYS_BCMP = 0x2B9 // 697 - SYS_BCOPY = 0x2BA // 698 - SYS_BZERO = 0x2BB // 699 - SYS_FFS = 0x2BC // 700 - SYS_INDEX = 0x2BD // 701 - SYS_RINDEX = 0x2BE // 702 - SYS_STRCASECMP = 0x2BF // 703 - SYS_STRDUP = 0x2C0 // 704 - SYS_STRNCASECMP = 0x2C1 // 705 - SYS_INITSTATE = 0x2C2 // 706 - SYS_SETSTATE = 0x2C3 // 707 - SYS_RANDOM = 0x2C4 // 708 - SYS_SRANDOM = 0x2C5 // 709 - SYS_HCREATE = 0x2C6 // 710 - SYS_HDESTROY = 0x2C7 // 711 - SYS_HSEARCH = 0x2C8 // 712 - SYS_LFIND = 0x2C9 // 713 - SYS_LSEARCH = 0x2CA // 714 - SYS_TDELETE = 0x2CB // 715 - SYS_TFIND = 0x2CC // 716 - SYS_TSEARCH = 0x2CD // 717 - SYS_TWALK = 0x2CE // 718 - SYS_INSQUE = 0x2CF // 719 - SYS_REMQUE = 0x2D0 // 720 - SYS_POPEN = 0x2D1 // 721 - SYS_PCLOSE = 0x2D2 // 722 - SYS_SWAB = 0x2D3 // 723 - SYS_MEMCCPY = 0x2D4 // 724 - SYS_GETPAGESIZE = 0x2D8 // 728 - SYS_FCHDIR = 0x2D9 // 729 - SYS___OCLCK = 0x2DA // 730 - SYS___ATOE = 0x2DB // 731 - SYS___ATOE_L = 0x2DC // 732 - SYS___ETOA = 0x2DD // 733 - SYS___ETOA_L = 0x2DE // 734 - SYS_SETUTXENT = 0x2DF // 735 - SYS_GETUTXENT = 0x2E0 // 736 - SYS_ENDUTXENT = 0x2E1 // 737 - SYS_GETUTXID = 0x2E2 // 738 - SYS_GETUTXLINE = 0x2E3 // 739 - SYS_PUTUTXLINE = 0x2E4 // 740 - SYS_FMTMSG = 0x2E5 // 741 - 
SYS_JRAND48 = 0x2E6 // 742 - SYS_LRAND48 = 0x2E7 // 743 - SYS_MRAND48 = 0x2E8 // 744 - SYS_NRAND48 = 0x2E9 // 745 - SYS_LCONG48 = 0x2EA // 746 - SYS_SRAND48 = 0x2EB // 747 - SYS_SEED48 = 0x2EC // 748 - SYS_ISASCII = 0x2ED // 749 - SYS_TOASCII = 0x2EE // 750 - SYS_A64L = 0x2EF // 751 - SYS_L64A = 0x2F0 // 752 - SYS_UALARM = 0x2F1 // 753 - SYS_USLEEP = 0x2F2 // 754 - SYS___UTXTRM = 0x2F3 // 755 - SYS___SRCTRM = 0x2F4 // 756 - SYS_FTIME = 0x2F5 // 757 - SYS_GETTIMEOFDAY = 0x2F6 // 758 - SYS_DBM_CLEARERR = 0x2F7 // 759 - SYS_DBM_CLOSE = 0x2F8 // 760 - SYS_DBM_DELETE = 0x2F9 // 761 - SYS_DBM_ERROR = 0x2FA // 762 - SYS_DBM_FETCH = 0x2FB // 763 - SYS_DBM_FIRSTKEY = 0x2FC // 764 - SYS_DBM_NEXTKEY = 0x2FD // 765 - SYS_DBM_OPEN = 0x2FE // 766 - SYS_DBM_STORE = 0x2FF // 767 - SYS___NDMTRM = 0x300 // 768 - SYS_FTOK = 0x301 // 769 - SYS_BASENAME = 0x302 // 770 - SYS_DIRNAME = 0x303 // 771 - SYS_GETDTABLESIZE = 0x304 // 772 - SYS_MKSTEMP = 0x305 // 773 - SYS_MKTEMP = 0x306 // 774 - SYS_NFTW = 0x307 // 775 - SYS_GETWD = 0x308 // 776 - SYS_LOCKF = 0x309 // 777 - SYS__LONGJMP = 0x30D // 781 - SYS__SETJMP = 0x30E // 782 - SYS_VFORK = 0x30F // 783 - SYS_WORDEXP = 0x310 // 784 - SYS_WORDFREE = 0x311 // 785 - SYS_GETPGID = 0x312 // 786 - SYS_GETSID = 0x313 // 787 - SYS___UTMPXNAME = 0x314 // 788 - SYS_CUSERID = 0x315 // 789 - SYS_GETPASS = 0x316 // 790 - SYS_FNMATCH = 0x317 // 791 - SYS_FTW = 0x318 // 792 - SYS_GETW = 0x319 // 793 - SYS_GLOB = 0x31A // 794 - SYS_GLOBFREE = 0x31B // 795 - SYS_PUTW = 0x31C // 796 - SYS_SEEKDIR = 0x31D // 797 - SYS_TELLDIR = 0x31E // 798 - SYS_TEMPNAM = 0x31F // 799 - SYS_ACOSH = 0x320 // 800 - SYS_ASINH = 0x321 // 801 - SYS_ATANH = 0x322 // 802 - SYS_CBRT = 0x323 // 803 - SYS_EXPM1 = 0x324 // 804 - SYS_ILOGB = 0x325 // 805 - SYS_LOGB = 0x326 // 806 - SYS_LOG1P = 0x327 // 807 - SYS_NEXTAFTER = 0x328 // 808 - SYS_RINT = 0x329 // 809 - SYS_REMAINDER = 0x32A // 810 - SYS_SCALB = 0x32B // 811 - SYS_LGAMMA = 0x32C // 812 - SYS_TTYSLOT = 0x32D // 813 - SYS_GETTIMEOFDAY_R = 0x32E // 814 - SYS_SYNC = 0x32F // 815 - SYS_SPAWN = 0x330 // 816 - SYS_SPAWNP = 0x331 // 817 - SYS_GETLOGIN_UU = 0x332 // 818 - SYS_ECVT = 0x333 // 819 - SYS_FCVT = 0x334 // 820 - SYS_GCVT = 0x335 // 821 - SYS_ACCEPT = 0x336 // 822 - SYS_BIND = 0x337 // 823 - SYS_CONNECT = 0x338 // 824 - SYS_ENDHOSTENT = 0x339 // 825 - SYS_ENDPROTOENT = 0x33A // 826 - SYS_ENDSERVENT = 0x33B // 827 - SYS_GETHOSTBYADDR_R = 0x33C // 828 - SYS_GETHOSTBYADDR = 0x33D // 829 - SYS_GETHOSTBYNAME_R = 0x33E // 830 - SYS_GETHOSTBYNAME = 0x33F // 831 - SYS_GETHOSTENT = 0x340 // 832 - SYS_GETHOSTID = 0x341 // 833 - SYS_GETHOSTNAME = 0x342 // 834 - SYS_GETNETBYADDR = 0x343 // 835 - SYS_GETNETBYNAME = 0x344 // 836 - SYS_GETNETENT = 0x345 // 837 - SYS_GETPEERNAME = 0x346 // 838 - SYS_GETPROTOBYNAME = 0x347 // 839 - SYS_GETPROTOBYNUMBER = 0x348 // 840 - SYS_GETPROTOENT = 0x349 // 841 - SYS_GETSERVBYNAME = 0x34A // 842 - SYS_GETSERVBYPORT = 0x34B // 843 - SYS_GETSERVENT = 0x34C // 844 - SYS_GETSOCKNAME = 0x34D // 845 - SYS_GETSOCKOPT = 0x34E // 846 - SYS_INET_ADDR = 0x34F // 847 - SYS_INET_LNAOF = 0x350 // 848 - SYS_INET_MAKEADDR = 0x351 // 849 - SYS_INET_NETOF = 0x352 // 850 - SYS_INET_NETWORK = 0x353 // 851 - SYS_INET_NTOA = 0x354 // 852 - SYS_IOCTL = 0x355 // 853 - SYS_LISTEN = 0x356 // 854 - SYS_READV = 0x357 // 855 - SYS_RECV = 0x358 // 856 - SYS_RECVFROM = 0x359 // 857 - SYS_SELECT = 0x35B // 859 - SYS_SELECTEX = 0x35C // 860 - SYS_SEND = 0x35D // 861 - SYS_SENDTO = 0x35F // 863 - SYS_SETHOSTENT = 0x360 // 864 - SYS_SETNETENT = 0x361 // 865 - 
SYS_SETPEER = 0x362 // 866 - SYS_SETPROTOENT = 0x363 // 867 - SYS_SETSERVENT = 0x364 // 868 - SYS_SETSOCKOPT = 0x365 // 869 - SYS_SHUTDOWN = 0x366 // 870 - SYS_SOCKET = 0x367 // 871 - SYS_SOCKETPAIR = 0x368 // 872 - SYS_WRITEV = 0x369 // 873 - SYS_CHROOT = 0x36A // 874 - SYS_W_STATVFS = 0x36B // 875 - SYS_ULIMIT = 0x36C // 876 - SYS_ISNAN = 0x36D // 877 - SYS_UTIMES = 0x36E // 878 - SYS___H_ERRNO = 0x36F // 879 - SYS_ENDNETENT = 0x370 // 880 - SYS_CLOSELOG = 0x371 // 881 - SYS_OPENLOG = 0x372 // 882 - SYS_SETLOGMASK = 0x373 // 883 - SYS_SYSLOG = 0x374 // 884 - SYS_PTSNAME = 0x375 // 885 - SYS_SETREUID = 0x376 // 886 - SYS_SETREGID = 0x377 // 887 - SYS_REALPATH = 0x378 // 888 - SYS___SIGNGAM = 0x379 // 889 - SYS_GRANTPT = 0x37A // 890 - SYS_UNLOCKPT = 0x37B // 891 - SYS_TCGETSID = 0x37C // 892 - SYS___TCGETCP = 0x37D // 893 - SYS___TCSETCP = 0x37E // 894 - SYS___TCSETTABLES = 0x37F // 895 - SYS_POLL = 0x380 // 896 - SYS_REXEC = 0x381 // 897 - SYS___ISASCII2 = 0x382 // 898 - SYS___TOASCII2 = 0x383 // 899 - SYS_CHPRIORITY = 0x384 // 900 - SYS_PTHREAD_ATTR_SETSYNCTYPE_NP = 0x385 // 901 - SYS_PTHREAD_ATTR_GETSYNCTYPE_NP = 0x386 // 902 - SYS_PTHREAD_SET_LIMIT_NP = 0x387 // 903 - SYS___STNETENT = 0x388 // 904 - SYS___STPROTOENT = 0x389 // 905 - SYS___STSERVENT = 0x38A // 906 - SYS___STHOSTENT = 0x38B // 907 - SYS_NLIST = 0x38C // 908 - SYS___IPDBCS = 0x38D // 909 - SYS___IPDSPX = 0x38E // 910 - SYS___IPMSGC = 0x38F // 911 - SYS___SELECT1 = 0x390 // 912 - SYS_PTHREAD_SECURITY_NP = 0x391 // 913 - SYS___CHECK_RESOURCE_AUTH_NP = 0x392 // 914 - SYS___CONVERT_ID_NP = 0x393 // 915 - SYS___OPENVMREL = 0x394 // 916 - SYS_WMEMCHR = 0x395 // 917 - SYS_WMEMCMP = 0x396 // 918 - SYS_WMEMCPY = 0x397 // 919 - SYS_WMEMMOVE = 0x398 // 920 - SYS_WMEMSET = 0x399 // 921 - SYS___FPUTWC = 0x400 // 1024 - SYS___PUTWC = 0x401 // 1025 - SYS___PWCHAR = 0x402 // 1026 - SYS___WCSFTM = 0x403 // 1027 - SYS___WCSTOK = 0x404 // 1028 - SYS___WCWDTH = 0x405 // 1029 - SYS_T_ACCEPT = 0x409 // 1033 - SYS_T_ALLOC = 0x40A // 1034 - SYS_T_BIND = 0x40B // 1035 - SYS_T_CLOSE = 0x40C // 1036 - SYS_T_CONNECT = 0x40D // 1037 - SYS_T_ERROR = 0x40E // 1038 - SYS_T_FREE = 0x40F // 1039 - SYS_T_GETINFO = 0x410 // 1040 - SYS_T_GETPROTADDR = 0x411 // 1041 - SYS_T_GETSTATE = 0x412 // 1042 - SYS_T_LISTEN = 0x413 // 1043 - SYS_T_LOOK = 0x414 // 1044 - SYS_T_OPEN = 0x415 // 1045 - SYS_T_OPTMGMT = 0x416 // 1046 - SYS_T_RCV = 0x417 // 1047 - SYS_T_RCVCONNECT = 0x418 // 1048 - SYS_T_RCVDIS = 0x419 // 1049 - SYS_T_RCVREL = 0x41A // 1050 - SYS_T_RCVUDATA = 0x41B // 1051 - SYS_T_RCVUDERR = 0x41C // 1052 - SYS_T_SND = 0x41D // 1053 - SYS_T_SNDDIS = 0x41E // 1054 - SYS_T_SNDREL = 0x41F // 1055 - SYS_T_SNDUDATA = 0x420 // 1056 - SYS_T_STRERROR = 0x421 // 1057 - SYS_T_SYNC = 0x422 // 1058 - SYS_T_UNBIND = 0x423 // 1059 - SYS___T_ERRNO = 0x424 // 1060 - SYS___RECVMSG2 = 0x425 // 1061 - SYS___SENDMSG2 = 0x426 // 1062 - SYS_FATTACH = 0x427 // 1063 - SYS_FDETACH = 0x428 // 1064 - SYS_GETMSG = 0x429 // 1065 - SYS_GETPMSG = 0x42A // 1066 - SYS_ISASTREAM = 0x42B // 1067 - SYS_PUTMSG = 0x42C // 1068 - SYS_PUTPMSG = 0x42D // 1069 - SYS___ISPOSIXON = 0x42E // 1070 - SYS___OPENMVSREL = 0x42F // 1071 - SYS_GETCONTEXT = 0x430 // 1072 - SYS_SETCONTEXT = 0x431 // 1073 - SYS_MAKECONTEXT = 0x432 // 1074 - SYS_SWAPCONTEXT = 0x433 // 1075 - SYS_PTHREAD_GETSPECIFIC_D8_NP = 0x434 // 1076 - SYS_GETCLIENTID = 0x470 // 1136 - SYS___GETCLIENTID = 0x471 // 1137 - SYS_GETSTABLESIZE = 0x472 // 1138 - SYS_GETIBMOPT = 0x473 // 1139 - SYS_GETIBMSOCKOPT = 0x474 // 1140 - SYS_GIVESOCKET = 
0x475 // 1141 - SYS_IBMSFLUSH = 0x476 // 1142 - SYS_MAXDESC = 0x477 // 1143 - SYS_SETIBMOPT = 0x478 // 1144 - SYS_SETIBMSOCKOPT = 0x479 // 1145 - SYS_SOCK_DEBUG = 0x47A // 1146 - SYS_SOCK_DO_TESTSTOR = 0x47D // 1149 - SYS_TAKESOCKET = 0x47E // 1150 - SYS___SERVER_INIT = 0x47F // 1151 - SYS___SERVER_PWU = 0x480 // 1152 - SYS_PTHREAD_TAG_NP = 0x481 // 1153 - SYS___CONSOLE = 0x482 // 1154 - SYS___WSINIT = 0x483 // 1155 - SYS___IPTCPN = 0x489 // 1161 - SYS___SMF_RECORD = 0x48A // 1162 - SYS___IPHOST = 0x48B // 1163 - SYS___IPNODE = 0x48C // 1164 - SYS___SERVER_CLASSIFY_CREATE = 0x48D // 1165 - SYS___SERVER_CLASSIFY_DESTROY = 0x48E // 1166 - SYS___SERVER_CLASSIFY_RESET = 0x48F // 1167 - SYS___SERVER_CLASSIFY = 0x490 // 1168 - SYS___HEAPRPT = 0x496 // 1174 - SYS___FNWSA = 0x49B // 1179 - SYS___SPAWN2 = 0x49D // 1181 - SYS___SPAWNP2 = 0x49E // 1182 - SYS___GDRR = 0x4A1 // 1185 - SYS___HRRNO = 0x4A2 // 1186 - SYS___OPRG = 0x4A3 // 1187 - SYS___OPRR = 0x4A4 // 1188 - SYS___OPND = 0x4A5 // 1189 - SYS___OPPT = 0x4A6 // 1190 - SYS___SIGGM = 0x4A7 // 1191 - SYS___DGHT = 0x4A8 // 1192 - SYS___TZNE = 0x4A9 // 1193 - SYS___TZZN = 0x4AA // 1194 - SYS___TRRNO = 0x4AF // 1199 - SYS___ENVN = 0x4B0 // 1200 - SYS___MLOCKALL = 0x4B1 // 1201 - SYS_CREATEWO = 0x4B2 // 1202 - SYS_CREATEWORKUNIT = 0x4B2 // 1202 - SYS_CONTINUE = 0x4B3 // 1203 - SYS_CONTINUEWORKUNIT = 0x4B3 // 1203 - SYS_CONNECTW = 0x4B4 // 1204 - SYS_CONNECTWORKMGR = 0x4B4 // 1204 - SYS_CONNECTS = 0x4B5 // 1205 - SYS_CONNECTSERVER = 0x4B5 // 1205 - SYS_DISCONNE = 0x4B6 // 1206 - SYS_DISCONNECTSERVER = 0x4B6 // 1206 - SYS_JOINWORK = 0x4B7 // 1207 - SYS_JOINWORKUNIT = 0x4B7 // 1207 - SYS_LEAVEWOR = 0x4B8 // 1208 - SYS_LEAVEWORKUNIT = 0x4B8 // 1208 - SYS_DELETEWO = 0x4B9 // 1209 - SYS_DELETEWORKUNIT = 0x4B9 // 1209 - SYS_QUERYMET = 0x4BA // 1210 - SYS_QUERYMETRICS = 0x4BA // 1210 - SYS_QUERYSCH = 0x4BB // 1211 - SYS_QUERYSCHENV = 0x4BB // 1211 - SYS_CHECKSCH = 0x4BC // 1212 - SYS_CHECKSCHENV = 0x4BC // 1212 - SYS___PID_AFFINITY = 0x4BD // 1213 - SYS___ASINH_B = 0x4BE // 1214 - SYS___ATAN_B = 0x4BF // 1215 - SYS___CBRT_B = 0x4C0 // 1216 - SYS___CEIL_B = 0x4C1 // 1217 - SYS_COPYSIGN = 0x4C2 // 1218 - SYS___COS_B = 0x4C3 // 1219 - SYS___ERF_B = 0x4C4 // 1220 - SYS___ERFC_B = 0x4C5 // 1221 - SYS___EXPM1_B = 0x4C6 // 1222 - SYS___FABS_B = 0x4C7 // 1223 - SYS_FINITE = 0x4C8 // 1224 - SYS___FLOOR_B = 0x4C9 // 1225 - SYS___FREXP_B = 0x4CA // 1226 - SYS___ILOGB_B = 0x4CB // 1227 - SYS___ISNAN_B = 0x4CC // 1228 - SYS___LDEXP_B = 0x4CD // 1229 - SYS___LOG1P_B = 0x4CE // 1230 - SYS___LOGB_B = 0x4CF // 1231 - SYS_MATHERR = 0x4D0 // 1232 - SYS___MODF_B = 0x4D1 // 1233 - SYS___NEXTAFTER_B = 0x4D2 // 1234 - SYS___RINT_B = 0x4D3 // 1235 - SYS_SCALBN = 0x4D4 // 1236 - SYS_SIGNIFIC = 0x4D5 // 1237 - SYS_SIGNIFICAND = 0x4D5 // 1237 - SYS___SIN_B = 0x4D6 // 1238 - SYS___TAN_B = 0x4D7 // 1239 - SYS___TANH_B = 0x4D8 // 1240 - SYS___ACOS_B = 0x4D9 // 1241 - SYS___ACOSH_B = 0x4DA // 1242 - SYS___ASIN_B = 0x4DB // 1243 - SYS___ATAN2_B = 0x4DC // 1244 - SYS___ATANH_B = 0x4DD // 1245 - SYS___COSH_B = 0x4DE // 1246 - SYS___EXP_B = 0x4DF // 1247 - SYS___FMOD_B = 0x4E0 // 1248 - SYS___GAMMA_B = 0x4E1 // 1249 - SYS_GAMMA_R = 0x4E2 // 1250 - SYS___HYPOT_B = 0x4E3 // 1251 - SYS___J0_B = 0x4E4 // 1252 - SYS___Y0_B = 0x4E5 // 1253 - SYS___J1_B = 0x4E6 // 1254 - SYS___Y1_B = 0x4E7 // 1255 - SYS___JN_B = 0x4E8 // 1256 - SYS___YN_B = 0x4E9 // 1257 - SYS___LGAMMA_B = 0x4EA // 1258 - SYS_LGAMMA_R = 0x4EB // 1259 - SYS___LOG_B = 0x4EC // 1260 - SYS___LOG10_B = 0x4ED // 1261 - SYS___POW_B = 
0x4EE // 1262 - SYS___REMAINDER_B = 0x4EF // 1263 - SYS___SCALB_B = 0x4F0 // 1264 - SYS___SINH_B = 0x4F1 // 1265 - SYS___SQRT_B = 0x4F2 // 1266 - SYS___OPENDIR2 = 0x4F3 // 1267 - SYS___READDIR2 = 0x4F4 // 1268 - SYS___LOGIN = 0x4F5 // 1269 - SYS___OPEN_STAT = 0x4F6 // 1270 - SYS_ACCEPT_AND_RECV = 0x4F7 // 1271 - SYS___FP_SETMODE = 0x4F8 // 1272 - SYS___SIGACTIONSET = 0x4FB // 1275 - SYS___UCREATE = 0x4FC // 1276 - SYS___UMALLOC = 0x4FD // 1277 - SYS___UFREE = 0x4FE // 1278 - SYS___UHEAPREPORT = 0x4FF // 1279 - SYS___ISBFP = 0x500 // 1280 - SYS___FP_CAST = 0x501 // 1281 - SYS___CERTIFICATE = 0x502 // 1282 - SYS_SEND_FILE = 0x503 // 1283 - SYS_AIO_CANCEL = 0x504 // 1284 - SYS_AIO_ERROR = 0x505 // 1285 - SYS_AIO_READ = 0x506 // 1286 - SYS_AIO_RETURN = 0x507 // 1287 - SYS_AIO_SUSPEND = 0x508 // 1288 - SYS_AIO_WRITE = 0x509 // 1289 - SYS_PTHREAD_MUTEXATTR_GETPSHARED = 0x50A // 1290 - SYS_PTHREAD_MUTEXATTR_SETPSHARED = 0x50B // 1291 - SYS_PTHREAD_RWLOCK_DESTROY = 0x50C // 1292 - SYS_PTHREAD_RWLOCK_INIT = 0x50D // 1293 - SYS_PTHREAD_RWLOCK_RDLOCK = 0x50E // 1294 - SYS_PTHREAD_RWLOCK_TRYRDLOCK = 0x50F // 1295 - SYS_PTHREAD_RWLOCK_TRYWRLOCK = 0x510 // 1296 - SYS_PTHREAD_RWLOCK_UNLOCK = 0x511 // 1297 - SYS_PTHREAD_RWLOCK_WRLOCK = 0x512 // 1298 - SYS_PTHREAD_RWLOCKATTR_GETPSHARED = 0x513 // 1299 - SYS_PTHREAD_RWLOCKATTR_SETPSHARED = 0x514 // 1300 - SYS_PTHREAD_RWLOCKATTR_INIT = 0x515 // 1301 - SYS_PTHREAD_RWLOCKATTR_DESTROY = 0x516 // 1302 - SYS___CTTBL = 0x517 // 1303 - SYS_PTHREAD_MUTEXATTR_SETTYPE = 0x518 // 1304 - SYS_PTHREAD_MUTEXATTR_GETTYPE = 0x519 // 1305 - SYS___FP_CLR_FLAG = 0x51A // 1306 - SYS___FP_READ_FLAG = 0x51B // 1307 - SYS___FP_RAISE_XCP = 0x51C // 1308 - SYS___FP_CLASS = 0x51D // 1309 - SYS___FP_FINITE = 0x51E // 1310 - SYS___FP_ISNAN = 0x51F // 1311 - SYS___FP_UNORDERED = 0x520 // 1312 - SYS___FP_READ_RND = 0x521 // 1313 - SYS___FP_READ_RND_B = 0x522 // 1314 - SYS___FP_SWAP_RND = 0x523 // 1315 - SYS___FP_SWAP_RND_B = 0x524 // 1316 - SYS___FP_LEVEL = 0x525 // 1317 - SYS___FP_BTOH = 0x526 // 1318 - SYS___FP_HTOB = 0x527 // 1319 - SYS___FPC_RD = 0x528 // 1320 - SYS___FPC_WR = 0x529 // 1321 - SYS___FPC_RW = 0x52A // 1322 - SYS___FPC_SM = 0x52B // 1323 - SYS___FPC_RS = 0x52C // 1324 - SYS_SIGTIMEDWAIT = 0x52D // 1325 - SYS_SIGWAITINFO = 0x52E // 1326 - SYS___CHKBFP = 0x52F // 1327 - SYS___W_PIOCTL = 0x59E // 1438 - SYS___OSENV = 0x59F // 1439 - SYS_EXPORTWO = 0x5A1 // 1441 - SYS_EXPORTWORKUNIT = 0x5A1 // 1441 - SYS_UNDOEXPO = 0x5A2 // 1442 - SYS_UNDOEXPORTWORKUNIT = 0x5A2 // 1442 - SYS_IMPORTWO = 0x5A3 // 1443 - SYS_IMPORTWORKUNIT = 0x5A3 // 1443 - SYS_UNDOIMPO = 0x5A4 // 1444 - SYS_UNDOIMPORTWORKUNIT = 0x5A4 // 1444 - SYS_EXTRACTW = 0x5A5 // 1445 - SYS_EXTRACTWORKUNIT = 0x5A5 // 1445 - SYS___CPL = 0x5A6 // 1446 - SYS___MAP_INIT = 0x5A7 // 1447 - SYS___MAP_SERVICE = 0x5A8 // 1448 - SYS_SIGQUEUE = 0x5A9 // 1449 - SYS___MOUNT = 0x5AA // 1450 - SYS___GETUSERID = 0x5AB // 1451 - SYS___IPDOMAINNAME = 0x5AC // 1452 - SYS_QUERYENC = 0x5AD // 1453 - SYS_QUERYWORKUNITCLASSIFICATION = 0x5AD // 1453 - SYS_CONNECTE = 0x5AE // 1454 - SYS_CONNECTEXPORTIMPORT = 0x5AE // 1454 - SYS___FP_SWAPMODE = 0x5AF // 1455 - SYS_STRTOLL = 0x5B0 // 1456 - SYS_STRTOULL = 0x5B1 // 1457 - SYS___DSA_PREV = 0x5B2 // 1458 - SYS___EP_FIND = 0x5B3 // 1459 - SYS___SERVER_THREADS_QUERY = 0x5B4 // 1460 - SYS___MSGRCV_TIMED = 0x5B7 // 1463 - SYS___SEMOP_TIMED = 0x5B8 // 1464 - SYS___GET_CPUID = 0x5B9 // 1465 - SYS___GET_SYSTEM_SETTINGS = 0x5BA // 1466 - SYS_FTELLO = 0x5C8 // 1480 - SYS_FSEEKO = 0x5C9 // 1481 - SYS_LLDIV = 
0x5CB // 1483 - SYS_WCSTOLL = 0x5CC // 1484 - SYS_WCSTOULL = 0x5CD // 1485 - SYS_LLABS = 0x5CE // 1486 - SYS___CONSOLE2 = 0x5D2 // 1490 - SYS_INET_NTOP = 0x5D3 // 1491 - SYS_INET_PTON = 0x5D4 // 1492 - SYS___RES = 0x5D6 // 1494 - SYS_RES_MKQUERY = 0x5D7 // 1495 - SYS_RES_INIT = 0x5D8 // 1496 - SYS_RES_QUERY = 0x5D9 // 1497 - SYS_RES_SEARCH = 0x5DA // 1498 - SYS_RES_SEND = 0x5DB // 1499 - SYS_RES_QUERYDOMAIN = 0x5DC // 1500 - SYS_DN_EXPAND = 0x5DD // 1501 - SYS_DN_SKIPNAME = 0x5DE // 1502 - SYS_DN_COMP = 0x5DF // 1503 - SYS_ASCTIME_R = 0x5E0 // 1504 - SYS_CTIME_R = 0x5E1 // 1505 - SYS_GMTIME_R = 0x5E2 // 1506 - SYS_LOCALTIME_R = 0x5E3 // 1507 - SYS_RAND_R = 0x5E4 // 1508 - SYS_STRTOK_R = 0x5E5 // 1509 - SYS_READDIR_R = 0x5E6 // 1510 - SYS_GETGRGID_R = 0x5E7 // 1511 - SYS_GETGRNAM_R = 0x5E8 // 1512 - SYS_GETLOGIN_R = 0x5E9 // 1513 - SYS_GETPWNAM_R = 0x5EA // 1514 - SYS_GETPWUID_R = 0x5EB // 1515 - SYS_TTYNAME_R = 0x5EC // 1516 - SYS_PTHREAD_ATFORK = 0x5ED // 1517 - SYS_PTHREAD_ATTR_GETGUARDSIZE = 0x5EE // 1518 - SYS_PTHREAD_ATTR_GETSTACKADDR = 0x5EF // 1519 - SYS_PTHREAD_ATTR_SETGUARDSIZE = 0x5F0 // 1520 - SYS_PTHREAD_ATTR_SETSTACKADDR = 0x5F1 // 1521 - SYS_PTHREAD_CONDATTR_GETPSHARED = 0x5F2 // 1522 - SYS_PTHREAD_CONDATTR_SETPSHARED = 0x5F3 // 1523 - SYS_PTHREAD_GETCONCURRENCY = 0x5F4 // 1524 - SYS_PTHREAD_KEY_DELETE = 0x5F5 // 1525 - SYS_PTHREAD_SETCONCURRENCY = 0x5F6 // 1526 - SYS_PTHREAD_SIGMASK = 0x5F7 // 1527 - SYS___DISCARDDATA = 0x5F8 // 1528 - SYS_PTHREAD_ATTR_GETSCHEDPARAM = 0x5F9 // 1529 - SYS_PTHREAD_ATTR_SETSCHEDPARAM = 0x5FA // 1530 - SYS_PTHREAD_ATTR_GETDETACHSTATE_U98 = 0x5FB // 1531 - SYS_PTHREAD_ATTR_SETDETACHSTATE_U98 = 0x5FC // 1532 - SYS_PTHREAD_DETACH_U98 = 0x5FD // 1533 - SYS_PTHREAD_GETSPECIFIC_U98 = 0x5FE // 1534 - SYS_PTHREAD_SETCANCELSTATE = 0x5FF // 1535 - SYS_PTHREAD_SETCANCELTYPE = 0x600 // 1536 - SYS_PTHREAD_TESTCANCEL = 0x601 // 1537 - SYS___ATANF_B = 0x602 // 1538 - SYS___ATANL_B = 0x603 // 1539 - SYS___CEILF_B = 0x604 // 1540 - SYS___CEILL_B = 0x605 // 1541 - SYS___COSF_B = 0x606 // 1542 - SYS___COSL_B = 0x607 // 1543 - SYS___FABSF_B = 0x608 // 1544 - SYS___FABSL_B = 0x609 // 1545 - SYS___FLOORF_B = 0x60A // 1546 - SYS___FLOORL_B = 0x60B // 1547 - SYS___FREXPF_B = 0x60C // 1548 - SYS___FREXPL_B = 0x60D // 1549 - SYS___LDEXPF_B = 0x60E // 1550 - SYS___LDEXPL_B = 0x60F // 1551 - SYS___SINF_B = 0x610 // 1552 - SYS___SINL_B = 0x611 // 1553 - SYS___TANF_B = 0x612 // 1554 - SYS___TANL_B = 0x613 // 1555 - SYS___TANHF_B = 0x614 // 1556 - SYS___TANHL_B = 0x615 // 1557 - SYS___ACOSF_B = 0x616 // 1558 - SYS___ACOSL_B = 0x617 // 1559 - SYS___ASINF_B = 0x618 // 1560 - SYS___ASINL_B = 0x619 // 1561 - SYS___ATAN2F_B = 0x61A // 1562 - SYS___ATAN2L_B = 0x61B // 1563 - SYS___COSHF_B = 0x61C // 1564 - SYS___COSHL_B = 0x61D // 1565 - SYS___EXPF_B = 0x61E // 1566 - SYS___EXPL_B = 0x61F // 1567 - SYS___LOGF_B = 0x620 // 1568 - SYS___LOGL_B = 0x621 // 1569 - SYS___LOG10F_B = 0x622 // 1570 - SYS___LOG10L_B = 0x623 // 1571 - SYS___POWF_B = 0x624 // 1572 - SYS___POWL_B = 0x625 // 1573 - SYS___SINHF_B = 0x626 // 1574 - SYS___SINHL_B = 0x627 // 1575 - SYS___SQRTF_B = 0x628 // 1576 - SYS___SQRTL_B = 0x629 // 1577 - SYS___ABSF_B = 0x62A // 1578 - SYS___ABS_B = 0x62B // 1579 - SYS___ABSL_B = 0x62C // 1580 - SYS___FMODF_B = 0x62D // 1581 - SYS___FMODL_B = 0x62E // 1582 - SYS___MODFF_B = 0x62F // 1583 - SYS___MODFL_B = 0x630 // 1584 - SYS_ABSF = 0x631 // 1585 - SYS_ABSL = 0x632 // 1586 - SYS_ACOSF = 0x633 // 1587 - SYS_ACOSL = 0x634 // 1588 - SYS_ASINF = 0x635 // 1589 - SYS_ASINL = 0x636 // 
1590 - SYS_ATAN2F = 0x637 // 1591 - SYS_ATAN2L = 0x638 // 1592 - SYS_ATANF = 0x639 // 1593 - SYS_ATANL = 0x63A // 1594 - SYS_CEILF = 0x63B // 1595 - SYS_CEILL = 0x63C // 1596 - SYS_COSF = 0x63D // 1597 - SYS_COSL = 0x63E // 1598 - SYS_COSHF = 0x63F // 1599 - SYS_COSHL = 0x640 // 1600 - SYS_EXPF = 0x641 // 1601 - SYS_EXPL = 0x642 // 1602 - SYS_TANHF = 0x643 // 1603 - SYS_TANHL = 0x644 // 1604 - SYS_LOG10F = 0x645 // 1605 - SYS_LOG10L = 0x646 // 1606 - SYS_LOGF = 0x647 // 1607 - SYS_LOGL = 0x648 // 1608 - SYS_POWF = 0x649 // 1609 - SYS_POWL = 0x64A // 1610 - SYS_SINF = 0x64B // 1611 - SYS_SINL = 0x64C // 1612 - SYS_SQRTF = 0x64D // 1613 - SYS_SQRTL = 0x64E // 1614 - SYS_SINHF = 0x64F // 1615 - SYS_SINHL = 0x650 // 1616 - SYS_TANF = 0x651 // 1617 - SYS_TANL = 0x652 // 1618 - SYS_FABSF = 0x653 // 1619 - SYS_FABSL = 0x654 // 1620 - SYS_FLOORF = 0x655 // 1621 - SYS_FLOORL = 0x656 // 1622 - SYS_FMODF = 0x657 // 1623 - SYS_FMODL = 0x658 // 1624 - SYS_FREXPF = 0x659 // 1625 - SYS_FREXPL = 0x65A // 1626 - SYS_LDEXPF = 0x65B // 1627 - SYS_LDEXPL = 0x65C // 1628 - SYS_MODFF = 0x65D // 1629 - SYS_MODFL = 0x65E // 1630 - SYS_BTOWC = 0x65F // 1631 - SYS___CHATTR = 0x660 // 1632 - SYS___FCHATTR = 0x661 // 1633 - SYS___TOCCSID = 0x662 // 1634 - SYS___CSNAMETYPE = 0x663 // 1635 - SYS___TOCSNAME = 0x664 // 1636 - SYS___CCSIDTYPE = 0x665 // 1637 - SYS___AE_CORRESTBL_QUERY = 0x666 // 1638 - SYS___AE_AUTOCONVERT_STATE = 0x667 // 1639 - SYS_DN_FIND = 0x668 // 1640 - SYS___GETHOSTBYADDR_A = 0x669 // 1641 - SYS___GETHOSTBYNAME_A = 0x66A // 1642 - SYS___RES_INIT_A = 0x66B // 1643 - SYS___GETHOSTBYADDR_R_A = 0x66C // 1644 - SYS___GETHOSTBYNAME_R_A = 0x66D // 1645 - SYS___CHARMAP_INIT_A = 0x66E // 1646 - SYS___MBLEN_A = 0x66F // 1647 - SYS___MBLEN_SB_A = 0x670 // 1648 - SYS___MBLEN_STD_A = 0x671 // 1649 - SYS___MBLEN_UTF = 0x672 // 1650 - SYS___MBSTOWCS_A = 0x673 // 1651 - SYS___MBSTOWCS_STD_A = 0x674 // 1652 - SYS___MBTOWC_A = 0x675 // 1653 - SYS___MBTOWC_ISO1 = 0x676 // 1654 - SYS___MBTOWC_SBCS = 0x677 // 1655 - SYS___MBTOWC_MBCS = 0x678 // 1656 - SYS___MBTOWC_UTF = 0x679 // 1657 - SYS___WCSTOMBS_A = 0x67A // 1658 - SYS___WCSTOMBS_STD_A = 0x67B // 1659 - SYS___WCSWIDTH_A = 0x67C // 1660 - SYS___GETGRGID_R_A = 0x67D // 1661 - SYS___WCSWIDTH_STD_A = 0x67E // 1662 - SYS___WCSWIDTH_ASIA = 0x67F // 1663 - SYS___CSID_A = 0x680 // 1664 - SYS___CSID_STD_A = 0x681 // 1665 - SYS___WCSID_A = 0x682 // 1666 - SYS___WCSID_STD_A = 0x683 // 1667 - SYS___WCTOMB_A = 0x684 // 1668 - SYS___WCTOMB_ISO1 = 0x685 // 1669 - SYS___WCTOMB_STD_A = 0x686 // 1670 - SYS___WCTOMB_UTF = 0x687 // 1671 - SYS___WCWIDTH_A = 0x688 // 1672 - SYS___GETGRNAM_R_A = 0x689 // 1673 - SYS___WCWIDTH_STD_A = 0x68A // 1674 - SYS___WCWIDTH_ASIA = 0x68B // 1675 - SYS___GETPWNAM_R_A = 0x68C // 1676 - SYS___GETPWUID_R_A = 0x68D // 1677 - SYS___GETLOGIN_R_A = 0x68E // 1678 - SYS___TTYNAME_R_A = 0x68F // 1679 - SYS___READDIR_R_A = 0x690 // 1680 - SYS___E2A_S = 0x691 // 1681 - SYS___FNMATCH_A = 0x692 // 1682 - SYS___FNMATCH_C_A = 0x693 // 1683 - SYS___EXECL_A = 0x694 // 1684 - SYS___FNMATCH_STD_A = 0x695 // 1685 - SYS___REGCOMP_A = 0x696 // 1686 - SYS___REGCOMP_STD_A = 0x697 // 1687 - SYS___REGERROR_A = 0x698 // 1688 - SYS___REGERROR_STD_A = 0x699 // 1689 - SYS___REGEXEC_A = 0x69A // 1690 - SYS___REGEXEC_STD_A = 0x69B // 1691 - SYS___REGFREE_A = 0x69C // 1692 - SYS___REGFREE_STD_A = 0x69D // 1693 - SYS___STRCOLL_A = 0x69E // 1694 - SYS___STRCOLL_C_A = 0x69F // 1695 - SYS___EXECLE_A = 0x6A0 // 1696 - SYS___STRCOLL_STD_A = 0x6A1 // 1697 - SYS___STRXFRM_A = 0x6A2 // 1698 - 
SYS___STRXFRM_C_A = 0x6A3 // 1699 - SYS___EXECLP_A = 0x6A4 // 1700 - SYS___STRXFRM_STD_A = 0x6A5 // 1701 - SYS___WCSCOLL_A = 0x6A6 // 1702 - SYS___WCSCOLL_C_A = 0x6A7 // 1703 - SYS___WCSCOLL_STD_A = 0x6A8 // 1704 - SYS___WCSXFRM_A = 0x6A9 // 1705 - SYS___WCSXFRM_C_A = 0x6AA // 1706 - SYS___WCSXFRM_STD_A = 0x6AB // 1707 - SYS___COLLATE_INIT_A = 0x6AC // 1708 - SYS___WCTYPE_A = 0x6AD // 1709 - SYS___GET_WCTYPE_STD_A = 0x6AE // 1710 - SYS___CTYPE_INIT_A = 0x6AF // 1711 - SYS___ISWCTYPE_A = 0x6B0 // 1712 - SYS___EXECV_A = 0x6B1 // 1713 - SYS___IS_WCTYPE_STD_A = 0x6B2 // 1714 - SYS___TOWLOWER_A = 0x6B3 // 1715 - SYS___TOWLOWER_STD_A = 0x6B4 // 1716 - SYS___TOWUPPER_A = 0x6B5 // 1717 - SYS___TOWUPPER_STD_A = 0x6B6 // 1718 - SYS___LOCALE_INIT_A = 0x6B7 // 1719 - SYS___LOCALECONV_A = 0x6B8 // 1720 - SYS___LOCALECONV_STD_A = 0x6B9 // 1721 - SYS___NL_LANGINFO_A = 0x6BA // 1722 - SYS___NL_LNAGINFO_STD_A = 0x6BB // 1723 - SYS___MONETARY_INIT_A = 0x6BC // 1724 - SYS___STRFMON_A = 0x6BD // 1725 - SYS___STRFMON_STD_A = 0x6BE // 1726 - SYS___GETADDRINFO_A = 0x6BF // 1727 - SYS___CATGETS_A = 0x6C0 // 1728 - SYS___EXECVE_A = 0x6C1 // 1729 - SYS___EXECVP_A = 0x6C2 // 1730 - SYS___SPAWN_A = 0x6C3 // 1731 - SYS___GETNAMEINFO_A = 0x6C4 // 1732 - SYS___SPAWNP_A = 0x6C5 // 1733 - SYS___NUMERIC_INIT_A = 0x6C6 // 1734 - SYS___RESP_INIT_A = 0x6C7 // 1735 - SYS___RPMATCH_A = 0x6C8 // 1736 - SYS___RPMATCH_C_A = 0x6C9 // 1737 - SYS___RPMATCH_STD_A = 0x6CA // 1738 - SYS___TIME_INIT_A = 0x6CB // 1739 - SYS___STRFTIME_A = 0x6CC // 1740 - SYS___STRFTIME_STD_A = 0x6CD // 1741 - SYS___STRPTIME_A = 0x6CE // 1742 - SYS___STRPTIME_STD_A = 0x6CF // 1743 - SYS___WCSFTIME_A = 0x6D0 // 1744 - SYS___WCSFTIME_STD_A = 0x6D1 // 1745 - SYS_____SPAWN2_A = 0x6D2 // 1746 - SYS_____SPAWNP2_A = 0x6D3 // 1747 - SYS___SYNTAX_INIT_A = 0x6D4 // 1748 - SYS___TOD_INIT_A = 0x6D5 // 1749 - SYS___NL_CSINFO_A = 0x6D6 // 1750 - SYS___NL_MONINFO_A = 0x6D7 // 1751 - SYS___NL_NUMINFO_A = 0x6D8 // 1752 - SYS___NL_RESPINFO_A = 0x6D9 // 1753 - SYS___NL_TIMINFO_A = 0x6DA // 1754 - SYS___IF_NAMETOINDEX_A = 0x6DB // 1755 - SYS___IF_INDEXTONAME_A = 0x6DC // 1756 - SYS___PRINTF_A = 0x6DD // 1757 - SYS___ICONV_OPEN_A = 0x6DE // 1758 - SYS___DLLLOAD_A = 0x6DF // 1759 - SYS___DLLQUERYFN_A = 0x6E0 // 1760 - SYS___DLLQUERYVAR_A = 0x6E1 // 1761 - SYS_____CHATTR_A = 0x6E2 // 1762 - SYS___E2A_L = 0x6E3 // 1763 - SYS_____TOCCSID_A = 0x6E4 // 1764 - SYS_____TOCSNAME_A = 0x6E5 // 1765 - SYS_____CCSIDTYPE_A = 0x6E6 // 1766 - SYS_____CSNAMETYPE_A = 0x6E7 // 1767 - SYS___CHMOD_A = 0x6E8 // 1768 - SYS___MKDIR_A = 0x6E9 // 1769 - SYS___STAT_A = 0x6EA // 1770 - SYS___STAT_O_A = 0x6EB // 1771 - SYS___MKFIFO_A = 0x6EC // 1772 - SYS_____OPEN_STAT_A = 0x6ED // 1773 - SYS___LSTAT_A = 0x6EE // 1774 - SYS___LSTAT_O_A = 0x6EF // 1775 - SYS___MKNOD_A = 0x6F0 // 1776 - SYS___MOUNT_A = 0x6F1 // 1777 - SYS___UMOUNT_A = 0x6F2 // 1778 - SYS___CHAUDIT_A = 0x6F4 // 1780 - SYS___W_GETMNTENT_A = 0x6F5 // 1781 - SYS___CREAT_A = 0x6F6 // 1782 - SYS___OPEN_A = 0x6F7 // 1783 - SYS___SETLOCALE_A = 0x6F9 // 1785 - SYS___FPRINTF_A = 0x6FA // 1786 - SYS___SPRINTF_A = 0x6FB // 1787 - SYS___VFPRINTF_A = 0x6FC // 1788 - SYS___VPRINTF_A = 0x6FD // 1789 - SYS___VSPRINTF_A = 0x6FE // 1790 - SYS___VSWPRINTF_A = 0x6FF // 1791 - SYS___SWPRINTF_A = 0x700 // 1792 - SYS___FSCANF_A = 0x701 // 1793 - SYS___SCANF_A = 0x702 // 1794 - SYS___SSCANF_A = 0x703 // 1795 - SYS___SWSCANF_A = 0x704 // 1796 - SYS___ATOF_A = 0x705 // 1797 - SYS___ATOI_A = 0x706 // 1798 - SYS___ATOL_A = 0x707 // 1799 - SYS___STRTOD_A = 0x708 // 1800 
- SYS___STRTOL_A = 0x709 // 1801 - SYS___STRTOUL_A = 0x70A // 1802 - SYS_____AE_CORRESTBL_QUERY_A = 0x70B // 1803 - SYS___A64L_A = 0x70C // 1804 - SYS___ECVT_A = 0x70D // 1805 - SYS___FCVT_A = 0x70E // 1806 - SYS___GCVT_A = 0x70F // 1807 - SYS___L64A_A = 0x710 // 1808 - SYS___STRERROR_A = 0x711 // 1809 - SYS___PERROR_A = 0x712 // 1810 - SYS___FETCH_A = 0x713 // 1811 - SYS___GETENV_A = 0x714 // 1812 - SYS___MKSTEMP_A = 0x717 // 1815 - SYS___PTSNAME_A = 0x718 // 1816 - SYS___PUTENV_A = 0x719 // 1817 - SYS___REALPATH_A = 0x71A // 1818 - SYS___SETENV_A = 0x71B // 1819 - SYS___SYSTEM_A = 0x71C // 1820 - SYS___GETOPT_A = 0x71D // 1821 - SYS___CATOPEN_A = 0x71E // 1822 - SYS___ACCESS_A = 0x71F // 1823 - SYS___CHDIR_A = 0x720 // 1824 - SYS___CHOWN_A = 0x721 // 1825 - SYS___CHROOT_A = 0x722 // 1826 - SYS___GETCWD_A = 0x723 // 1827 - SYS___GETWD_A = 0x724 // 1828 - SYS___LCHOWN_A = 0x725 // 1829 - SYS___LINK_A = 0x726 // 1830 - SYS___PATHCONF_A = 0x727 // 1831 - SYS___IF_NAMEINDEX_A = 0x728 // 1832 - SYS___READLINK_A = 0x729 // 1833 - SYS___RMDIR_A = 0x72A // 1834 - SYS___STATVFS_A = 0x72B // 1835 - SYS___SYMLINK_A = 0x72C // 1836 - SYS___TRUNCATE_A = 0x72D // 1837 - SYS___UNLINK_A = 0x72E // 1838 - SYS___GAI_STRERROR_A = 0x72F // 1839 - SYS___EXTLINK_NP_A = 0x730 // 1840 - SYS___ISALNUM_A = 0x731 // 1841 - SYS___ISALPHA_A = 0x732 // 1842 - SYS___A2E_S = 0x733 // 1843 - SYS___ISCNTRL_A = 0x734 // 1844 - SYS___ISDIGIT_A = 0x735 // 1845 - SYS___ISGRAPH_A = 0x736 // 1846 - SYS___ISLOWER_A = 0x737 // 1847 - SYS___ISPRINT_A = 0x738 // 1848 - SYS___ISPUNCT_A = 0x739 // 1849 - SYS___ISSPACE_A = 0x73A // 1850 - SYS___ISUPPER_A = 0x73B // 1851 - SYS___ISXDIGIT_A = 0x73C // 1852 - SYS___TOLOWER_A = 0x73D // 1853 - SYS___TOUPPER_A = 0x73E // 1854 - SYS___ISWALNUM_A = 0x73F // 1855 - SYS___ISWALPHA_A = 0x740 // 1856 - SYS___A2E_L = 0x741 // 1857 - SYS___ISWCNTRL_A = 0x742 // 1858 - SYS___ISWDIGIT_A = 0x743 // 1859 - SYS___ISWGRAPH_A = 0x744 // 1860 - SYS___ISWLOWER_A = 0x745 // 1861 - SYS___ISWPRINT_A = 0x746 // 1862 - SYS___ISWPUNCT_A = 0x747 // 1863 - SYS___ISWSPACE_A = 0x748 // 1864 - SYS___ISWUPPER_A = 0x749 // 1865 - SYS___ISWXDIGIT_A = 0x74A // 1866 - SYS___CONFSTR_A = 0x74B // 1867 - SYS___FTOK_A = 0x74C // 1868 - SYS___MKTEMP_A = 0x74D // 1869 - SYS___FDOPEN_A = 0x74E // 1870 - SYS___FLDATA_A = 0x74F // 1871 - SYS___REMOVE_A = 0x750 // 1872 - SYS___RENAME_A = 0x751 // 1873 - SYS___TMPNAM_A = 0x752 // 1874 - SYS___FOPEN_A = 0x753 // 1875 - SYS___FREOPEN_A = 0x754 // 1876 - SYS___CUSERID_A = 0x755 // 1877 - SYS___POPEN_A = 0x756 // 1878 - SYS___TEMPNAM_A = 0x757 // 1879 - SYS___FTW_A = 0x758 // 1880 - SYS___GETGRENT_A = 0x759 // 1881 - SYS___GETGRGID_A = 0x75A // 1882 - SYS___GETGRNAM_A = 0x75B // 1883 - SYS___GETGROUPSBYNAME_A = 0x75C // 1884 - SYS___GETHOSTENT_A = 0x75D // 1885 - SYS___GETHOSTNAME_A = 0x75E // 1886 - SYS___GETLOGIN_A = 0x75F // 1887 - SYS___INET_NTOP_A = 0x760 // 1888 - SYS___GETPASS_A = 0x761 // 1889 - SYS___GETPWENT_A = 0x762 // 1890 - SYS___GETPWNAM_A = 0x763 // 1891 - SYS___GETPWUID_A = 0x764 // 1892 - SYS_____CHECK_RESOURCE_AUTH_NP_A = 0x765 // 1893 - SYS___CHECKSCHENV_A = 0x766 // 1894 - SYS___CONNECTSERVER_A = 0x767 // 1895 - SYS___CONNECTWORKMGR_A = 0x768 // 1896 - SYS_____CONSOLE_A = 0x769 // 1897 - SYS___CREATEWORKUNIT_A = 0x76A // 1898 - SYS___CTERMID_A = 0x76B // 1899 - SYS___FMTMSG_A = 0x76C // 1900 - SYS___INITGROUPS_A = 0x76D // 1901 - SYS_____LOGIN_A = 0x76E // 1902 - SYS___MSGRCV_A = 0x76F // 1903 - SYS___MSGSND_A = 0x770 // 1904 - SYS___MSGXRCV_A = 0x771 // 1905 - 
SYS___NFTW_A = 0x772 // 1906 - SYS_____PASSWD_A = 0x773 // 1907 - SYS___PTHREAD_SECURITY_NP_A = 0x774 // 1908 - SYS___QUERYMETRICS_A = 0x775 // 1909 - SYS___QUERYSCHENV = 0x776 // 1910 - SYS___READV_A = 0x777 // 1911 - SYS_____SERVER_CLASSIFY_A = 0x778 // 1912 - SYS_____SERVER_INIT_A = 0x779 // 1913 - SYS_____SERVER_PWU_A = 0x77A // 1914 - SYS___STRCASECMP_A = 0x77B // 1915 - SYS___STRNCASECMP_A = 0x77C // 1916 - SYS___TTYNAME_A = 0x77D // 1917 - SYS___UNAME_A = 0x77E // 1918 - SYS___UTIMES_A = 0x77F // 1919 - SYS___W_GETPSENT_A = 0x780 // 1920 - SYS___WRITEV_A = 0x781 // 1921 - SYS___W_STATFS_A = 0x782 // 1922 - SYS___W_STATVFS_A = 0x783 // 1923 - SYS___FPUTC_A = 0x784 // 1924 - SYS___PUTCHAR_A = 0x785 // 1925 - SYS___PUTS_A = 0x786 // 1926 - SYS___FGETS_A = 0x787 // 1927 - SYS___GETS_A = 0x788 // 1928 - SYS___FPUTS_A = 0x789 // 1929 - SYS___FREAD_A = 0x78A // 1930 - SYS___FWRITE_A = 0x78B // 1931 - SYS___OPEN_O_A = 0x78C // 1932 - SYS___ISASCII = 0x78D // 1933 - SYS___CREAT_O_A = 0x78E // 1934 - SYS___ENVNA = 0x78F // 1935 - SYS___PUTC_A = 0x790 // 1936 - SYS___AE_THREAD_SETMODE = 0x791 // 1937 - SYS___AE_THREAD_SWAPMODE = 0x792 // 1938 - SYS___GETNETBYADDR_A = 0x793 // 1939 - SYS___GETNETBYNAME_A = 0x794 // 1940 - SYS___GETNETENT_A = 0x795 // 1941 - SYS___GETPROTOBYNAME_A = 0x796 // 1942 - SYS___GETPROTOBYNUMBER_A = 0x797 // 1943 - SYS___GETPROTOENT_A = 0x798 // 1944 - SYS___GETSERVBYNAME_A = 0x799 // 1945 - SYS___GETSERVBYPORT_A = 0x79A // 1946 - SYS___GETSERVENT_A = 0x79B // 1947 - SYS___ASCTIME_A = 0x79C // 1948 - SYS___CTIME_A = 0x79D // 1949 - SYS___GETDATE_A = 0x79E // 1950 - SYS___TZSET_A = 0x79F // 1951 - SYS___UTIME_A = 0x7A0 // 1952 - SYS___ASCTIME_R_A = 0x7A1 // 1953 - SYS___CTIME_R_A = 0x7A2 // 1954 - SYS___STRTOLL_A = 0x7A3 // 1955 - SYS___STRTOULL_A = 0x7A4 // 1956 - SYS___FPUTWC_A = 0x7A5 // 1957 - SYS___PUTWC_A = 0x7A6 // 1958 - SYS___PUTWCHAR_A = 0x7A7 // 1959 - SYS___FPUTWS_A = 0x7A8 // 1960 - SYS___UNGETWC_A = 0x7A9 // 1961 - SYS___FGETWC_A = 0x7AA // 1962 - SYS___GETWC_A = 0x7AB // 1963 - SYS___GETWCHAR_A = 0x7AC // 1964 - SYS___FGETWS_A = 0x7AD // 1965 - SYS___GETTIMEOFDAY_A = 0x7AE // 1966 - SYS___GMTIME_A = 0x7AF // 1967 - SYS___GMTIME_R_A = 0x7B0 // 1968 - SYS___LOCALTIME_A = 0x7B1 // 1969 - SYS___LOCALTIME_R_A = 0x7B2 // 1970 - SYS___MKTIME_A = 0x7B3 // 1971 - SYS___TZZNA = 0x7B4 // 1972 - SYS_UNATEXIT = 0x7B5 // 1973 - SYS___CEE3DMP_A = 0x7B6 // 1974 - SYS___CDUMP_A = 0x7B7 // 1975 - SYS___CSNAP_A = 0x7B8 // 1976 - SYS___CTEST_A = 0x7B9 // 1977 - SYS___CTRACE_A = 0x7BA // 1978 - SYS___VSWPRNTF2_A = 0x7BB // 1979 - SYS___INET_PTON_A = 0x7BC // 1980 - SYS___SYSLOG_A = 0x7BD // 1981 - SYS___CRYPT_A = 0x7BE // 1982 - SYS_____OPENDIR2_A = 0x7BF // 1983 - SYS_____READDIR2_A = 0x7C0 // 1984 - SYS___OPENDIR_A = 0x7C2 // 1986 - SYS___READDIR_A = 0x7C3 // 1987 - SYS_PREAD = 0x7C7 // 1991 - SYS_PWRITE = 0x7C8 // 1992 - SYS_M_CREATE_LAYOUT = 0x7C9 // 1993 - SYS_M_DESTROY_LAYOUT = 0x7CA // 1994 - SYS_M_GETVALUES_LAYOUT = 0x7CB // 1995 - SYS_M_SETVALUES_LAYOUT = 0x7CC // 1996 - SYS_M_TRANSFORM_LAYOUT = 0x7CD // 1997 - SYS_M_WTRANSFORM_LAYOUT = 0x7CE // 1998 - SYS_FWPRINTF = 0x7D1 // 2001 - SYS_WPRINTF = 0x7D2 // 2002 - SYS_VFWPRINT = 0x7D3 // 2003 - SYS_VFWPRINTF = 0x7D3 // 2003 - SYS_VWPRINTF = 0x7D4 // 2004 - SYS_FWSCANF = 0x7D5 // 2005 - SYS_WSCANF = 0x7D6 // 2006 - SYS_WCTRANS = 0x7D7 // 2007 - SYS_TOWCTRAN = 0x7D8 // 2008 - SYS_TOWCTRANS = 0x7D8 // 2008 - SYS___WCSTOD_A = 0x7D9 // 2009 - SYS___WCSTOL_A = 0x7DA // 2010 - SYS___WCSTOUL_A = 0x7DB // 2011 - SYS___BASENAME_A 
= 0x7DC // 2012 - SYS___DIRNAME_A = 0x7DD // 2013 - SYS___GLOB_A = 0x7DE // 2014 - SYS_FWIDE = 0x7DF // 2015 - SYS___OSNAME = 0x7E0 // 2016 - SYS_____OSNAME_A = 0x7E1 // 2017 - SYS___BTOWC_A = 0x7E4 // 2020 - SYS___WCTOB_A = 0x7E5 // 2021 - SYS___DBM_OPEN_A = 0x7E6 // 2022 - SYS___VFPRINTF2_A = 0x7E7 // 2023 - SYS___VPRINTF2_A = 0x7E8 // 2024 - SYS___VSPRINTF2_A = 0x7E9 // 2025 - SYS___CEIL_H = 0x7EA // 2026 - SYS___FLOOR_H = 0x7EB // 2027 - SYS___MODF_H = 0x7EC // 2028 - SYS___FABS_H = 0x7ED // 2029 - SYS___J0_H = 0x7EE // 2030 - SYS___J1_H = 0x7EF // 2031 - SYS___JN_H = 0x7F0 // 2032 - SYS___Y0_H = 0x7F1 // 2033 - SYS___Y1_H = 0x7F2 // 2034 - SYS___YN_H = 0x7F3 // 2035 - SYS___CEILF_H = 0x7F4 // 2036 - SYS___CEILL_H = 0x7F5 // 2037 - SYS___FLOORF_H = 0x7F6 // 2038 - SYS___FLOORL_H = 0x7F7 // 2039 - SYS___MODFF_H = 0x7F8 // 2040 - SYS___MODFL_H = 0x7F9 // 2041 - SYS___FABSF_H = 0x7FA // 2042 - SYS___FABSL_H = 0x7FB // 2043 - SYS___MALLOC24 = 0x7FC // 2044 - SYS___MALLOC31 = 0x7FD // 2045 - SYS_ACL_INIT = 0x7FE // 2046 - SYS_ACL_FREE = 0x7FF // 2047 - SYS_ACL_FIRST_ENTRY = 0x800 // 2048 - SYS_ACL_GET_ENTRY = 0x801 // 2049 - SYS_ACL_VALID = 0x802 // 2050 - SYS_ACL_CREATE_ENTRY = 0x803 // 2051 - SYS_ACL_DELETE_ENTRY = 0x804 // 2052 - SYS_ACL_UPDATE_ENTRY = 0x805 // 2053 - SYS_ACL_DELETE_FD = 0x806 // 2054 - SYS_ACL_DELETE_FILE = 0x807 // 2055 - SYS_ACL_GET_FD = 0x808 // 2056 - SYS_ACL_GET_FILE = 0x809 // 2057 - SYS_ACL_SET_FD = 0x80A // 2058 - SYS_ACL_SET_FILE = 0x80B // 2059 - SYS_ACL_FROM_TEXT = 0x80C // 2060 - SYS_ACL_TO_TEXT = 0x80D // 2061 - SYS_ACL_SORT = 0x80E // 2062 - SYS___SHUTDOWN_REGISTRATION = 0x80F // 2063 - SYS___ERFL_B = 0x810 // 2064 - SYS___ERFCL_B = 0x811 // 2065 - SYS___LGAMMAL_B = 0x812 // 2066 - SYS___SETHOOKEVENTS = 0x813 // 2067 - SYS_IF_NAMETOINDEX = 0x814 // 2068 - SYS_IF_INDEXTONAME = 0x815 // 2069 - SYS_IF_NAMEINDEX = 0x816 // 2070 - SYS_IF_FREENAMEINDEX = 0x817 // 2071 - SYS_GETADDRINFO = 0x818 // 2072 - SYS_GETNAMEINFO = 0x819 // 2073 - SYS_FREEADDRINFO = 0x81A // 2074 - SYS_GAI_STRERROR = 0x81B // 2075 - SYS_REXEC_AF = 0x81C // 2076 - SYS___POE = 0x81D // 2077 - SYS___DYNALLOC_A = 0x81F // 2079 - SYS___DYNFREE_A = 0x820 // 2080 - SYS___RES_QUERY_A = 0x821 // 2081 - SYS___RES_SEARCH_A = 0x822 // 2082 - SYS___RES_QUERYDOMAIN_A = 0x823 // 2083 - SYS___RES_MKQUERY_A = 0x824 // 2084 - SYS___RES_SEND_A = 0x825 // 2085 - SYS___DN_EXPAND_A = 0x826 // 2086 - SYS___DN_SKIPNAME_A = 0x827 // 2087 - SYS___DN_COMP_A = 0x828 // 2088 - SYS___DN_FIND_A = 0x829 // 2089 - SYS___NLIST_A = 0x82A // 2090 - SYS_____TCGETCP_A = 0x82B // 2091 - SYS_____TCSETCP_A = 0x82C // 2092 - SYS_____W_PIOCTL_A = 0x82E // 2094 - SYS___INET_ADDR_A = 0x82F // 2095 - SYS___INET_NTOA_A = 0x830 // 2096 - SYS___INET_NETWORK_A = 0x831 // 2097 - SYS___ACCEPT_A = 0x832 // 2098 - SYS___ACCEPT_AND_RECV_A = 0x833 // 2099 - SYS___BIND_A = 0x834 // 2100 - SYS___CONNECT_A = 0x835 // 2101 - SYS___GETPEERNAME_A = 0x836 // 2102 - SYS___GETSOCKNAME_A = 0x837 // 2103 - SYS___RECVFROM_A = 0x838 // 2104 - SYS___SENDTO_A = 0x839 // 2105 - SYS___SENDMSG_A = 0x83A // 2106 - SYS___RECVMSG_A = 0x83B // 2107 - SYS_____LCHATTR_A = 0x83C // 2108 - SYS___CABEND = 0x83D // 2109 - SYS___LE_CIB_GET = 0x83E // 2110 - SYS___SET_LAA_FOR_JIT = 0x83F // 2111 - SYS___LCHATTR = 0x840 // 2112 - SYS___WRITEDOWN = 0x841 // 2113 - SYS_PTHREAD_MUTEX_INIT2 = 0x842 // 2114 - SYS___ACOSHF_B = 0x843 // 2115 - SYS___ACOSHL_B = 0x844 // 2116 - SYS___ASINHF_B = 0x845 // 2117 - SYS___ASINHL_B = 0x846 // 2118 - SYS___ATANHF_B = 0x847 // 2119 - 
SYS___ATANHL_B = 0x848 // 2120 - SYS___CBRTF_B = 0x849 // 2121 - SYS___CBRTL_B = 0x84A // 2122 - SYS___COPYSIGNF_B = 0x84B // 2123 - SYS___COPYSIGNL_B = 0x84C // 2124 - SYS___COTANF_B = 0x84D // 2125 - SYS___COTAN_B = 0x84E // 2126 - SYS___COTANL_B = 0x84F // 2127 - SYS___EXP2F_B = 0x850 // 2128 - SYS___EXP2L_B = 0x851 // 2129 - SYS___EXPM1F_B = 0x852 // 2130 - SYS___EXPM1L_B = 0x853 // 2131 - SYS___FDIMF_B = 0x854 // 2132 - SYS___FDIM_B = 0x855 // 2133 - SYS___FDIML_B = 0x856 // 2134 - SYS___HYPOTF_B = 0x857 // 2135 - SYS___HYPOTL_B = 0x858 // 2136 - SYS___LOG1PF_B = 0x859 // 2137 - SYS___LOG1PL_B = 0x85A // 2138 - SYS___LOG2F_B = 0x85B // 2139 - SYS___LOG2_B = 0x85C // 2140 - SYS___LOG2L_B = 0x85D // 2141 - SYS___REMAINDERF_B = 0x85E // 2142 - SYS___REMAINDERL_B = 0x85F // 2143 - SYS___REMQUOF_B = 0x860 // 2144 - SYS___REMQUO_B = 0x861 // 2145 - SYS___REMQUOL_B = 0x862 // 2146 - SYS___TGAMMAF_B = 0x863 // 2147 - SYS___TGAMMA_B = 0x864 // 2148 - SYS___TGAMMAL_B = 0x865 // 2149 - SYS___TRUNCF_B = 0x866 // 2150 - SYS___TRUNC_B = 0x867 // 2151 - SYS___TRUNCL_B = 0x868 // 2152 - SYS___LGAMMAF_B = 0x869 // 2153 - SYS___LROUNDF_B = 0x86A // 2154 - SYS___LROUND_B = 0x86B // 2155 - SYS___ERFF_B = 0x86C // 2156 - SYS___ERFCF_B = 0x86D // 2157 - SYS_ACOSHF = 0x86E // 2158 - SYS_ACOSHL = 0x86F // 2159 - SYS_ASINHF = 0x870 // 2160 - SYS_ASINHL = 0x871 // 2161 - SYS_ATANHF = 0x872 // 2162 - SYS_ATANHL = 0x873 // 2163 - SYS_CBRTF = 0x874 // 2164 - SYS_CBRTL = 0x875 // 2165 - SYS_COPYSIGNF = 0x876 // 2166 - SYS_CPYSIGNF = 0x876 // 2166 - SYS_COPYSIGNL = 0x877 // 2167 - SYS_CPYSIGNL = 0x877 // 2167 - SYS_COTANF = 0x878 // 2168 - SYS___COTANF = 0x878 // 2168 - SYS_COTAN = 0x879 // 2169 - SYS___COTAN = 0x879 // 2169 - SYS_COTANL = 0x87A // 2170 - SYS___COTANL = 0x87A // 2170 - SYS_EXP2F = 0x87B // 2171 - SYS_EXP2L = 0x87C // 2172 - SYS_EXPM1F = 0x87D // 2173 - SYS_EXPM1L = 0x87E // 2174 - SYS_FDIMF = 0x87F // 2175 - SYS_FDIM = 0x881 // 2177 - SYS_FDIML = 0x882 // 2178 - SYS_HYPOTF = 0x883 // 2179 - SYS_HYPOTL = 0x884 // 2180 - SYS_LOG1PF = 0x885 // 2181 - SYS_LOG1PL = 0x886 // 2182 - SYS_LOG2F = 0x887 // 2183 - SYS_LOG2 = 0x888 // 2184 - SYS_LOG2L = 0x889 // 2185 - SYS_REMAINDERF = 0x88A // 2186 - SYS_REMAINDF = 0x88A // 2186 - SYS_REMAINDERL = 0x88B // 2187 - SYS_REMAINDL = 0x88B // 2187 - SYS_REMQUOF = 0x88C // 2188 - SYS_REMQUO = 0x88D // 2189 - SYS_REMQUOL = 0x88E // 2190 - SYS_TGAMMAF = 0x88F // 2191 - SYS_TGAMMA = 0x890 // 2192 - SYS_TGAMMAL = 0x891 // 2193 - SYS_TRUNCF = 0x892 // 2194 - SYS_TRUNC = 0x893 // 2195 - SYS_TRUNCL = 0x894 // 2196 - SYS_LGAMMAF = 0x895 // 2197 - SYS_LGAMMAL = 0x896 // 2198 - SYS_LROUNDF = 0x897 // 2199 - SYS_LROUND = 0x898 // 2200 - SYS_ERFF = 0x899 // 2201 - SYS_ERFL = 0x89A // 2202 - SYS_ERFCF = 0x89B // 2203 - SYS_ERFCL = 0x89C // 2204 - SYS___EXP2_B = 0x89D // 2205 - SYS_EXP2 = 0x89E // 2206 - SYS___FAR_JUMP = 0x89F // 2207 - SYS___TCGETATTR_A = 0x8A1 // 2209 - SYS___TCSETATTR_A = 0x8A2 // 2210 - SYS___SUPERKILL = 0x8A4 // 2212 - SYS___LE_CONDITION_TOKEN_BUILD = 0x8A5 // 2213 - SYS___LE_MSG_ADD_INSERT = 0x8A6 // 2214 - SYS___LE_MSG_GET = 0x8A7 // 2215 - SYS___LE_MSG_GET_AND_WRITE = 0x8A8 // 2216 - SYS___LE_MSG_WRITE = 0x8A9 // 2217 - SYS___ITOA = 0x8AA // 2218 - SYS___UTOA = 0x8AB // 2219 - SYS___LTOA = 0x8AC // 2220 - SYS___ULTOA = 0x8AD // 2221 - SYS___LLTOA = 0x8AE // 2222 - SYS___ULLTOA = 0x8AF // 2223 - SYS___ITOA_A = 0x8B0 // 2224 - SYS___UTOA_A = 0x8B1 // 2225 - SYS___LTOA_A = 0x8B2 // 2226 - SYS___ULTOA_A = 0x8B3 // 2227 - SYS___LLTOA_A = 0x8B4 // 2228 - 
SYS___ULLTOA_A = 0x8B5 // 2229 - SYS_____GETENV_A = 0x8C3 // 2243 - SYS___REXEC_A = 0x8C4 // 2244 - SYS___REXEC_AF_A = 0x8C5 // 2245 - SYS___GETUTXENT_A = 0x8C6 // 2246 - SYS___GETUTXID_A = 0x8C7 // 2247 - SYS___GETUTXLINE_A = 0x8C8 // 2248 - SYS___PUTUTXLINE_A = 0x8C9 // 2249 - SYS_____UTMPXNAME_A = 0x8CA // 2250 - SYS___PUTC_UNLOCKED_A = 0x8CB // 2251 - SYS___PUTCHAR_UNLOCKED_A = 0x8CC // 2252 - SYS___SNPRINTF_A = 0x8CD // 2253 - SYS___VSNPRINTF_A = 0x8CE // 2254 - SYS___DLOPEN_A = 0x8D0 // 2256 - SYS___DLSYM_A = 0x8D1 // 2257 - SYS___DLERROR_A = 0x8D2 // 2258 - SYS_FLOCKFILE = 0x8D3 // 2259 - SYS_FTRYLOCKFILE = 0x8D4 // 2260 - SYS_FUNLOCKFILE = 0x8D5 // 2261 - SYS_GETC_UNLOCKED = 0x8D6 // 2262 - SYS_GETCHAR_UNLOCKED = 0x8D7 // 2263 - SYS_PUTC_UNLOCKED = 0x8D8 // 2264 - SYS_PUTCHAR_UNLOCKED = 0x8D9 // 2265 - SYS_SNPRINTF = 0x8DA // 2266 - SYS_VSNPRINTF = 0x8DB // 2267 - SYS_DLOPEN = 0x8DD // 2269 - SYS_DLSYM = 0x8DE // 2270 - SYS_DLCLOSE = 0x8DF // 2271 - SYS_DLERROR = 0x8E0 // 2272 - SYS___SET_EXCEPTION_HANDLER = 0x8E2 // 2274 - SYS___RESET_EXCEPTION_HANDLER = 0x8E3 // 2275 - SYS___VHM_EVENT = 0x8E4 // 2276 - SYS___ABS_H = 0x8E6 // 2278 - SYS___ABSF_H = 0x8E7 // 2279 - SYS___ABSL_H = 0x8E8 // 2280 - SYS___ACOS_H = 0x8E9 // 2281 - SYS___ACOSF_H = 0x8EA // 2282 - SYS___ACOSL_H = 0x8EB // 2283 - SYS___ACOSH_H = 0x8EC // 2284 - SYS___ASIN_H = 0x8ED // 2285 - SYS___ASINF_H = 0x8EE // 2286 - SYS___ASINL_H = 0x8EF // 2287 - SYS___ASINH_H = 0x8F0 // 2288 - SYS___ATAN_H = 0x8F1 // 2289 - SYS___ATANF_H = 0x8F2 // 2290 - SYS___ATANL_H = 0x8F3 // 2291 - SYS___ATANH_H = 0x8F4 // 2292 - SYS___ATANHF_H = 0x8F5 // 2293 - SYS___ATANHL_H = 0x8F6 // 2294 - SYS___ATAN2_H = 0x8F7 // 2295 - SYS___ATAN2F_H = 0x8F8 // 2296 - SYS___ATAN2L_H = 0x8F9 // 2297 - SYS___CBRT_H = 0x8FA // 2298 - SYS___COPYSIGNF_H = 0x8FB // 2299 - SYS___COPYSIGNL_H = 0x8FC // 2300 - SYS___COS_H = 0x8FD // 2301 - SYS___COSF_H = 0x8FE // 2302 - SYS___COSL_H = 0x8FF // 2303 - SYS___COSHF_H = 0x900 // 2304 - SYS___COSHL_H = 0x901 // 2305 - SYS___COTAN_H = 0x902 // 2306 - SYS___COTANF_H = 0x903 // 2307 - SYS___COTANL_H = 0x904 // 2308 - SYS___ERF_H = 0x905 // 2309 - SYS___ERFF_H = 0x906 // 2310 - SYS___ERFL_H = 0x907 // 2311 - SYS___ERFC_H = 0x908 // 2312 - SYS___ERFCF_H = 0x909 // 2313 - SYS___ERFCL_H = 0x90A // 2314 - SYS___EXP_H = 0x90B // 2315 - SYS___EXPF_H = 0x90C // 2316 - SYS___EXPL_H = 0x90D // 2317 - SYS___EXPM1_H = 0x90E // 2318 - SYS___FDIM_H = 0x90F // 2319 - SYS___FDIMF_H = 0x910 // 2320 - SYS___FDIML_H = 0x911 // 2321 - SYS___FMOD_H = 0x912 // 2322 - SYS___FMODF_H = 0x913 // 2323 - SYS___FMODL_H = 0x914 // 2324 - SYS___GAMMA_H = 0x915 // 2325 - SYS___HYPOT_H = 0x916 // 2326 - SYS___ILOGB_H = 0x917 // 2327 - SYS___LGAMMA_H = 0x918 // 2328 - SYS___LGAMMAF_H = 0x919 // 2329 - SYS___LOG_H = 0x91A // 2330 - SYS___LOGF_H = 0x91B // 2331 - SYS___LOGL_H = 0x91C // 2332 - SYS___LOGB_H = 0x91D // 2333 - SYS___LOG2_H = 0x91E // 2334 - SYS___LOG2F_H = 0x91F // 2335 - SYS___LOG2L_H = 0x920 // 2336 - SYS___LOG1P_H = 0x921 // 2337 - SYS___LOG10_H = 0x922 // 2338 - SYS___LOG10F_H = 0x923 // 2339 - SYS___LOG10L_H = 0x924 // 2340 - SYS___LROUND_H = 0x925 // 2341 - SYS___LROUNDF_H = 0x926 // 2342 - SYS___NEXTAFTER_H = 0x927 // 2343 - SYS___POW_H = 0x928 // 2344 - SYS___POWF_H = 0x929 // 2345 - SYS___POWL_H = 0x92A // 2346 - SYS___REMAINDER_H = 0x92B // 2347 - SYS___RINT_H = 0x92C // 2348 - SYS___SCALB_H = 0x92D // 2349 - SYS___SIN_H = 0x92E // 2350 - SYS___SINF_H = 0x92F // 2351 - SYS___SINL_H = 0x930 // 2352 - SYS___SINH_H = 0x931 // 2353 - 
SYS___SINHF_H = 0x932 // 2354 - SYS___SINHL_H = 0x933 // 2355 - SYS___SQRT_H = 0x934 // 2356 - SYS___SQRTF_H = 0x935 // 2357 - SYS___SQRTL_H = 0x936 // 2358 - SYS___TAN_H = 0x937 // 2359 - SYS___TANF_H = 0x938 // 2360 - SYS___TANL_H = 0x939 // 2361 - SYS___TANH_H = 0x93A // 2362 - SYS___TANHF_H = 0x93B // 2363 - SYS___TANHL_H = 0x93C // 2364 - SYS___TGAMMA_H = 0x93D // 2365 - SYS___TGAMMAF_H = 0x93E // 2366 - SYS___TRUNC_H = 0x93F // 2367 - SYS___TRUNCF_H = 0x940 // 2368 - SYS___TRUNCL_H = 0x941 // 2369 - SYS___COSH_H = 0x942 // 2370 - SYS___LE_DEBUG_SET_RESUME_MCH = 0x943 // 2371 - SYS_VFSCANF = 0x944 // 2372 - SYS_VSCANF = 0x946 // 2374 - SYS_VSSCANF = 0x948 // 2376 - SYS_VFWSCANF = 0x94A // 2378 - SYS_VWSCANF = 0x94C // 2380 - SYS_VSWSCANF = 0x94E // 2382 - SYS_IMAXABS = 0x950 // 2384 - SYS_IMAXDIV = 0x951 // 2385 - SYS_STRTOIMAX = 0x952 // 2386 - SYS_STRTOUMAX = 0x953 // 2387 - SYS_WCSTOIMAX = 0x954 // 2388 - SYS_WCSTOUMAX = 0x955 // 2389 - SYS_ATOLL = 0x956 // 2390 - SYS_STRTOF = 0x957 // 2391 - SYS_STRTOLD = 0x958 // 2392 - SYS_WCSTOF = 0x959 // 2393 - SYS_WCSTOLD = 0x95A // 2394 - SYS_INET6_RTH_SPACE = 0x95B // 2395 - SYS_INET6_RTH_INIT = 0x95C // 2396 - SYS_INET6_RTH_ADD = 0x95D // 2397 - SYS_INET6_RTH_REVERSE = 0x95E // 2398 - SYS_INET6_RTH_SEGMENTS = 0x95F // 2399 - SYS_INET6_RTH_GETADDR = 0x960 // 2400 - SYS_INET6_OPT_INIT = 0x961 // 2401 - SYS_INET6_OPT_APPEND = 0x962 // 2402 - SYS_INET6_OPT_FINISH = 0x963 // 2403 - SYS_INET6_OPT_SET_VAL = 0x964 // 2404 - SYS_INET6_OPT_NEXT = 0x965 // 2405 - SYS_INET6_OPT_FIND = 0x966 // 2406 - SYS_INET6_OPT_GET_VAL = 0x967 // 2407 - SYS___POW_I = 0x987 // 2439 - SYS___POW_I_B = 0x988 // 2440 - SYS___POW_I_H = 0x989 // 2441 - SYS___POW_II = 0x98A // 2442 - SYS___POW_II_B = 0x98B // 2443 - SYS___POW_II_H = 0x98C // 2444 - SYS_CABS = 0x98E // 2446 - SYS___CABS_B = 0x98F // 2447 - SYS___CABS_H = 0x990 // 2448 - SYS_CABSF = 0x991 // 2449 - SYS___CABSF_B = 0x992 // 2450 - SYS___CABSF_H = 0x993 // 2451 - SYS_CABSL = 0x994 // 2452 - SYS___CABSL_B = 0x995 // 2453 - SYS___CABSL_H = 0x996 // 2454 - SYS_CACOS = 0x997 // 2455 - SYS___CACOS_B = 0x998 // 2456 - SYS___CACOS_H = 0x999 // 2457 - SYS_CACOSF = 0x99A // 2458 - SYS___CACOSF_B = 0x99B // 2459 - SYS___CACOSF_H = 0x99C // 2460 - SYS_CACOSL = 0x99D // 2461 - SYS___CACOSL_B = 0x99E // 2462 - SYS___CACOSL_H = 0x99F // 2463 - SYS_CACOSH = 0x9A0 // 2464 - SYS___CACOSH_B = 0x9A1 // 2465 - SYS___CACOSH_H = 0x9A2 // 2466 - SYS_CACOSHF = 0x9A3 // 2467 - SYS___CACOSHF_B = 0x9A4 // 2468 - SYS___CACOSHF_H = 0x9A5 // 2469 - SYS_CACOSHL = 0x9A6 // 2470 - SYS___CACOSHL_B = 0x9A7 // 2471 - SYS___CACOSHL_H = 0x9A8 // 2472 - SYS_CARG = 0x9A9 // 2473 - SYS___CARG_B = 0x9AA // 2474 - SYS___CARG_H = 0x9AB // 2475 - SYS_CARGF = 0x9AC // 2476 - SYS___CARGF_B = 0x9AD // 2477 - SYS___CARGF_H = 0x9AE // 2478 - SYS_CARGL = 0x9AF // 2479 - SYS___CARGL_B = 0x9B0 // 2480 - SYS___CARGL_H = 0x9B1 // 2481 - SYS_CASIN = 0x9B2 // 2482 - SYS___CASIN_B = 0x9B3 // 2483 - SYS___CASIN_H = 0x9B4 // 2484 - SYS_CASINF = 0x9B5 // 2485 - SYS___CASINF_B = 0x9B6 // 2486 - SYS___CASINF_H = 0x9B7 // 2487 - SYS_CASINL = 0x9B8 // 2488 - SYS___CASINL_B = 0x9B9 // 2489 - SYS___CASINL_H = 0x9BA // 2490 - SYS_CASINH = 0x9BB // 2491 - SYS___CASINH_B = 0x9BC // 2492 - SYS___CASINH_H = 0x9BD // 2493 - SYS_CASINHF = 0x9BE // 2494 - SYS___CASINHF_B = 0x9BF // 2495 - SYS___CASINHF_H = 0x9C0 // 2496 - SYS_CASINHL = 0x9C1 // 2497 - SYS___CASINHL_B = 0x9C2 // 2498 - SYS___CASINHL_H = 0x9C3 // 2499 - SYS_CATAN = 0x9C4 // 2500 - SYS___CATAN_B = 0x9C5 // 2501 - 
SYS___CATAN_H = 0x9C6 // 2502 - SYS_CATANF = 0x9C7 // 2503 - SYS___CATANF_B = 0x9C8 // 2504 - SYS___CATANF_H = 0x9C9 // 2505 - SYS_CATANL = 0x9CA // 2506 - SYS___CATANL_B = 0x9CB // 2507 - SYS___CATANL_H = 0x9CC // 2508 - SYS_CATANH = 0x9CD // 2509 - SYS___CATANH_B = 0x9CE // 2510 - SYS___CATANH_H = 0x9CF // 2511 - SYS_CATANHF = 0x9D0 // 2512 - SYS___CATANHF_B = 0x9D1 // 2513 - SYS___CATANHF_H = 0x9D2 // 2514 - SYS_CATANHL = 0x9D3 // 2515 - SYS___CATANHL_B = 0x9D4 // 2516 - SYS___CATANHL_H = 0x9D5 // 2517 - SYS_CCOS = 0x9D6 // 2518 - SYS___CCOS_B = 0x9D7 // 2519 - SYS___CCOS_H = 0x9D8 // 2520 - SYS_CCOSF = 0x9D9 // 2521 - SYS___CCOSF_B = 0x9DA // 2522 - SYS___CCOSF_H = 0x9DB // 2523 - SYS_CCOSL = 0x9DC // 2524 - SYS___CCOSL_B = 0x9DD // 2525 - SYS___CCOSL_H = 0x9DE // 2526 - SYS_CCOSH = 0x9DF // 2527 - SYS___CCOSH_B = 0x9E0 // 2528 - SYS___CCOSH_H = 0x9E1 // 2529 - SYS_CCOSHF = 0x9E2 // 2530 - SYS___CCOSHF_B = 0x9E3 // 2531 - SYS___CCOSHF_H = 0x9E4 // 2532 - SYS_CCOSHL = 0x9E5 // 2533 - SYS___CCOSHL_B = 0x9E6 // 2534 - SYS___CCOSHL_H = 0x9E7 // 2535 - SYS_CEXP = 0x9E8 // 2536 - SYS___CEXP_B = 0x9E9 // 2537 - SYS___CEXP_H = 0x9EA // 2538 - SYS_CEXPF = 0x9EB // 2539 - SYS___CEXPF_B = 0x9EC // 2540 - SYS___CEXPF_H = 0x9ED // 2541 - SYS_CEXPL = 0x9EE // 2542 - SYS___CEXPL_B = 0x9EF // 2543 - SYS___CEXPL_H = 0x9F0 // 2544 - SYS_CIMAG = 0x9F1 // 2545 - SYS___CIMAG_B = 0x9F2 // 2546 - SYS___CIMAG_H = 0x9F3 // 2547 - SYS_CIMAGF = 0x9F4 // 2548 - SYS___CIMAGF_B = 0x9F5 // 2549 - SYS___CIMAGF_H = 0x9F6 // 2550 - SYS_CIMAGL = 0x9F7 // 2551 - SYS___CIMAGL_B = 0x9F8 // 2552 - SYS___CIMAGL_H = 0x9F9 // 2553 - SYS___CLOG = 0x9FA // 2554 - SYS___CLOG_B = 0x9FB // 2555 - SYS___CLOG_H = 0x9FC // 2556 - SYS_CLOGF = 0x9FD // 2557 - SYS___CLOGF_B = 0x9FE // 2558 - SYS___CLOGF_H = 0x9FF // 2559 - SYS_CLOGL = 0xA00 // 2560 - SYS___CLOGL_B = 0xA01 // 2561 - SYS___CLOGL_H = 0xA02 // 2562 - SYS_CONJ = 0xA03 // 2563 - SYS___CONJ_B = 0xA04 // 2564 - SYS___CONJ_H = 0xA05 // 2565 - SYS_CONJF = 0xA06 // 2566 - SYS___CONJF_B = 0xA07 // 2567 - SYS___CONJF_H = 0xA08 // 2568 - SYS_CONJL = 0xA09 // 2569 - SYS___CONJL_B = 0xA0A // 2570 - SYS___CONJL_H = 0xA0B // 2571 - SYS_CPOW = 0xA0C // 2572 - SYS___CPOW_B = 0xA0D // 2573 - SYS___CPOW_H = 0xA0E // 2574 - SYS_CPOWF = 0xA0F // 2575 - SYS___CPOWF_B = 0xA10 // 2576 - SYS___CPOWF_H = 0xA11 // 2577 - SYS_CPOWL = 0xA12 // 2578 - SYS___CPOWL_B = 0xA13 // 2579 - SYS___CPOWL_H = 0xA14 // 2580 - SYS_CPROJ = 0xA15 // 2581 - SYS___CPROJ_B = 0xA16 // 2582 - SYS___CPROJ_H = 0xA17 // 2583 - SYS_CPROJF = 0xA18 // 2584 - SYS___CPROJF_B = 0xA19 // 2585 - SYS___CPROJF_H = 0xA1A // 2586 - SYS_CPROJL = 0xA1B // 2587 - SYS___CPROJL_B = 0xA1C // 2588 - SYS___CPROJL_H = 0xA1D // 2589 - SYS_CREAL = 0xA1E // 2590 - SYS___CREAL_B = 0xA1F // 2591 - SYS___CREAL_H = 0xA20 // 2592 - SYS_CREALF = 0xA21 // 2593 - SYS___CREALF_B = 0xA22 // 2594 - SYS___CREALF_H = 0xA23 // 2595 - SYS_CREALL = 0xA24 // 2596 - SYS___CREALL_B = 0xA25 // 2597 - SYS___CREALL_H = 0xA26 // 2598 - SYS_CSIN = 0xA27 // 2599 - SYS___CSIN_B = 0xA28 // 2600 - SYS___CSIN_H = 0xA29 // 2601 - SYS_CSINF = 0xA2A // 2602 - SYS___CSINF_B = 0xA2B // 2603 - SYS___CSINF_H = 0xA2C // 2604 - SYS_CSINL = 0xA2D // 2605 - SYS___CSINL_B = 0xA2E // 2606 - SYS___CSINL_H = 0xA2F // 2607 - SYS_CSINH = 0xA30 // 2608 - SYS___CSINH_B = 0xA31 // 2609 - SYS___CSINH_H = 0xA32 // 2610 - SYS_CSINHF = 0xA33 // 2611 - SYS___CSINHF_B = 0xA34 // 2612 - SYS___CSINHF_H = 0xA35 // 2613 - SYS_CSINHL = 0xA36 // 2614 - SYS___CSINHL_B = 0xA37 // 2615 - SYS___CSINHL_H = 0xA38 
// 2616 - SYS_CSQRT = 0xA39 // 2617 - SYS___CSQRT_B = 0xA3A // 2618 - SYS___CSQRT_H = 0xA3B // 2619 - SYS_CSQRTF = 0xA3C // 2620 - SYS___CSQRTF_B = 0xA3D // 2621 - SYS___CSQRTF_H = 0xA3E // 2622 - SYS_CSQRTL = 0xA3F // 2623 - SYS___CSQRTL_B = 0xA40 // 2624 - SYS___CSQRTL_H = 0xA41 // 2625 - SYS_CTAN = 0xA42 // 2626 - SYS___CTAN_B = 0xA43 // 2627 - SYS___CTAN_H = 0xA44 // 2628 - SYS_CTANF = 0xA45 // 2629 - SYS___CTANF_B = 0xA46 // 2630 - SYS___CTANF_H = 0xA47 // 2631 - SYS_CTANL = 0xA48 // 2632 - SYS___CTANL_B = 0xA49 // 2633 - SYS___CTANL_H = 0xA4A // 2634 - SYS_CTANH = 0xA4B // 2635 - SYS___CTANH_B = 0xA4C // 2636 - SYS___CTANH_H = 0xA4D // 2637 - SYS_CTANHF = 0xA4E // 2638 - SYS___CTANHF_B = 0xA4F // 2639 - SYS___CTANHF_H = 0xA50 // 2640 - SYS_CTANHL = 0xA51 // 2641 - SYS___CTANHL_B = 0xA52 // 2642 - SYS___CTANHL_H = 0xA53 // 2643 - SYS___ACOSHF_H = 0xA54 // 2644 - SYS___ACOSHL_H = 0xA55 // 2645 - SYS___ASINHF_H = 0xA56 // 2646 - SYS___ASINHL_H = 0xA57 // 2647 - SYS___CBRTF_H = 0xA58 // 2648 - SYS___CBRTL_H = 0xA59 // 2649 - SYS___COPYSIGN_B = 0xA5A // 2650 - SYS___EXPM1F_H = 0xA5B // 2651 - SYS___EXPM1L_H = 0xA5C // 2652 - SYS___EXP2_H = 0xA5D // 2653 - SYS___EXP2F_H = 0xA5E // 2654 - SYS___EXP2L_H = 0xA5F // 2655 - SYS___LOG1PF_H = 0xA60 // 2656 - SYS___LOG1PL_H = 0xA61 // 2657 - SYS___LGAMMAL_H = 0xA62 // 2658 - SYS_FMA = 0xA63 // 2659 - SYS___FMA_B = 0xA64 // 2660 - SYS___FMA_H = 0xA65 // 2661 - SYS_FMAF = 0xA66 // 2662 - SYS___FMAF_B = 0xA67 // 2663 - SYS___FMAF_H = 0xA68 // 2664 - SYS_FMAL = 0xA69 // 2665 - SYS___FMAL_B = 0xA6A // 2666 - SYS___FMAL_H = 0xA6B // 2667 - SYS_FMAX = 0xA6C // 2668 - SYS___FMAX_B = 0xA6D // 2669 - SYS___FMAX_H = 0xA6E // 2670 - SYS_FMAXF = 0xA6F // 2671 - SYS___FMAXF_B = 0xA70 // 2672 - SYS___FMAXF_H = 0xA71 // 2673 - SYS_FMAXL = 0xA72 // 2674 - SYS___FMAXL_B = 0xA73 // 2675 - SYS___FMAXL_H = 0xA74 // 2676 - SYS_FMIN = 0xA75 // 2677 - SYS___FMIN_B = 0xA76 // 2678 - SYS___FMIN_H = 0xA77 // 2679 - SYS_FMINF = 0xA78 // 2680 - SYS___FMINF_B = 0xA79 // 2681 - SYS___FMINF_H = 0xA7A // 2682 - SYS_FMINL = 0xA7B // 2683 - SYS___FMINL_B = 0xA7C // 2684 - SYS___FMINL_H = 0xA7D // 2685 - SYS_ILOGBF = 0xA7E // 2686 - SYS___ILOGBF_B = 0xA7F // 2687 - SYS___ILOGBF_H = 0xA80 // 2688 - SYS_ILOGBL = 0xA81 // 2689 - SYS___ILOGBL_B = 0xA82 // 2690 - SYS___ILOGBL_H = 0xA83 // 2691 - SYS_LLRINT = 0xA84 // 2692 - SYS___LLRINT_B = 0xA85 // 2693 - SYS___LLRINT_H = 0xA86 // 2694 - SYS_LLRINTF = 0xA87 // 2695 - SYS___LLRINTF_B = 0xA88 // 2696 - SYS___LLRINTF_H = 0xA89 // 2697 - SYS_LLRINTL = 0xA8A // 2698 - SYS___LLRINTL_B = 0xA8B // 2699 - SYS___LLRINTL_H = 0xA8C // 2700 - SYS_LLROUND = 0xA8D // 2701 - SYS___LLROUND_B = 0xA8E // 2702 - SYS___LLROUND_H = 0xA8F // 2703 - SYS_LLROUNDF = 0xA90 // 2704 - SYS___LLROUNDF_B = 0xA91 // 2705 - SYS___LLROUNDF_H = 0xA92 // 2706 - SYS_LLROUNDL = 0xA93 // 2707 - SYS___LLROUNDL_B = 0xA94 // 2708 - SYS___LLROUNDL_H = 0xA95 // 2709 - SYS_LOGBF = 0xA96 // 2710 - SYS___LOGBF_B = 0xA97 // 2711 - SYS___LOGBF_H = 0xA98 // 2712 - SYS_LOGBL = 0xA99 // 2713 - SYS___LOGBL_B = 0xA9A // 2714 - SYS___LOGBL_H = 0xA9B // 2715 - SYS_LRINT = 0xA9C // 2716 - SYS___LRINT_B = 0xA9D // 2717 - SYS___LRINT_H = 0xA9E // 2718 - SYS_LRINTF = 0xA9F // 2719 - SYS___LRINTF_B = 0xAA0 // 2720 - SYS___LRINTF_H = 0xAA1 // 2721 - SYS_LRINTL = 0xAA2 // 2722 - SYS___LRINTL_B = 0xAA3 // 2723 - SYS___LRINTL_H = 0xAA4 // 2724 - SYS_LROUNDL = 0xAA5 // 2725 - SYS___LROUNDL_B = 0xAA6 // 2726 - SYS___LROUNDL_H = 0xAA7 // 2727 - SYS_NAN = 0xAA8 // 2728 - SYS___NAN_B = 0xAA9 // 2729 - 
SYS_NANF = 0xAAA // 2730 - SYS___NANF_B = 0xAAB // 2731 - SYS_NANL = 0xAAC // 2732 - SYS___NANL_B = 0xAAD // 2733 - SYS_NEARBYINT = 0xAAE // 2734 - SYS___NEARBYINT_B = 0xAAF // 2735 - SYS___NEARBYINT_H = 0xAB0 // 2736 - SYS_NEARBYINTF = 0xAB1 // 2737 - SYS___NEARBYINTF_B = 0xAB2 // 2738 - SYS___NEARBYINTF_H = 0xAB3 // 2739 - SYS_NEARBYINTL = 0xAB4 // 2740 - SYS___NEARBYINTL_B = 0xAB5 // 2741 - SYS___NEARBYINTL_H = 0xAB6 // 2742 - SYS_NEXTAFTERF = 0xAB7 // 2743 - SYS___NEXTAFTERF_B = 0xAB8 // 2744 - SYS___NEXTAFTERF_H = 0xAB9 // 2745 - SYS_NEXTAFTERL = 0xABA // 2746 - SYS___NEXTAFTERL_B = 0xABB // 2747 - SYS___NEXTAFTERL_H = 0xABC // 2748 - SYS_NEXTTOWARD = 0xABD // 2749 - SYS___NEXTTOWARD_B = 0xABE // 2750 - SYS___NEXTTOWARD_H = 0xABF // 2751 - SYS_NEXTTOWARDF = 0xAC0 // 2752 - SYS___NEXTTOWARDF_B = 0xAC1 // 2753 - SYS___NEXTTOWARDF_H = 0xAC2 // 2754 - SYS_NEXTTOWARDL = 0xAC3 // 2755 - SYS___NEXTTOWARDL_B = 0xAC4 // 2756 - SYS___NEXTTOWARDL_H = 0xAC5 // 2757 - SYS___REMAINDERF_H = 0xAC6 // 2758 - SYS___REMAINDERL_H = 0xAC7 // 2759 - SYS___REMQUO_H = 0xAC8 // 2760 - SYS___REMQUOF_H = 0xAC9 // 2761 - SYS___REMQUOL_H = 0xACA // 2762 - SYS_RINTF = 0xACB // 2763 - SYS___RINTF_B = 0xACC // 2764 - SYS_RINTL = 0xACD // 2765 - SYS___RINTL_B = 0xACE // 2766 - SYS_ROUND = 0xACF // 2767 - SYS___ROUND_B = 0xAD0 // 2768 - SYS___ROUND_H = 0xAD1 // 2769 - SYS_ROUNDF = 0xAD2 // 2770 - SYS___ROUNDF_B = 0xAD3 // 2771 - SYS___ROUNDF_H = 0xAD4 // 2772 - SYS_ROUNDL = 0xAD5 // 2773 - SYS___ROUNDL_B = 0xAD6 // 2774 - SYS___ROUNDL_H = 0xAD7 // 2775 - SYS_SCALBLN = 0xAD8 // 2776 - SYS___SCALBLN_B = 0xAD9 // 2777 - SYS___SCALBLN_H = 0xADA // 2778 - SYS_SCALBLNF = 0xADB // 2779 - SYS___SCALBLNF_B = 0xADC // 2780 - SYS___SCALBLNF_H = 0xADD // 2781 - SYS_SCALBLNL = 0xADE // 2782 - SYS___SCALBLNL_B = 0xADF // 2783 - SYS___SCALBLNL_H = 0xAE0 // 2784 - SYS___SCALBN_B = 0xAE1 // 2785 - SYS___SCALBN_H = 0xAE2 // 2786 - SYS_SCALBNF = 0xAE3 // 2787 - SYS___SCALBNF_B = 0xAE4 // 2788 - SYS___SCALBNF_H = 0xAE5 // 2789 - SYS_SCALBNL = 0xAE6 // 2790 - SYS___SCALBNL_B = 0xAE7 // 2791 - SYS___SCALBNL_H = 0xAE8 // 2792 - SYS___TGAMMAL_H = 0xAE9 // 2793 - SYS_FECLEAREXCEPT = 0xAEA // 2794 - SYS_FEGETENV = 0xAEB // 2795 - SYS_FEGETEXCEPTFLAG = 0xAEC // 2796 - SYS_FEGETROUND = 0xAED // 2797 - SYS_FEHOLDEXCEPT = 0xAEE // 2798 - SYS_FERAISEEXCEPT = 0xAEF // 2799 - SYS_FESETENV = 0xAF0 // 2800 - SYS_FESETEXCEPTFLAG = 0xAF1 // 2801 - SYS_FESETROUND = 0xAF2 // 2802 - SYS_FETESTEXCEPT = 0xAF3 // 2803 - SYS_FEUPDATEENV = 0xAF4 // 2804 - SYS___COPYSIGN_H = 0xAF5 // 2805 - SYS___HYPOTF_H = 0xAF6 // 2806 - SYS___HYPOTL_H = 0xAF7 // 2807 - SYS___CLASS = 0xAFA // 2810 - SYS___CLASS_B = 0xAFB // 2811 - SYS___CLASS_H = 0xAFC // 2812 - SYS___ISBLANK_A = 0xB2E // 2862 - SYS___ISWBLANK_A = 0xB2F // 2863 - SYS___LROUND_FIXUP = 0xB30 // 2864 - SYS___LROUNDF_FIXUP = 0xB31 // 2865 - SYS_SCHED_YIELD = 0xB32 // 2866 - SYS_STRERROR_R = 0xB33 // 2867 - SYS_UNSETENV = 0xB34 // 2868 - SYS___LGAMMA_H_C99 = 0xB38 // 2872 - SYS___LGAMMA_B_C99 = 0xB39 // 2873 - SYS___LGAMMA_R_C99 = 0xB3A // 2874 - SYS___FTELL2 = 0xB3B // 2875 - SYS___FSEEK2 = 0xB3C // 2876 - SYS___STATIC_REINIT = 0xB3D // 2877 - SYS_PTHREAD_ATTR_GETSTACK = 0xB3E // 2878 - SYS_PTHREAD_ATTR_SETSTACK = 0xB3F // 2879 - SYS___TGAMMA_H_C99 = 0xB78 // 2936 - SYS___TGAMMAF_H_C99 = 0xB79 // 2937 - SYS___LE_TRACEBACK = 0xB7A // 2938 - SYS___MUST_STAY_CLEAN = 0xB7C // 2940 - SYS___O_ENV = 0xB7D // 2941 - SYS_ACOSD32 = 0xB7E // 2942 - SYS_ACOSD64 = 0xB7F // 2943 - SYS_ACOSD128 = 0xB80 // 2944 - SYS_ACOSHD32 = 
0xB81 // 2945 - SYS_ACOSHD64 = 0xB82 // 2946 - SYS_ACOSHD128 = 0xB83 // 2947 - SYS_ASIND32 = 0xB84 // 2948 - SYS_ASIND64 = 0xB85 // 2949 - SYS_ASIND128 = 0xB86 // 2950 - SYS_ASINHD32 = 0xB87 // 2951 - SYS_ASINHD64 = 0xB88 // 2952 - SYS_ASINHD128 = 0xB89 // 2953 - SYS_ATAND32 = 0xB8A // 2954 - SYS_ATAND64 = 0xB8B // 2955 - SYS_ATAND128 = 0xB8C // 2956 - SYS_ATAN2D32 = 0xB8D // 2957 - SYS_ATAN2D64 = 0xB8E // 2958 - SYS_ATAN2D128 = 0xB8F // 2959 - SYS_ATANHD32 = 0xB90 // 2960 - SYS_ATANHD64 = 0xB91 // 2961 - SYS_ATANHD128 = 0xB92 // 2962 - SYS_CBRTD32 = 0xB93 // 2963 - SYS_CBRTD64 = 0xB94 // 2964 - SYS_CBRTD128 = 0xB95 // 2965 - SYS_CEILD32 = 0xB96 // 2966 - SYS_CEILD64 = 0xB97 // 2967 - SYS_CEILD128 = 0xB98 // 2968 - SYS___CLASS2 = 0xB99 // 2969 - SYS___CLASS2_B = 0xB9A // 2970 - SYS___CLASS2_H = 0xB9B // 2971 - SYS_COPYSIGND32 = 0xB9C // 2972 - SYS_COPYSIGND64 = 0xB9D // 2973 - SYS_COPYSIGND128 = 0xB9E // 2974 - SYS_COSD32 = 0xB9F // 2975 - SYS_COSD64 = 0xBA0 // 2976 - SYS_COSD128 = 0xBA1 // 2977 - SYS_COSHD32 = 0xBA2 // 2978 - SYS_COSHD64 = 0xBA3 // 2979 - SYS_COSHD128 = 0xBA4 // 2980 - SYS_ERFD32 = 0xBA5 // 2981 - SYS_ERFD64 = 0xBA6 // 2982 - SYS_ERFD128 = 0xBA7 // 2983 - SYS_ERFCD32 = 0xBA8 // 2984 - SYS_ERFCD64 = 0xBA9 // 2985 - SYS_ERFCD128 = 0xBAA // 2986 - SYS_EXPD32 = 0xBAB // 2987 - SYS_EXPD64 = 0xBAC // 2988 - SYS_EXPD128 = 0xBAD // 2989 - SYS_EXP2D32 = 0xBAE // 2990 - SYS_EXP2D64 = 0xBAF // 2991 - SYS_EXP2D128 = 0xBB0 // 2992 - SYS_EXPM1D32 = 0xBB1 // 2993 - SYS_EXPM1D64 = 0xBB2 // 2994 - SYS_EXPM1D128 = 0xBB3 // 2995 - SYS_FABSD32 = 0xBB4 // 2996 - SYS_FABSD64 = 0xBB5 // 2997 - SYS_FABSD128 = 0xBB6 // 2998 - SYS_FDIMD32 = 0xBB7 // 2999 - SYS_FDIMD64 = 0xBB8 // 3000 - SYS_FDIMD128 = 0xBB9 // 3001 - SYS_FE_DEC_GETROUND = 0xBBA // 3002 - SYS_FE_DEC_SETROUND = 0xBBB // 3003 - SYS_FLOORD32 = 0xBBC // 3004 - SYS_FLOORD64 = 0xBBD // 3005 - SYS_FLOORD128 = 0xBBE // 3006 - SYS_FMAD32 = 0xBBF // 3007 - SYS_FMAD64 = 0xBC0 // 3008 - SYS_FMAD128 = 0xBC1 // 3009 - SYS_FMAXD32 = 0xBC2 // 3010 - SYS_FMAXD64 = 0xBC3 // 3011 - SYS_FMAXD128 = 0xBC4 // 3012 - SYS_FMIND32 = 0xBC5 // 3013 - SYS_FMIND64 = 0xBC6 // 3014 - SYS_FMIND128 = 0xBC7 // 3015 - SYS_FMODD32 = 0xBC8 // 3016 - SYS_FMODD64 = 0xBC9 // 3017 - SYS_FMODD128 = 0xBCA // 3018 - SYS___FP_CAST_D = 0xBCB // 3019 - SYS_FREXPD32 = 0xBCC // 3020 - SYS_FREXPD64 = 0xBCD // 3021 - SYS_FREXPD128 = 0xBCE // 3022 - SYS_HYPOTD32 = 0xBCF // 3023 - SYS_HYPOTD64 = 0xBD0 // 3024 - SYS_HYPOTD128 = 0xBD1 // 3025 - SYS_ILOGBD32 = 0xBD2 // 3026 - SYS_ILOGBD64 = 0xBD3 // 3027 - SYS_ILOGBD128 = 0xBD4 // 3028 - SYS_LDEXPD32 = 0xBD5 // 3029 - SYS_LDEXPD64 = 0xBD6 // 3030 - SYS_LDEXPD128 = 0xBD7 // 3031 - SYS_LGAMMAD32 = 0xBD8 // 3032 - SYS_LGAMMAD64 = 0xBD9 // 3033 - SYS_LGAMMAD128 = 0xBDA // 3034 - SYS_LLRINTD32 = 0xBDB // 3035 - SYS_LLRINTD64 = 0xBDC // 3036 - SYS_LLRINTD128 = 0xBDD // 3037 - SYS_LLROUNDD32 = 0xBDE // 3038 - SYS_LLROUNDD64 = 0xBDF // 3039 - SYS_LLROUNDD128 = 0xBE0 // 3040 - SYS_LOGD32 = 0xBE1 // 3041 - SYS_LOGD64 = 0xBE2 // 3042 - SYS_LOGD128 = 0xBE3 // 3043 - SYS_LOG10D32 = 0xBE4 // 3044 - SYS_LOG10D64 = 0xBE5 // 3045 - SYS_LOG10D128 = 0xBE6 // 3046 - SYS_LOG1PD32 = 0xBE7 // 3047 - SYS_LOG1PD64 = 0xBE8 // 3048 - SYS_LOG1PD128 = 0xBE9 // 3049 - SYS_LOG2D32 = 0xBEA // 3050 - SYS_LOG2D64 = 0xBEB // 3051 - SYS_LOG2D128 = 0xBEC // 3052 - SYS_LOGBD32 = 0xBED // 3053 - SYS_LOGBD64 = 0xBEE // 3054 - SYS_LOGBD128 = 0xBEF // 3055 - SYS_LRINTD32 = 0xBF0 // 3056 - SYS_LRINTD64 = 0xBF1 // 3057 - SYS_LRINTD128 = 0xBF2 // 3058 - SYS_LROUNDD32 = 0xBF3 // 3059 
- SYS_LROUNDD64 = 0xBF4 // 3060 - SYS_LROUNDD128 = 0xBF5 // 3061 - SYS_MODFD32 = 0xBF6 // 3062 - SYS_MODFD64 = 0xBF7 // 3063 - SYS_MODFD128 = 0xBF8 // 3064 - SYS_NAND32 = 0xBF9 // 3065 - SYS_NAND64 = 0xBFA // 3066 - SYS_NAND128 = 0xBFB // 3067 - SYS_NEARBYINTD32 = 0xBFC // 3068 - SYS_NEARBYINTD64 = 0xBFD // 3069 - SYS_NEARBYINTD128 = 0xBFE // 3070 - SYS_NEXTAFTERD32 = 0xBFF // 3071 - SYS_NEXTAFTERD64 = 0xC00 // 3072 - SYS_NEXTAFTERD128 = 0xC01 // 3073 - SYS_NEXTTOWARDD32 = 0xC02 // 3074 - SYS_NEXTTOWARDD64 = 0xC03 // 3075 - SYS_NEXTTOWARDD128 = 0xC04 // 3076 - SYS_POWD32 = 0xC05 // 3077 - SYS_POWD64 = 0xC06 // 3078 - SYS_POWD128 = 0xC07 // 3079 - SYS_QUANTIZED32 = 0xC08 // 3080 - SYS_QUANTIZED64 = 0xC09 // 3081 - SYS_QUANTIZED128 = 0xC0A // 3082 - SYS_REMAINDERD32 = 0xC0B // 3083 - SYS_REMAINDERD64 = 0xC0C // 3084 - SYS_REMAINDERD128 = 0xC0D // 3085 - SYS___REMQUOD32 = 0xC0E // 3086 - SYS___REMQUOD64 = 0xC0F // 3087 - SYS___REMQUOD128 = 0xC10 // 3088 - SYS_RINTD32 = 0xC11 // 3089 - SYS_RINTD64 = 0xC12 // 3090 - SYS_RINTD128 = 0xC13 // 3091 - SYS_ROUNDD32 = 0xC14 // 3092 - SYS_ROUNDD64 = 0xC15 // 3093 - SYS_ROUNDD128 = 0xC16 // 3094 - SYS_SAMEQUANTUMD32 = 0xC17 // 3095 - SYS_SAMEQUANTUMD64 = 0xC18 // 3096 - SYS_SAMEQUANTUMD128 = 0xC19 // 3097 - SYS_SCALBLND32 = 0xC1A // 3098 - SYS_SCALBLND64 = 0xC1B // 3099 - SYS_SCALBLND128 = 0xC1C // 3100 - SYS_SCALBND32 = 0xC1D // 3101 - SYS_SCALBND64 = 0xC1E // 3102 - SYS_SCALBND128 = 0xC1F // 3103 - SYS_SIND32 = 0xC20 // 3104 - SYS_SIND64 = 0xC21 // 3105 - SYS_SIND128 = 0xC22 // 3106 - SYS_SINHD32 = 0xC23 // 3107 - SYS_SINHD64 = 0xC24 // 3108 - SYS_SINHD128 = 0xC25 // 3109 - SYS_SQRTD32 = 0xC26 // 3110 - SYS_SQRTD64 = 0xC27 // 3111 - SYS_SQRTD128 = 0xC28 // 3112 - SYS_STRTOD32 = 0xC29 // 3113 - SYS_STRTOD64 = 0xC2A // 3114 - SYS_STRTOD128 = 0xC2B // 3115 - SYS_TAND32 = 0xC2C // 3116 - SYS_TAND64 = 0xC2D // 3117 - SYS_TAND128 = 0xC2E // 3118 - SYS_TANHD32 = 0xC2F // 3119 - SYS_TANHD64 = 0xC30 // 3120 - SYS_TANHD128 = 0xC31 // 3121 - SYS_TGAMMAD32 = 0xC32 // 3122 - SYS_TGAMMAD64 = 0xC33 // 3123 - SYS_TGAMMAD128 = 0xC34 // 3124 - SYS_TRUNCD32 = 0xC3E // 3134 - SYS_TRUNCD64 = 0xC3F // 3135 - SYS_TRUNCD128 = 0xC40 // 3136 - SYS_WCSTOD32 = 0xC41 // 3137 - SYS_WCSTOD64 = 0xC42 // 3138 - SYS_WCSTOD128 = 0xC43 // 3139 - SYS___CODEPAGE_INFO = 0xC64 // 3172 - SYS_POSIX_OPENPT = 0xC66 // 3174 - SYS_PSELECT = 0xC67 // 3175 - SYS_SOCKATMARK = 0xC68 // 3176 - SYS_AIO_FSYNC = 0xC69 // 3177 - SYS_LIO_LISTIO = 0xC6A // 3178 - SYS___ATANPID32 = 0xC6B // 3179 - SYS___ATANPID64 = 0xC6C // 3180 - SYS___ATANPID128 = 0xC6D // 3181 - SYS___COSPID32 = 0xC6E // 3182 - SYS___COSPID64 = 0xC6F // 3183 - SYS___COSPID128 = 0xC70 // 3184 - SYS___SINPID32 = 0xC71 // 3185 - SYS___SINPID64 = 0xC72 // 3186 - SYS___SINPID128 = 0xC73 // 3187 - SYS_SETIPV4SOURCEFILTER = 0xC76 // 3190 - SYS_GETIPV4SOURCEFILTER = 0xC77 // 3191 - SYS_SETSOURCEFILTER = 0xC78 // 3192 - SYS_GETSOURCEFILTER = 0xC79 // 3193 - SYS_FWRITE_UNLOCKED = 0xC7A // 3194 - SYS_FREAD_UNLOCKED = 0xC7B // 3195 - SYS_FGETS_UNLOCKED = 0xC7C // 3196 - SYS_GETS_UNLOCKED = 0xC7D // 3197 - SYS_FPUTS_UNLOCKED = 0xC7E // 3198 - SYS_PUTS_UNLOCKED = 0xC7F // 3199 - SYS_FGETC_UNLOCKED = 0xC80 // 3200 - SYS_FPUTC_UNLOCKED = 0xC81 // 3201 - SYS_DLADDR = 0xC82 // 3202 - SYS_SHM_OPEN = 0xC8C // 3212 - SYS_SHM_UNLINK = 0xC8D // 3213 - SYS___CLASS2F = 0xC91 // 3217 - SYS___CLASS2L = 0xC92 // 3218 - SYS___CLASS2F_B = 0xC93 // 3219 - SYS___CLASS2F_H = 0xC94 // 3220 - SYS___CLASS2L_B = 0xC95 // 3221 - SYS___CLASS2L_H = 0xC96 // 3222 - 
SYS___CLASS2D32 = 0xC97 // 3223 - SYS___CLASS2D64 = 0xC98 // 3224 - SYS___CLASS2D128 = 0xC99 // 3225 - SYS___TOCSNAME2 = 0xC9A // 3226 - SYS___D1TOP = 0xC9B // 3227 - SYS___D2TOP = 0xC9C // 3228 - SYS___D4TOP = 0xC9D // 3229 - SYS___PTOD1 = 0xC9E // 3230 - SYS___PTOD2 = 0xC9F // 3231 - SYS___PTOD4 = 0xCA0 // 3232 - SYS_CLEARERR_UNLOCKED = 0xCA1 // 3233 - SYS_FDELREC_UNLOCKED = 0xCA2 // 3234 - SYS_FEOF_UNLOCKED = 0xCA3 // 3235 - SYS_FERROR_UNLOCKED = 0xCA4 // 3236 - SYS_FFLUSH_UNLOCKED = 0xCA5 // 3237 - SYS_FGETPOS_UNLOCKED = 0xCA6 // 3238 - SYS_FGETWC_UNLOCKED = 0xCA7 // 3239 - SYS_FGETWS_UNLOCKED = 0xCA8 // 3240 - SYS_FILENO_UNLOCKED = 0xCA9 // 3241 - SYS_FLDATA_UNLOCKED = 0xCAA // 3242 - SYS_FLOCATE_UNLOCKED = 0xCAB // 3243 - SYS_FPRINTF_UNLOCKED = 0xCAC // 3244 - SYS_FPUTWC_UNLOCKED = 0xCAD // 3245 - SYS_FPUTWS_UNLOCKED = 0xCAE // 3246 - SYS_FSCANF_UNLOCKED = 0xCAF // 3247 - SYS_FSEEK_UNLOCKED = 0xCB0 // 3248 - SYS_FSEEKO_UNLOCKED = 0xCB1 // 3249 - SYS_FSETPOS_UNLOCKED = 0xCB3 // 3251 - SYS_FTELL_UNLOCKED = 0xCB4 // 3252 - SYS_FTELLO_UNLOCKED = 0xCB5 // 3253 - SYS_FUPDATE_UNLOCKED = 0xCB7 // 3255 - SYS_FWIDE_UNLOCKED = 0xCB8 // 3256 - SYS_FWPRINTF_UNLOCKED = 0xCB9 // 3257 - SYS_FWSCANF_UNLOCKED = 0xCBA // 3258 - SYS_GETWC_UNLOCKED = 0xCBB // 3259 - SYS_GETWCHAR_UNLOCKED = 0xCBC // 3260 - SYS_PERROR_UNLOCKED = 0xCBD // 3261 - SYS_PRINTF_UNLOCKED = 0xCBE // 3262 - SYS_PUTWC_UNLOCKED = 0xCBF // 3263 - SYS_PUTWCHAR_UNLOCKED = 0xCC0 // 3264 - SYS_REWIND_UNLOCKED = 0xCC1 // 3265 - SYS_SCANF_UNLOCKED = 0xCC2 // 3266 - SYS_UNGETC_UNLOCKED = 0xCC3 // 3267 - SYS_UNGETWC_UNLOCKED = 0xCC4 // 3268 - SYS_VFPRINTF_UNLOCKED = 0xCC5 // 3269 - SYS_VFSCANF_UNLOCKED = 0xCC7 // 3271 - SYS_VFWPRINTF_UNLOCKED = 0xCC9 // 3273 - SYS_VFWSCANF_UNLOCKED = 0xCCB // 3275 - SYS_VPRINTF_UNLOCKED = 0xCCD // 3277 - SYS_VSCANF_UNLOCKED = 0xCCF // 3279 - SYS_VWPRINTF_UNLOCKED = 0xCD1 // 3281 - SYS_VWSCANF_UNLOCKED = 0xCD3 // 3283 - SYS_WPRINTF_UNLOCKED = 0xCD5 // 3285 - SYS_WSCANF_UNLOCKED = 0xCD6 // 3286 - SYS_ASCTIME64 = 0xCD7 // 3287 - SYS_ASCTIME64_R = 0xCD8 // 3288 - SYS_CTIME64 = 0xCD9 // 3289 - SYS_CTIME64_R = 0xCDA // 3290 - SYS_DIFFTIME64 = 0xCDB // 3291 - SYS_GMTIME64 = 0xCDC // 3292 - SYS_GMTIME64_R = 0xCDD // 3293 - SYS_LOCALTIME64 = 0xCDE // 3294 - SYS_LOCALTIME64_R = 0xCDF // 3295 - SYS_MKTIME64 = 0xCE0 // 3296 - SYS_TIME64 = 0xCE1 // 3297 - SYS___LOGIN_APPLID = 0xCE2 // 3298 - SYS___PASSWD_APPLID = 0xCE3 // 3299 - SYS_PTHREAD_SECURITY_APPLID_NP = 0xCE4 // 3300 - SYS___GETTHENT = 0xCE5 // 3301 - SYS_FREEIFADDRS = 0xCE6 // 3302 - SYS_GETIFADDRS = 0xCE7 // 3303 - SYS_POSIX_FALLOCATE = 0xCE8 // 3304 - SYS_POSIX_MEMALIGN = 0xCE9 // 3305 - SYS_SIZEOF_ALLOC = 0xCEA // 3306 - SYS_RESIZE_ALLOC = 0xCEB // 3307 - SYS_FREAD_NOUPDATE = 0xCEC // 3308 - SYS_FREAD_NOUPDATE_UNLOCKED = 0xCED // 3309 - SYS_FGETPOS64 = 0xCEE // 3310 - SYS_FSEEK64 = 0xCEF // 3311 - SYS_FSEEKO64 = 0xCF0 // 3312 - SYS_FSETPOS64 = 0xCF1 // 3313 - SYS_FTELL64 = 0xCF2 // 3314 - SYS_FTELLO64 = 0xCF3 // 3315 - SYS_FGETPOS64_UNLOCKED = 0xCF4 // 3316 - SYS_FSEEK64_UNLOCKED = 0xCF5 // 3317 - SYS_FSEEKO64_UNLOCKED = 0xCF6 // 3318 - SYS_FSETPOS64_UNLOCKED = 0xCF7 // 3319 - SYS_FTELL64_UNLOCKED = 0xCF8 // 3320 - SYS_FTELLO64_UNLOCKED = 0xCF9 // 3321 - SYS_FOPEN_UNLOCKED = 0xCFA // 3322 - SYS_FREOPEN_UNLOCKED = 0xCFB // 3323 - SYS_FDOPEN_UNLOCKED = 0xCFC // 3324 - SYS_TMPFILE_UNLOCKED = 0xCFD // 3325 - SYS___MOSERVICES = 0xD3D // 3389 - SYS___GETTOD = 0xD3E // 3390 - SYS_C16RTOMB = 0xD40 // 3392 - SYS_C32RTOMB = 0xD41 // 3393 - SYS_MBRTOC16 = 0xD42 // 3394 - 
SYS_MBRTOC32 = 0xD43 // 3395 - SYS_QUANTEXPD32 = 0xD44 // 3396 - SYS_QUANTEXPD64 = 0xD45 // 3397 - SYS_QUANTEXPD128 = 0xD46 // 3398 - SYS___LOCALE_CTL = 0xD47 // 3399 - SYS___SMF_RECORD2 = 0xD48 // 3400 - SYS_FOPEN64 = 0xD49 // 3401 - SYS_FOPEN64_UNLOCKED = 0xD4A // 3402 - SYS_FREOPEN64 = 0xD4B // 3403 - SYS_FREOPEN64_UNLOCKED = 0xD4C // 3404 - SYS_TMPFILE64 = 0xD4D // 3405 - SYS_TMPFILE64_UNLOCKED = 0xD4E // 3406 - SYS_GETDATE64 = 0xD4F // 3407 - SYS_GETTIMEOFDAY64 = 0xD50 // 3408 - SYS_BIND2ADDRSEL = 0xD59 // 3417 - SYS_INET6_IS_SRCADDR = 0xD5A // 3418 - SYS___GETGRGID1 = 0xD5B // 3419 - SYS___GETGRNAM1 = 0xD5C // 3420 - SYS___FBUFSIZE = 0xD60 // 3424 - SYS___FPENDING = 0xD61 // 3425 - SYS___FLBF = 0xD62 // 3426 - SYS___FREADABLE = 0xD63 // 3427 - SYS___FWRITABLE = 0xD64 // 3428 - SYS___FREADING = 0xD65 // 3429 - SYS___FWRITING = 0xD66 // 3430 - SYS___FSETLOCKING = 0xD67 // 3431 - SYS__FLUSHLBF = 0xD68 // 3432 - SYS___FPURGE = 0xD69 // 3433 - SYS___FREADAHEAD = 0xD6A // 3434 - SYS___FSETERR = 0xD6B // 3435 - SYS___FPENDING_UNLOCKED = 0xD6C // 3436 - SYS___FREADING_UNLOCKED = 0xD6D // 3437 - SYS___FWRITING_UNLOCKED = 0xD6E // 3438 - SYS__FLUSHLBF_UNLOCKED = 0xD6F // 3439 - SYS___FPURGE_UNLOCKED = 0xD70 // 3440 - SYS___FREADAHEAD_UNLOCKED = 0xD71 // 3441 - SYS___LE_CEEGTJS = 0xD72 // 3442 - SYS___LE_RECORD_DUMP = 0xD73 // 3443 - SYS_FSTAT64 = 0xD74 // 3444 - SYS_LSTAT64 = 0xD75 // 3445 - SYS_STAT64 = 0xD76 // 3446 - SYS___READDIR2_64 = 0xD77 // 3447 - SYS___OPEN_STAT64 = 0xD78 // 3448 - SYS_FTW64 = 0xD79 // 3449 - SYS_NFTW64 = 0xD7A // 3450 - SYS_UTIME64 = 0xD7B // 3451 - SYS_UTIMES64 = 0xD7C // 3452 - SYS___GETIPC64 = 0xD7D // 3453 - SYS_MSGCTL64 = 0xD7E // 3454 - SYS_SEMCTL64 = 0xD7F // 3455 - SYS_SHMCTL64 = 0xD80 // 3456 - SYS_MSGXRCV64 = 0xD81 // 3457 - SYS___MGXR64 = 0xD81 // 3457 - SYS_W_GETPSENT64 = 0xD82 // 3458 - SYS_PTHREAD_COND_TIMEDWAIT64 = 0xD83 // 3459 - SYS_FTIME64 = 0xD85 // 3461 - SYS_GETUTXENT64 = 0xD86 // 3462 - SYS_GETUTXID64 = 0xD87 // 3463 - SYS_GETUTXLINE64 = 0xD88 // 3464 - SYS_PUTUTXLINE64 = 0xD89 // 3465 - SYS_NEWLOCALE = 0xD8A // 3466 - SYS_FREELOCALE = 0xD8B // 3467 - SYS_USELOCALE = 0xD8C // 3468 - SYS_DUPLOCALE = 0xD8D // 3469 - SYS___CHATTR64 = 0xD9C // 3484 - SYS___LCHATTR64 = 0xD9D // 3485 - SYS___FCHATTR64 = 0xD9E // 3486 - SYS_____CHATTR64_A = 0xD9F // 3487 - SYS_____LCHATTR64_A = 0xDA0 // 3488 - SYS___LE_CEEUSGD = 0xDA1 // 3489 - SYS___LE_IFAM_CON = 0xDA2 // 3490 - SYS___LE_IFAM_DSC = 0xDA3 // 3491 - SYS___LE_IFAM_GET = 0xDA4 // 3492 - SYS___LE_IFAM_QRY = 0xDA5 // 3493 - SYS_ALIGNED_ALLOC = 0xDA6 // 3494 - SYS_ACCEPT4 = 0xDA7 // 3495 - SYS___ACCEPT4_A = 0xDA8 // 3496 - SYS_COPYFILERANGE = 0xDA9 // 3497 - SYS_GETLINE = 0xDAA // 3498 - SYS___GETLINE_A = 0xDAB // 3499 - SYS_DIRFD = 0xDAC // 3500 - SYS_CLOCK_GETTIME = 0xDAD // 3501 - SYS_DUP3 = 0xDAE // 3502 - SYS_EPOLL_CREATE = 0xDAF // 3503 - SYS_EPOLL_CREATE1 = 0xDB0 // 3504 - SYS_EPOLL_CTL = 0xDB1 // 3505 - SYS_EPOLL_WAIT = 0xDB2 // 3506 - SYS_EPOLL_PWAIT = 0xDB3 // 3507 - SYS_EVENTFD = 0xDB4 // 3508 - SYS_STATFS = 0xDB5 // 3509 - SYS___STATFS_A = 0xDB6 // 3510 - SYS_FSTATFS = 0xDB7 // 3511 - SYS_INOTIFY_INIT = 0xDB8 // 3512 - SYS_INOTIFY_INIT1 = 0xDB9 // 3513 - SYS_INOTIFY_ADD_WATCH = 0xDBA // 3514 - SYS___INOTIFY_ADD_WATCH_A = 0xDBB // 3515 - SYS_INOTIFY_RM_WATCH = 0xDBC // 3516 - SYS_PIPE2 = 0xDBD // 3517 - SYS_PIVOT_ROOT = 0xDBE // 3518 - SYS___PIVOT_ROOT_A = 0xDBF // 3519 - SYS_PRCTL = 0xDC0 // 3520 - SYS_PRLIMIT = 0xDC1 // 3521 - SYS_SETHOSTNAME = 0xDC2 // 3522 - SYS___SETHOSTNAME_A = 0xDC3 // 3523 
- SYS_SETRESUID = 0xDC4 // 3524 - SYS_SETRESGID = 0xDC5 // 3525 - SYS_PTHREAD_CONDATTR_GETCLOCK = 0xDC6 // 3526 - SYS_FLOCK = 0xDC7 // 3527 - SYS_FGETXATTR = 0xDC8 // 3528 - SYS___FGETXATTR_A = 0xDC9 // 3529 - SYS_FLISTXATTR = 0xDCA // 3530 - SYS___FLISTXATTR_A = 0xDCB // 3531 - SYS_FREMOVEXATTR = 0xDCC // 3532 - SYS___FREMOVEXATTR_A = 0xDCD // 3533 - SYS_FSETXATTR = 0xDCE // 3534 - SYS___FSETXATTR_A = 0xDCF // 3535 - SYS_GETXATTR = 0xDD0 // 3536 - SYS___GETXATTR_A = 0xDD1 // 3537 - SYS_LGETXATTR = 0xDD2 // 3538 - SYS___LGETXATTR_A = 0xDD3 // 3539 - SYS_LISTXATTR = 0xDD4 // 3540 - SYS___LISTXATTR_A = 0xDD5 // 3541 - SYS_LLISTXATTR = 0xDD6 // 3542 - SYS___LLISTXATTR_A = 0xDD7 // 3543 - SYS_LREMOVEXATTR = 0xDD8 // 3544 - SYS___LREMOVEXATTR_A = 0xDD9 // 3545 - SYS_LSETXATTR = 0xDDA // 3546 - SYS___LSETXATTR_A = 0xDDB // 3547 - SYS_REMOVEXATTR = 0xDDC // 3548 - SYS___REMOVEXATTR_A = 0xDDD // 3549 - SYS_SETXATTR = 0xDDE // 3550 - SYS___SETXATTR_A = 0xDDF // 3551 - SYS_FDATASYNC = 0xDE0 // 3552 - SYS_SYNCFS = 0xDE1 // 3553 - SYS_FUTIMES = 0xDE2 // 3554 - SYS_FUTIMESAT = 0xDE3 // 3555 - SYS___FUTIMESAT_A = 0xDE4 // 3556 - SYS_LUTIMES = 0xDE5 // 3557 - SYS___LUTIMES_A = 0xDE6 // 3558 - SYS_INET_ATON = 0xDE7 // 3559 - SYS_GETRANDOM = 0xDE8 // 3560 - SYS_GETTID = 0xDE9 // 3561 - SYS_MEMFD_CREATE = 0xDEA // 3562 - SYS___MEMFD_CREATE_A = 0xDEB // 3563 - SYS_FACCESSAT = 0xDEC // 3564 - SYS___FACCESSAT_A = 0xDED // 3565 - SYS_FCHMODAT = 0xDEE // 3566 - SYS___FCHMODAT_A = 0xDEF // 3567 - SYS_FCHOWNAT = 0xDF0 // 3568 - SYS___FCHOWNAT_A = 0xDF1 // 3569 - SYS_FSTATAT = 0xDF2 // 3570 - SYS___FSTATAT_A = 0xDF3 // 3571 - SYS_LINKAT = 0xDF4 // 3572 - SYS___LINKAT_A = 0xDF5 // 3573 - SYS_MKDIRAT = 0xDF6 // 3574 - SYS___MKDIRAT_A = 0xDF7 // 3575 - SYS_MKFIFOAT = 0xDF8 // 3576 - SYS___MKFIFOAT_A = 0xDF9 // 3577 - SYS_MKNODAT = 0xDFA // 3578 - SYS___MKNODAT_A = 0xDFB // 3579 - SYS_OPENAT = 0xDFC // 3580 - SYS___OPENAT_A = 0xDFD // 3581 - SYS_READLINKAT = 0xDFE // 3582 - SYS___READLINKAT_A = 0xDFF // 3583 - SYS_RENAMEAT = 0xE00 // 3584 - SYS___RENAMEAT_A = 0xE01 // 3585 - SYS_RENAMEAT2 = 0xE02 // 3586 - SYS___RENAMEAT2_A = 0xE03 // 3587 - SYS_SYMLINKAT = 0xE04 // 3588 - SYS___SYMLINKAT_A = 0xE05 // 3589 - SYS_UNLINKAT = 0xE06 // 3590 - SYS___UNLINKAT_A = 0xE07 // 3591 - SYS_SYSINFO = 0xE08 // 3592 - SYS_WAIT4 = 0xE0A // 3594 - SYS_CLONE = 0xE0B // 3595 - SYS_UNSHARE = 0xE0C // 3596 - SYS_SETNS = 0xE0D // 3597 - SYS_CAPGET = 0xE0E // 3598 - SYS_CAPSET = 0xE0F // 3599 - SYS_STRCHRNUL = 0xE10 // 3600 - SYS_PTHREAD_CONDATTR_SETCLOCK = 0xE12 // 3602 - SYS_OPEN_BY_HANDLE_AT = 0xE13 // 3603 - SYS___OPEN_BY_HANDLE_AT_A = 0xE14 // 3604 - SYS___INET_ATON_A = 0xE15 // 3605 - SYS_MOUNT1 = 0xE16 // 3606 - SYS___MOUNT1_A = 0xE17 // 3607 - SYS_UMOUNT1 = 0xE18 // 3608 - SYS___UMOUNT1_A = 0xE19 // 3609 - SYS_UMOUNT2 = 0xE1A // 3610 - SYS___UMOUNT2_A = 0xE1B // 3611 - SYS___PRCTL_A = 0xE1C // 3612 - SYS_LOCALTIME_R2 = 0xE1D // 3613 - SYS___LOCALTIME_R2_A = 0xE1E // 3614 - SYS_OPENAT2 = 0xE1F // 3615 - SYS___OPENAT2_A = 0xE20 // 3616 - SYS___LE_CEEMICT = 0xE21 // 3617 - SYS_GETENTROPY = 0xE22 // 3618 - SYS_NANOSLEEP = 0xE23 // 3619 - SYS_UTIMENSAT = 0xE24 // 3620 - SYS___UTIMENSAT_A = 0xE25 // 3621 - SYS_ASPRINTF = 0xE26 // 3622 - SYS___ASPRINTF_A = 0xE27 // 3623 - SYS_VASPRINTF = 0xE28 // 3624 - SYS___VASPRINTF_A = 0xE29 // 3625 - SYS_DPRINTF = 0xE2A // 3626 - SYS___DPRINTF_A = 0xE2B // 3627 - SYS_GETOPT_LONG = 0xE2C // 3628 - SYS___GETOPT_LONG_A = 0xE2D // 3629 - SYS_PSIGNAL = 0xE2E // 3630 - SYS___PSIGNAL_A = 0xE2F // 3631 - 
SYS_PSIGNAL_UNLOCKED = 0xE30 // 3632 - SYS___PSIGNAL_UNLOCKED_A = 0xE31 // 3633 - SYS_FSTATAT_O = 0xE32 // 3634 - SYS___FSTATAT_O_A = 0xE33 // 3635 - SYS_FSTATAT64 = 0xE34 // 3636 - SYS___FSTATAT64_A = 0xE35 // 3637 - SYS___CHATTRAT = 0xE36 // 3638 - SYS_____CHATTRAT_A = 0xE37 // 3639 - SYS___CHATTRAT64 = 0xE38 // 3640 - SYS_____CHATTRAT64_A = 0xE39 // 3641 - SYS_MADVISE = 0xE3A // 3642 - SYS___AUTHENTICATE = 0xE3B // 3643 +// TODO: auto-generate. +const ( + SYS_ACOSD128 = 0xB80 + SYS_ACOSD32 = 0xB7E + SYS_ACOSD64 = 0xB7F + SYS_ACOSHD128 = 0xB83 + SYS_ACOSHD32 = 0xB81 + SYS_ACOSHD64 = 0xB82 + SYS_AIO_FSYNC = 0xC69 + SYS_ASCTIME = 0x0AE + SYS_ASCTIME64 = 0xCD7 + SYS_ASCTIME64_R = 0xCD8 + SYS_ASIND128 = 0xB86 + SYS_ASIND32 = 0xB84 + SYS_ASIND64 = 0xB85 + SYS_ASINHD128 = 0xB89 + SYS_ASINHD32 = 0xB87 + SYS_ASINHD64 = 0xB88 + SYS_ATAN2D128 = 0xB8F + SYS_ATAN2D32 = 0xB8D + SYS_ATAN2D64 = 0xB8E + SYS_ATAND128 = 0xB8C + SYS_ATAND32 = 0xB8A + SYS_ATAND64 = 0xB8B + SYS_ATANHD128 = 0xB92 + SYS_ATANHD32 = 0xB90 + SYS_ATANHD64 = 0xB91 + SYS_BIND2ADDRSEL = 0xD59 + SYS_C16RTOMB = 0xD40 + SYS_C32RTOMB = 0xD41 + SYS_CBRTD128 = 0xB95 + SYS_CBRTD32 = 0xB93 + SYS_CBRTD64 = 0xB94 + SYS_CEILD128 = 0xB98 + SYS_CEILD32 = 0xB96 + SYS_CEILD64 = 0xB97 + SYS_CLEARENV = 0x0C9 + SYS_CLEARERR_UNLOCKED = 0xCA1 + SYS_CLOCK = 0x0AA + SYS_CLOGL = 0xA00 + SYS_CLRMEMF = 0x0BD + SYS_CONJ = 0xA03 + SYS_CONJF = 0xA06 + SYS_CONJL = 0xA09 + SYS_COPYSIGND128 = 0xB9E + SYS_COPYSIGND32 = 0xB9C + SYS_COPYSIGND64 = 0xB9D + SYS_COSD128 = 0xBA1 + SYS_COSD32 = 0xB9F + SYS_COSD64 = 0xBA0 + SYS_COSHD128 = 0xBA4 + SYS_COSHD32 = 0xBA2 + SYS_COSHD64 = 0xBA3 + SYS_CPOW = 0xA0C + SYS_CPOWF = 0xA0F + SYS_CPOWL = 0xA12 + SYS_CPROJ = 0xA15 + SYS_CPROJF = 0xA18 + SYS_CPROJL = 0xA1B + SYS_CREAL = 0xA1E + SYS_CREALF = 0xA21 + SYS_CREALL = 0xA24 + SYS_CSIN = 0xA27 + SYS_CSINF = 0xA2A + SYS_CSINH = 0xA30 + SYS_CSINHF = 0xA33 + SYS_CSINHL = 0xA36 + SYS_CSINL = 0xA2D + SYS_CSNAP = 0x0C5 + SYS_CSQRT = 0xA39 + SYS_CSQRTF = 0xA3C + SYS_CSQRTL = 0xA3F + SYS_CTAN = 0xA42 + SYS_CTANF = 0xA45 + SYS_CTANH = 0xA4B + SYS_CTANHF = 0xA4E + SYS_CTANHL = 0xA51 + SYS_CTANL = 0xA48 + SYS_CTIME = 0x0AB + SYS_CTIME64 = 0xCD9 + SYS_CTIME64_R = 0xCDA + SYS_CTRACE = 0x0C6 + SYS_DIFFTIME = 0x0A7 + SYS_DIFFTIME64 = 0xCDB + SYS_DLADDR = 0xC82 + SYS_DYNALLOC = 0x0C3 + SYS_DYNFREE = 0x0C2 + SYS_ERFCD128 = 0xBAA + SYS_ERFCD32 = 0xBA8 + SYS_ERFCD64 = 0xBA9 + SYS_ERFD128 = 0xBA7 + SYS_ERFD32 = 0xBA5 + SYS_ERFD64 = 0xBA6 + SYS_EXP2D128 = 0xBB0 + SYS_EXP2D32 = 0xBAE + SYS_EXP2D64 = 0xBAF + SYS_EXPD128 = 0xBAD + SYS_EXPD32 = 0xBAB + SYS_EXPD64 = 0xBAC + SYS_EXPM1D128 = 0xBB3 + SYS_EXPM1D32 = 0xBB1 + SYS_EXPM1D64 = 0xBB2 + SYS_FABSD128 = 0xBB6 + SYS_FABSD32 = 0xBB4 + SYS_FABSD64 = 0xBB5 + SYS_FDELREC_UNLOCKED = 0xCA2 + SYS_FDIMD128 = 0xBB9 + SYS_FDIMD32 = 0xBB7 + SYS_FDIMD64 = 0xBB8 + SYS_FDOPEN_UNLOCKED = 0xCFC + SYS_FECLEAREXCEPT = 0xAEA + SYS_FEGETENV = 0xAEB + SYS_FEGETEXCEPTFLAG = 0xAEC + SYS_FEGETROUND = 0xAED + SYS_FEHOLDEXCEPT = 0xAEE + SYS_FEOF_UNLOCKED = 0xCA3 + SYS_FERAISEEXCEPT = 0xAEF + SYS_FERROR_UNLOCKED = 0xCA4 + SYS_FESETENV = 0xAF0 + SYS_FESETEXCEPTFLAG = 0xAF1 + SYS_FESETROUND = 0xAF2 + SYS_FETCHEP = 0x0BF + SYS_FETESTEXCEPT = 0xAF3 + SYS_FEUPDATEENV = 0xAF4 + SYS_FE_DEC_GETROUND = 0xBBA + SYS_FE_DEC_SETROUND = 0xBBB + SYS_FFLUSH_UNLOCKED = 0xCA5 + SYS_FGETC_UNLOCKED = 0xC80 + SYS_FGETPOS64 = 0xCEE + SYS_FGETPOS64_UNLOCKED = 0xCF4 + SYS_FGETPOS_UNLOCKED = 0xCA6 + SYS_FGETS_UNLOCKED = 0xC7C + SYS_FGETWC_UNLOCKED = 0xCA7 + SYS_FGETWS_UNLOCKED = 0xCA8 + 
SYS_FILENO_UNLOCKED = 0xCA9 + SYS_FLDATA = 0x0C1 + SYS_FLDATA_UNLOCKED = 0xCAA + SYS_FLOCATE_UNLOCKED = 0xCAB + SYS_FLOORD128 = 0xBBE + SYS_FLOORD32 = 0xBBC + SYS_FLOORD64 = 0xBBD + SYS_FMA = 0xA63 + SYS_FMAD128 = 0xBC1 + SYS_FMAD32 = 0xBBF + SYS_FMAD64 = 0xBC0 + SYS_FMAF = 0xA66 + SYS_FMAL = 0xA69 + SYS_FMAX = 0xA6C + SYS_FMAXD128 = 0xBC4 + SYS_FMAXD32 = 0xBC2 + SYS_FMAXD64 = 0xBC3 + SYS_FMAXF = 0xA6F + SYS_FMAXL = 0xA72 + SYS_FMIN = 0xA75 + SYS_FMIND128 = 0xBC7 + SYS_FMIND32 = 0xBC5 + SYS_FMIND64 = 0xBC6 + SYS_FMINF = 0xA78 + SYS_FMINL = 0xA7B + SYS_FMODD128 = 0xBCA + SYS_FMODD32 = 0xBC8 + SYS_FMODD64 = 0xBC9 + SYS_FOPEN64 = 0xD49 + SYS_FOPEN64_UNLOCKED = 0xD4A + SYS_FOPEN_UNLOCKED = 0xCFA + SYS_FPRINTF_UNLOCKED = 0xCAC + SYS_FPUTC_UNLOCKED = 0xC81 + SYS_FPUTS_UNLOCKED = 0xC7E + SYS_FPUTWC_UNLOCKED = 0xCAD + SYS_FPUTWS_UNLOCKED = 0xCAE + SYS_FREAD_NOUPDATE = 0xCEC + SYS_FREAD_NOUPDATE_UNLOCKED = 0xCED + SYS_FREAD_UNLOCKED = 0xC7B + SYS_FREEIFADDRS = 0xCE6 + SYS_FREOPEN64 = 0xD4B + SYS_FREOPEN64_UNLOCKED = 0xD4C + SYS_FREOPEN_UNLOCKED = 0xCFB + SYS_FREXPD128 = 0xBCE + SYS_FREXPD32 = 0xBCC + SYS_FREXPD64 = 0xBCD + SYS_FSCANF_UNLOCKED = 0xCAF + SYS_FSEEK64 = 0xCEF + SYS_FSEEK64_UNLOCKED = 0xCF5 + SYS_FSEEKO64 = 0xCF0 + SYS_FSEEKO64_UNLOCKED = 0xCF6 + SYS_FSEEKO_UNLOCKED = 0xCB1 + SYS_FSEEK_UNLOCKED = 0xCB0 + SYS_FSETPOS64 = 0xCF1 + SYS_FSETPOS64_UNLOCKED = 0xCF7 + SYS_FSETPOS_UNLOCKED = 0xCB3 + SYS_FTELL64 = 0xCF2 + SYS_FTELL64_UNLOCKED = 0xCF8 + SYS_FTELLO64 = 0xCF3 + SYS_FTELLO64_UNLOCKED = 0xCF9 + SYS_FTELLO_UNLOCKED = 0xCB5 + SYS_FTELL_UNLOCKED = 0xCB4 + SYS_FUPDATE = 0x0B5 + SYS_FUPDATE_UNLOCKED = 0xCB7 + SYS_FWIDE_UNLOCKED = 0xCB8 + SYS_FWPRINTF_UNLOCKED = 0xCB9 + SYS_FWRITE_UNLOCKED = 0xC7A + SYS_FWSCANF_UNLOCKED = 0xCBA + SYS_GETDATE64 = 0xD4F + SYS_GETIFADDRS = 0xCE7 + SYS_GETIPV4SOURCEFILTER = 0xC77 + SYS_GETSOURCEFILTER = 0xC79 + SYS_GETSYNTX = 0x0FD + SYS_GETS_UNLOCKED = 0xC7D + SYS_GETTIMEOFDAY64 = 0xD50 + SYS_GETWCHAR_UNLOCKED = 0xCBC + SYS_GETWC_UNLOCKED = 0xCBB + SYS_GMTIME = 0x0B0 + SYS_GMTIME64 = 0xCDC + SYS_GMTIME64_R = 0xCDD + SYS_HYPOTD128 = 0xBD1 + SYS_HYPOTD32 = 0xBCF + SYS_HYPOTD64 = 0xBD0 + SYS_ILOGBD128 = 0xBD4 + SYS_ILOGBD32 = 0xBD2 + SYS_ILOGBD64 = 0xBD3 + SYS_ILOGBF = 0xA7E + SYS_ILOGBL = 0xA81 + SYS_INET6_IS_SRCADDR = 0xD5A + SYS_ISBLANK = 0x0FE + SYS_ISWALNUM = 0x0FF + SYS_LDEXPD128 = 0xBD7 + SYS_LDEXPD32 = 0xBD5 + SYS_LDEXPD64 = 0xBD6 + SYS_LGAMMAD128 = 0xBDA + SYS_LGAMMAD32 = 0xBD8 + SYS_LGAMMAD64 = 0xBD9 + SYS_LIO_LISTIO = 0xC6A + SYS_LLRINT = 0xA84 + SYS_LLRINTD128 = 0xBDD + SYS_LLRINTD32 = 0xBDB + SYS_LLRINTD64 = 0xBDC + SYS_LLRINTF = 0xA87 + SYS_LLRINTL = 0xA8A + SYS_LLROUND = 0xA8D + SYS_LLROUNDD128 = 0xBE0 + SYS_LLROUNDD32 = 0xBDE + SYS_LLROUNDD64 = 0xBDF + SYS_LLROUNDF = 0xA90 + SYS_LLROUNDL = 0xA93 + SYS_LOCALTIM = 0x0B1 + SYS_LOCALTIME = 0x0B1 + SYS_LOCALTIME64 = 0xCDE + SYS_LOCALTIME64_R = 0xCDF + SYS_LOG10D128 = 0xBE6 + SYS_LOG10D32 = 0xBE4 + SYS_LOG10D64 = 0xBE5 + SYS_LOG1PD128 = 0xBE9 + SYS_LOG1PD32 = 0xBE7 + SYS_LOG1PD64 = 0xBE8 + SYS_LOG2D128 = 0xBEC + SYS_LOG2D32 = 0xBEA + SYS_LOG2D64 = 0xBEB + SYS_LOGBD128 = 0xBEF + SYS_LOGBD32 = 0xBED + SYS_LOGBD64 = 0xBEE + SYS_LOGBF = 0xA96 + SYS_LOGBL = 0xA99 + SYS_LOGD128 = 0xBE3 + SYS_LOGD32 = 0xBE1 + SYS_LOGD64 = 0xBE2 + SYS_LRINT = 0xA9C + SYS_LRINTD128 = 0xBF2 + SYS_LRINTD32 = 0xBF0 + SYS_LRINTD64 = 0xBF1 + SYS_LRINTF = 0xA9F + SYS_LRINTL = 0xAA2 + SYS_LROUNDD128 = 0xBF5 + SYS_LROUNDD32 = 0xBF3 + SYS_LROUNDD64 = 0xBF4 + SYS_LROUNDL = 0xAA5 + SYS_MBLEN = 0x0AF + SYS_MBRTOC16 = 0xD42 + SYS_MBRTOC32 = 
0xD43 + SYS_MEMSET = 0x0A3 + SYS_MKTIME = 0x0AC + SYS_MKTIME64 = 0xCE0 + SYS_MODFD128 = 0xBF8 + SYS_MODFD32 = 0xBF6 + SYS_MODFD64 = 0xBF7 + SYS_NAN = 0xAA8 + SYS_NAND128 = 0xBFB + SYS_NAND32 = 0xBF9 + SYS_NAND64 = 0xBFA + SYS_NANF = 0xAAA + SYS_NANL = 0xAAC + SYS_NEARBYINT = 0xAAE + SYS_NEARBYINTD128 = 0xBFE + SYS_NEARBYINTD32 = 0xBFC + SYS_NEARBYINTD64 = 0xBFD + SYS_NEARBYINTF = 0xAB1 + SYS_NEARBYINTL = 0xAB4 + SYS_NEXTAFTERD128 = 0xC01 + SYS_NEXTAFTERD32 = 0xBFF + SYS_NEXTAFTERD64 = 0xC00 + SYS_NEXTAFTERF = 0xAB7 + SYS_NEXTAFTERL = 0xABA + SYS_NEXTTOWARD = 0xABD + SYS_NEXTTOWARDD128 = 0xC04 + SYS_NEXTTOWARDD32 = 0xC02 + SYS_NEXTTOWARDD64 = 0xC03 + SYS_NEXTTOWARDF = 0xAC0 + SYS_NEXTTOWARDL = 0xAC3 + SYS_NL_LANGINFO = 0x0FC + SYS_PERROR_UNLOCKED = 0xCBD + SYS_POSIX_FALLOCATE = 0xCE8 + SYS_POSIX_MEMALIGN = 0xCE9 + SYS_POSIX_OPENPT = 0xC66 + SYS_POWD128 = 0xC07 + SYS_POWD32 = 0xC05 + SYS_POWD64 = 0xC06 + SYS_PRINTF_UNLOCKED = 0xCBE + SYS_PSELECT = 0xC67 + SYS_PTHREAD_ATTR_GETSTACK = 0xB3E + SYS_PTHREAD_ATTR_SETSTACK = 0xB3F + SYS_PTHREAD_SECURITY_APPLID_NP = 0xCE4 + SYS_PUTS_UNLOCKED = 0xC7F + SYS_PUTWCHAR_UNLOCKED = 0xCC0 + SYS_PUTWC_UNLOCKED = 0xCBF + SYS_QUANTEXPD128 = 0xD46 + SYS_QUANTEXPD32 = 0xD44 + SYS_QUANTEXPD64 = 0xD45 + SYS_QUANTIZED128 = 0xC0A + SYS_QUANTIZED32 = 0xC08 + SYS_QUANTIZED64 = 0xC09 + SYS_REMAINDERD128 = 0xC0D + SYS_REMAINDERD32 = 0xC0B + SYS_REMAINDERD64 = 0xC0C + SYS_RESIZE_ALLOC = 0xCEB + SYS_REWIND_UNLOCKED = 0xCC1 + SYS_RINTD128 = 0xC13 + SYS_RINTD32 = 0xC11 + SYS_RINTD64 = 0xC12 + SYS_RINTF = 0xACB + SYS_RINTL = 0xACD + SYS_ROUND = 0xACF + SYS_ROUNDD128 = 0xC16 + SYS_ROUNDD32 = 0xC14 + SYS_ROUNDD64 = 0xC15 + SYS_ROUNDF = 0xAD2 + SYS_ROUNDL = 0xAD5 + SYS_SAMEQUANTUMD128 = 0xC19 + SYS_SAMEQUANTUMD32 = 0xC17 + SYS_SAMEQUANTUMD64 = 0xC18 + SYS_SCALBLN = 0xAD8 + SYS_SCALBLND128 = 0xC1C + SYS_SCALBLND32 = 0xC1A + SYS_SCALBLND64 = 0xC1B + SYS_SCALBLNF = 0xADB + SYS_SCALBLNL = 0xADE + SYS_SCALBND128 = 0xC1F + SYS_SCALBND32 = 0xC1D + SYS_SCALBND64 = 0xC1E + SYS_SCALBNF = 0xAE3 + SYS_SCALBNL = 0xAE6 + SYS_SCANF_UNLOCKED = 0xCC2 + SYS_SCHED_YIELD = 0xB32 + SYS_SETENV = 0x0C8 + SYS_SETIPV4SOURCEFILTER = 0xC76 + SYS_SETSOURCEFILTER = 0xC78 + SYS_SHM_OPEN = 0xC8C + SYS_SHM_UNLINK = 0xC8D + SYS_SIND128 = 0xC22 + SYS_SIND32 = 0xC20 + SYS_SIND64 = 0xC21 + SYS_SINHD128 = 0xC25 + SYS_SINHD32 = 0xC23 + SYS_SINHD64 = 0xC24 + SYS_SIZEOF_ALLOC = 0xCEA + SYS_SOCKATMARK = 0xC68 + SYS_SQRTD128 = 0xC28 + SYS_SQRTD32 = 0xC26 + SYS_SQRTD64 = 0xC27 + SYS_STRCHR = 0x0A0 + SYS_STRCSPN = 0x0A1 + SYS_STRERROR = 0x0A8 + SYS_STRERROR_R = 0xB33 + SYS_STRFTIME = 0x0B2 + SYS_STRLEN = 0x0A9 + SYS_STRPBRK = 0x0A2 + SYS_STRSPN = 0x0A4 + SYS_STRSTR = 0x0A5 + SYS_STRTOD128 = 0xC2B + SYS_STRTOD32 = 0xC29 + SYS_STRTOD64 = 0xC2A + SYS_STRTOK = 0x0A6 + SYS_TAND128 = 0xC2E + SYS_TAND32 = 0xC2C + SYS_TAND64 = 0xC2D + SYS_TANHD128 = 0xC31 + SYS_TANHD32 = 0xC2F + SYS_TANHD64 = 0xC30 + SYS_TGAMMAD128 = 0xC34 + SYS_TGAMMAD32 = 0xC32 + SYS_TGAMMAD64 = 0xC33 + SYS_TIME = 0x0AD + SYS_TIME64 = 0xCE1 + SYS_TMPFILE64 = 0xD4D + SYS_TMPFILE64_UNLOCKED = 0xD4E + SYS_TMPFILE_UNLOCKED = 0xCFD + SYS_TRUNCD128 = 0xC40 + SYS_TRUNCD32 = 0xC3E + SYS_TRUNCD64 = 0xC3F + SYS_UNGETC_UNLOCKED = 0xCC3 + SYS_UNGETWC_UNLOCKED = 0xCC4 + SYS_UNSETENV = 0xB34 + SYS_VFPRINTF_UNLOCKED = 0xCC5 + SYS_VFSCANF_UNLOCKED = 0xCC7 + SYS_VFWPRINTF_UNLOCKED = 0xCC9 + SYS_VFWSCANF_UNLOCKED = 0xCCB + SYS_VPRINTF_UNLOCKED = 0xCCD + SYS_VSCANF_UNLOCKED = 0xCCF + SYS_VWPRINTF_UNLOCKED = 0xCD1 + SYS_VWSCANF_UNLOCKED = 0xCD3 + SYS_WCSTOD128 = 0xC43 + 
SYS_WCSTOD32 = 0xC41 + SYS_WCSTOD64 = 0xC42 + SYS_WPRINTF_UNLOCKED = 0xCD5 + SYS_WSCANF_UNLOCKED = 0xCD6 + SYS__FLUSHLBF = 0xD68 + SYS__FLUSHLBF_UNLOCKED = 0xD6F + SYS___ACOSHF_H = 0xA54 + SYS___ACOSHL_H = 0xA55 + SYS___ASINHF_H = 0xA56 + SYS___ASINHL_H = 0xA57 + SYS___ATANPID128 = 0xC6D + SYS___ATANPID32 = 0xC6B + SYS___ATANPID64 = 0xC6C + SYS___CBRTF_H = 0xA58 + SYS___CBRTL_H = 0xA59 + SYS___CDUMP = 0x0C4 + SYS___CLASS = 0xAFA + SYS___CLASS2 = 0xB99 + SYS___CLASS2D128 = 0xC99 + SYS___CLASS2D32 = 0xC97 + SYS___CLASS2D64 = 0xC98 + SYS___CLASS2F = 0xC91 + SYS___CLASS2F_B = 0xC93 + SYS___CLASS2F_H = 0xC94 + SYS___CLASS2L = 0xC92 + SYS___CLASS2L_B = 0xC95 + SYS___CLASS2L_H = 0xC96 + SYS___CLASS2_B = 0xB9A + SYS___CLASS2_H = 0xB9B + SYS___CLASS_B = 0xAFB + SYS___CLASS_H = 0xAFC + SYS___CLOGL_B = 0xA01 + SYS___CLOGL_H = 0xA02 + SYS___CLRENV = 0x0C9 + SYS___CLRMF = 0x0BD + SYS___CODEPAGE_INFO = 0xC64 + SYS___CONJF_B = 0xA07 + SYS___CONJF_H = 0xA08 + SYS___CONJL_B = 0xA0A + SYS___CONJL_H = 0xA0B + SYS___CONJ_B = 0xA04 + SYS___CONJ_H = 0xA05 + SYS___COPYSIGN_B = 0xA5A + SYS___COPYSIGN_H = 0xAF5 + SYS___COSPID128 = 0xC70 + SYS___COSPID32 = 0xC6E + SYS___COSPID64 = 0xC6F + SYS___CPOWF_B = 0xA10 + SYS___CPOWF_H = 0xA11 + SYS___CPOWL_B = 0xA13 + SYS___CPOWL_H = 0xA14 + SYS___CPOW_B = 0xA0D + SYS___CPOW_H = 0xA0E + SYS___CPROJF_B = 0xA19 + SYS___CPROJF_H = 0xA1A + SYS___CPROJL_B = 0xA1C + SYS___CPROJL_H = 0xA1D + SYS___CPROJ_B = 0xA16 + SYS___CPROJ_H = 0xA17 + SYS___CREALF_B = 0xA22 + SYS___CREALF_H = 0xA23 + SYS___CREALL_B = 0xA25 + SYS___CREALL_H = 0xA26 + SYS___CREAL_B = 0xA1F + SYS___CREAL_H = 0xA20 + SYS___CSINF_B = 0xA2B + SYS___CSINF_H = 0xA2C + SYS___CSINHF_B = 0xA34 + SYS___CSINHF_H = 0xA35 + SYS___CSINHL_B = 0xA37 + SYS___CSINHL_H = 0xA38 + SYS___CSINH_B = 0xA31 + SYS___CSINH_H = 0xA32 + SYS___CSINL_B = 0xA2E + SYS___CSINL_H = 0xA2F + SYS___CSIN_B = 0xA28 + SYS___CSIN_H = 0xA29 + SYS___CSNAP = 0x0C5 + SYS___CSQRTF_B = 0xA3D + SYS___CSQRTF_H = 0xA3E + SYS___CSQRTL_B = 0xA40 + SYS___CSQRTL_H = 0xA41 + SYS___CSQRT_B = 0xA3A + SYS___CSQRT_H = 0xA3B + SYS___CTANF_B = 0xA46 + SYS___CTANF_H = 0xA47 + SYS___CTANHF_B = 0xA4F + SYS___CTANHF_H = 0xA50 + SYS___CTANHL_B = 0xA52 + SYS___CTANHL_H = 0xA53 + SYS___CTANH_B = 0xA4C + SYS___CTANH_H = 0xA4D + SYS___CTANL_B = 0xA49 + SYS___CTANL_H = 0xA4A + SYS___CTAN_B = 0xA43 + SYS___CTAN_H = 0xA44 + SYS___CTEST = 0x0C7 + SYS___CTRACE = 0x0C6 + SYS___D1TOP = 0xC9B + SYS___D2TOP = 0xC9C + SYS___D4TOP = 0xC9D + SYS___DYNALL = 0x0C3 + SYS___DYNFRE = 0x0C2 + SYS___EXP2F_H = 0xA5E + SYS___EXP2L_H = 0xA5F + SYS___EXP2_H = 0xA5D + SYS___EXPM1F_H = 0xA5B + SYS___EXPM1L_H = 0xA5C + SYS___FBUFSIZE = 0xD60 + SYS___FLBF = 0xD62 + SYS___FLDATA = 0x0C1 + SYS___FMAF_B = 0xA67 + SYS___FMAF_H = 0xA68 + SYS___FMAL_B = 0xA6A + SYS___FMAL_H = 0xA6B + SYS___FMAXF_B = 0xA70 + SYS___FMAXF_H = 0xA71 + SYS___FMAXL_B = 0xA73 + SYS___FMAXL_H = 0xA74 + SYS___FMAX_B = 0xA6D + SYS___FMAX_H = 0xA6E + SYS___FMA_B = 0xA64 + SYS___FMA_H = 0xA65 + SYS___FMINF_B = 0xA79 + SYS___FMINF_H = 0xA7A + SYS___FMINL_B = 0xA7C + SYS___FMINL_H = 0xA7D + SYS___FMIN_B = 0xA76 + SYS___FMIN_H = 0xA77 + SYS___FPENDING = 0xD61 + SYS___FPENDING_UNLOCKED = 0xD6C + SYS___FPURGE = 0xD69 + SYS___FPURGE_UNLOCKED = 0xD70 + SYS___FP_CAST_D = 0xBCB + SYS___FREADABLE = 0xD63 + SYS___FREADAHEAD = 0xD6A + SYS___FREADAHEAD_UNLOCKED = 0xD71 + SYS___FREADING = 0xD65 + SYS___FREADING_UNLOCKED = 0xD6D + SYS___FSEEK2 = 0xB3C + SYS___FSETERR = 0xD6B + SYS___FSETLOCKING = 0xD67 + SYS___FTCHEP = 0x0BF + SYS___FTELL2 = 0xB3B + 
SYS___FUPDT = 0x0B5 + SYS___FWRITABLE = 0xD64 + SYS___FWRITING = 0xD66 + SYS___FWRITING_UNLOCKED = 0xD6E + SYS___GETCB = 0x0B4 + SYS___GETGRGID1 = 0xD5B + SYS___GETGRNAM1 = 0xD5C + SYS___GETTHENT = 0xCE5 + SYS___GETTOD = 0xD3E + SYS___HYPOTF_H = 0xAF6 + SYS___HYPOTL_H = 0xAF7 + SYS___ILOGBF_B = 0xA7F + SYS___ILOGBF_H = 0xA80 + SYS___ILOGBL_B = 0xA82 + SYS___ILOGBL_H = 0xA83 + SYS___ISBLANK_A = 0xB2E + SYS___ISBLNK = 0x0FE + SYS___ISWBLANK_A = 0xB2F + SYS___LE_CEEGTJS = 0xD72 + SYS___LE_TRACEBACK = 0xB7A + SYS___LGAMMAL_H = 0xA62 + SYS___LGAMMA_B_C99 = 0xB39 + SYS___LGAMMA_H_C99 = 0xB38 + SYS___LGAMMA_R_C99 = 0xB3A + SYS___LLRINTF_B = 0xA88 + SYS___LLRINTF_H = 0xA89 + SYS___LLRINTL_B = 0xA8B + SYS___LLRINTL_H = 0xA8C + SYS___LLRINT_B = 0xA85 + SYS___LLRINT_H = 0xA86 + SYS___LLROUNDF_B = 0xA91 + SYS___LLROUNDF_H = 0xA92 + SYS___LLROUNDL_B = 0xA94 + SYS___LLROUNDL_H = 0xA95 + SYS___LLROUND_B = 0xA8E + SYS___LLROUND_H = 0xA8F + SYS___LOCALE_CTL = 0xD47 + SYS___LOG1PF_H = 0xA60 + SYS___LOG1PL_H = 0xA61 + SYS___LOGBF_B = 0xA97 + SYS___LOGBF_H = 0xA98 + SYS___LOGBL_B = 0xA9A + SYS___LOGBL_H = 0xA9B + SYS___LOGIN_APPLID = 0xCE2 + SYS___LRINTF_B = 0xAA0 + SYS___LRINTF_H = 0xAA1 + SYS___LRINTL_B = 0xAA3 + SYS___LRINTL_H = 0xAA4 + SYS___LRINT_B = 0xA9D + SYS___LRINT_H = 0xA9E + SYS___LROUNDF_FIXUP = 0xB31 + SYS___LROUNDL_B = 0xAA6 + SYS___LROUNDL_H = 0xAA7 + SYS___LROUND_FIXUP = 0xB30 + SYS___MOSERVICES = 0xD3D + SYS___MUST_STAY_CLEAN = 0xB7C + SYS___NANF_B = 0xAAB + SYS___NANL_B = 0xAAD + SYS___NAN_B = 0xAA9 + SYS___NEARBYINTF_B = 0xAB2 + SYS___NEARBYINTF_H = 0xAB3 + SYS___NEARBYINTL_B = 0xAB5 + SYS___NEARBYINTL_H = 0xAB6 + SYS___NEARBYINT_B = 0xAAF + SYS___NEARBYINT_H = 0xAB0 + SYS___NEXTAFTERF_B = 0xAB8 + SYS___NEXTAFTERF_H = 0xAB9 + SYS___NEXTAFTERL_B = 0xABB + SYS___NEXTAFTERL_H = 0xABC + SYS___NEXTTOWARDF_B = 0xAC1 + SYS___NEXTTOWARDF_H = 0xAC2 + SYS___NEXTTOWARDL_B = 0xAC4 + SYS___NEXTTOWARDL_H = 0xAC5 + SYS___NEXTTOWARD_B = 0xABE + SYS___NEXTTOWARD_H = 0xABF + SYS___O_ENV = 0xB7D + SYS___PASSWD_APPLID = 0xCE3 + SYS___PTOD1 = 0xC9E + SYS___PTOD2 = 0xC9F + SYS___PTOD4 = 0xCA0 + SYS___REGCOMP_STD = 0x0EA + SYS___REMAINDERF_H = 0xAC6 + SYS___REMAINDERL_H = 0xAC7 + SYS___REMQUOD128 = 0xC10 + SYS___REMQUOD32 = 0xC0E + SYS___REMQUOD64 = 0xC0F + SYS___REMQUOF_H = 0xAC9 + SYS___REMQUOL_H = 0xACA + SYS___REMQUO_H = 0xAC8 + SYS___RINTF_B = 0xACC + SYS___RINTL_B = 0xACE + SYS___ROUNDF_B = 0xAD3 + SYS___ROUNDF_H = 0xAD4 + SYS___ROUNDL_B = 0xAD6 + SYS___ROUNDL_H = 0xAD7 + SYS___ROUND_B = 0xAD0 + SYS___ROUND_H = 0xAD1 + SYS___SCALBLNF_B = 0xADC + SYS___SCALBLNF_H = 0xADD + SYS___SCALBLNL_B = 0xADF + SYS___SCALBLNL_H = 0xAE0 + SYS___SCALBLN_B = 0xAD9 + SYS___SCALBLN_H = 0xADA + SYS___SCALBNF_B = 0xAE4 + SYS___SCALBNF_H = 0xAE5 + SYS___SCALBNL_B = 0xAE7 + SYS___SCALBNL_H = 0xAE8 + SYS___SCALBN_B = 0xAE1 + SYS___SCALBN_H = 0xAE2 + SYS___SETENV = 0x0C8 + SYS___SINPID128 = 0xC73 + SYS___SINPID32 = 0xC71 + SYS___SINPID64 = 0xC72 + SYS___SMF_RECORD2 = 0xD48 + SYS___STATIC_REINIT = 0xB3D + SYS___TGAMMAF_H_C99 = 0xB79 + SYS___TGAMMAL_H = 0xAE9 + SYS___TGAMMA_H_C99 = 0xB78 + SYS___TOCSNAME2 = 0xC9A + SYS_CEIL = 0x01F + SYS_CHAUDIT = 0x1E0 + SYS_EXP = 0x01A + SYS_FCHAUDIT = 0x1E1 + SYS_FREXP = 0x01D + SYS_GETGROUPSBYNAME = 0x1E2 + SYS_GETPWUID = 0x1A0 + SYS_GETUID = 0x1A1 + SYS_ISATTY = 0x1A3 + SYS_KILL = 0x1A4 + SYS_LDEXP = 0x01E + SYS_LINK = 0x1A5 + SYS_LOG10 = 0x01C + SYS_LSEEK = 0x1A6 + SYS_LSTAT = 0x1A7 + SYS_MKDIR = 0x1A8 + SYS_MKFIFO = 0x1A9 + SYS_MKNOD = 0x1AA + SYS_MODF = 0x01B + SYS_MOUNT = 0x1AB + SYS_OPEN 
= 0x1AC + SYS_OPENDIR = 0x1AD + SYS_PATHCONF = 0x1AE + SYS_PAUSE = 0x1AF + SYS_PIPE = 0x1B0 + SYS_PTHREAD_ATTR_DESTROY = 0x1E7 + SYS_PTHREAD_ATTR_GETDETACHSTATE = 0x1EB + SYS_PTHREAD_ATTR_GETSTACKSIZE = 0x1E9 + SYS_PTHREAD_ATTR_GETWEIGHT_NP = 0x1ED + SYS_PTHREAD_ATTR_INIT = 0x1E6 + SYS_PTHREAD_ATTR_SETDETACHSTATE = 0x1EA + SYS_PTHREAD_ATTR_SETSTACKSIZE = 0x1E8 + SYS_PTHREAD_ATTR_SETWEIGHT_NP = 0x1EC + SYS_PTHREAD_CANCEL = 0x1EE + SYS_PTHREAD_CLEANUP_POP = 0x1F0 + SYS_PTHREAD_CLEANUP_PUSH = 0x1EF + SYS_PTHREAD_CONDATTR_DESTROY = 0x1F2 + SYS_PTHREAD_CONDATTR_INIT = 0x1F1 + SYS_PTHREAD_COND_BROADCAST = 0x1F6 + SYS_PTHREAD_COND_DESTROY = 0x1F4 + SYS_PTHREAD_COND_INIT = 0x1F3 + SYS_PTHREAD_COND_SIGNAL = 0x1F5 + SYS_PTHREAD_COND_TIMEDWAIT = 0x1F8 + SYS_PTHREAD_COND_WAIT = 0x1F7 + SYS_PTHREAD_CREATE = 0x1F9 + SYS_PTHREAD_DETACH = 0x1FA + SYS_PTHREAD_EQUAL = 0x1FB + SYS_PTHREAD_EXIT = 0x1E4 + SYS_PTHREAD_GETSPECIFIC = 0x1FC + SYS_PTHREAD_JOIN = 0x1FD + SYS_PTHREAD_KEY_CREATE = 0x1FE + SYS_PTHREAD_KILL = 0x1E5 + SYS_PTHREAD_MUTEXATTR_INIT = 0x1FF + SYS_READ = 0x1B2 + SYS_READDIR = 0x1B3 + SYS_READLINK = 0x1B4 + SYS_REWINDDIR = 0x1B5 + SYS_RMDIR = 0x1B6 + SYS_SETEGID = 0x1B7 + SYS_SETEUID = 0x1B8 + SYS_SETGID = 0x1B9 + SYS_SETPGID = 0x1BA + SYS_SETSID = 0x1BB + SYS_SETUID = 0x1BC + SYS_SIGACTION = 0x1BD + SYS_SIGADDSET = 0x1BE + SYS_SIGDELSET = 0x1BF + SYS_SIGEMPTYSET = 0x1C0 + SYS_SIGFILLSET = 0x1C1 + SYS_SIGISMEMBER = 0x1C2 + SYS_SIGLONGJMP = 0x1C3 + SYS_SIGPENDING = 0x1C4 + SYS_SIGPROCMASK = 0x1C5 + SYS_SIGSETJMP = 0x1C6 + SYS_SIGSUSPEND = 0x1C7 + SYS_SIGWAIT = 0x1E3 + SYS_SLEEP = 0x1C8 + SYS_STAT = 0x1C9 + SYS_SYMLINK = 0x1CB + SYS_SYSCONF = 0x1CC + SYS_TCDRAIN = 0x1CD + SYS_TCFLOW = 0x1CE + SYS_TCFLUSH = 0x1CF + SYS_TCGETATTR = 0x1D0 + SYS_TCGETPGRP = 0x1D1 + SYS_TCSENDBREAK = 0x1D2 + SYS_TCSETATTR = 0x1D3 + SYS_TCSETPGRP = 0x1D4 + SYS_TIMES = 0x1D5 + SYS_TTYNAME = 0x1D6 + SYS_TZSET = 0x1D7 + SYS_UMASK = 0x1D8 + SYS_UMOUNT = 0x1D9 + SYS_UNAME = 0x1DA + SYS_UNLINK = 0x1DB + SYS_UTIME = 0x1DC + SYS_WAIT = 0x1DD + SYS_WAITPID = 0x1DE + SYS_WRITE = 0x1DF + SYS_W_GETPSENT = 0x1B1 + SYS_W_IOCTL = 0x1A2 + SYS_W_STATFS = 0x1CA + SYS_A64L = 0x2EF + SYS_BCMP = 0x2B9 + SYS_BCOPY = 0x2BA + SYS_BZERO = 0x2BB + SYS_CATCLOSE = 0x2B6 + SYS_CATGETS = 0x2B7 + SYS_CATOPEN = 0x2B8 + SYS_CRYPT = 0x2AC + SYS_DBM_CLEARERR = 0x2F7 + SYS_DBM_CLOSE = 0x2F8 + SYS_DBM_DELETE = 0x2F9 + SYS_DBM_ERROR = 0x2FA + SYS_DBM_FETCH = 0x2FB + SYS_DBM_FIRSTKEY = 0x2FC + SYS_DBM_NEXTKEY = 0x2FD + SYS_DBM_OPEN = 0x2FE + SYS_DBM_STORE = 0x2FF + SYS_DRAND48 = 0x2B2 + SYS_ENCRYPT = 0x2AD + SYS_ENDUTXENT = 0x2E1 + SYS_ERAND48 = 0x2B3 + SYS_ERF = 0x02C + SYS_ERFC = 0x02D + SYS_FCHDIR = 0x2D9 + SYS_FFS = 0x2BC + SYS_FMTMSG = 0x2E5 + SYS_FSTATVFS = 0x2B4 + SYS_FTIME = 0x2F5 + SYS_GAMMA = 0x02E + SYS_GETDATE = 0x2A6 + SYS_GETPAGESIZE = 0x2D8 + SYS_GETTIMEOFDAY = 0x2F6 + SYS_GETUTXENT = 0x2E0 + SYS_GETUTXID = 0x2E2 + SYS_GETUTXLINE = 0x2E3 + SYS_HCREATE = 0x2C6 + SYS_HDESTROY = 0x2C7 + SYS_HSEARCH = 0x2C8 + SYS_HYPOT = 0x02B + SYS_INDEX = 0x2BD + SYS_INITSTATE = 0x2C2 + SYS_INSQUE = 0x2CF + SYS_ISASCII = 0x2ED + SYS_JRAND48 = 0x2E6 + SYS_L64A = 0x2F0 + SYS_LCONG48 = 0x2EA + SYS_LFIND = 0x2C9 + SYS_LRAND48 = 0x2E7 + SYS_LSEARCH = 0x2CA + SYS_MEMCCPY = 0x2D4 + SYS_MRAND48 = 0x2E8 + SYS_NRAND48 = 0x2E9 + SYS_PCLOSE = 0x2D2 + SYS_POPEN = 0x2D1 + SYS_PUTUTXLINE = 0x2E4 + SYS_RANDOM = 0x2C4 + SYS_REMQUE = 0x2D0 + SYS_RINDEX = 0x2BE + SYS_SEED48 = 0x2EC + SYS_SETKEY = 0x2AE + SYS_SETSTATE = 0x2C3 + SYS_SETUTXENT = 0x2DF + SYS_SRAND48 = 0x2EB + 
SYS_SRANDOM = 0x2C5 + SYS_STATVFS = 0x2B5 + SYS_STRCASECMP = 0x2BF + SYS_STRDUP = 0x2C0 + SYS_STRNCASECMP = 0x2C1 + SYS_SWAB = 0x2D3 + SYS_TDELETE = 0x2CB + SYS_TFIND = 0x2CC + SYS_TOASCII = 0x2EE + SYS_TSEARCH = 0x2CD + SYS_TWALK = 0x2CE + SYS_UALARM = 0x2F1 + SYS_USLEEP = 0x2F2 + SYS_WAIT3 = 0x2A7 + SYS_WAITID = 0x2A8 + SYS_Y1 = 0x02A + SYS___ATOE = 0x2DB + SYS___ATOE_L = 0x2DC + SYS___CATTRM = 0x2A9 + SYS___CNVBLK = 0x2AF + SYS___CRYTRM = 0x2B0 + SYS___DLGHT = 0x2A1 + SYS___ECRTRM = 0x2B1 + SYS___ETOA = 0x2DD + SYS___ETOA_L = 0x2DE + SYS___GDTRM = 0x2AA + SYS___OCLCK = 0x2DA + SYS___OPARGF = 0x2A2 + SYS___OPERRF = 0x2A5 + SYS___OPINDF = 0x2A4 + SYS___OPOPTF = 0x2A3 + SYS___RNDTRM = 0x2AB + SYS___SRCTRM = 0x2F4 + SYS___TZONE = 0x2A0 + SYS___UTXTRM = 0x2F3 + SYS_ASIN = 0x03E + SYS_ISXDIGIT = 0x03B + SYS_SETLOCAL = 0x03A + SYS_SETLOCALE = 0x03A + SYS_SIN = 0x03F + SYS_TOLOWER = 0x03C + SYS_TOUPPER = 0x03D + SYS_ACCEPT_AND_RECV = 0x4F7 + SYS_ATOL = 0x04E + SYS_CHECKSCH = 0x4BC + SYS_CHECKSCHENV = 0x4BC + SYS_CLEARERR = 0x04C + SYS_CONNECTS = 0x4B5 + SYS_CONNECTSERVER = 0x4B5 + SYS_CONNECTW = 0x4B4 + SYS_CONNECTWORKMGR = 0x4B4 + SYS_CONTINUE = 0x4B3 + SYS_CONTINUEWORKUNIT = 0x4B3 + SYS_COPYSIGN = 0x4C2 + SYS_CREATEWO = 0x4B2 + SYS_CREATEWORKUNIT = 0x4B2 + SYS_DELETEWO = 0x4B9 + SYS_DELETEWORKUNIT = 0x4B9 + SYS_DISCONNE = 0x4B6 + SYS_DISCONNECTSERVER = 0x4B6 + SYS_FEOF = 0x04D + SYS_FERROR = 0x04A + SYS_FINITE = 0x4C8 + SYS_GAMMA_R = 0x4E2 + SYS_JOINWORK = 0x4B7 + SYS_JOINWORKUNIT = 0x4B7 + SYS_LEAVEWOR = 0x4B8 + SYS_LEAVEWORKUNIT = 0x4B8 + SYS_LGAMMA_R = 0x4EB + SYS_MATHERR = 0x4D0 + SYS_PERROR = 0x04F + SYS_QUERYMET = 0x4BA + SYS_QUERYMETRICS = 0x4BA + SYS_QUERYSCH = 0x4BB + SYS_QUERYSCHENV = 0x4BB + SYS_REWIND = 0x04B + SYS_SCALBN = 0x4D4 + SYS_SIGNIFIC = 0x4D5 + SYS_SIGNIFICAND = 0x4D5 + SYS___ACOSH_B = 0x4DA + SYS___ACOS_B = 0x4D9 + SYS___ASINH_B = 0x4BE + SYS___ASIN_B = 0x4DB + SYS___ATAN2_B = 0x4DC + SYS___ATANH_B = 0x4DD + SYS___ATAN_B = 0x4BF + SYS___CBRT_B = 0x4C0 + SYS___CEIL_B = 0x4C1 + SYS___COSH_B = 0x4DE + SYS___COS_B = 0x4C3 + SYS___DGHT = 0x4A8 + SYS___ENVN = 0x4B0 + SYS___ERFC_B = 0x4C5 + SYS___ERF_B = 0x4C4 + SYS___EXPM1_B = 0x4C6 + SYS___EXP_B = 0x4DF + SYS___FABS_B = 0x4C7 + SYS___FLOOR_B = 0x4C9 + SYS___FMOD_B = 0x4E0 + SYS___FP_SETMODE = 0x4F8 + SYS___FREXP_B = 0x4CA + SYS___GAMMA_B = 0x4E1 + SYS___GDRR = 0x4A1 + SYS___HRRNO = 0x4A2 + SYS___HYPOT_B = 0x4E3 + SYS___ILOGB_B = 0x4CB + SYS___ISNAN_B = 0x4CC + SYS___J0_B = 0x4E4 + SYS___J1_B = 0x4E6 + SYS___JN_B = 0x4E8 + SYS___LDEXP_B = 0x4CD + SYS___LGAMMA_B = 0x4EA + SYS___LOG10_B = 0x4ED + SYS___LOG1P_B = 0x4CE + SYS___LOGB_B = 0x4CF + SYS___LOGIN = 0x4F5 + SYS___LOG_B = 0x4EC + SYS___MLOCKALL = 0x4B1 + SYS___MODF_B = 0x4D1 + SYS___NEXTAFTER_B = 0x4D2 + SYS___OPENDIR2 = 0x4F3 + SYS___OPEN_STAT = 0x4F6 + SYS___OPND = 0x4A5 + SYS___OPPT = 0x4A6 + SYS___OPRG = 0x4A3 + SYS___OPRR = 0x4A4 + SYS___PID_AFFINITY = 0x4BD + SYS___POW_B = 0x4EE + SYS___READDIR2 = 0x4F4 + SYS___REMAINDER_B = 0x4EF + SYS___RINT_B = 0x4D3 + SYS___SCALB_B = 0x4F0 + SYS___SIGACTIONSET = 0x4FB + SYS___SIGGM = 0x4A7 + SYS___SINH_B = 0x4F1 + SYS___SIN_B = 0x4D6 + SYS___SQRT_B = 0x4F2 + SYS___TANH_B = 0x4D8 + SYS___TAN_B = 0x4D7 + SYS___TRRNO = 0x4AF + SYS___TZNE = 0x4A9 + SYS___TZZN = 0x4AA + SYS___UCREATE = 0x4FC + SYS___UFREE = 0x4FE + SYS___UHEAPREPORT = 0x4FF + SYS___UMALLOC = 0x4FD + SYS___Y0_B = 0x4E5 + SYS___Y1_B = 0x4E7 + SYS___YN_B = 0x4E9 + SYS_ABORT = 0x05C + SYS_ASCTIME_R = 0x5E0 + SYS_ATEXIT = 0x05D + SYS_CONNECTE = 0x5AE + 
SYS_CONNECTEXPORTIMPORT = 0x5AE + SYS_CTIME_R = 0x5E1 + SYS_DN_COMP = 0x5DF + SYS_DN_EXPAND = 0x5DD + SYS_DN_SKIPNAME = 0x5DE + SYS_EXIT = 0x05A + SYS_EXPORTWO = 0x5A1 + SYS_EXPORTWORKUNIT = 0x5A1 + SYS_EXTRACTW = 0x5A5 + SYS_EXTRACTWORKUNIT = 0x5A5 + SYS_FSEEKO = 0x5C9 + SYS_FTELLO = 0x5C8 + SYS_GETGRGID_R = 0x5E7 + SYS_GETGRNAM_R = 0x5E8 + SYS_GETLOGIN_R = 0x5E9 + SYS_GETPWNAM_R = 0x5EA + SYS_GETPWUID_R = 0x5EB + SYS_GMTIME_R = 0x5E2 + SYS_IMPORTWO = 0x5A3 + SYS_IMPORTWORKUNIT = 0x5A3 + SYS_INET_NTOP = 0x5D3 + SYS_INET_PTON = 0x5D4 + SYS_LLABS = 0x5CE + SYS_LLDIV = 0x5CB + SYS_LOCALTIME_R = 0x5E3 + SYS_PTHREAD_ATFORK = 0x5ED + SYS_PTHREAD_ATTR_GETDETACHSTATE_U98 = 0x5FB + SYS_PTHREAD_ATTR_GETGUARDSIZE = 0x5EE + SYS_PTHREAD_ATTR_GETSCHEDPARAM = 0x5F9 + SYS_PTHREAD_ATTR_GETSTACKADDR = 0x5EF + SYS_PTHREAD_ATTR_SETDETACHSTATE_U98 = 0x5FC + SYS_PTHREAD_ATTR_SETGUARDSIZE = 0x5F0 + SYS_PTHREAD_ATTR_SETSCHEDPARAM = 0x5FA + SYS_PTHREAD_ATTR_SETSTACKADDR = 0x5F1 + SYS_PTHREAD_CONDATTR_GETPSHARED = 0x5F2 + SYS_PTHREAD_CONDATTR_SETPSHARED = 0x5F3 + SYS_PTHREAD_DETACH_U98 = 0x5FD + SYS_PTHREAD_GETCONCURRENCY = 0x5F4 + SYS_PTHREAD_GETSPECIFIC_U98 = 0x5FE + SYS_PTHREAD_KEY_DELETE = 0x5F5 + SYS_PTHREAD_SETCANCELSTATE = 0x5FF + SYS_PTHREAD_SETCONCURRENCY = 0x5F6 + SYS_PTHREAD_SIGMASK = 0x5F7 + SYS_QUERYENC = 0x5AD + SYS_QUERYWORKUNITCLASSIFICATION = 0x5AD + SYS_RAISE = 0x05E + SYS_RAND_R = 0x5E4 + SYS_READDIR_R = 0x5E6 + SYS_REALLOC = 0x05B + SYS_RES_INIT = 0x5D8 + SYS_RES_MKQUERY = 0x5D7 + SYS_RES_QUERY = 0x5D9 + SYS_RES_QUERYDOMAIN = 0x5DC + SYS_RES_SEARCH = 0x5DA + SYS_RES_SEND = 0x5DB + SYS_SETJMP = 0x05F + SYS_SIGQUEUE = 0x5A9 + SYS_STRTOK_R = 0x5E5 + SYS_STRTOLL = 0x5B0 + SYS_STRTOULL = 0x5B1 + SYS_TTYNAME_R = 0x5EC + SYS_UNDOEXPO = 0x5A2 + SYS_UNDOEXPORTWORKUNIT = 0x5A2 + SYS_UNDOIMPO = 0x5A4 + SYS_UNDOIMPORTWORKUNIT = 0x5A4 + SYS_WCSTOLL = 0x5CC + SYS_WCSTOULL = 0x5CD + SYS___ABORT = 0x05C + SYS___CONSOLE2 = 0x5D2 + SYS___CPL = 0x5A6 + SYS___DISCARDDATA = 0x5F8 + SYS___DSA_PREV = 0x5B2 + SYS___EP_FIND = 0x5B3 + SYS___FP_SWAPMODE = 0x5AF + SYS___GETUSERID = 0x5AB + SYS___GET_CPUID = 0x5B9 + SYS___GET_SYSTEM_SETTINGS = 0x5BA + SYS___IPDOMAINNAME = 0x5AC + SYS___MAP_INIT = 0x5A7 + SYS___MAP_SERVICE = 0x5A8 + SYS___MOUNT = 0x5AA + SYS___MSGRCV_TIMED = 0x5B7 + SYS___RES = 0x5D6 + SYS___SEMOP_TIMED = 0x5B8 + SYS___SERVER_THREADS_QUERY = 0x5B4 + SYS_FPRINTF = 0x06D + SYS_FSCANF = 0x06A + SYS_PRINTF = 0x06F + SYS_SETBUF = 0x06B + SYS_SETVBUF = 0x06C + SYS_SSCANF = 0x06E + SYS___CATGETS_A = 0x6C0 + SYS___CHAUDIT_A = 0x6F4 + SYS___CHMOD_A = 0x6E8 + SYS___COLLATE_INIT_A = 0x6AC + SYS___CREAT_A = 0x6F6 + SYS___CTYPE_INIT_A = 0x6AF + SYS___DLLLOAD_A = 0x6DF + SYS___DLLQUERYFN_A = 0x6E0 + SYS___DLLQUERYVAR_A = 0x6E1 + SYS___E2A_L = 0x6E3 + SYS___EXECLE_A = 0x6A0 + SYS___EXECLP_A = 0x6A4 + SYS___EXECVE_A = 0x6C1 + SYS___EXECVP_A = 0x6C2 + SYS___EXECV_A = 0x6B1 + SYS___FPRINTF_A = 0x6FA + SYS___GETADDRINFO_A = 0x6BF + SYS___GETNAMEINFO_A = 0x6C4 + SYS___GET_WCTYPE_STD_A = 0x6AE + SYS___ICONV_OPEN_A = 0x6DE + SYS___IF_INDEXTONAME_A = 0x6DC + SYS___IF_NAMETOINDEX_A = 0x6DB + SYS___ISWCTYPE_A = 0x6B0 + SYS___IS_WCTYPE_STD_A = 0x6B2 + SYS___LOCALECONV_A = 0x6B8 + SYS___LOCALECONV_STD_A = 0x6B9 + SYS___LOCALE_INIT_A = 0x6B7 + SYS___LSTAT_A = 0x6EE + SYS___LSTAT_O_A = 0x6EF + SYS___MKDIR_A = 0x6E9 + SYS___MKFIFO_A = 0x6EC + SYS___MKNOD_A = 0x6F0 + SYS___MONETARY_INIT_A = 0x6BC + SYS___MOUNT_A = 0x6F1 + SYS___NL_CSINFO_A = 0x6D6 + SYS___NL_LANGINFO_A = 0x6BA + SYS___NL_LNAGINFO_STD_A = 0x6BB + SYS___NL_MONINFO_A = 
0x6D7 + SYS___NL_NUMINFO_A = 0x6D8 + SYS___NL_RESPINFO_A = 0x6D9 + SYS___NL_TIMINFO_A = 0x6DA + SYS___NUMERIC_INIT_A = 0x6C6 + SYS___OPEN_A = 0x6F7 + SYS___PRINTF_A = 0x6DD + SYS___RESP_INIT_A = 0x6C7 + SYS___RPMATCH_A = 0x6C8 + SYS___RPMATCH_C_A = 0x6C9 + SYS___RPMATCH_STD_A = 0x6CA + SYS___SETLOCALE_A = 0x6F9 + SYS___SPAWNP_A = 0x6C5 + SYS___SPAWN_A = 0x6C3 + SYS___SPRINTF_A = 0x6FB + SYS___STAT_A = 0x6EA + SYS___STAT_O_A = 0x6EB + SYS___STRCOLL_STD_A = 0x6A1 + SYS___STRFMON_A = 0x6BD + SYS___STRFMON_STD_A = 0x6BE + SYS___STRFTIME_A = 0x6CC + SYS___STRFTIME_STD_A = 0x6CD + SYS___STRPTIME_A = 0x6CE + SYS___STRPTIME_STD_A = 0x6CF + SYS___STRXFRM_A = 0x6A2 + SYS___STRXFRM_C_A = 0x6A3 + SYS___STRXFRM_STD_A = 0x6A5 + SYS___SYNTAX_INIT_A = 0x6D4 + SYS___TIME_INIT_A = 0x6CB + SYS___TOD_INIT_A = 0x6D5 + SYS___TOWLOWER_A = 0x6B3 + SYS___TOWLOWER_STD_A = 0x6B4 + SYS___TOWUPPER_A = 0x6B5 + SYS___TOWUPPER_STD_A = 0x6B6 + SYS___UMOUNT_A = 0x6F2 + SYS___VFPRINTF_A = 0x6FC + SYS___VPRINTF_A = 0x6FD + SYS___VSPRINTF_A = 0x6FE + SYS___VSWPRINTF_A = 0x6FF + SYS___WCSCOLL_A = 0x6A6 + SYS___WCSCOLL_C_A = 0x6A7 + SYS___WCSCOLL_STD_A = 0x6A8 + SYS___WCSFTIME_A = 0x6D0 + SYS___WCSFTIME_STD_A = 0x6D1 + SYS___WCSXFRM_A = 0x6A9 + SYS___WCSXFRM_C_A = 0x6AA + SYS___WCSXFRM_STD_A = 0x6AB + SYS___WCTYPE_A = 0x6AD + SYS___W_GETMNTENT_A = 0x6F5 + SYS_____CCSIDTYPE_A = 0x6E6 + SYS_____CHATTR_A = 0x6E2 + SYS_____CSNAMETYPE_A = 0x6E7 + SYS_____OPEN_STAT_A = 0x6ED + SYS_____SPAWN2_A = 0x6D2 + SYS_____SPAWNP2_A = 0x6D3 + SYS_____TOCCSID_A = 0x6E4 + SYS_____TOCSNAME_A = 0x6E5 + SYS_ACL_FREE = 0x7FF + SYS_ACL_INIT = 0x7FE + SYS_FWIDE = 0x7DF + SYS_FWPRINTF = 0x7D1 + SYS_FWRITE = 0x07E + SYS_FWSCANF = 0x7D5 + SYS_GETCHAR = 0x07B + SYS_GETS = 0x07C + SYS_M_CREATE_LAYOUT = 0x7C9 + SYS_M_DESTROY_LAYOUT = 0x7CA + SYS_M_GETVALUES_LAYOUT = 0x7CB + SYS_M_SETVALUES_LAYOUT = 0x7CC + SYS_M_TRANSFORM_LAYOUT = 0x7CD + SYS_M_WTRANSFORM_LAYOUT = 0x7CE + SYS_PREAD = 0x7C7 + SYS_PUTC = 0x07D + SYS_PUTCHAR = 0x07A + SYS_PUTS = 0x07F + SYS_PWRITE = 0x7C8 + SYS_TOWCTRAN = 0x7D8 + SYS_TOWCTRANS = 0x7D8 + SYS_UNATEXIT = 0x7B5 + SYS_VFWPRINT = 0x7D3 + SYS_VFWPRINTF = 0x7D3 + SYS_VWPRINTF = 0x7D4 + SYS_WCTRANS = 0x7D7 + SYS_WPRINTF = 0x7D2 + SYS_WSCANF = 0x7D6 + SYS___ASCTIME_R_A = 0x7A1 + SYS___BASENAME_A = 0x7DC + SYS___BTOWC_A = 0x7E4 + SYS___CDUMP_A = 0x7B7 + SYS___CEE3DMP_A = 0x7B6 + SYS___CEILF_H = 0x7F4 + SYS___CEILL_H = 0x7F5 + SYS___CEIL_H = 0x7EA + SYS___CRYPT_A = 0x7BE + SYS___CSNAP_A = 0x7B8 + SYS___CTEST_A = 0x7B9 + SYS___CTIME_R_A = 0x7A2 + SYS___CTRACE_A = 0x7BA + SYS___DBM_OPEN_A = 0x7E6 + SYS___DIRNAME_A = 0x7DD + SYS___FABSF_H = 0x7FA + SYS___FABSL_H = 0x7FB + SYS___FABS_H = 0x7ED + SYS___FGETWC_A = 0x7AA + SYS___FGETWS_A = 0x7AD + SYS___FLOORF_H = 0x7F6 + SYS___FLOORL_H = 0x7F7 + SYS___FLOOR_H = 0x7EB + SYS___FPUTWC_A = 0x7A5 + SYS___FPUTWS_A = 0x7A8 + SYS___GETTIMEOFDAY_A = 0x7AE + SYS___GETWCHAR_A = 0x7AC + SYS___GETWC_A = 0x7AB + SYS___GLOB_A = 0x7DE + SYS___GMTIME_A = 0x7AF + SYS___GMTIME_R_A = 0x7B0 + SYS___INET_PTON_A = 0x7BC + SYS___J0_H = 0x7EE + SYS___J1_H = 0x7EF + SYS___JN_H = 0x7F0 + SYS___LOCALTIME_A = 0x7B1 + SYS___LOCALTIME_R_A = 0x7B2 + SYS___MALLOC24 = 0x7FC + SYS___MALLOC31 = 0x7FD + SYS___MKTIME_A = 0x7B3 + SYS___MODFF_H = 0x7F8 + SYS___MODFL_H = 0x7F9 + SYS___MODF_H = 0x7EC + SYS___OPENDIR_A = 0x7C2 + SYS___OSNAME = 0x7E0 + SYS___PUTWCHAR_A = 0x7A7 + SYS___PUTWC_A = 0x7A6 + SYS___READDIR_A = 0x7C3 + SYS___STRTOLL_A = 0x7A3 + SYS___STRTOULL_A = 0x7A4 + SYS___SYSLOG_A = 0x7BD + SYS___TZZNA = 0x7B4 + 
SYS___UNGETWC_A = 0x7A9 + SYS___UTIME_A = 0x7A0 + SYS___VFPRINTF2_A = 0x7E7 + SYS___VPRINTF2_A = 0x7E8 + SYS___VSPRINTF2_A = 0x7E9 + SYS___VSWPRNTF2_A = 0x7BB + SYS___WCSTOD_A = 0x7D9 + SYS___WCSTOL_A = 0x7DA + SYS___WCSTOUL_A = 0x7DB + SYS___WCTOB_A = 0x7E5 + SYS___Y0_H = 0x7F1 + SYS___Y1_H = 0x7F2 + SYS___YN_H = 0x7F3 + SYS_____OPENDIR2_A = 0x7BF + SYS_____OSNAME_A = 0x7E1 + SYS_____READDIR2_A = 0x7C0 + SYS_DLCLOSE = 0x8DF + SYS_DLERROR = 0x8E0 + SYS_DLOPEN = 0x8DD + SYS_DLSYM = 0x8DE + SYS_FLOCKFILE = 0x8D3 + SYS_FTRYLOCKFILE = 0x8D4 + SYS_FUNLOCKFILE = 0x8D5 + SYS_GETCHAR_UNLOCKED = 0x8D7 + SYS_GETC_UNLOCKED = 0x8D6 + SYS_PUTCHAR_UNLOCKED = 0x8D9 + SYS_PUTC_UNLOCKED = 0x8D8 + SYS_SNPRINTF = 0x8DA + SYS_VSNPRINTF = 0x8DB + SYS_WCSCSPN = 0x08B + SYS_WCSLEN = 0x08C + SYS_WCSNCAT = 0x08D + SYS_WCSNCMP = 0x08A + SYS_WCSNCPY = 0x08F + SYS_WCSSPN = 0x08E + SYS___ABSF_H = 0x8E7 + SYS___ABSL_H = 0x8E8 + SYS___ABS_H = 0x8E6 + SYS___ACOSF_H = 0x8EA + SYS___ACOSH_H = 0x8EC + SYS___ACOSL_H = 0x8EB + SYS___ACOS_H = 0x8E9 + SYS___ASINF_H = 0x8EE + SYS___ASINH_H = 0x8F0 + SYS___ASINL_H = 0x8EF + SYS___ASIN_H = 0x8ED + SYS___ATAN2F_H = 0x8F8 + SYS___ATAN2L_H = 0x8F9 + SYS___ATAN2_H = 0x8F7 + SYS___ATANF_H = 0x8F2 + SYS___ATANHF_H = 0x8F5 + SYS___ATANHL_H = 0x8F6 + SYS___ATANH_H = 0x8F4 + SYS___ATANL_H = 0x8F3 + SYS___ATAN_H = 0x8F1 + SYS___CBRT_H = 0x8FA + SYS___COPYSIGNF_H = 0x8FB + SYS___COPYSIGNL_H = 0x8FC + SYS___COSF_H = 0x8FE + SYS___COSL_H = 0x8FF + SYS___COS_H = 0x8FD + SYS___DLERROR_A = 0x8D2 + SYS___DLOPEN_A = 0x8D0 + SYS___DLSYM_A = 0x8D1 + SYS___GETUTXENT_A = 0x8C6 + SYS___GETUTXID_A = 0x8C7 + SYS___GETUTXLINE_A = 0x8C8 + SYS___ITOA = 0x8AA + SYS___ITOA_A = 0x8B0 + SYS___LE_CONDITION_TOKEN_BUILD = 0x8A5 + SYS___LE_MSG_ADD_INSERT = 0x8A6 + SYS___LE_MSG_GET = 0x8A7 + SYS___LE_MSG_GET_AND_WRITE = 0x8A8 + SYS___LE_MSG_WRITE = 0x8A9 + SYS___LLTOA = 0x8AE + SYS___LLTOA_A = 0x8B4 + SYS___LTOA = 0x8AC + SYS___LTOA_A = 0x8B2 + SYS___PUTCHAR_UNLOCKED_A = 0x8CC + SYS___PUTC_UNLOCKED_A = 0x8CB + SYS___PUTUTXLINE_A = 0x8C9 + SYS___RESET_EXCEPTION_HANDLER = 0x8E3 + SYS___REXEC_A = 0x8C4 + SYS___REXEC_AF_A = 0x8C5 + SYS___SET_EXCEPTION_HANDLER = 0x8E2 + SYS___SNPRINTF_A = 0x8CD + SYS___SUPERKILL = 0x8A4 + SYS___TCGETATTR_A = 0x8A1 + SYS___TCSETATTR_A = 0x8A2 + SYS___ULLTOA = 0x8AF + SYS___ULLTOA_A = 0x8B5 + SYS___ULTOA = 0x8AD + SYS___ULTOA_A = 0x8B3 + SYS___UTOA = 0x8AB + SYS___UTOA_A = 0x8B1 + SYS___VHM_EVENT = 0x8E4 + SYS___VSNPRINTF_A = 0x8CE + SYS_____GETENV_A = 0x8C3 + SYS_____UTMPXNAME_A = 0x8CA + SYS_CACOSH = 0x9A0 + SYS_CACOSHF = 0x9A3 + SYS_CACOSHL = 0x9A6 + SYS_CARG = 0x9A9 + SYS_CARGF = 0x9AC + SYS_CARGL = 0x9AF + SYS_CASIN = 0x9B2 + SYS_CASINF = 0x9B5 + SYS_CASINH = 0x9BB + SYS_CASINHF = 0x9BE + SYS_CASINHL = 0x9C1 + SYS_CASINL = 0x9B8 + SYS_CATAN = 0x9C4 + SYS_CATANF = 0x9C7 + SYS_CATANH = 0x9CD + SYS_CATANHF = 0x9D0 + SYS_CATANHL = 0x9D3 + SYS_CATANL = 0x9CA + SYS_CCOS = 0x9D6 + SYS_CCOSF = 0x9D9 + SYS_CCOSH = 0x9DF + SYS_CCOSHF = 0x9E2 + SYS_CCOSHL = 0x9E5 + SYS_CCOSL = 0x9DC + SYS_CEXP = 0x9E8 + SYS_CEXPF = 0x9EB + SYS_CEXPL = 0x9EE + SYS_CIMAG = 0x9F1 + SYS_CIMAGF = 0x9F4 + SYS_CIMAGL = 0x9F7 + SYS_CLOGF = 0x9FD + SYS_MEMCHR = 0x09B + SYS_MEMCMP = 0x09A + SYS_STRCOLL = 0x09C + SYS_STRNCMP = 0x09D + SYS_STRRCHR = 0x09F + SYS_STRXFRM = 0x09E + SYS___CACOSHF_B = 0x9A4 + SYS___CACOSHF_H = 0x9A5 + SYS___CACOSHL_B = 0x9A7 + SYS___CACOSHL_H = 0x9A8 + SYS___CACOSH_B = 0x9A1 + SYS___CACOSH_H = 0x9A2 + SYS___CARGF_B = 0x9AD + SYS___CARGF_H = 0x9AE + SYS___CARGL_B = 0x9B0 + SYS___CARGL_H = 0x9B1 + 
SYS___CARG_B = 0x9AA + SYS___CARG_H = 0x9AB + SYS___CASINF_B = 0x9B6 + SYS___CASINF_H = 0x9B7 + SYS___CASINHF_B = 0x9BF + SYS___CASINHF_H = 0x9C0 + SYS___CASINHL_B = 0x9C2 + SYS___CASINHL_H = 0x9C3 + SYS___CASINH_B = 0x9BC + SYS___CASINH_H = 0x9BD + SYS___CASINL_B = 0x9B9 + SYS___CASINL_H = 0x9BA + SYS___CASIN_B = 0x9B3 + SYS___CASIN_H = 0x9B4 + SYS___CATANF_B = 0x9C8 + SYS___CATANF_H = 0x9C9 + SYS___CATANHF_B = 0x9D1 + SYS___CATANHF_H = 0x9D2 + SYS___CATANHL_B = 0x9D4 + SYS___CATANHL_H = 0x9D5 + SYS___CATANH_B = 0x9CE + SYS___CATANH_H = 0x9CF + SYS___CATANL_B = 0x9CB + SYS___CATANL_H = 0x9CC + SYS___CATAN_B = 0x9C5 + SYS___CATAN_H = 0x9C6 + SYS___CCOSF_B = 0x9DA + SYS___CCOSF_H = 0x9DB + SYS___CCOSHF_B = 0x9E3 + SYS___CCOSHF_H = 0x9E4 + SYS___CCOSHL_B = 0x9E6 + SYS___CCOSHL_H = 0x9E7 + SYS___CCOSH_B = 0x9E0 + SYS___CCOSH_H = 0x9E1 + SYS___CCOSL_B = 0x9DD + SYS___CCOSL_H = 0x9DE + SYS___CCOS_B = 0x9D7 + SYS___CCOS_H = 0x9D8 + SYS___CEXPF_B = 0x9EC + SYS___CEXPF_H = 0x9ED + SYS___CEXPL_B = 0x9EF + SYS___CEXPL_H = 0x9F0 + SYS___CEXP_B = 0x9E9 + SYS___CEXP_H = 0x9EA + SYS___CIMAGF_B = 0x9F5 + SYS___CIMAGF_H = 0x9F6 + SYS___CIMAGL_B = 0x9F8 + SYS___CIMAGL_H = 0x9F9 + SYS___CIMAG_B = 0x9F2 + SYS___CIMAG_H = 0x9F3 + SYS___CLOG = 0x9FA + SYS___CLOGF_B = 0x9FE + SYS___CLOGF_H = 0x9FF + SYS___CLOG_B = 0x9FB + SYS___CLOG_H = 0x9FC + SYS_ISWCTYPE = 0x10C + SYS_ISWXDIGI = 0x10A + SYS_ISWXDIGIT = 0x10A + SYS_MBSINIT = 0x10F + SYS_TOWLOWER = 0x10D + SYS_TOWUPPER = 0x10E + SYS_WCTYPE = 0x10B + SYS_WCSSTR = 0x11B + SYS___RPMTCH = 0x11A + SYS_WCSTOD = 0x12E + SYS_WCSTOK = 0x12C + SYS_WCSTOL = 0x12D + SYS_WCSTOUL = 0x12F + SYS_FGETWC = 0x13C + SYS_FGETWS = 0x13D + SYS_FPUTWC = 0x13E + SYS_FPUTWS = 0x13F + SYS_REGERROR = 0x13B + SYS_REGFREE = 0x13A + SYS_COLLEQUIV = 0x14F + SYS_COLLTOSTR = 0x14E + SYS_ISMCCOLLEL = 0x14C + SYS_STRTOCOLL = 0x14D + SYS_DLLFREE = 0x16F + SYS_DLLQUERYFN = 0x16D + SYS_DLLQUERYVAR = 0x16E + SYS_GETMCCOLL = 0x16A + SYS_GETWMCCOLL = 0x16B + SYS___ERR2AD = 0x16C + SYS_CFSETOSPEED = 0x17A + SYS_CHDIR = 0x17B + SYS_CHMOD = 0x17C + SYS_CHOWN = 0x17D + SYS_CLOSE = 0x17E + SYS_CLOSEDIR = 0x17F + SYS_LOG = 0x017 + SYS_COSH = 0x018 + SYS_FCHMOD = 0x18A + SYS_FCHOWN = 0x18B + SYS_FCNTL = 0x18C + SYS_FILENO = 0x18D + SYS_FORK = 0x18E + SYS_FPATHCONF = 0x18F + SYS_GETLOGIN = 0x19A + SYS_GETPGRP = 0x19C + SYS_GETPID = 0x19D + SYS_GETPPID = 0x19E + SYS_GETPWNAM = 0x19F + SYS_TANH = 0x019 + SYS_W_GETMNTENT = 0x19B + SYS_POW = 0x020 + SYS_PTHREAD_SELF = 0x20A + SYS_PTHREAD_SETINTR = 0x20B + SYS_PTHREAD_SETINTRTYPE = 0x20C + SYS_PTHREAD_SETSPECIFIC = 0x20D + SYS_PTHREAD_TESTINTR = 0x20E + SYS_PTHREAD_YIELD = 0x20F + SYS_SQRT = 0x021 + SYS_FLOOR = 0x022 + SYS_J1 = 0x023 + SYS_WCSPBRK = 0x23F + SYS_BSEARCH = 0x24C + SYS_FABS = 0x024 + SYS_GETENV = 0x24A + SYS_LDIV = 0x24D + SYS_SYSTEM = 0x24B + SYS_FMOD = 0x025 + SYS___RETHROW = 0x25F + SYS___THROW = 0x25E + SYS_J0 = 0x026 + SYS_PUTENV = 0x26A + SYS___GETENV = 0x26F + SYS_SEMCTL = 0x27A + SYS_SEMGET = 0x27B + SYS_SEMOP = 0x27C + SYS_SHMAT = 0x27D + SYS_SHMCTL = 0x27E + SYS_SHMDT = 0x27F + SYS_YN = 0x027 + SYS_JN = 0x028 + SYS_SIGALTSTACK = 0x28A + SYS_SIGHOLD = 0x28B + SYS_SIGIGNORE = 0x28C + SYS_SIGINTERRUPT = 0x28D + SYS_SIGPAUSE = 0x28E + SYS_SIGRELSE = 0x28F + SYS_GETOPT = 0x29A + SYS_GETSUBOPT = 0x29D + SYS_LCHOWN = 0x29B + SYS_SETPGRP = 0x29E + SYS_TRUNCATE = 0x29C + SYS_Y0 = 0x029 + SYS___GDERR = 0x29F + SYS_ISALPHA = 0x030 + SYS_VFORK = 0x30F + SYS__LONGJMP = 0x30D + SYS__SETJMP = 0x30E + SYS_GLOB = 0x31A + SYS_GLOBFREE = 0x31B + SYS_ISALNUM = 
0x031 + SYS_PUTW = 0x31C + SYS_SEEKDIR = 0x31D + SYS_TELLDIR = 0x31E + SYS_TEMPNAM = 0x31F + SYS_GETTIMEOFDAY_R = 0x32E + SYS_ISLOWER = 0x032 + SYS_LGAMMA = 0x32C + SYS_REMAINDER = 0x32A + SYS_SCALB = 0x32B + SYS_SYNC = 0x32F + SYS_TTYSLOT = 0x32D + SYS_ENDPROTOENT = 0x33A + SYS_ENDSERVENT = 0x33B + SYS_GETHOSTBYADDR = 0x33D + SYS_GETHOSTBYADDR_R = 0x33C + SYS_GETHOSTBYNAME = 0x33F + SYS_GETHOSTBYNAME_R = 0x33E + SYS_ISCNTRL = 0x033 + SYS_GETSERVBYNAME = 0x34A + SYS_GETSERVBYPORT = 0x34B + SYS_GETSERVENT = 0x34C + SYS_GETSOCKNAME = 0x34D + SYS_GETSOCKOPT = 0x34E + SYS_INET_ADDR = 0x34F + SYS_ISDIGIT = 0x034 + SYS_ISGRAPH = 0x035 + SYS_SELECT = 0x35B + SYS_SELECTEX = 0x35C + SYS_SEND = 0x35D + SYS_SENDTO = 0x35F + SYS_CHROOT = 0x36A + SYS_ISNAN = 0x36D + SYS_ISUPPER = 0x036 + SYS_ULIMIT = 0x36C + SYS_UTIMES = 0x36E + SYS_W_STATVFS = 0x36B + SYS___H_ERRNO = 0x36F + SYS_GRANTPT = 0x37A + SYS_ISPRINT = 0x037 + SYS_TCGETSID = 0x37C + SYS_UNLOCKPT = 0x37B + SYS___TCGETCP = 0x37D + SYS___TCSETCP = 0x37E + SYS___TCSETTABLES = 0x37F + SYS_ISPUNCT = 0x038 + SYS_NLIST = 0x38C + SYS___IPDBCS = 0x38D + SYS___IPDSPX = 0x38E + SYS___IPMSGC = 0x38F + SYS___STHOSTENT = 0x38B + SYS___STSERVENT = 0x38A + SYS_ISSPACE = 0x039 + SYS_COS = 0x040 + SYS_T_ALLOC = 0x40A + SYS_T_BIND = 0x40B + SYS_T_CLOSE = 0x40C + SYS_T_CONNECT = 0x40D + SYS_T_ERROR = 0x40E + SYS_T_FREE = 0x40F + SYS_TAN = 0x041 + SYS_T_RCVREL = 0x41A + SYS_T_RCVUDATA = 0x41B + SYS_T_RCVUDERR = 0x41C + SYS_T_SND = 0x41D + SYS_T_SNDDIS = 0x41E + SYS_T_SNDREL = 0x41F + SYS_GETPMSG = 0x42A + SYS_ISASTREAM = 0x42B + SYS_PUTMSG = 0x42C + SYS_PUTPMSG = 0x42D + SYS_SINH = 0x042 + SYS___ISPOSIXON = 0x42E + SYS___OPENMVSREL = 0x42F + SYS_ACOS = 0x043 + SYS_ATAN = 0x044 + SYS_ATAN2 = 0x045 + SYS_FTELL = 0x046 + SYS_FGETPOS = 0x047 + SYS_SOCK_DEBUG = 0x47A + SYS_SOCK_DO_TESTSTOR = 0x47D + SYS_TAKESOCKET = 0x47E + SYS___SERVER_INIT = 0x47F + SYS_FSEEK = 0x048 + SYS___IPHOST = 0x48B + SYS___IPNODE = 0x48C + SYS___SERVER_CLASSIFY_CREATE = 0x48D + SYS___SERVER_CLASSIFY_DESTROY = 0x48E + SYS___SERVER_CLASSIFY_RESET = 0x48F + SYS___SMF_RECORD = 0x48A + SYS_FSETPOS = 0x049 + SYS___FNWSA = 0x49B + SYS___SPAWN2 = 0x49D + SYS___SPAWNP2 = 0x49E + SYS_ATOF = 0x050 + SYS_PTHREAD_MUTEXATTR_GETPSHARED = 0x50A + SYS_PTHREAD_MUTEXATTR_SETPSHARED = 0x50B + SYS_PTHREAD_RWLOCK_DESTROY = 0x50C + SYS_PTHREAD_RWLOCK_INIT = 0x50D + SYS_PTHREAD_RWLOCK_RDLOCK = 0x50E + SYS_PTHREAD_RWLOCK_TRYRDLOCK = 0x50F + SYS_ATOI = 0x051 + SYS___FP_CLASS = 0x51D + SYS___FP_CLR_FLAG = 0x51A + SYS___FP_FINITE = 0x51E + SYS___FP_ISNAN = 0x51F + SYS___FP_RAISE_XCP = 0x51C + SYS___FP_READ_FLAG = 0x51B + SYS_RAND = 0x052 + SYS_SIGTIMEDWAIT = 0x52D + SYS_SIGWAITINFO = 0x52E + SYS___CHKBFP = 0x52F + SYS___FPC_RS = 0x52C + SYS___FPC_RW = 0x52A + SYS___FPC_SM = 0x52B + SYS_STRTOD = 0x053 + SYS_STRTOL = 0x054 + SYS_STRTOUL = 0x055 + SYS_MALLOC = 0x056 + SYS_SRAND = 0x057 + SYS_CALLOC = 0x058 + SYS_FREE = 0x059 + SYS___OSENV = 0x59F + SYS___W_PIOCTL = 0x59E + SYS_LONGJMP = 0x060 + SYS___FLOORF_B = 0x60A + SYS___FLOORL_B = 0x60B + SYS___FREXPF_B = 0x60C + SYS___FREXPL_B = 0x60D + SYS___LDEXPF_B = 0x60E + SYS___LDEXPL_B = 0x60F + SYS_SIGNAL = 0x061 + SYS___ATAN2F_B = 0x61A + SYS___ATAN2L_B = 0x61B + SYS___COSHF_B = 0x61C + SYS___COSHL_B = 0x61D + SYS___EXPF_B = 0x61E + SYS___EXPL_B = 0x61F + SYS_TMPNAM = 0x062 + SYS___ABSF_B = 0x62A + SYS___ABSL_B = 0x62C + SYS___ABS_B = 0x62B + SYS___FMODF_B = 0x62D + SYS___FMODL_B = 0x62E + SYS___MODFF_B = 0x62F + SYS_ATANL = 0x63A + SYS_CEILF = 0x63B + SYS_CEILL = 0x63C + 
SYS_COSF = 0x63D + SYS_COSHF = 0x63F + SYS_COSL = 0x63E + SYS_REMOVE = 0x063 + SYS_POWL = 0x64A + SYS_RENAME = 0x064 + SYS_SINF = 0x64B + SYS_SINHF = 0x64F + SYS_SINL = 0x64C + SYS_SQRTF = 0x64D + SYS_SQRTL = 0x64E + SYS_BTOWC = 0x65F + SYS_FREXPL = 0x65A + SYS_LDEXPF = 0x65B + SYS_LDEXPL = 0x65C + SYS_MODFF = 0x65D + SYS_MODFL = 0x65E + SYS_TMPFILE = 0x065 + SYS_FREOPEN = 0x066 + SYS___CHARMAP_INIT_A = 0x66E + SYS___GETHOSTBYADDR_R_A = 0x66C + SYS___GETHOSTBYNAME_A = 0x66A + SYS___GETHOSTBYNAME_R_A = 0x66D + SYS___MBLEN_A = 0x66F + SYS___RES_INIT_A = 0x66B + SYS_FCLOSE = 0x067 + SYS___GETGRGID_R_A = 0x67D + SYS___WCSTOMBS_A = 0x67A + SYS___WCSTOMBS_STD_A = 0x67B + SYS___WCSWIDTH_A = 0x67C + SYS___WCSWIDTH_ASIA = 0x67F + SYS___WCSWIDTH_STD_A = 0x67E + SYS_FFLUSH = 0x068 + SYS___GETLOGIN_R_A = 0x68E + SYS___GETPWNAM_R_A = 0x68C + SYS___GETPWUID_R_A = 0x68D + SYS___TTYNAME_R_A = 0x68F + SYS___WCWIDTH_ASIA = 0x68B + SYS___WCWIDTH_STD_A = 0x68A + SYS_FOPEN = 0x069 + SYS___REGEXEC_A = 0x69A + SYS___REGEXEC_STD_A = 0x69B + SYS___REGFREE_A = 0x69C + SYS___REGFREE_STD_A = 0x69D + SYS___STRCOLL_A = 0x69E + SYS___STRCOLL_C_A = 0x69F + SYS_SCANF = 0x070 + SYS___A64L_A = 0x70C + SYS___ECVT_A = 0x70D + SYS___FCVT_A = 0x70E + SYS___GCVT_A = 0x70F + SYS___STRTOUL_A = 0x70A + SYS_____AE_CORRESTBL_QUERY_A = 0x70B + SYS_SPRINTF = 0x071 + SYS___ACCESS_A = 0x71F + SYS___CATOPEN_A = 0x71E + SYS___GETOPT_A = 0x71D + SYS___REALPATH_A = 0x71A + SYS___SETENV_A = 0x71B + SYS___SYSTEM_A = 0x71C + SYS_FGETC = 0x072 + SYS___GAI_STRERROR_A = 0x72F + SYS___RMDIR_A = 0x72A + SYS___STATVFS_A = 0x72B + SYS___SYMLINK_A = 0x72C + SYS___TRUNCATE_A = 0x72D + SYS___UNLINK_A = 0x72E + SYS_VFPRINTF = 0x073 + SYS___ISSPACE_A = 0x73A + SYS___ISUPPER_A = 0x73B + SYS___ISWALNUM_A = 0x73F + SYS___ISXDIGIT_A = 0x73C + SYS___TOLOWER_A = 0x73D + SYS___TOUPPER_A = 0x73E + SYS_VPRINTF = 0x074 + SYS___CONFSTR_A = 0x74B + SYS___FDOPEN_A = 0x74E + SYS___FLDATA_A = 0x74F + SYS___FTOK_A = 0x74C + SYS___ISWXDIGIT_A = 0x74A + SYS___MKTEMP_A = 0x74D + SYS_VSPRINTF = 0x075 + SYS___GETGRGID_A = 0x75A + SYS___GETGRNAM_A = 0x75B + SYS___GETGROUPSBYNAME_A = 0x75C + SYS___GETHOSTENT_A = 0x75D + SYS___GETHOSTNAME_A = 0x75E + SYS___GETLOGIN_A = 0x75F + SYS_GETC = 0x076 + SYS___CREATEWORKUNIT_A = 0x76A + SYS___CTERMID_A = 0x76B + SYS___FMTMSG_A = 0x76C + SYS___INITGROUPS_A = 0x76D + SYS___MSGRCV_A = 0x76F + SYS_____LOGIN_A = 0x76E + SYS_FGETS = 0x077 + SYS___STRCASECMP_A = 0x77B + SYS___STRNCASECMP_A = 0x77C + SYS___TTYNAME_A = 0x77D + SYS___UNAME_A = 0x77E + SYS___UTIMES_A = 0x77F + SYS_____SERVER_PWU_A = 0x77A + SYS_FPUTC = 0x078 + SYS___CREAT_O_A = 0x78E + SYS___ENVNA = 0x78F + SYS___FREAD_A = 0x78A + SYS___FWRITE_A = 0x78B + SYS___ISASCII = 0x78D + SYS___OPEN_O_A = 0x78C + SYS_FPUTS = 0x079 + SYS___ASCTIME_A = 0x79C + SYS___CTIME_A = 0x79D + SYS___GETDATE_A = 0x79E + SYS___GETSERVBYPORT_A = 0x79A + SYS___GETSERVENT_A = 0x79B + SYS___TZSET_A = 0x79F + SYS_ACL_FROM_TEXT = 0x80C + SYS_ACL_SET_FD = 0x80A + SYS_ACL_SET_FILE = 0x80B + SYS_ACL_SORT = 0x80E + SYS_ACL_TO_TEXT = 0x80D + SYS_UNGETC = 0x080 + SYS___SHUTDOWN_REGISTRATION = 0x80F + SYS_FREAD = 0x081 + SYS_FREEADDRINFO = 0x81A + SYS_GAI_STRERROR = 0x81B + SYS_REXEC_AF = 0x81C + SYS___DYNALLOC_A = 0x81F + SYS___POE = 0x81D + SYS_WCSTOMBS = 0x082 + SYS___INET_ADDR_A = 0x82F + SYS___NLIST_A = 0x82A + SYS_____TCGETCP_A = 0x82B + SYS_____TCSETCP_A = 0x82C + SYS_____W_PIOCTL_A = 0x82E + SYS_MBTOWC = 0x083 + SYS___CABEND = 0x83D + SYS___LE_CIB_GET = 0x83E + SYS___RECVMSG_A = 0x83B + SYS___SENDMSG_A = 0x83A 
+ SYS___SET_LAA_FOR_JIT = 0x83F + SYS_____LCHATTR_A = 0x83C + SYS_WCTOMB = 0x084 + SYS___CBRTL_B = 0x84A + SYS___COPYSIGNF_B = 0x84B + SYS___COPYSIGNL_B = 0x84C + SYS___COTANF_B = 0x84D + SYS___COTANL_B = 0x84F + SYS___COTAN_B = 0x84E + SYS_MBSTOWCS = 0x085 + SYS___LOG1PL_B = 0x85A + SYS___LOG2F_B = 0x85B + SYS___LOG2L_B = 0x85D + SYS___LOG2_B = 0x85C + SYS___REMAINDERF_B = 0x85E + SYS___REMAINDERL_B = 0x85F + SYS_ACOSHF = 0x86E + SYS_ACOSHL = 0x86F + SYS_WCSCPY = 0x086 + SYS___ERFCF_B = 0x86D + SYS___ERFF_B = 0x86C + SYS___LROUNDF_B = 0x86A + SYS___LROUND_B = 0x86B + SYS_COTANL = 0x87A + SYS_EXP2F = 0x87B + SYS_EXP2L = 0x87C + SYS_EXPM1F = 0x87D + SYS_EXPM1L = 0x87E + SYS_FDIMF = 0x87F + SYS_WCSCAT = 0x087 + SYS___COTANL = 0x87A + SYS_REMAINDERF = 0x88A + SYS_REMAINDERL = 0x88B + SYS_REMAINDF = 0x88A + SYS_REMAINDL = 0x88B + SYS_REMQUO = 0x88D + SYS_REMQUOF = 0x88C + SYS_REMQUOL = 0x88E + SYS_TGAMMAF = 0x88F + SYS_WCSCHR = 0x088 + SYS_ERFCF = 0x89B + SYS_ERFCL = 0x89C + SYS_ERFL = 0x89A + SYS_EXP2 = 0x89E + SYS_WCSCMP = 0x089 + SYS___EXP2_B = 0x89D + SYS___FAR_JUMP = 0x89F + SYS_ABS = 0x090 + SYS___ERFCL_H = 0x90A + SYS___EXPF_H = 0x90C + SYS___EXPL_H = 0x90D + SYS___EXPM1_H = 0x90E + SYS___EXP_H = 0x90B + SYS___FDIM_H = 0x90F + SYS_DIV = 0x091 + SYS___LOG2F_H = 0x91F + SYS___LOG2_H = 0x91E + SYS___LOGB_H = 0x91D + SYS___LOGF_H = 0x91B + SYS___LOGL_H = 0x91C + SYS___LOG_H = 0x91A + SYS_LABS = 0x092 + SYS___POWL_H = 0x92A + SYS___REMAINDER_H = 0x92B + SYS___RINT_H = 0x92C + SYS___SCALB_H = 0x92D + SYS___SINF_H = 0x92F + SYS___SIN_H = 0x92E + SYS_STRNCPY = 0x093 + SYS___TANHF_H = 0x93B + SYS___TANHL_H = 0x93C + SYS___TANH_H = 0x93A + SYS___TGAMMAF_H = 0x93E + SYS___TGAMMA_H = 0x93D + SYS___TRUNC_H = 0x93F + SYS_MEMCPY = 0x094 + SYS_VFWSCANF = 0x94A + SYS_VSWSCANF = 0x94E + SYS_VWSCANF = 0x94C + SYS_INET6_RTH_ADD = 0x95D + SYS_INET6_RTH_INIT = 0x95C + SYS_INET6_RTH_REVERSE = 0x95E + SYS_INET6_RTH_SEGMENTS = 0x95F + SYS_INET6_RTH_SPACE = 0x95B + SYS_MEMMOVE = 0x095 + SYS_WCSTOLD = 0x95A + SYS_STRCPY = 0x096 + SYS_STRCMP = 0x097 + SYS_CABS = 0x98E + SYS_STRCAT = 0x098 + SYS___CABS_B = 0x98F + SYS___POW_II = 0x98A + SYS___POW_II_B = 0x98B + SYS___POW_II_H = 0x98C + SYS_CACOSF = 0x99A + SYS_CACOSL = 0x99D + SYS_STRNCAT = 0x099 + SYS___CACOSF_B = 0x99B + SYS___CACOSF_H = 0x99C + SYS___CACOSL_B = 0x99E + SYS___CACOSL_H = 0x99F + SYS_ISWALPHA = 0x100 + SYS_ISWBLANK = 0x101 + SYS___ISWBLK = 0x101 + SYS_ISWCNTRL = 0x102 + SYS_ISWDIGIT = 0x103 + SYS_ISWGRAPH = 0x104 + SYS_ISWLOWER = 0x105 + SYS_ISWPRINT = 0x106 + SYS_ISWPUNCT = 0x107 + SYS_ISWSPACE = 0x108 + SYS_ISWUPPER = 0x109 + SYS_WCTOB = 0x110 + SYS_MBRLEN = 0x111 + SYS_MBRTOWC = 0x112 + SYS_MBSRTOWC = 0x113 + SYS_MBSRTOWCS = 0x113 + SYS_WCRTOMB = 0x114 + SYS_WCSRTOMB = 0x115 + SYS_WCSRTOMBS = 0x115 + SYS___CSID = 0x116 + SYS___WCSID = 0x117 + SYS_STRPTIME = 0x118 + SYS___STRPTM = 0x118 + SYS_STRFMON = 0x119 + SYS_WCSCOLL = 0x130 + SYS_WCSXFRM = 0x131 + SYS_WCSWIDTH = 0x132 + SYS_WCWIDTH = 0x133 + SYS_WCSFTIME = 0x134 + SYS_SWPRINTF = 0x135 + SYS_VSWPRINT = 0x136 + SYS_VSWPRINTF = 0x136 + SYS_SWSCANF = 0x137 + SYS_REGCOMP = 0x138 + SYS_REGEXEC = 0x139 + SYS_GETWC = 0x140 + SYS_GETWCHAR = 0x141 + SYS_PUTWC = 0x142 + SYS_PUTWCHAR = 0x143 + SYS_UNGETWC = 0x144 + SYS_ICONV_OPEN = 0x145 + SYS_ICONV = 0x146 + SYS_ICONV_CLOSE = 0x147 + SYS_COLLRANGE = 0x150 + SYS_CCLASS = 0x151 + SYS_COLLORDER = 0x152 + SYS___DEMANGLE = 0x154 + SYS_FDOPEN = 0x155 + SYS___ERRNO = 0x156 + SYS___ERRNO2 = 0x157 + SYS___TERROR = 0x158 + SYS_MAXCOLL = 0x169 + SYS_DLLLOAD = 
0x170 + SYS__EXIT = 0x174 + SYS_ACCESS = 0x175 + SYS_ALARM = 0x176 + SYS_CFGETISPEED = 0x177 + SYS_CFGETOSPEED = 0x178 + SYS_CFSETISPEED = 0x179 + SYS_CREAT = 0x180 + SYS_CTERMID = 0x181 + SYS_DUP = 0x182 + SYS_DUP2 = 0x183 + SYS_EXECL = 0x184 + SYS_EXECLE = 0x185 + SYS_EXECLP = 0x186 + SYS_EXECV = 0x187 + SYS_EXECVE = 0x188 + SYS_EXECVP = 0x189 + SYS_FSTAT = 0x190 + SYS_FSYNC = 0x191 + SYS_FTRUNCATE = 0x192 + SYS_GETCWD = 0x193 + SYS_GETEGID = 0x194 + SYS_GETEUID = 0x195 + SYS_GETGID = 0x196 + SYS_GETGRGID = 0x197 + SYS_GETGRNAM = 0x198 + SYS_GETGROUPS = 0x199 + SYS_PTHREAD_MUTEXATTR_DESTROY = 0x200 + SYS_PTHREAD_MUTEXATTR_SETKIND_NP = 0x201 + SYS_PTHREAD_MUTEXATTR_GETKIND_NP = 0x202 + SYS_PTHREAD_MUTEX_INIT = 0x203 + SYS_PTHREAD_MUTEX_DESTROY = 0x204 + SYS_PTHREAD_MUTEX_LOCK = 0x205 + SYS_PTHREAD_MUTEX_TRYLOCK = 0x206 + SYS_PTHREAD_MUTEX_UNLOCK = 0x207 + SYS_PTHREAD_ONCE = 0x209 + SYS_TW_OPEN = 0x210 + SYS_TW_FCNTL = 0x211 + SYS_PTHREAD_JOIN_D4_NP = 0x212 + SYS_PTHREAD_CONDATTR_SETKIND_NP = 0x213 + SYS_PTHREAD_CONDATTR_GETKIND_NP = 0x214 + SYS_EXTLINK_NP = 0x215 + SYS___PASSWD = 0x216 + SYS_SETGROUPS = 0x217 + SYS_INITGROUPS = 0x218 + SYS_WCSRCHR = 0x240 + SYS_SVC99 = 0x241 + SYS___SVC99 = 0x241 + SYS_WCSWCS = 0x242 + SYS_LOCALECO = 0x243 + SYS_LOCALECONV = 0x243 + SYS___LIBREL = 0x244 + SYS_RELEASE = 0x245 + SYS___RLSE = 0x245 + SYS_FLOCATE = 0x246 + SYS___FLOCT = 0x246 + SYS_FDELREC = 0x247 + SYS___FDLREC = 0x247 + SYS_FETCH = 0x248 + SYS___FETCH = 0x248 + SYS_QSORT = 0x249 + SYS___CLEANUPCATCH = 0x260 + SYS___CATCHMATCH = 0x261 + SYS___CLEAN2UPCATCH = 0x262 + SYS_GETPRIORITY = 0x270 + SYS_NICE = 0x271 + SYS_SETPRIORITY = 0x272 + SYS_GETITIMER = 0x273 + SYS_SETITIMER = 0x274 + SYS_MSGCTL = 0x275 + SYS_MSGGET = 0x276 + SYS_MSGRCV = 0x277 + SYS_MSGSND = 0x278 + SYS_MSGXRCV = 0x279 + SYS___MSGXR = 0x279 + SYS_SHMGET = 0x280 + SYS___GETIPC = 0x281 + SYS_SETGRENT = 0x282 + SYS_GETGRENT = 0x283 + SYS_ENDGRENT = 0x284 + SYS_SETPWENT = 0x285 + SYS_GETPWENT = 0x286 + SYS_ENDPWENT = 0x287 + SYS_BSD_SIGNAL = 0x288 + SYS_KILLPG = 0x289 + SYS_SIGSET = 0x290 + SYS_SIGSTACK = 0x291 + SYS_GETRLIMIT = 0x292 + SYS_SETRLIMIT = 0x293 + SYS_GETRUSAGE = 0x294 + SYS_MMAP = 0x295 + SYS_MPROTECT = 0x296 + SYS_MSYNC = 0x297 + SYS_MUNMAP = 0x298 + SYS_CONFSTR = 0x299 + SYS___NDMTRM = 0x300 + SYS_FTOK = 0x301 + SYS_BASENAME = 0x302 + SYS_DIRNAME = 0x303 + SYS_GETDTABLESIZE = 0x304 + SYS_MKSTEMP = 0x305 + SYS_MKTEMP = 0x306 + SYS_NFTW = 0x307 + SYS_GETWD = 0x308 + SYS_LOCKF = 0x309 + SYS_WORDEXP = 0x310 + SYS_WORDFREE = 0x311 + SYS_GETPGID = 0x312 + SYS_GETSID = 0x313 + SYS___UTMPXNAME = 0x314 + SYS_CUSERID = 0x315 + SYS_GETPASS = 0x316 + SYS_FNMATCH = 0x317 + SYS_FTW = 0x318 + SYS_GETW = 0x319 + SYS_ACOSH = 0x320 + SYS_ASINH = 0x321 + SYS_ATANH = 0x322 + SYS_CBRT = 0x323 + SYS_EXPM1 = 0x324 + SYS_ILOGB = 0x325 + SYS_LOGB = 0x326 + SYS_LOG1P = 0x327 + SYS_NEXTAFTER = 0x328 + SYS_RINT = 0x329 + SYS_SPAWN = 0x330 + SYS_SPAWNP = 0x331 + SYS_GETLOGIN_UU = 0x332 + SYS_ECVT = 0x333 + SYS_FCVT = 0x334 + SYS_GCVT = 0x335 + SYS_ACCEPT = 0x336 + SYS_BIND = 0x337 + SYS_CONNECT = 0x338 + SYS_ENDHOSTENT = 0x339 + SYS_GETHOSTENT = 0x340 + SYS_GETHOSTID = 0x341 + SYS_GETHOSTNAME = 0x342 + SYS_GETNETBYADDR = 0x343 + SYS_GETNETBYNAME = 0x344 + SYS_GETNETENT = 0x345 + SYS_GETPEERNAME = 0x346 + SYS_GETPROTOBYNAME = 0x347 + SYS_GETPROTOBYNUMBER = 0x348 + SYS_GETPROTOENT = 0x349 + SYS_INET_LNAOF = 0x350 + SYS_INET_MAKEADDR = 0x351 + SYS_INET_NETOF = 0x352 + SYS_INET_NETWORK = 0x353 + SYS_INET_NTOA = 0x354 + SYS_IOCTL = 0x355 + 
SYS_LISTEN = 0x356 + SYS_READV = 0x357 + SYS_RECV = 0x358 + SYS_RECVFROM = 0x359 + SYS_SETHOSTENT = 0x360 + SYS_SETNETENT = 0x361 + SYS_SETPEER = 0x362 + SYS_SETPROTOENT = 0x363 + SYS_SETSERVENT = 0x364 + SYS_SETSOCKOPT = 0x365 + SYS_SHUTDOWN = 0x366 + SYS_SOCKET = 0x367 + SYS_SOCKETPAIR = 0x368 + SYS_WRITEV = 0x369 + SYS_ENDNETENT = 0x370 + SYS_CLOSELOG = 0x371 + SYS_OPENLOG = 0x372 + SYS_SETLOGMASK = 0x373 + SYS_SYSLOG = 0x374 + SYS_PTSNAME = 0x375 + SYS_SETREUID = 0x376 + SYS_SETREGID = 0x377 + SYS_REALPATH = 0x378 + SYS___SIGNGAM = 0x379 + SYS_POLL = 0x380 + SYS_REXEC = 0x381 + SYS___ISASCII2 = 0x382 + SYS___TOASCII2 = 0x383 + SYS_CHPRIORITY = 0x384 + SYS_PTHREAD_ATTR_SETSYNCTYPE_NP = 0x385 + SYS_PTHREAD_ATTR_GETSYNCTYPE_NP = 0x386 + SYS_PTHREAD_SET_LIMIT_NP = 0x387 + SYS___STNETENT = 0x388 + SYS___STPROTOENT = 0x389 + SYS___SELECT1 = 0x390 + SYS_PTHREAD_SECURITY_NP = 0x391 + SYS___CHECK_RESOURCE_AUTH_NP = 0x392 + SYS___CONVERT_ID_NP = 0x393 + SYS___OPENVMREL = 0x394 + SYS_WMEMCHR = 0x395 + SYS_WMEMCMP = 0x396 + SYS_WMEMCPY = 0x397 + SYS_WMEMMOVE = 0x398 + SYS_WMEMSET = 0x399 + SYS___FPUTWC = 0x400 + SYS___PUTWC = 0x401 + SYS___PWCHAR = 0x402 + SYS___WCSFTM = 0x403 + SYS___WCSTOK = 0x404 + SYS___WCWDTH = 0x405 + SYS_T_ACCEPT = 0x409 + SYS_T_GETINFO = 0x410 + SYS_T_GETPROTADDR = 0x411 + SYS_T_GETSTATE = 0x412 + SYS_T_LISTEN = 0x413 + SYS_T_LOOK = 0x414 + SYS_T_OPEN = 0x415 + SYS_T_OPTMGMT = 0x416 + SYS_T_RCV = 0x417 + SYS_T_RCVCONNECT = 0x418 + SYS_T_RCVDIS = 0x419 + SYS_T_SNDUDATA = 0x420 + SYS_T_STRERROR = 0x421 + SYS_T_SYNC = 0x422 + SYS_T_UNBIND = 0x423 + SYS___T_ERRNO = 0x424 + SYS___RECVMSG2 = 0x425 + SYS___SENDMSG2 = 0x426 + SYS_FATTACH = 0x427 + SYS_FDETACH = 0x428 + SYS_GETMSG = 0x429 + SYS_GETCONTEXT = 0x430 + SYS_SETCONTEXT = 0x431 + SYS_MAKECONTEXT = 0x432 + SYS_SWAPCONTEXT = 0x433 + SYS_PTHREAD_GETSPECIFIC_D8_NP = 0x434 + SYS_GETCLIENTID = 0x470 + SYS___GETCLIENTID = 0x471 + SYS_GETSTABLESIZE = 0x472 + SYS_GETIBMOPT = 0x473 + SYS_GETIBMSOCKOPT = 0x474 + SYS_GIVESOCKET = 0x475 + SYS_IBMSFLUSH = 0x476 + SYS_MAXDESC = 0x477 + SYS_SETIBMOPT = 0x478 + SYS_SETIBMSOCKOPT = 0x479 + SYS___SERVER_PWU = 0x480 + SYS_PTHREAD_TAG_NP = 0x481 + SYS___CONSOLE = 0x482 + SYS___WSINIT = 0x483 + SYS___IPTCPN = 0x489 + SYS___SERVER_CLASSIFY = 0x490 + SYS___HEAPRPT = 0x496 + SYS___ISBFP = 0x500 + SYS___FP_CAST = 0x501 + SYS___CERTIFICATE = 0x502 + SYS_SEND_FILE = 0x503 + SYS_AIO_CANCEL = 0x504 + SYS_AIO_ERROR = 0x505 + SYS_AIO_READ = 0x506 + SYS_AIO_RETURN = 0x507 + SYS_AIO_SUSPEND = 0x508 + SYS_AIO_WRITE = 0x509 + SYS_PTHREAD_RWLOCK_TRYWRLOCK = 0x510 + SYS_PTHREAD_RWLOCK_UNLOCK = 0x511 + SYS_PTHREAD_RWLOCK_WRLOCK = 0x512 + SYS_PTHREAD_RWLOCKATTR_GETPSHARED = 0x513 + SYS_PTHREAD_RWLOCKATTR_SETPSHARED = 0x514 + SYS_PTHREAD_RWLOCKATTR_INIT = 0x515 + SYS_PTHREAD_RWLOCKATTR_DESTROY = 0x516 + SYS___CTTBL = 0x517 + SYS_PTHREAD_MUTEXATTR_SETTYPE = 0x518 + SYS_PTHREAD_MUTEXATTR_GETTYPE = 0x519 + SYS___FP_UNORDERED = 0x520 + SYS___FP_READ_RND = 0x521 + SYS___FP_READ_RND_B = 0x522 + SYS___FP_SWAP_RND = 0x523 + SYS___FP_SWAP_RND_B = 0x524 + SYS___FP_LEVEL = 0x525 + SYS___FP_BTOH = 0x526 + SYS___FP_HTOB = 0x527 + SYS___FPC_RD = 0x528 + SYS___FPC_WR = 0x529 + SYS_PTHREAD_SETCANCELTYPE = 0x600 + SYS_PTHREAD_TESTCANCEL = 0x601 + SYS___ATANF_B = 0x602 + SYS___ATANL_B = 0x603 + SYS___CEILF_B = 0x604 + SYS___CEILL_B = 0x605 + SYS___COSF_B = 0x606 + SYS___COSL_B = 0x607 + SYS___FABSF_B = 0x608 + SYS___FABSL_B = 0x609 + SYS___SINF_B = 0x610 + SYS___SINL_B = 0x611 + SYS___TANF_B = 0x612 + SYS___TANL_B = 0x613 + 
SYS___TANHF_B = 0x614 + SYS___TANHL_B = 0x615 + SYS___ACOSF_B = 0x616 + SYS___ACOSL_B = 0x617 + SYS___ASINF_B = 0x618 + SYS___ASINL_B = 0x619 + SYS___LOGF_B = 0x620 + SYS___LOGL_B = 0x621 + SYS___LOG10F_B = 0x622 + SYS___LOG10L_B = 0x623 + SYS___POWF_B = 0x624 + SYS___POWL_B = 0x625 + SYS___SINHF_B = 0x626 + SYS___SINHL_B = 0x627 + SYS___SQRTF_B = 0x628 + SYS___SQRTL_B = 0x629 + SYS___MODFL_B = 0x630 + SYS_ABSF = 0x631 + SYS_ABSL = 0x632 + SYS_ACOSF = 0x633 + SYS_ACOSL = 0x634 + SYS_ASINF = 0x635 + SYS_ASINL = 0x636 + SYS_ATAN2F = 0x637 + SYS_ATAN2L = 0x638 + SYS_ATANF = 0x639 + SYS_COSHL = 0x640 + SYS_EXPF = 0x641 + SYS_EXPL = 0x642 + SYS_TANHF = 0x643 + SYS_TANHL = 0x644 + SYS_LOG10F = 0x645 + SYS_LOG10L = 0x646 + SYS_LOGF = 0x647 + SYS_LOGL = 0x648 + SYS_POWF = 0x649 + SYS_SINHL = 0x650 + SYS_TANF = 0x651 + SYS_TANL = 0x652 + SYS_FABSF = 0x653 + SYS_FABSL = 0x654 + SYS_FLOORF = 0x655 + SYS_FLOORL = 0x656 + SYS_FMODF = 0x657 + SYS_FMODL = 0x658 + SYS_FREXPF = 0x659 + SYS___CHATTR = 0x660 + SYS___FCHATTR = 0x661 + SYS___TOCCSID = 0x662 + SYS___CSNAMETYPE = 0x663 + SYS___TOCSNAME = 0x664 + SYS___CCSIDTYPE = 0x665 + SYS___AE_CORRESTBL_QUERY = 0x666 + SYS___AE_AUTOCONVERT_STATE = 0x667 + SYS_DN_FIND = 0x668 + SYS___GETHOSTBYADDR_A = 0x669 + SYS___MBLEN_SB_A = 0x670 + SYS___MBLEN_STD_A = 0x671 + SYS___MBLEN_UTF = 0x672 + SYS___MBSTOWCS_A = 0x673 + SYS___MBSTOWCS_STD_A = 0x674 + SYS___MBTOWC_A = 0x675 + SYS___MBTOWC_ISO1 = 0x676 + SYS___MBTOWC_SBCS = 0x677 + SYS___MBTOWC_MBCS = 0x678 + SYS___MBTOWC_UTF = 0x679 + SYS___CSID_A = 0x680 + SYS___CSID_STD_A = 0x681 + SYS___WCSID_A = 0x682 + SYS___WCSID_STD_A = 0x683 + SYS___WCTOMB_A = 0x684 + SYS___WCTOMB_ISO1 = 0x685 + SYS___WCTOMB_STD_A = 0x686 + SYS___WCTOMB_UTF = 0x687 + SYS___WCWIDTH_A = 0x688 + SYS___GETGRNAM_R_A = 0x689 + SYS___READDIR_R_A = 0x690 + SYS___E2A_S = 0x691 + SYS___FNMATCH_A = 0x692 + SYS___FNMATCH_C_A = 0x693 + SYS___EXECL_A = 0x694 + SYS___FNMATCH_STD_A = 0x695 + SYS___REGCOMP_A = 0x696 + SYS___REGCOMP_STD_A = 0x697 + SYS___REGERROR_A = 0x698 + SYS___REGERROR_STD_A = 0x699 + SYS___SWPRINTF_A = 0x700 + SYS___FSCANF_A = 0x701 + SYS___SCANF_A = 0x702 + SYS___SSCANF_A = 0x703 + SYS___SWSCANF_A = 0x704 + SYS___ATOF_A = 0x705 + SYS___ATOI_A = 0x706 + SYS___ATOL_A = 0x707 + SYS___STRTOD_A = 0x708 + SYS___STRTOL_A = 0x709 + SYS___L64A_A = 0x710 + SYS___STRERROR_A = 0x711 + SYS___PERROR_A = 0x712 + SYS___FETCH_A = 0x713 + SYS___GETENV_A = 0x714 + SYS___MKSTEMP_A = 0x717 + SYS___PTSNAME_A = 0x718 + SYS___PUTENV_A = 0x719 + SYS___CHDIR_A = 0x720 + SYS___CHOWN_A = 0x721 + SYS___CHROOT_A = 0x722 + SYS___GETCWD_A = 0x723 + SYS___GETWD_A = 0x724 + SYS___LCHOWN_A = 0x725 + SYS___LINK_A = 0x726 + SYS___PATHCONF_A = 0x727 + SYS___IF_NAMEINDEX_A = 0x728 + SYS___READLINK_A = 0x729 + SYS___EXTLINK_NP_A = 0x730 + SYS___ISALNUM_A = 0x731 + SYS___ISALPHA_A = 0x732 + SYS___A2E_S = 0x733 + SYS___ISCNTRL_A = 0x734 + SYS___ISDIGIT_A = 0x735 + SYS___ISGRAPH_A = 0x736 + SYS___ISLOWER_A = 0x737 + SYS___ISPRINT_A = 0x738 + SYS___ISPUNCT_A = 0x739 + SYS___ISWALPHA_A = 0x740 + SYS___A2E_L = 0x741 + SYS___ISWCNTRL_A = 0x742 + SYS___ISWDIGIT_A = 0x743 + SYS___ISWGRAPH_A = 0x744 + SYS___ISWLOWER_A = 0x745 + SYS___ISWPRINT_A = 0x746 + SYS___ISWPUNCT_A = 0x747 + SYS___ISWSPACE_A = 0x748 + SYS___ISWUPPER_A = 0x749 + SYS___REMOVE_A = 0x750 + SYS___RENAME_A = 0x751 + SYS___TMPNAM_A = 0x752 + SYS___FOPEN_A = 0x753 + SYS___FREOPEN_A = 0x754 + SYS___CUSERID_A = 0x755 + SYS___POPEN_A = 0x756 + SYS___TEMPNAM_A = 0x757 + SYS___FTW_A = 0x758 + SYS___GETGRENT_A = 0x759 + 
SYS___INET_NTOP_A = 0x760 + SYS___GETPASS_A = 0x761 + SYS___GETPWENT_A = 0x762 + SYS___GETPWNAM_A = 0x763 + SYS___GETPWUID_A = 0x764 + SYS_____CHECK_RESOURCE_AUTH_NP_A = 0x765 + SYS___CHECKSCHENV_A = 0x766 + SYS___CONNECTSERVER_A = 0x767 + SYS___CONNECTWORKMGR_A = 0x768 + SYS_____CONSOLE_A = 0x769 + SYS___MSGSND_A = 0x770 + SYS___MSGXRCV_A = 0x771 + SYS___NFTW_A = 0x772 + SYS_____PASSWD_A = 0x773 + SYS___PTHREAD_SECURITY_NP_A = 0x774 + SYS___QUERYMETRICS_A = 0x775 + SYS___QUERYSCHENV = 0x776 + SYS___READV_A = 0x777 + SYS_____SERVER_CLASSIFY_A = 0x778 + SYS_____SERVER_INIT_A = 0x779 + SYS___W_GETPSENT_A = 0x780 + SYS___WRITEV_A = 0x781 + SYS___W_STATFS_A = 0x782 + SYS___W_STATVFS_A = 0x783 + SYS___FPUTC_A = 0x784 + SYS___PUTCHAR_A = 0x785 + SYS___PUTS_A = 0x786 + SYS___FGETS_A = 0x787 + SYS___GETS_A = 0x788 + SYS___FPUTS_A = 0x789 + SYS___PUTC_A = 0x790 + SYS___AE_THREAD_SETMODE = 0x791 + SYS___AE_THREAD_SWAPMODE = 0x792 + SYS___GETNETBYADDR_A = 0x793 + SYS___GETNETBYNAME_A = 0x794 + SYS___GETNETENT_A = 0x795 + SYS___GETPROTOBYNAME_A = 0x796 + SYS___GETPROTOBYNUMBER_A = 0x797 + SYS___GETPROTOENT_A = 0x798 + SYS___GETSERVBYNAME_A = 0x799 + SYS_ACL_FIRST_ENTRY = 0x800 + SYS_ACL_GET_ENTRY = 0x801 + SYS_ACL_VALID = 0x802 + SYS_ACL_CREATE_ENTRY = 0x803 + SYS_ACL_DELETE_ENTRY = 0x804 + SYS_ACL_UPDATE_ENTRY = 0x805 + SYS_ACL_DELETE_FD = 0x806 + SYS_ACL_DELETE_FILE = 0x807 + SYS_ACL_GET_FD = 0x808 + SYS_ACL_GET_FILE = 0x809 + SYS___ERFL_B = 0x810 + SYS___ERFCL_B = 0x811 + SYS___LGAMMAL_B = 0x812 + SYS___SETHOOKEVENTS = 0x813 + SYS_IF_NAMETOINDEX = 0x814 + SYS_IF_INDEXTONAME = 0x815 + SYS_IF_NAMEINDEX = 0x816 + SYS_IF_FREENAMEINDEX = 0x817 + SYS_GETADDRINFO = 0x818 + SYS_GETNAMEINFO = 0x819 + SYS___DYNFREE_A = 0x820 + SYS___RES_QUERY_A = 0x821 + SYS___RES_SEARCH_A = 0x822 + SYS___RES_QUERYDOMAIN_A = 0x823 + SYS___RES_MKQUERY_A = 0x824 + SYS___RES_SEND_A = 0x825 + SYS___DN_EXPAND_A = 0x826 + SYS___DN_SKIPNAME_A = 0x827 + SYS___DN_COMP_A = 0x828 + SYS___DN_FIND_A = 0x829 + SYS___INET_NTOA_A = 0x830 + SYS___INET_NETWORK_A = 0x831 + SYS___ACCEPT_A = 0x832 + SYS___ACCEPT_AND_RECV_A = 0x833 + SYS___BIND_A = 0x834 + SYS___CONNECT_A = 0x835 + SYS___GETPEERNAME_A = 0x836 + SYS___GETSOCKNAME_A = 0x837 + SYS___RECVFROM_A = 0x838 + SYS___SENDTO_A = 0x839 + SYS___LCHATTR = 0x840 + SYS___WRITEDOWN = 0x841 + SYS_PTHREAD_MUTEX_INIT2 = 0x842 + SYS___ACOSHF_B = 0x843 + SYS___ACOSHL_B = 0x844 + SYS___ASINHF_B = 0x845 + SYS___ASINHL_B = 0x846 + SYS___ATANHF_B = 0x847 + SYS___ATANHL_B = 0x848 + SYS___CBRTF_B = 0x849 + SYS___EXP2F_B = 0x850 + SYS___EXP2L_B = 0x851 + SYS___EXPM1F_B = 0x852 + SYS___EXPM1L_B = 0x853 + SYS___FDIMF_B = 0x854 + SYS___FDIM_B = 0x855 + SYS___FDIML_B = 0x856 + SYS___HYPOTF_B = 0x857 + SYS___HYPOTL_B = 0x858 + SYS___LOG1PF_B = 0x859 + SYS___REMQUOF_B = 0x860 + SYS___REMQUO_B = 0x861 + SYS___REMQUOL_B = 0x862 + SYS___TGAMMAF_B = 0x863 + SYS___TGAMMA_B = 0x864 + SYS___TGAMMAL_B = 0x865 + SYS___TRUNCF_B = 0x866 + SYS___TRUNC_B = 0x867 + SYS___TRUNCL_B = 0x868 + SYS___LGAMMAF_B = 0x869 + SYS_ASINHF = 0x870 + SYS_ASINHL = 0x871 + SYS_ATANHF = 0x872 + SYS_ATANHL = 0x873 + SYS_CBRTF = 0x874 + SYS_CBRTL = 0x875 + SYS_COPYSIGNF = 0x876 + SYS_CPYSIGNF = 0x876 + SYS_COPYSIGNL = 0x877 + SYS_CPYSIGNL = 0x877 + SYS_COTANF = 0x878 + SYS___COTANF = 0x878 + SYS_COTAN = 0x879 + SYS___COTAN = 0x879 + SYS_FDIM = 0x881 + SYS_FDIML = 0x882 + SYS_HYPOTF = 0x883 + SYS_HYPOTL = 0x884 + SYS_LOG1PF = 0x885 + SYS_LOG1PL = 0x886 + SYS_LOG2F = 0x887 + SYS_LOG2 = 0x888 + SYS_LOG2L = 0x889 + SYS_TGAMMA = 0x890 + SYS_TGAMMAL = 
0x891 + SYS_TRUNCF = 0x892 + SYS_TRUNC = 0x893 + SYS_TRUNCL = 0x894 + SYS_LGAMMAF = 0x895 + SYS_LGAMMAL = 0x896 + SYS_LROUNDF = 0x897 + SYS_LROUND = 0x898 + SYS_ERFF = 0x899 + SYS___COSHF_H = 0x900 + SYS___COSHL_H = 0x901 + SYS___COTAN_H = 0x902 + SYS___COTANF_H = 0x903 + SYS___COTANL_H = 0x904 + SYS___ERF_H = 0x905 + SYS___ERFF_H = 0x906 + SYS___ERFL_H = 0x907 + SYS___ERFC_H = 0x908 + SYS___ERFCF_H = 0x909 + SYS___FDIMF_H = 0x910 + SYS___FDIML_H = 0x911 + SYS___FMOD_H = 0x912 + SYS___FMODF_H = 0x913 + SYS___FMODL_H = 0x914 + SYS___GAMMA_H = 0x915 + SYS___HYPOT_H = 0x916 + SYS___ILOGB_H = 0x917 + SYS___LGAMMA_H = 0x918 + SYS___LGAMMAF_H = 0x919 + SYS___LOG2L_H = 0x920 + SYS___LOG1P_H = 0x921 + SYS___LOG10_H = 0x922 + SYS___LOG10F_H = 0x923 + SYS___LOG10L_H = 0x924 + SYS___LROUND_H = 0x925 + SYS___LROUNDF_H = 0x926 + SYS___NEXTAFTER_H = 0x927 + SYS___POW_H = 0x928 + SYS___POWF_H = 0x929 + SYS___SINL_H = 0x930 + SYS___SINH_H = 0x931 + SYS___SINHF_H = 0x932 + SYS___SINHL_H = 0x933 + SYS___SQRT_H = 0x934 + SYS___SQRTF_H = 0x935 + SYS___SQRTL_H = 0x936 + SYS___TAN_H = 0x937 + SYS___TANF_H = 0x938 + SYS___TANL_H = 0x939 + SYS___TRUNCF_H = 0x940 + SYS___TRUNCL_H = 0x941 + SYS___COSH_H = 0x942 + SYS___LE_DEBUG_SET_RESUME_MCH = 0x943 + SYS_VFSCANF = 0x944 + SYS_VSCANF = 0x946 + SYS_VSSCANF = 0x948 + SYS_IMAXABS = 0x950 + SYS_IMAXDIV = 0x951 + SYS_STRTOIMAX = 0x952 + SYS_STRTOUMAX = 0x953 + SYS_WCSTOIMAX = 0x954 + SYS_WCSTOUMAX = 0x955 + SYS_ATOLL = 0x956 + SYS_STRTOF = 0x957 + SYS_STRTOLD = 0x958 + SYS_WCSTOF = 0x959 + SYS_INET6_RTH_GETADDR = 0x960 + SYS_INET6_OPT_INIT = 0x961 + SYS_INET6_OPT_APPEND = 0x962 + SYS_INET6_OPT_FINISH = 0x963 + SYS_INET6_OPT_SET_VAL = 0x964 + SYS_INET6_OPT_NEXT = 0x965 + SYS_INET6_OPT_FIND = 0x966 + SYS_INET6_OPT_GET_VAL = 0x967 + SYS___POW_I = 0x987 + SYS___POW_I_B = 0x988 + SYS___POW_I_H = 0x989 + SYS___CABS_H = 0x990 + SYS_CABSF = 0x991 + SYS___CABSF_B = 0x992 + SYS___CABSF_H = 0x993 + SYS_CABSL = 0x994 + SYS___CABSL_B = 0x995 + SYS___CABSL_H = 0x996 + SYS_CACOS = 0x997 + SYS___CACOS_B = 0x998 + SYS___CACOS_H = 0x999 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go index 3e6d57ca..7a8161c1 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc && aix +// +build ppc,aix package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go index 3a219bdc..07ed733c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && aix +// +build ppc64,aix package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index 091d107f..690cefc3 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build amd64 && darwin +// +build amd64,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index 28ff4ef7..5bffc10e 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && darwin +// +build arm64,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go index 30e405bb..d0ba8e9b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && dragonfly +// +build amd64,dragonfly package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go index 6cbd094a..29dc4833 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && freebsd +// +build 386,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go index 7c03b6ee..0a89b289 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && freebsd +// +build amd64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go index 422107ee..c8666bb1 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && freebsd +// +build arm,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go index 505a12ac..88fb48a8 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && freebsd +// +build arm64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go index cc986c79..698dc975 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && freebsd +// +build riscv64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 0036746e..18aa70b4 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -1,6 +1,7 @@ // Code generated by mkmerge; DO NOT EDIT. 
//go:build linux +// +build linux package unix @@ -174,8 +175,7 @@ type FscryptPolicyV2 struct { Contents_encryption_mode uint8 Filenames_encryption_mode uint8 Flags uint8 - Log2_data_unit_size uint8 - _ [3]uint8 + _ [4]uint8 Master_key_identifier [16]uint8 } @@ -456,63 +456,60 @@ type Ucred struct { } type TCPInfo struct { - State uint8 - Ca_state uint8 - Retransmits uint8 - Probes uint8 - Backoff uint8 - Options uint8 - Rto uint32 - Ato uint32 - Snd_mss uint32 - Rcv_mss uint32 - Unacked uint32 - Sacked uint32 - Lost uint32 - Retrans uint32 - Fackets uint32 - Last_data_sent uint32 - Last_ack_sent uint32 - Last_data_recv uint32 - Last_ack_recv uint32 - Pmtu uint32 - Rcv_ssthresh uint32 - Rtt uint32 - Rttvar uint32 - Snd_ssthresh uint32 - Snd_cwnd uint32 - Advmss uint32 - Reordering uint32 - Rcv_rtt uint32 - Rcv_space uint32 - Total_retrans uint32 - Pacing_rate uint64 - Max_pacing_rate uint64 - Bytes_acked uint64 - Bytes_received uint64 - Segs_out uint32 - Segs_in uint32 - Notsent_bytes uint32 - Min_rtt uint32 - Data_segs_in uint32 - Data_segs_out uint32 - Delivery_rate uint64 - Busy_time uint64 - Rwnd_limited uint64 - Sndbuf_limited uint64 - Delivered uint32 - Delivered_ce uint32 - Bytes_sent uint64 - Bytes_retrans uint64 - Dsack_dups uint32 - Reord_seen uint32 - Rcv_ooopack uint32 - Snd_wnd uint32 - Rcv_wnd uint32 - Rehash uint32 - Total_rto uint16 - Total_rto_recoveries uint16 - Total_rto_time uint32 + State uint8 + Ca_state uint8 + Retransmits uint8 + Probes uint8 + Backoff uint8 + Options uint8 + Rto uint32 + Ato uint32 + Snd_mss uint32 + Rcv_mss uint32 + Unacked uint32 + Sacked uint32 + Lost uint32 + Retrans uint32 + Fackets uint32 + Last_data_sent uint32 + Last_ack_sent uint32 + Last_data_recv uint32 + Last_ack_recv uint32 + Pmtu uint32 + Rcv_ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Snd_ssthresh uint32 + Snd_cwnd uint32 + Advmss uint32 + Reordering uint32 + Rcv_rtt uint32 + Rcv_space uint32 + Total_retrans uint32 + Pacing_rate uint64 + Max_pacing_rate uint64 + Bytes_acked uint64 + Bytes_received uint64 + Segs_out uint32 + Segs_in uint32 + Notsent_bytes uint32 + Min_rtt uint32 + Data_segs_in uint32 + Data_segs_out uint32 + Delivery_rate uint64 + Busy_time uint64 + Rwnd_limited uint64 + Sndbuf_limited uint64 + Delivered uint32 + Delivered_ce uint32 + Bytes_sent uint64 + Bytes_retrans uint64 + Dsack_dups uint32 + Reord_seen uint32 + Rcv_ooopack uint32 + Snd_wnd uint32 + Rcv_wnd uint32 + Rehash uint32 } type CanFilter struct { @@ -555,7 +552,7 @@ const ( SizeofIPv6MTUInfo = 0x20 SizeofICMPv6Filter = 0x20 SizeofUcred = 0xc - SizeofTCPInfo = 0xf8 + SizeofTCPInfo = 0xf0 SizeofCanFilter = 0x8 SizeofTCPRepairOpt = 0x8 ) @@ -836,15 +833,6 @@ const ( FSPICK_EMPTY_PATH = 0x8 FSMOUNT_CLOEXEC = 0x1 - - FSCONFIG_SET_FLAG = 0x0 - FSCONFIG_SET_STRING = 0x1 - FSCONFIG_SET_BINARY = 0x2 - FSCONFIG_SET_PATH = 0x3 - FSCONFIG_SET_PATH_EMPTY = 0x4 - FSCONFIG_SET_FD = 0x5 - FSCONFIG_CMD_CREATE = 0x6 - FSCONFIG_CMD_RECONFIGURE = 0x7 ) type OpenHow struct { @@ -1178,8 +1166,7 @@ const ( PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT = 0x10 PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT = 0x11 PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT = 0x12 - PERF_SAMPLE_BRANCH_COUNTERS = 0x80000 - PERF_SAMPLE_BRANCH_MAX_SHIFT = 0x14 + PERF_SAMPLE_BRANCH_MAX_SHIFT = 0x13 PERF_SAMPLE_BRANCH_USER = 0x1 PERF_SAMPLE_BRANCH_KERNEL = 0x2 PERF_SAMPLE_BRANCH_HV = 0x4 @@ -1199,7 +1186,7 @@ const ( PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000 PERF_SAMPLE_BRANCH_HW_INDEX = 0x20000 PERF_SAMPLE_BRANCH_PRIV_SAVE = 0x40000 - PERF_SAMPLE_BRANCH_MAX = 0x100000 + 
PERF_SAMPLE_BRANCH_MAX = 0x80000 PERF_BR_UNKNOWN = 0x0 PERF_BR_COND = 0x1 PERF_BR_UNCOND = 0x2 @@ -1560,7 +1547,6 @@ const ( IFLA_DEVLINK_PORT = 0x3e IFLA_GSO_IPV4_MAX_SIZE = 0x3f IFLA_GRO_IPV4_MAX_SIZE = 0x40 - IFLA_DPLL_PIN = 0x41 IFLA_PROTO_DOWN_REASON_UNSPEC = 0x0 IFLA_PROTO_DOWN_REASON_MASK = 0x1 IFLA_PROTO_DOWN_REASON_VALUE = 0x2 @@ -1576,7 +1562,6 @@ const ( IFLA_INET6_ICMP6STATS = 0x6 IFLA_INET6_TOKEN = 0x7 IFLA_INET6_ADDR_GEN_MODE = 0x8 - IFLA_INET6_RA_MTU = 0x9 IFLA_BR_UNSPEC = 0x0 IFLA_BR_FORWARD_DELAY = 0x1 IFLA_BR_HELLO_TIME = 0x2 @@ -1624,9 +1609,6 @@ const ( IFLA_BR_MCAST_MLD_VERSION = 0x2c IFLA_BR_VLAN_STATS_PER_PORT = 0x2d IFLA_BR_MULTI_BOOLOPT = 0x2e - IFLA_BR_MCAST_QUERIER_STATE = 0x2f - IFLA_BR_FDB_N_LEARNED = 0x30 - IFLA_BR_FDB_MAX_LEARNED = 0x31 IFLA_BRPORT_UNSPEC = 0x0 IFLA_BRPORT_STATE = 0x1 IFLA_BRPORT_PRIORITY = 0x2 @@ -1664,14 +1646,6 @@ const ( IFLA_BRPORT_BACKUP_PORT = 0x22 IFLA_BRPORT_MRP_RING_OPEN = 0x23 IFLA_BRPORT_MRP_IN_OPEN = 0x24 - IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT = 0x25 - IFLA_BRPORT_MCAST_EHT_HOSTS_CNT = 0x26 - IFLA_BRPORT_LOCKED = 0x27 - IFLA_BRPORT_MAB = 0x28 - IFLA_BRPORT_MCAST_N_GROUPS = 0x29 - IFLA_BRPORT_MCAST_MAX_GROUPS = 0x2a - IFLA_BRPORT_NEIGH_VLAN_SUPPRESS = 0x2b - IFLA_BRPORT_BACKUP_NHID = 0x2c IFLA_INFO_UNSPEC = 0x0 IFLA_INFO_KIND = 0x1 IFLA_INFO_DATA = 0x2 @@ -1693,9 +1667,6 @@ const ( IFLA_MACVLAN_MACADDR = 0x4 IFLA_MACVLAN_MACADDR_DATA = 0x5 IFLA_MACVLAN_MACADDR_COUNT = 0x6 - IFLA_MACVLAN_BC_QUEUE_LEN = 0x7 - IFLA_MACVLAN_BC_QUEUE_LEN_USED = 0x8 - IFLA_MACVLAN_BC_CUTOFF = 0x9 IFLA_VRF_UNSPEC = 0x0 IFLA_VRF_TABLE = 0x1 IFLA_VRF_PORT_UNSPEC = 0x0 @@ -1719,22 +1690,9 @@ const ( IFLA_XFRM_UNSPEC = 0x0 IFLA_XFRM_LINK = 0x1 IFLA_XFRM_IF_ID = 0x2 - IFLA_XFRM_COLLECT_METADATA = 0x3 IFLA_IPVLAN_UNSPEC = 0x0 IFLA_IPVLAN_MODE = 0x1 IFLA_IPVLAN_FLAGS = 0x2 - NETKIT_NEXT = -0x1 - NETKIT_PASS = 0x0 - NETKIT_DROP = 0x2 - NETKIT_REDIRECT = 0x7 - NETKIT_L2 = 0x0 - NETKIT_L3 = 0x1 - IFLA_NETKIT_UNSPEC = 0x0 - IFLA_NETKIT_PEER_INFO = 0x1 - IFLA_NETKIT_PRIMARY = 0x2 - IFLA_NETKIT_POLICY = 0x3 - IFLA_NETKIT_PEER_POLICY = 0x4 - IFLA_NETKIT_MODE = 0x5 IFLA_VXLAN_UNSPEC = 0x0 IFLA_VXLAN_ID = 0x1 IFLA_VXLAN_GROUP = 0x2 @@ -1765,8 +1723,6 @@ const ( IFLA_VXLAN_GPE = 0x1b IFLA_VXLAN_TTL_INHERIT = 0x1c IFLA_VXLAN_DF = 0x1d - IFLA_VXLAN_VNIFILTER = 0x1e - IFLA_VXLAN_LOCALBYPASS = 0x1f IFLA_GENEVE_UNSPEC = 0x0 IFLA_GENEVE_ID = 0x1 IFLA_GENEVE_REMOTE = 0x2 @@ -1781,7 +1737,6 @@ const ( IFLA_GENEVE_LABEL = 0xb IFLA_GENEVE_TTL_INHERIT = 0xc IFLA_GENEVE_DF = 0xd - IFLA_GENEVE_INNER_PROTO_INHERIT = 0xe IFLA_BAREUDP_UNSPEC = 0x0 IFLA_BAREUDP_PORT = 0x1 IFLA_BAREUDP_ETHERTYPE = 0x2 @@ -1794,8 +1749,6 @@ const ( IFLA_GTP_FD1 = 0x2 IFLA_GTP_PDP_HASHSIZE = 0x3 IFLA_GTP_ROLE = 0x4 - IFLA_GTP_CREATE_SOCKETS = 0x5 - IFLA_GTP_RESTART_COUNT = 0x6 IFLA_BOND_UNSPEC = 0x0 IFLA_BOND_MODE = 0x1 IFLA_BOND_ACTIVE_SLAVE = 0x2 @@ -1825,9 +1778,6 @@ const ( IFLA_BOND_AD_ACTOR_SYSTEM = 0x1a IFLA_BOND_TLB_DYNAMIC_LB = 0x1b IFLA_BOND_PEER_NOTIF_DELAY = 0x1c - IFLA_BOND_AD_LACP_ACTIVE = 0x1d - IFLA_BOND_MISSED_MAX = 0x1e - IFLA_BOND_NS_IP6_TARGET = 0x1f IFLA_BOND_AD_INFO_UNSPEC = 0x0 IFLA_BOND_AD_INFO_AGGREGATOR = 0x1 IFLA_BOND_AD_INFO_NUM_PORTS = 0x2 @@ -1843,7 +1793,6 @@ const ( IFLA_BOND_SLAVE_AD_AGGREGATOR_ID = 0x6 IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE = 0x7 IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE = 0x8 - IFLA_BOND_SLAVE_PRIO = 0x9 IFLA_VF_INFO_UNSPEC = 0x0 IFLA_VF_INFO = 0x1 IFLA_VF_UNSPEC = 0x0 @@ -1902,16 +1851,8 @@ const ( IFLA_STATS_LINK_XSTATS_SLAVE = 0x3 
IFLA_STATS_LINK_OFFLOAD_XSTATS = 0x4 IFLA_STATS_AF_SPEC = 0x5 - IFLA_STATS_GETSET_UNSPEC = 0x0 - IFLA_STATS_GET_FILTERS = 0x1 - IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS = 0x2 IFLA_OFFLOAD_XSTATS_UNSPEC = 0x0 IFLA_OFFLOAD_XSTATS_CPU_HIT = 0x1 - IFLA_OFFLOAD_XSTATS_HW_S_INFO = 0x2 - IFLA_OFFLOAD_XSTATS_L3_STATS = 0x3 - IFLA_OFFLOAD_XSTATS_HW_S_INFO_UNSPEC = 0x0 - IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST = 0x1 - IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED = 0x2 IFLA_XDP_UNSPEC = 0x0 IFLA_XDP_FD = 0x1 IFLA_XDP_ATTACHED = 0x2 @@ -1941,11 +1882,6 @@ const ( IFLA_RMNET_UNSPEC = 0x0 IFLA_RMNET_MUX_ID = 0x1 IFLA_RMNET_FLAGS = 0x2 - IFLA_MCTP_UNSPEC = 0x0 - IFLA_MCTP_NET = 0x1 - IFLA_DSA_UNSPEC = 0x0 - IFLA_DSA_CONDUIT = 0x1 - IFLA_DSA_MASTER = 0x1 ) const ( @@ -2482,15 +2418,6 @@ type XDPMmapOffsets struct { Cr XDPRingOffset } -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Chunk_size uint32 - Headroom uint32 - Flags uint32 - Tx_metadata_len uint32 -} - type XDPStatistics struct { Rx_dropped uint64 Rx_invalid_descs uint64 @@ -2745,7 +2672,6 @@ const ( BPF_PROG_TYPE_LSM = 0x1d BPF_PROG_TYPE_SK_LOOKUP = 0x1e BPF_PROG_TYPE_SYSCALL = 0x1f - BPF_PROG_TYPE_NETFILTER = 0x20 BPF_CGROUP_INET_INGRESS = 0x0 BPF_CGROUP_INET_EGRESS = 0x1 BPF_CGROUP_INET_SOCK_CREATE = 0x2 @@ -2790,11 +2716,6 @@ const ( BPF_PERF_EVENT = 0x29 BPF_TRACE_KPROBE_MULTI = 0x2a BPF_LSM_CGROUP = 0x2b - BPF_STRUCT_OPS = 0x2c - BPF_NETFILTER = 0x2d - BPF_TCX_INGRESS = 0x2e - BPF_TCX_EGRESS = 0x2f - BPF_TRACE_UPROBE_MULTI = 0x30 BPF_LINK_TYPE_UNSPEC = 0x0 BPF_LINK_TYPE_RAW_TRACEPOINT = 0x1 BPF_LINK_TYPE_TRACING = 0x2 @@ -2805,18 +2726,6 @@ const ( BPF_LINK_TYPE_PERF_EVENT = 0x7 BPF_LINK_TYPE_KPROBE_MULTI = 0x8 BPF_LINK_TYPE_STRUCT_OPS = 0x9 - BPF_LINK_TYPE_NETFILTER = 0xa - BPF_LINK_TYPE_TCX = 0xb - BPF_LINK_TYPE_UPROBE_MULTI = 0xc - BPF_PERF_EVENT_UNSPEC = 0x0 - BPF_PERF_EVENT_UPROBE = 0x1 - BPF_PERF_EVENT_URETPROBE = 0x2 - BPF_PERF_EVENT_KPROBE = 0x3 - BPF_PERF_EVENT_KRETPROBE = 0x4 - BPF_PERF_EVENT_TRACEPOINT = 0x5 - BPF_PERF_EVENT_EVENT = 0x6 - BPF_F_KPROBE_MULTI_RETURN = 0x1 - BPF_F_UPROBE_MULTI_RETURN = 0x1 BPF_ANY = 0x0 BPF_NOEXIST = 0x1 BPF_EXIST = 0x2 @@ -2834,8 +2743,6 @@ const ( BPF_F_MMAPABLE = 0x400 BPF_F_PRESERVE_ELEMS = 0x800 BPF_F_INNER_MAP = 0x1000 - BPF_F_LINK = 0x2000 - BPF_F_PATH_FD = 0x4000 BPF_STATS_RUN_TIME = 0x0 BPF_STACK_BUILD_ID_EMPTY = 0x0 BPF_STACK_BUILD_ID_VALID = 0x1 @@ -2856,7 +2763,6 @@ const ( BPF_F_ZERO_CSUM_TX = 0x2 BPF_F_DONT_FRAGMENT = 0x4 BPF_F_SEQ_NUMBER = 0x8 - BPF_F_NO_TUNNEL_KEY = 0x10 BPF_F_TUNINFO_FLAGS = 0x10 BPF_F_INDEX_MASK = 0xffffffff BPF_F_CURRENT_CPU = 0xffffffff @@ -2873,8 +2779,6 @@ const ( BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 BPF_F_ADJ_ROOM_NO_CSUM_RESET = 0x20 BPF_F_ADJ_ROOM_ENCAP_L2_ETH = 0x40 - BPF_F_ADJ_ROOM_DECAP_L3_IPV4 = 0x80 - BPF_F_ADJ_ROOM_DECAP_L3_IPV6 = 0x100 BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_F_SYSCTL_BASE_NAME = 0x1 @@ -2945,7 +2849,7 @@ const ( BPF_TCP_LISTEN = 0xa BPF_TCP_CLOSING = 0xb BPF_TCP_NEW_SYN_RECV = 0xc - BPF_TCP_MAX_STATES = 0xe + BPF_TCP_MAX_STATES = 0xd TCP_BPF_IW = 0x3e9 TCP_BPF_SNDCWND_CLAMP = 0x3ea TCP_BPF_DELACK_MAX = 0x3eb @@ -2963,8 +2867,6 @@ const ( BPF_DEVCG_DEV_CHAR = 0x2 BPF_FIB_LOOKUP_DIRECT = 0x1 BPF_FIB_LOOKUP_OUTPUT = 0x2 - BPF_FIB_LOOKUP_SKIP_NEIGH = 0x4 - BPF_FIB_LOOKUP_TBID = 0x8 BPF_FIB_LKUP_RET_SUCCESS = 0x0 BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 @@ -3000,7 +2902,6 @@ const ( BPF_CORE_ENUMVAL_EXISTS = 0xa BPF_CORE_ENUMVAL_VALUE = 0xb BPF_CORE_TYPE_MATCHES = 0xc - BPF_F_TIMER_ABS = 
0x1 ) const ( @@ -3079,12 +2980,6 @@ type LoopInfo64 struct { Encrypt_key [32]uint8 Init [2]uint64 } -type LoopConfig struct { - Fd uint32 - Size uint32 - Info LoopInfo64 - _ [8]uint64 -} type TIPCSocketAddr struct { Ref uint32 @@ -3221,7 +3116,7 @@ const ( DEVLINK_CMD_LINECARD_NEW = 0x50 DEVLINK_CMD_LINECARD_DEL = 0x51 DEVLINK_CMD_SELFTESTS_GET = 0x52 - DEVLINK_CMD_MAX = 0x54 + DEVLINK_CMD_MAX = 0x53 DEVLINK_PORT_TYPE_NOTSET = 0x0 DEVLINK_PORT_TYPE_AUTO = 0x1 DEVLINK_PORT_TYPE_ETH = 0x2 @@ -3473,7 +3368,7 @@ const ( DEVLINK_PORT_FN_ATTR_STATE = 0x2 DEVLINK_PORT_FN_ATTR_OPSTATE = 0x3 DEVLINK_PORT_FN_ATTR_CAPS = 0x4 - DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x5 + DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x4 ) type FsverityDigest struct { @@ -4257,8 +4152,7 @@ const ( ) type LandlockRulesetAttr struct { - Access_fs uint64 - Access_net uint64 + Access_fs uint64 } type LandlockPathBeneathAttr struct { @@ -4605,7 +4499,7 @@ const ( NL80211_ATTR_MAC_HINT = 0xc8 NL80211_ATTR_MAC_MASK = 0xd7 NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca - NL80211_ATTR_MAX = 0x149 + NL80211_ATTR_MAX = 0x146 NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 NL80211_ATTR_MAX_CSA_COUNTERS = 0xce NL80211_ATTR_MAX_MATCH_SETS = 0x85 @@ -4871,7 +4765,7 @@ const ( NL80211_BSS_FREQUENCY_OFFSET = 0x14 NL80211_BSS_INFORMATION_ELEMENTS = 0x6 NL80211_BSS_LAST_SEEN_BOOTTIME = 0xf - NL80211_BSS_MAX = 0x18 + NL80211_BSS_MAX = 0x16 NL80211_BSS_MLD_ADDR = 0x16 NL80211_BSS_MLO_LINK_ID = 0x15 NL80211_BSS_PAD = 0x10 @@ -4975,7 +4869,7 @@ const ( NL80211_CMD_LEAVE_IBSS = 0x2c NL80211_CMD_LEAVE_MESH = 0x45 NL80211_CMD_LEAVE_OCB = 0x6d - NL80211_CMD_MAX = 0x9b + NL80211_CMD_MAX = 0x9a NL80211_CMD_MICHAEL_MIC_FAILURE = 0x29 NL80211_CMD_MODIFY_LINK_STA = 0x97 NL80211_CMD_NAN_MATCH = 0x78 @@ -5209,7 +5103,7 @@ const ( NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf - NL80211_FREQUENCY_ATTR_MAX = 0x1f + NL80211_FREQUENCY_ATTR_MAX = 0x1b NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6 NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11 NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc @@ -5622,7 +5516,7 @@ const ( NL80211_REGDOM_TYPE_CUSTOM_WORLD = 0x2 NL80211_REGDOM_TYPE_INTERSECTION = 0x3 NL80211_REGDOM_TYPE_WORLD = 0x1 - NL80211_REG_RULE_ATTR_MAX = 0x8 + NL80211_REG_RULE_ATTR_MAX = 0x7 NL80211_REKEY_DATA_AKM = 0x4 NL80211_REKEY_DATA_KCK = 0x2 NL80211_REKEY_DATA_KEK = 0x1 @@ -5989,15 +5883,3 @@ type SchedAttr struct { } const SizeofSchedAttr = 0x38 - -type Cachestat_t struct { - Cache uint64 - Dirty uint64 - Writeback uint64 - Evicted uint64 - Recently_evicted uint64 -} -type CachestatRange struct { - Off uint64 - Len uint64 -} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index fd402da4..6d8acbcc 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build 386 && linux +// +build 386,linux package unix @@ -477,6 +478,14 @@ const ( BLKPG = 0x1269 ) +type XDPUmemReg struct { + Addr uint64 + Len uint64 + Size uint32 + Headroom uint32 + Flags uint32 +} + type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index eb7a5e18..59293c68 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && linux +// +build amd64,linux package unix @@ -492,6 +493,15 @@ const ( BLKPG = 0x1269 ) +type XDPUmemReg struct { + Addr uint64 + Len uint64 + Size uint32 + Headroom uint32 + Flags uint32 + _ [4]byte +} + type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index d78ac108..40cfa38c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && linux +// +build arm,linux package unix @@ -470,6 +471,15 @@ const ( BLKPG = 0x1269 ) +type XDPUmemReg struct { + Addr uint64 + Len uint64 + Size uint32 + Headroom uint32 + Flags uint32 + _ [4]byte +} + type CryptoUserAlg struct { Name [64]uint8 Driver_name [64]uint8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index cd06d47f..055bc421 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && linux +// +build arm64,linux package unix @@ -471,6 +472,15 @@ const ( BLKPG = 0x1269 ) +type XDPUmemReg struct { + Addr uint64 + Len uint64 + Size uint32 + Headroom uint32 + Flags uint32 + _ [4]byte +} + type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go index 2f28fe26..f28affbc 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build loong64 && linux +// +build loong64,linux package unix @@ -472,6 +473,15 @@ const ( BLKPG = 0x1269 ) +type XDPUmemReg struct { + Addr uint64 + Len uint64 + Size uint32 + Headroom uint32 + Flags uint32 + _ [4]byte +} + type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index 71d6cac2..9d71e7cc 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build mips && linux +// +build mips,linux package unix @@ -476,6 +477,15 @@ const ( BLKPG = 0x20001269 ) +type XDPUmemReg struct { + Addr uint64 + Len uint64 + Size uint32 + Headroom uint32 + Flags uint32 + _ [4]byte +} + type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index 8596d453..fd5ccd33 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64 && linux +// +build mips64,linux package unix @@ -474,6 +475,15 @@ const ( BLKPG = 0x20001269 ) +type XDPUmemReg struct { + Addr uint64 + Len uint64 + Size uint32 + Headroom uint32 + Flags uint32 + _ [4]byte +} + type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index cd60ea18..7704de77 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64le && linux +// +build mips64le,linux package unix @@ -474,6 +475,15 @@ const ( BLKPG = 0x20001269 ) +type XDPUmemReg struct { + Addr uint64 + Len uint64 + Size uint32 + Headroom uint32 + Flags uint32 + _ [4]byte +} + type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index b0ae420c..df00b875 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mipsle && linux +// +build mipsle,linux package unix @@ -476,6 +477,15 @@ const ( BLKPG = 0x20001269 ) +type XDPUmemReg struct { + Addr uint64 + Len uint64 + Size uint32 + Headroom uint32 + Flags uint32 + _ [4]byte +} + type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go index 83597287..0942840d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc && linux +// +build ppc,linux package unix @@ -482,6 +483,15 @@ const ( BLKPG = 0x20001269 ) +type XDPUmemReg struct { + Addr uint64 + Len uint64 + Size uint32 + Headroom uint32 + Flags uint32 + _ [4]byte +} + type CryptoUserAlg struct { Name [64]uint8 Driver_name [64]uint8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 69eb6a5c..03487439 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build ppc64 && linux +// +build ppc64,linux package unix @@ -481,6 +482,15 @@ const ( BLKPG = 0x20001269 ) +type XDPUmemReg struct { + Addr uint64 + Len uint64 + Size uint32 + Headroom uint32 + Flags uint32 + _ [4]byte +} + type CryptoUserAlg struct { Name [64]uint8 Driver_name [64]uint8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index 5f583cb6..bad06704 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64le && linux +// +build ppc64le,linux package unix @@ -481,6 +482,15 @@ const ( BLKPG = 0x20001269 ) +type XDPUmemReg struct { + Addr uint64 + Len uint64 + Size uint32 + Headroom uint32 + Flags uint32 + _ [4]byte +} + type CryptoUserAlg struct { Name [64]uint8 Driver_name [64]uint8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index 15adc041..1b4c97c3 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && linux +// +build riscv64,linux package unix @@ -499,6 +500,15 @@ const ( BLKPG = 0x1269 ) +type XDPUmemReg struct { + Addr uint64 + Len uint64 + Size uint32 + Headroom uint32 + Flags uint32 + _ [4]byte +} + type CryptoUserAlg struct { Name [64]uint8 Driver_name [64]uint8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index cf3ce900..aa268d02 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build s390x && linux +// +build s390x,linux package unix @@ -495,6 +496,15 @@ const ( BLKPG = 0x1269 ) +type XDPUmemReg struct { + Addr uint64 + Len uint64 + Size uint32 + Headroom uint32 + Flags uint32 + _ [4]byte +} + type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index 590b5673..444045b6 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build sparc64 && linux +// +build sparc64,linux package unix @@ -476,6 +477,15 @@ const ( BLKPG = 0x20001269 ) +type XDPUmemReg struct { + Addr uint64 + Len uint64 + Size uint32 + Headroom uint32 + Flags uint32 + _ [4]byte +} + type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go index f22e7947..9bc4c8f9 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build 386 && netbsd +// +build 386,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go index 066a7d83..bb05f655 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && netbsd +// +build amd64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go index 439548ec..db40e3a1 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && netbsd +// +build arm,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go index 16085d3b..11121151 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && netbsd +// +build arm64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go index afd13a3a..26eba23b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && openbsd +// +build 386,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go index 5d97f1f9..5a547988 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && openbsd +// +build amd64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go index 34871cdc..be58c4e1 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && openbsd +// +build arm,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go index 5911bceb..52338266 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && openbsd +// +build arm64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go index e4f24f3b..605cfdb1 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build mips64 && openbsd +// +build mips64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_ppc64.go index ca50a793..d6724c01 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_ppc64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && openbsd +// +build ppc64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_riscv64.go index d7d7f790..ddfd27a4 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_riscv64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && openbsd +// +build riscv64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go index 14160576..0400747c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go @@ -2,6 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && solaris +// +build amd64,solaris package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go index d9a13af4..aec1efcb 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build zos && s390x +// +build zos,s390x // Hand edited based on ztypes_linux_s390x.go // TODO: auto-generate. 
@@ -25,13 +26,10 @@ const ( SizeofIPv6Mreq = 20 SizeofICMPv6Filter = 32 SizeofIPv6MTUInfo = 32 - SizeofInet4Pktinfo = 8 - SizeofInet6Pktinfo = 20 SizeofLinger = 8 SizeofSockaddrInet4 = 16 SizeofSockaddrInet6 = 28 SizeofTCPInfo = 0x68 - SizeofUcred = 12 ) type ( @@ -72,17 +70,12 @@ type Utimbuf struct { } type Utsname struct { - Sysname [16]byte - Nodename [32]byte - Release [8]byte - Version [8]byte - Machine [16]byte -} - -type Ucred struct { - Pid int32 - Uid uint32 - Gid uint32 + Sysname [65]byte + Nodename [65]byte + Release [65]byte + Version [65]byte + Machine [65]byte + Domainname [65]byte } type RawSockaddrInet4 struct { @@ -333,7 +326,7 @@ type Statvfs_t struct { } type Statfs_t struct { - Type uint64 + Type uint32 Bsize uint64 Blocks uint64 Bfree uint64 @@ -344,7 +337,6 @@ type Statfs_t struct { Namelen uint64 Frsize uint64 Flags uint64 - _ [4]uint64 } type direntLE struct { @@ -421,126 +413,3 @@ type W_Mntent struct { Quiesceowner [8]byte _ [38]byte } - -type EpollEvent struct { - Events uint32 - _ int32 - Fd int32 - Pad int32 -} - -type InotifyEvent struct { - Wd int32 - Mask uint32 - Cookie uint32 - Len uint32 - Name string -} - -const ( - SizeofInotifyEvent = 0x10 -) - -type ConsMsg2 struct { - Cm2Format uint16 - Cm2R1 uint16 - Cm2Msglength uint32 - Cm2Msg *byte - Cm2R2 [4]byte - Cm2R3 [4]byte - Cm2Routcde *uint32 - Cm2Descr *uint32 - Cm2Msgflag uint32 - Cm2Token uint32 - Cm2Msgid *uint32 - Cm2R4 [4]byte - Cm2DomToken uint32 - Cm2DomMsgid *uint32 - Cm2ModCartptr *byte - Cm2ModConsidptr *byte - Cm2MsgCart [8]byte - Cm2MsgConsid [4]byte - Cm2R5 [12]byte -} - -const ( - CC_modify = 1 - CC_stop = 2 - CONSOLE_FORMAT_2 = 2 - CONSOLE_FORMAT_3 = 3 - CONSOLE_HRDCPY = 0x80000000 -) - -type OpenHow struct { - Flags uint64 - Mode uint64 - Resolve uint64 -} - -const SizeofOpenHow = 0x18 - -const ( - RESOLVE_CACHED = 0x20 - RESOLVE_BENEATH = 0x8 - RESOLVE_IN_ROOT = 0x10 - RESOLVE_NO_MAGICLINKS = 0x2 - RESOLVE_NO_SYMLINKS = 0x4 - RESOLVE_NO_XDEV = 0x1 -) - -type Siginfo struct { - Signo int32 - Errno int32 - Code int32 - Pid int32 - Uid uint32 - _ [44]byte -} - -type SysvIpcPerm struct { - Uid uint32 - Gid uint32 - Cuid uint32 - Cgid uint32 - Mode int32 -} - -type SysvShmDesc struct { - Perm SysvIpcPerm - _ [4]byte - Lpid int32 - Cpid int32 - Nattch uint32 - _ [4]byte - _ [4]byte - _ [4]byte - _ int32 - _ uint8 - _ uint8 - _ uint16 - _ *byte - Segsz uint64 - Atime Time_t - Dtime Time_t - Ctime Time_t -} - -type SysvShmDesc64 struct { - Perm SysvIpcPerm - _ [4]byte - Lpid int32 - Cpid int32 - Nattch uint32 - _ [4]byte - _ [4]byte - _ [4]byte - _ int32 - _ byte - _ uint8 - _ uint16 - _ *byte - Segsz uint64 - Atime int64 - Dtime int64 - Ctime int64 -} diff --git a/vendor/golang.org/x/sys/windows/aliases.go b/vendor/golang.org/x/sys/windows/aliases.go index 16f90560..a20ebea6 100644 --- a/vendor/golang.org/x/sys/windows/aliases.go +++ b/vendor/golang.org/x/sys/windows/aliases.go @@ -2,7 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build windows +//go:build windows && go1.9 +// +build windows,go1.9 package windows diff --git a/vendor/golang.org/x/sys/windows/empty.s b/vendor/golang.org/x/sys/windows/empty.s new file mode 100644 index 00000000..fdbbbcd3 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/empty.s @@ -0,0 +1,9 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !go1.12 +// +build !go1.12 + +// This file is here to allow bodyless functions with go:linkname for Go 1.11 +// and earlier (see https://golang.org/issue/23311). diff --git a/vendor/golang.org/x/sys/windows/env_windows.go b/vendor/golang.org/x/sys/windows/env_windows.go index d4577a42..b8ad1925 100644 --- a/vendor/golang.org/x/sys/windows/env_windows.go +++ b/vendor/golang.org/x/sys/windows/env_windows.go @@ -37,17 +37,14 @@ func (token Token) Environ(inheritExisting bool) (env []string, err error) { return nil, err } defer DestroyEnvironmentBlock(block) - size := unsafe.Sizeof(*block) - for *block != 0 { - // find NUL terminator - end := unsafe.Pointer(block) - for *(*uint16)(end) != 0 { - end = unsafe.Add(end, size) + blockp := unsafe.Pointer(block) + for { + entry := UTF16PtrToString((*uint16)(blockp)) + if len(entry) == 0 { + break } - - entry := unsafe.Slice(block, (uintptr(end)-uintptr(unsafe.Pointer(block)))/size) - env = append(env, UTF16ToString(entry)) - block = (*uint16)(unsafe.Add(end, size)) + env = append(env, entry) + blockp = unsafe.Add(blockp, 2*(len(entry)+1)) } return env, nil } diff --git a/vendor/golang.org/x/sys/windows/eventlog.go b/vendor/golang.org/x/sys/windows/eventlog.go index 6c366955..2cd60645 100644 --- a/vendor/golang.org/x/sys/windows/eventlog.go +++ b/vendor/golang.org/x/sys/windows/eventlog.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build windows +// +build windows package windows diff --git a/vendor/golang.org/x/sys/windows/mksyscall.go b/vendor/golang.org/x/sys/windows/mksyscall.go index dbcdb090..8563f79c 100644 --- a/vendor/golang.org/x/sys/windows/mksyscall.go +++ b/vendor/golang.org/x/sys/windows/mksyscall.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build generate +// +build generate package windows diff --git a/vendor/golang.org/x/sys/windows/race.go b/vendor/golang.org/x/sys/windows/race.go index 0f1bdc38..9196b089 100644 --- a/vendor/golang.org/x/sys/windows/race.go +++ b/vendor/golang.org/x/sys/windows/race.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build windows && race +// +build windows,race package windows diff --git a/vendor/golang.org/x/sys/windows/race0.go b/vendor/golang.org/x/sys/windows/race0.go index 0c78da78..7bae4817 100644 --- a/vendor/golang.org/x/sys/windows/race0.go +++ b/vendor/golang.org/x/sys/windows/race0.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build windows && !race +// +build windows,!race package windows diff --git a/vendor/golang.org/x/sys/windows/registry/key.go b/vendor/golang.org/x/sys/windows/registry/key.go index fd863244..6c8d97b6 100644 --- a/vendor/golang.org/x/sys/windows/registry/key.go +++ b/vendor/golang.org/x/sys/windows/registry/key.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build windows +// +build windows // Package registry provides access to the Windows registry. // diff --git a/vendor/golang.org/x/sys/windows/registry/mksyscall.go b/vendor/golang.org/x/sys/windows/registry/mksyscall.go index bbf86ccf..ee74927d 100644 --- a/vendor/golang.org/x/sys/windows/registry/mksyscall.go +++ b/vendor/golang.org/x/sys/windows/registry/mksyscall.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. 
//go:build generate +// +build generate package registry diff --git a/vendor/golang.org/x/sys/windows/registry/syscall.go b/vendor/golang.org/x/sys/windows/registry/syscall.go index f533091c..41733512 100644 --- a/vendor/golang.org/x/sys/windows/registry/syscall.go +++ b/vendor/golang.org/x/sys/windows/registry/syscall.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build windows +// +build windows package registry diff --git a/vendor/golang.org/x/sys/windows/registry/value.go b/vendor/golang.org/x/sys/windows/registry/value.go index 74db26b9..2789f6f1 100644 --- a/vendor/golang.org/x/sys/windows/registry/value.go +++ b/vendor/golang.org/x/sys/windows/registry/value.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build windows +// +build windows package registry diff --git a/vendor/golang.org/x/sys/windows/service.go b/vendor/golang.org/x/sys/windows/service.go index a9dc6308..c44a1b96 100644 --- a/vendor/golang.org/x/sys/windows/service.go +++ b/vendor/golang.org/x/sys/windows/service.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build windows +// +build windows package windows diff --git a/vendor/golang.org/x/sys/windows/str.go b/vendor/golang.org/x/sys/windows/str.go index 6a4f9ce6..4fc01434 100644 --- a/vendor/golang.org/x/sys/windows/str.go +++ b/vendor/golang.org/x/sys/windows/str.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build windows +// +build windows package windows diff --git a/vendor/golang.org/x/sys/windows/syscall.go b/vendor/golang.org/x/sys/windows/syscall.go index e85ed6b9..8732cdb9 100644 --- a/vendor/golang.org/x/sys/windows/syscall.go +++ b/vendor/golang.org/x/sys/windows/syscall.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build windows +// +build windows // Package windows contains an interface to the low-level operating system // primitives. 
OS details vary depending on the underlying system, and diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 6525c62f..35cfc57c 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -125,7 +125,8 @@ func UTF16PtrToString(p *uint16) string { for ptr := unsafe.Pointer(p); *(*uint16)(ptr) != 0; n++ { ptr = unsafe.Pointer(uintptr(ptr) + unsafe.Sizeof(*p)) } - return UTF16ToString(unsafe.Slice(p, n)) + + return string(utf16.Decode(unsafe.Slice(p, n))) } func Getpagesize() int { return 4096 } @@ -154,8 +155,6 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys GetModuleFileName(module Handle, filename *uint16, size uint32) (n uint32, err error) = kernel32.GetModuleFileNameW //sys GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err error) = kernel32.GetModuleHandleExW //sys SetDefaultDllDirectories(directoryFlags uint32) (err error) -//sys AddDllDirectory(path *uint16) (cookie uintptr, err error) = kernel32.AddDllDirectory -//sys RemoveDllDirectory(cookie uintptr) (err error) = kernel32.RemoveDllDirectory //sys SetDllDirectory(path string) (err error) = kernel32.SetDllDirectoryW //sys GetVersion() (ver uint32, err error) //sys FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, buf []uint16, args *byte) (n uint32, err error) = FormatMessageW @@ -165,7 +164,6 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile Handle) (handle Handle, err error) [failretval==InvalidHandle] = CreateFileW //sys CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *SecurityAttributes) (handle Handle, err error) [failretval==InvalidHandle] = CreateNamedPipeW //sys ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) -//sys DisconnectNamedPipe(pipe Handle) (err error) //sys GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) //sys GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW //sys SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32) (err error) = SetNamedPipeHandleState @@ -194,7 +192,6 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys GetComputerName(buf *uint16, n *uint32) (err error) = GetComputerNameW //sys GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) = GetComputerNameExW //sys SetEndOfFile(handle Handle) (err error) -//sys SetFileValidData(handle Handle, validDataLength int64) (err error) //sys GetSystemTimeAsFileTime(time *Filetime) //sys GetSystemTimePreciseAsFileTime(time *Filetime) //sys GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) [failretval==0xffffffff] @@ -236,7 +233,6 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) (err error) = userenv.CreateEnvironmentBlock //sys DestroyEnvironmentBlock(block *uint16) (err error) = userenv.DestroyEnvironmentBlock //sys getTickCount64() (ms uint64) = kernel32.GetTickCount64 -//sys GetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime 
*Filetime) (err error) //sys SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) //sys GetFileAttributes(name *uint16) (attrs uint32, err error) [failretval==INVALID_FILE_ATTRIBUTES] = kernel32.GetFileAttributesW //sys SetFileAttributes(name *uint16, attrs uint32) (err error) = kernel32.SetFileAttributesW @@ -349,19 +345,8 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys SetProcessPriorityBoost(process Handle, disable bool) (err error) = kernel32.SetProcessPriorityBoost //sys GetProcessWorkingSetSizeEx(hProcess Handle, lpMinimumWorkingSetSize *uintptr, lpMaximumWorkingSetSize *uintptr, flags *uint32) //sys SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr, dwMaximumWorkingSetSize uintptr, flags uint32) (err error) -//sys ClearCommBreak(handle Handle) (err error) -//sys ClearCommError(handle Handle, lpErrors *uint32, lpStat *ComStat) (err error) -//sys EscapeCommFunction(handle Handle, dwFunc uint32) (err error) -//sys GetCommState(handle Handle, lpDCB *DCB) (err error) -//sys GetCommModemStatus(handle Handle, lpModemStat *uint32) (err error) //sys GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) -//sys PurgeComm(handle Handle, dwFlags uint32) (err error) -//sys SetCommBreak(handle Handle) (err error) -//sys SetCommMask(handle Handle, dwEvtMask uint32) (err error) -//sys SetCommState(handle Handle, lpDCB *DCB) (err error) //sys SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) -//sys SetupComm(handle Handle, dwInQueue uint32, dwOutQueue uint32) (err error) -//sys WaitCommEvent(handle Handle, lpEvtMask *uint32, lpOverlapped *Overlapped) (err error) //sys GetActiveProcessorCount(groupNumber uint16) (ret uint32) //sys GetMaximumProcessorCount(groupNumber uint16) (ret uint32) //sys EnumWindows(enumFunc uintptr, param unsafe.Pointer) (err error) = user32.EnumWindows @@ -984,8 +969,7 @@ func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, int32, error) { if n > 0 { sl += int32(n) + 1 } - if sa.raw.Path[0] == '@' || (sa.raw.Path[0] == 0 && sl > 3) { - // Check sl > 3 so we don't change unnamed socket behavior. + if sa.raw.Path[0] == '@' { sa.raw.Path[0] = 0 // Don't count trailing NUL for abstract address. sl-- @@ -1846,73 +1830,3 @@ func ResizePseudoConsole(pconsole Handle, size Coord) error { // accept arguments that can be casted to uintptr, and Coord can't. return resizePseudoConsole(pconsole, *((*uint32)(unsafe.Pointer(&size)))) } - -// DCB constants. See https://learn.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-dcb. -const ( - CBR_110 = 110 - CBR_300 = 300 - CBR_600 = 600 - CBR_1200 = 1200 - CBR_2400 = 2400 - CBR_4800 = 4800 - CBR_9600 = 9600 - CBR_14400 = 14400 - CBR_19200 = 19200 - CBR_38400 = 38400 - CBR_57600 = 57600 - CBR_115200 = 115200 - CBR_128000 = 128000 - CBR_256000 = 256000 - - DTR_CONTROL_DISABLE = 0x00000000 - DTR_CONTROL_ENABLE = 0x00000010 - DTR_CONTROL_HANDSHAKE = 0x00000020 - - RTS_CONTROL_DISABLE = 0x00000000 - RTS_CONTROL_ENABLE = 0x00001000 - RTS_CONTROL_HANDSHAKE = 0x00002000 - RTS_CONTROL_TOGGLE = 0x00003000 - - NOPARITY = 0 - ODDPARITY = 1 - EVENPARITY = 2 - MARKPARITY = 3 - SPACEPARITY = 4 - - ONESTOPBIT = 0 - ONE5STOPBITS = 1 - TWOSTOPBITS = 2 -) - -// EscapeCommFunction constants. See https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-escapecommfunction. -const ( - SETXOFF = 1 - SETXON = 2 - SETRTS = 3 - CLRRTS = 4 - SETDTR = 5 - CLRDTR = 6 - SETBREAK = 8 - CLRBREAK = 9 -) - -// PurgeComm constants. 
See https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-purgecomm. -const ( - PURGE_TXABORT = 0x0001 - PURGE_RXABORT = 0x0002 - PURGE_TXCLEAR = 0x0004 - PURGE_RXCLEAR = 0x0008 -) - -// SetCommMask constants. See https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-setcommmask. -const ( - EV_RXCHAR = 0x0001 - EV_RXFLAG = 0x0002 - EV_TXEMPTY = 0x0004 - EV_CTS = 0x0008 - EV_DSR = 0x0010 - EV_RLSD = 0x0020 - EV_BREAK = 0x0040 - EV_ERR = 0x0080 - EV_RING = 0x0100 -) diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index d8cb71db..b88dc7c8 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -1094,33 +1094,7 @@ const ( SOMAXCONN = 0x7fffffff - TCP_NODELAY = 1 - TCP_EXPEDITED_1122 = 2 - TCP_KEEPALIVE = 3 - TCP_MAXSEG = 4 - TCP_MAXRT = 5 - TCP_STDURG = 6 - TCP_NOURG = 7 - TCP_ATMARK = 8 - TCP_NOSYNRETRIES = 9 - TCP_TIMESTAMPS = 10 - TCP_OFFLOAD_PREFERENCE = 11 - TCP_CONGESTION_ALGORITHM = 12 - TCP_DELAY_FIN_ACK = 13 - TCP_MAXRTMS = 14 - TCP_FASTOPEN = 15 - TCP_KEEPCNT = 16 - TCP_KEEPIDLE = TCP_KEEPALIVE - TCP_KEEPINTVL = 17 - TCP_FAIL_CONNECT_ON_ICMP_ERROR = 18 - TCP_ICMP_ERROR_INFO = 19 - - UDP_NOCHECKSUM = 1 - UDP_SEND_MSG_SIZE = 2 - UDP_RECV_MAX_COALESCED_SIZE = 3 - UDP_CHECKSUM_COVERAGE = 20 - - UDP_COALESCED_INFO = 3 + TCP_NODELAY = 1 SHUT_RD = 0 SHUT_WR = 1 @@ -3380,27 +3354,3 @@ type BLOB struct { Size uint32 BlobData *byte } - -type ComStat struct { - Flags uint32 - CBInQue uint32 - CBOutQue uint32 -} - -type DCB struct { - DCBlength uint32 - BaudRate uint32 - Flags uint32 - wReserved uint16 - XonLim uint16 - XoffLim uint16 - ByteSize uint8 - Parity uint8 - StopBits uint8 - XonChar byte - XoffChar byte - ErrorChar byte - EofChar byte - EvtChar byte - wReserved1 uint16 -} diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 5c6035dd..8b1688de 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -184,12 +184,9 @@ var ( procGetAdaptersInfo = modiphlpapi.NewProc("GetAdaptersInfo") procGetBestInterfaceEx = modiphlpapi.NewProc("GetBestInterfaceEx") procGetIfEntry = modiphlpapi.NewProc("GetIfEntry") - procAddDllDirectory = modkernel32.NewProc("AddDllDirectory") procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject") procCancelIo = modkernel32.NewProc("CancelIo") procCancelIoEx = modkernel32.NewProc("CancelIoEx") - procClearCommBreak = modkernel32.NewProc("ClearCommBreak") - procClearCommError = modkernel32.NewProc("ClearCommError") procCloseHandle = modkernel32.NewProc("CloseHandle") procClosePseudoConsole = modkernel32.NewProc("ClosePseudoConsole") procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") @@ -214,9 +211,7 @@ var ( procDeleteProcThreadAttributeList = modkernel32.NewProc("DeleteProcThreadAttributeList") procDeleteVolumeMountPointW = modkernel32.NewProc("DeleteVolumeMountPointW") procDeviceIoControl = modkernel32.NewProc("DeviceIoControl") - procDisconnectNamedPipe = modkernel32.NewProc("DisconnectNamedPipe") procDuplicateHandle = modkernel32.NewProc("DuplicateHandle") - procEscapeCommFunction = modkernel32.NewProc("EscapeCommFunction") procExitProcess = modkernel32.NewProc("ExitProcess") procExpandEnvironmentStringsW = modkernel32.NewProc("ExpandEnvironmentStringsW") procFindClose = modkernel32.NewProc("FindClose") @@ -240,8 +235,6 @@ var ( 
procGenerateConsoleCtrlEvent = modkernel32.NewProc("GenerateConsoleCtrlEvent") procGetACP = modkernel32.NewProc("GetACP") procGetActiveProcessorCount = modkernel32.NewProc("GetActiveProcessorCount") - procGetCommModemStatus = modkernel32.NewProc("GetCommModemStatus") - procGetCommState = modkernel32.NewProc("GetCommState") procGetCommTimeouts = modkernel32.NewProc("GetCommTimeouts") procGetCommandLineW = modkernel32.NewProc("GetCommandLineW") procGetComputerNameExW = modkernel32.NewProc("GetComputerNameExW") @@ -260,7 +253,6 @@ var ( procGetFileAttributesW = modkernel32.NewProc("GetFileAttributesW") procGetFileInformationByHandle = modkernel32.NewProc("GetFileInformationByHandle") procGetFileInformationByHandleEx = modkernel32.NewProc("GetFileInformationByHandleEx") - procGetFileTime = modkernel32.NewProc("GetFileTime") procGetFileType = modkernel32.NewProc("GetFileType") procGetFinalPathNameByHandleW = modkernel32.NewProc("GetFinalPathNameByHandleW") procGetFullPathNameW = modkernel32.NewProc("GetFullPathNameW") @@ -328,7 +320,6 @@ var ( procProcess32NextW = modkernel32.NewProc("Process32NextW") procProcessIdToSessionId = modkernel32.NewProc("ProcessIdToSessionId") procPulseEvent = modkernel32.NewProc("PulseEvent") - procPurgeComm = modkernel32.NewProc("PurgeComm") procQueryDosDeviceW = modkernel32.NewProc("QueryDosDeviceW") procQueryFullProcessImageNameW = modkernel32.NewProc("QueryFullProcessImageNameW") procQueryInformationJobObject = modkernel32.NewProc("QueryInformationJobObject") @@ -338,13 +329,9 @@ var ( procReadProcessMemory = modkernel32.NewProc("ReadProcessMemory") procReleaseMutex = modkernel32.NewProc("ReleaseMutex") procRemoveDirectoryW = modkernel32.NewProc("RemoveDirectoryW") - procRemoveDllDirectory = modkernel32.NewProc("RemoveDllDirectory") procResetEvent = modkernel32.NewProc("ResetEvent") procResizePseudoConsole = modkernel32.NewProc("ResizePseudoConsole") procResumeThread = modkernel32.NewProc("ResumeThread") - procSetCommBreak = modkernel32.NewProc("SetCommBreak") - procSetCommMask = modkernel32.NewProc("SetCommMask") - procSetCommState = modkernel32.NewProc("SetCommState") procSetCommTimeouts = modkernel32.NewProc("SetCommTimeouts") procSetConsoleCursorPosition = modkernel32.NewProc("SetConsoleCursorPosition") procSetConsoleMode = modkernel32.NewProc("SetConsoleMode") @@ -360,7 +347,6 @@ var ( procSetFileInformationByHandle = modkernel32.NewProc("SetFileInformationByHandle") procSetFilePointer = modkernel32.NewProc("SetFilePointer") procSetFileTime = modkernel32.NewProc("SetFileTime") - procSetFileValidData = modkernel32.NewProc("SetFileValidData") procSetHandleInformation = modkernel32.NewProc("SetHandleInformation") procSetInformationJobObject = modkernel32.NewProc("SetInformationJobObject") procSetNamedPipeHandleState = modkernel32.NewProc("SetNamedPipeHandleState") @@ -371,7 +357,6 @@ var ( procSetStdHandle = modkernel32.NewProc("SetStdHandle") procSetVolumeLabelW = modkernel32.NewProc("SetVolumeLabelW") procSetVolumeMountPointW = modkernel32.NewProc("SetVolumeMountPointW") - procSetupComm = modkernel32.NewProc("SetupComm") procSizeofResource = modkernel32.NewProc("SizeofResource") procSleepEx = modkernel32.NewProc("SleepEx") procTerminateJobObject = modkernel32.NewProc("TerminateJobObject") @@ -390,7 +375,6 @@ var ( procVirtualQueryEx = modkernel32.NewProc("VirtualQueryEx") procVirtualUnlock = modkernel32.NewProc("VirtualUnlock") procWTSGetActiveConsoleSessionId = modkernel32.NewProc("WTSGetActiveConsoleSessionId") - procWaitCommEvent = 
modkernel32.NewProc("WaitCommEvent") procWaitForMultipleObjects = modkernel32.NewProc("WaitForMultipleObjects") procWaitForSingleObject = modkernel32.NewProc("WaitForSingleObject") procWriteConsoleW = modkernel32.NewProc("WriteConsoleW") @@ -1620,15 +1604,6 @@ func GetIfEntry(pIfRow *MibIfRow) (errcode error) { return } -func AddDllDirectory(path *uint16) (cookie uintptr, err error) { - r0, _, e1 := syscall.Syscall(procAddDllDirectory.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) - cookie = uintptr(r0) - if cookie == 0 { - err = errnoErr(e1) - } - return -} - func AssignProcessToJobObject(job Handle, process Handle) (err error) { r1, _, e1 := syscall.Syscall(procAssignProcessToJobObject.Addr(), 2, uintptr(job), uintptr(process), 0) if r1 == 0 { @@ -1653,22 +1628,6 @@ func CancelIoEx(s Handle, o *Overlapped) (err error) { return } -func ClearCommBreak(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procClearCommBreak.Addr(), 1, uintptr(handle), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func ClearCommError(handle Handle, lpErrors *uint32, lpStat *ComStat) (err error) { - r1, _, e1 := syscall.Syscall(procClearCommError.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(lpErrors)), uintptr(unsafe.Pointer(lpStat))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - func CloseHandle(handle Handle) (err error) { r1, _, e1 := syscall.Syscall(procCloseHandle.Addr(), 1, uintptr(handle), 0, 0) if r1 == 0 { @@ -1873,14 +1832,6 @@ func DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBuff return } -func DisconnectNamedPipe(pipe Handle) (err error) { - r1, _, e1 := syscall.Syscall(procDisconnectNamedPipe.Addr(), 1, uintptr(pipe), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - func DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetProcessHandle Handle, lpTargetHandle *Handle, dwDesiredAccess uint32, bInheritHandle bool, dwOptions uint32) (err error) { var _p0 uint32 if bInheritHandle { @@ -1893,14 +1844,6 @@ func DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetP return } -func EscapeCommFunction(handle Handle, dwFunc uint32) (err error) { - r1, _, e1 := syscall.Syscall(procEscapeCommFunction.Addr(), 2, uintptr(handle), uintptr(dwFunc), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - func ExitProcess(exitcode uint32) { syscall.Syscall(procExitProcess.Addr(), 1, uintptr(exitcode), 0, 0) return @@ -2102,22 +2045,6 @@ func GetActiveProcessorCount(groupNumber uint16) (ret uint32) { return } -func GetCommModemStatus(handle Handle, lpModemStat *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetCommModemStatus.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpModemStat)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func GetCommState(handle Handle, lpDCB *DCB) (err error) { - r1, _, e1 := syscall.Syscall(procGetCommState.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpDCB)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - func GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { r1, _, e1 := syscall.Syscall(procGetCommTimeouts.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(timeouts)), 0) if r1 == 0 { @@ -2258,14 +2185,6 @@ func GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte, return } -func GetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) { - r1, _, e1 := syscall.Syscall6(procGetFileTime.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(ctime)), 
uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - func GetFileType(filehandle Handle) (n uint32, err error) { r0, _, e1 := syscall.Syscall(procGetFileType.Addr(), 1, uintptr(filehandle), 0, 0) n = uint32(r0) @@ -2870,14 +2789,6 @@ func PulseEvent(event Handle) (err error) { return } -func PurgeComm(handle Handle, dwFlags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procPurgeComm.Addr(), 2, uintptr(handle), uintptr(dwFlags), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - func QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint32, err error) { r0, _, e1 := syscall.Syscall(procQueryDosDeviceW.Addr(), 3, uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)), uintptr(max)) n = uint32(r0) @@ -2959,14 +2870,6 @@ func RemoveDirectory(path *uint16) (err error) { return } -func RemoveDllDirectory(cookie uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procRemoveDllDirectory.Addr(), 1, uintptr(cookie), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - func ResetEvent(event Handle) (err error) { r1, _, e1 := syscall.Syscall(procResetEvent.Addr(), 1, uintptr(event), 0, 0) if r1 == 0 { @@ -2992,30 +2895,6 @@ func ResumeThread(thread Handle) (ret uint32, err error) { return } -func SetCommBreak(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procSetCommBreak.Addr(), 1, uintptr(handle), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func SetCommMask(handle Handle, dwEvtMask uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetCommMask.Addr(), 2, uintptr(handle), uintptr(dwEvtMask), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func SetCommState(handle Handle, lpDCB *DCB) (err error) { - r1, _, e1 := syscall.Syscall(procSetCommState.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpDCB)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - func SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { r1, _, e1 := syscall.Syscall(procSetCommTimeouts.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(timeouts)), 0) if r1 == 0 { @@ -3144,14 +3023,6 @@ func SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetim return } -func SetFileValidData(handle Handle, validDataLength int64) (err error) { - r1, _, e1 := syscall.Syscall(procSetFileValidData.Addr(), 2, uintptr(handle), uintptr(validDataLength), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - func SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) { r1, _, e1 := syscall.Syscall(procSetHandleInformation.Addr(), 3, uintptr(handle), uintptr(mask), uintptr(flags)) if r1 == 0 { @@ -3237,14 +3108,6 @@ func SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err erro return } -func SetupComm(handle Handle, dwInQueue uint32, dwOutQueue uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetupComm.Addr(), 3, uintptr(handle), uintptr(dwInQueue), uintptr(dwOutQueue)) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - func SizeofResource(module Handle, resInfo Handle) (size uint32, err error) { r0, _, e1 := syscall.Syscall(procSizeofResource.Addr(), 2, uintptr(module), uintptr(resInfo), 0) size = uint32(r0) @@ -3391,14 +3254,6 @@ func WTSGetActiveConsoleSessionId() (sessionID uint32) { return } -func WaitCommEvent(handle Handle, lpEvtMask *uint32, lpOverlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall(procWaitCommEvent.Addr(), 3, uintptr(handle), 
uintptr(unsafe.Pointer(lpEvtMask)), uintptr(unsafe.Pointer(lpOverlapped))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - func waitForMultipleObjects(count uint32, handles uintptr, waitAll bool, waitMilliseconds uint32) (event uint32, err error) { var _p0 uint32 if waitAll { diff --git a/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go b/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go index 784bb880..8a7392c4 100644 --- a/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go +++ b/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build go1.10 +// +build go1.10 package bidirule diff --git a/vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go b/vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go index 8e1e9439..bb0a9200 100644 --- a/vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go +++ b/vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build !go1.10 +// +build !go1.10 package bidirule diff --git a/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go index d2bd7118..42fa8d72 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go @@ -1,6 +1,7 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.10 && !go1.13 +// +build go1.10,!go1.13 package bidi diff --git a/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go index f76bdca2..56a0e1ea 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go @@ -1,6 +1,7 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.13 && !go1.14 +// +build go1.13,!go1.14 package bidi diff --git a/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go index 3aa2c3bd..baacf32b 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go @@ -1,6 +1,7 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.14 && !go1.16 +// +build go1.14,!go1.16 package bidi diff --git a/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go index a7137579..ffadb7be 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go @@ -1,6 +1,7 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.16 && !go1.21 +// +build go1.16,!go1.21 package bidi diff --git a/vendor/golang.org/x/text/unicode/bidi/tables15.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables15.0.0.go index f15746f7..92cce580 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables15.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables15.0.0.go @@ -1,6 +1,7 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. 
//go:build go1.21 +// +build go1.21 package bidi diff --git a/vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go index c164d379..f517fdb2 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go @@ -1,6 +1,7 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build !go1.10 +// +build !go1.10 package bidi diff --git a/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go index 1af161c7..f5a07882 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go @@ -1,6 +1,7 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.10 && !go1.13 +// +build go1.10,!go1.13 package norm diff --git a/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go index eb73ecc3..cb7239c4 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go @@ -1,6 +1,7 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.13 && !go1.14 +// +build go1.13,!go1.14 package norm diff --git a/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go index 276cb8d8..11b27330 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go @@ -1,6 +1,7 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.14 && !go1.16 +// +build go1.14,!go1.16 package norm diff --git a/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go index 0cceffd7..f65785e8 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go @@ -1,6 +1,7 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.16 && !go1.21 +// +build go1.16,!go1.21 package norm diff --git a/vendor/golang.org/x/text/unicode/norm/tables15.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables15.0.0.go index b0819e42..e1858b87 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables15.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables15.0.0.go @@ -1,6 +1,7 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.21 +// +build go1.21 package norm diff --git a/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go index bf65457d..0175eae5 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go @@ -1,6 +1,7 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. 
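These golang.org/x/text hunks apply the same treatment to the version-gated table files; the only subtlety is how the `//go:build` expression maps onto the legacy syntax, where `&&` becomes a comma and `!` is preserved. A sketch of one such header, mirroring the tables13.0.0.go constraint shown above:

```go
//go:build go1.16 && !go1.21
// +build go1.16,!go1.21

// A file with this header compiles only for Go 1.16 through 1.20; a sibling
// file gated on go1.21 carries the newer Unicode tables, as in the x/text
// layout above.
package norm
```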
//go:build !go1.10 +// +build !go1.10 package norm diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/LICENSE b/vendor/google.golang.org/genproto/LICENSE similarity index 100% rename from vendor/google.golang.org/genproto/googleapis/rpc/LICENSE rename to vendor/google.golang.org/genproto/LICENSE diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go index a6b50818..f34a38e4 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go @@ -1,4 +1,4 @@ -// Copyright 2022 Google LLC +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.21.9 +// protoc v3.12.2 // source: google/rpc/status.proto package status @@ -48,13 +48,11 @@ type Status struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // The status code, which should be an enum value of - // [google.rpc.Code][google.rpc.Code]. + // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]. Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` // A developer-facing error message, which should be in English. Any // user-facing error message should be localized and sent in the - // [google.rpc.Status.details][google.rpc.Status.details] field, or localized - // by the client. + // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client. Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` // A list of messages that carry the error details. There is a common set of // message types for APIs to use. diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md index 608aa6e1..52338d00 100644 --- a/vendor/google.golang.org/grpc/CONTRIBUTING.md +++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -20,15 +20,6 @@ How to get your contributions merged smoothly and quickly. both author's & review's time is wasted. Create more PRs to address different concerns and everyone will be happy. -- If you are searching for features to work on, issues labeled [Status: Help - Wanted](https://github.com/grpc/grpc-go/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc+label%3A%22Status%3A+Help+Wanted%22) - is a great place to start. These issues are well-documented and usually can be - resolved with a single pull request. - -- If you are adding a new file, make sure it has the copyright message template - at the top as a comment. You can copy over the message from an existing file - and update the year. - - The grpc package should only depend on standard Go packages and a small number of exceptions. If your contribution introduces new dependencies which are NOT in the [list](https://godoc.org/google.golang.org/grpc?imports), you need a @@ -41,18 +32,14 @@ How to get your contributions merged smoothly and quickly. - Provide a good **PR description** as a record of **what** change is being made and **why** it was made. Link to a github issue if it exists. -- If you want to fix formatting or style, consider whether your changes are an - obvious improvement or might be considered a personal preference. 
If a style - change is based on preference, it likely will not be accepted. If it corrects - widely agreed-upon anti-patterns, then please do create a PR and explain the - benefits of the change. +- Don't fix code style and formatting unless you are already changing that line + to address an issue. PRs with irrelevant changes won't be merged. If you do + want to fix formatting or style, do that in a separate PR. - Unless your PR is trivial, you should expect there will be reviewer comments - that you'll need to address before merging. We'll mark it as `Status: Requires - Reporter Clarification` if we expect you to respond to these comments in a - timely manner. If the PR remains inactive for 6 days, it will be marked as - `stale` and automatically close 7 days after that if we don't hear back from - you. + that you'll need to address before merging. We expect you to be reasonably + responsive to those comments, otherwise the PR will be closed after 2-3 weeks + of inactivity. - Maintain **clean commit history** and use **meaningful commit messages**. PRs with messy commit history are difficult to review and won't be merged. Use diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md index ab0fbb79..0e6ae69a 100644 --- a/vendor/google.golang.org/grpc/README.md +++ b/vendor/google.golang.org/grpc/README.md @@ -1,8 +1,8 @@ # gRPC-Go +[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://pkg.go.dev/badge/google.golang.org/grpc)][API] [![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go) -[![codecov](https://codecov.io/gh/grpc/grpc-go/graph/badge.svg)](https://codecov.io/gh/grpc/grpc-go) The [Go][] implementation of [gRPC][]: A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. For more information see the @@ -14,14 +14,21 @@ RPC framework that puts mobile and HTTP/2 first. For more information see the ## Installation -Simply add the following import to your code, and then `go [build|run|test]` -will automatically fetch the necessary dependencies: - +With [Go module][] support (Go 1.11+), simply add the following import ```go import "google.golang.org/grpc" ``` +to your code, and then `go [build|run|test]` will automatically fetch the +necessary dependencies. + +Otherwise, to install the `grpc-go` package, run the following command: + +```console +$ go get -u google.golang.org/grpc +``` + > **Note:** If you are trying to access `grpc-go` from **China**, see the > [FAQ](#FAQ) below. @@ -49,6 +56,15 @@ To build Go code, there are several options: - Set up a VPN and access google.golang.org through that. +- Without Go module support: `git clone` the repo manually: + + ```sh + git clone https://github.com/grpc/grpc-go.git $GOPATH/src/google.golang.org/grpc + ``` + + You will need to do the same for all of grpc's dependencies in `golang.org`, + e.g. `golang.org/x/net`. + - With Go module support: it is possible to use the `replace` feature of `go mod` to create aliases for golang.org packages. In your project's directory: @@ -60,13 +76,33 @@ To build Go code, there are several options: ``` Again, this will need to be done for all transitive dependencies hosted on - golang.org as well. For details, refer to [golang/go issue - #28652](https://github.com/golang/go/issues/28652). + golang.org as well. For details, refer to [golang/go issue #28652](https://github.com/golang/go/issues/28652). 
### Compiling error, undefined: grpc.SupportPackageIsVersion -Please update to the latest version of gRPC-Go using -`go get google.golang.org/grpc`. +#### If you are using Go modules: + +Ensure your gRPC-Go version is `require`d at the appropriate version in +the same module containing the generated `.pb.go` files. For example, +`SupportPackageIsVersion6` needs `v1.27.0`, so in your `go.mod` file: + +```go +module + +require ( + google.golang.org/grpc v1.27.0 +) +``` + +#### If you are *not* using Go modules: + +Update the `proto` package, gRPC package, and rebuild the `.proto` files: + +```sh +go get -u github.com/golang/protobuf/{proto,protoc-gen-go} +go get -u google.golang.org/grpc +protoc --go_out=plugins=grpc:. *.proto +``` ### How to turn on logging @@ -85,11 +121,9 @@ possible reasons, including: 1. mis-configured transport credentials, connection failed on handshaking 1. bytes disrupted, possibly by a proxy in between 1. server shutdown - 1. Keepalive parameters caused connection shutdown, for example if you have - configured your server to terminate connections regularly to [trigger DNS - lookups](https://github.com/grpc/grpc-go/issues/3170#issuecomment-552517779). - If this is the case, you may want to increase your - [MaxConnectionAgeGrace](https://pkg.go.dev/google.golang.org/grpc/keepalive?tab=doc#ServerParameters), + 1. Keepalive parameters caused connection shutdown, for example if you have configured + your server to terminate connections regularly to [trigger DNS lookups](https://github.com/grpc/grpc-go/issues/3170#issuecomment-552517779). + If this is the case, you may want to increase your [MaxConnectionAgeGrace](https://pkg.go.dev/google.golang.org/grpc/keepalive?tab=doc#ServerParameters), to allow longer RPC calls to finish. It can be tricky to debug this because the error happens on the client side but diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go index 52d530d7..02f5dc53 100644 --- a/vendor/google.golang.org/grpc/attributes/attributes.go +++ b/vendor/google.golang.org/grpc/attributes/attributes.go @@ -25,35 +25,30 @@ // later release. package attributes -import ( - "fmt" - "strings" -) - // Attributes is an immutable struct for storing and retrieving generic // key/value pairs. Keys must be hashable, and users should define their own // types for keys. Values should not be modified after they are added to an // Attributes or if they were received from one. If values implement 'Equal(o -// any) bool', it will be called by (*Attributes).Equal to determine whether -// two values with the same key should be considered equal. +// interface{}) bool', it will be called by (*Attributes).Equal to determine +// whether two values with the same key should be considered equal. type Attributes struct { - m map[any]any + m map[interface{}]interface{} } // New returns a new Attributes containing the key/value pair. -func New(key, value any) *Attributes { - return &Attributes{m: map[any]any{key: value}} +func New(key, value interface{}) *Attributes { + return &Attributes{m: map[interface{}]interface{}{key: value}} } // WithValue returns a new Attributes containing the previous keys and values // and the new key/value pair. If the same key appears multiple times, the // last value overwrites all previous values for that key. To remove an // existing key, use a nil value. value should not be modified later. 
-func (a *Attributes) WithValue(key, value any) *Attributes { +func (a *Attributes) WithValue(key, value interface{}) *Attributes { if a == nil { return New(key, value) } - n := &Attributes{m: make(map[any]any, len(a.m)+1)} + n := &Attributes{m: make(map[interface{}]interface{}, len(a.m)+1)} for k, v := range a.m { n.m[k] = v } @@ -63,19 +58,20 @@ func (a *Attributes) WithValue(key, value any) *Attributes { // Value returns the value associated with these attributes for key, or nil if // no value is associated with key. The returned value should not be modified. -func (a *Attributes) Value(key any) any { +func (a *Attributes) Value(key interface{}) interface{} { if a == nil { return nil } return a.m[key] } -// Equal returns whether a and o are equivalent. If 'Equal(o any) bool' is -// implemented for a value in the attributes, it is called to determine if the -// value matches the one stored in the other attributes. If Equal is not -// implemented, standard equality is used to determine if the two values are -// equal. Note that some types (e.g. maps) aren't comparable by default, so -// they must be wrapped in a struct, or in an alias type, with Equal defined. +// Equal returns whether a and o are equivalent. If 'Equal(o interface{}) +// bool' is implemented for a value in the attributes, it is called to +// determine if the value matches the one stored in the other attributes. If +// Equal is not implemented, standard equality is used to determine if the two +// values are equal. Note that some types (e.g. maps) aren't comparable by +// default, so they must be wrapped in a struct, or in an alias type, with Equal +// defined. func (a *Attributes) Equal(o *Attributes) bool { if a == nil && o == nil { return true @@ -92,7 +88,7 @@ func (a *Attributes) Equal(o *Attributes) bool { // o missing element of a return false } - if eq, ok := v.(interface{ Equal(o any) bool }); ok { + if eq, ok := v.(interface{ Equal(o interface{}) bool }); ok { if !eq.Equal(ov) { return false } @@ -103,39 +99,3 @@ func (a *Attributes) Equal(o *Attributes) bool { } return true } - -// String prints the attribute map. If any key or values throughout the map -// implement fmt.Stringer, it calls that method and appends. -func (a *Attributes) String() string { - var sb strings.Builder - sb.WriteString("{") - first := true - for k, v := range a.m { - if !first { - sb.WriteString(", ") - } - sb.WriteString(fmt.Sprintf("%q: %q ", str(k), str(v))) - first = false - } - sb.WriteString("}") - return sb.String() -} - -func str(x any) (s string) { - if v, ok := x.(fmt.Stringer); ok { - return fmt.Sprint(v) - } else if v, ok := x.(string); ok { - return v - } - return fmt.Sprintf("<%p>", x) -} - -// MarshalJSON helps implement the json.Marshaler interface, thereby rendering -// the Attributes correctly when printing (via pretty.JSON) structs containing -// Attributes as fields. -// -// Is it impossible to unmarshal attributes from a JSON representation and this -// method is meant only for debugging purposes. 
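The attributes.go hunk above rolls the exported signatures back from `any` to `interface{}` and drops the String/MarshalJSON helpers, but the calling pattern is unchanged either way: callers define their own comparable key types and treat each Attributes value as immutable. A small usage sketch (the key types and values here are invented for illustration):

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/attributes"
)

// regionKey and zoneKey are unexported comparable key types, following the
// package's guidance that users define their own key types rather than
// passing bare strings.
type regionKey struct{}
type zoneKey struct{}

func main() {
	// New stores one key/value pair; WithValue returns a copy with an extra
	// pair and leaves the original Attributes untouched.
	a := attributes.New(regionKey{}, "us-east-1")
	b := a.WithValue(zoneKey{}, "us-east-1a")

	fmt.Println(a.Value(zoneKey{})) // <nil>: a was not modified
	fmt.Println(b.Value(zoneKey{})) // us-east-1a
	fmt.Println(a.Equal(b))         // false: different key sets
}
```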
-func (a *Attributes) MarshalJSON() ([]byte, error) { - return []byte(a.String()), nil -} diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index d79560a2..392b21fb 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -30,7 +30,6 @@ import ( "google.golang.org/grpc/channelz" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" @@ -40,8 +39,6 @@ import ( var ( // m is a map from name to balancer builder. m = make(map[string]Builder) - - logger = grpclog.Component("balancer") ) // Register registers the balancer builder to the balancer map. b.Name @@ -54,12 +51,6 @@ var ( // an init() function), and is not thread-safe. If multiple Balancers are // registered with the same name, the one registered last will take effect. func Register(b Builder) { - if strings.ToLower(b.Name()) != b.Name() { - // TODO: Skip the use of strings.ToLower() to index the map after v1.59 - // is released to switch to case sensitive balancer registry. Also, - // remove this warning and update the docstrings for Register and Get. - logger.Warningf("Balancer registered with name %q. grpc-go will be switching to case sensitive balancer registries soon", b.Name()) - } m[strings.ToLower(b.Name())] = b } @@ -79,12 +70,6 @@ func init() { // Note that the compare is done in a case-insensitive fashion. // If no builder is register with the name, nil will be returned. func Get(name string) Builder { - if strings.ToLower(name) != name { - // TODO: Skip the use of strings.ToLower() to index the map after v1.59 - // is released to switch to case sensitive balancer registry. Also, - // remove this warning and update the docstrings for Register and Get. - logger.Warningf("Balancer retrieved for name %q. grpc-go will be switching to case sensitive balancer registries soon", name) - } if b, ok := m[strings.ToLower(name)]; ok { return b } @@ -120,8 +105,8 @@ type SubConn interface { // // This will trigger a state transition for the SubConn. // - // Deprecated: this method will be removed. Create new SubConns for new - // addresses instead. + // Deprecated: This method is now part of the ClientConn interface and will + // eventually be removed from here. UpdateAddresses([]resolver.Address) // Connect starts the connecting for this SubConn. Connect() @@ -130,13 +115,6 @@ type SubConn interface { // creates a new one and returns it. Returns a close function which must // be called when the Producer is no longer needed. GetOrBuildProducer(ProducerBuilder) (p Producer, close func()) - // Shutdown shuts down the SubConn gracefully. Any started RPCs will be - // allowed to complete. No future calls should be made on the SubConn. - // One final state update will be delivered to the StateListener (or - // UpdateSubConnState; deprecated) with ConnectivityState of Shutdown to - // indicate the shutdown operation. This may be delivered before - // in-progress RPCs are complete and the actual connection is closed. - Shutdown() } // NewSubConnOptions contains options to create new SubConn. @@ -151,11 +129,6 @@ type NewSubConnOptions struct { // HealthCheckEnabled indicates whether health check service should be // enabled on this SubConn HealthCheckEnabled bool - // StateListener is called when the state of the subconn changes. 
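The balancer.go hunk above removes the case-sensitivity warnings but keeps the `strings.ToLower` indexing, so in this version `Register` and `Get` remain effectively case-insensitive. A minimal lookup sketch (the unknown policy name is made up):

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/balancer"
	// Importing the roundrobin package registers its Builder via init().
	_ "google.golang.org/grpc/balancer/roundrobin"
)

func main() {
	fmt.Println(balancer.Get("round_robin") != nil) // true
	fmt.Println(balancer.Get("Round_Robin") != nil) // true: names are lower-cased before lookup
	fmt.Println(balancer.Get("no-such-policy"))     // <nil>
}
```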
If nil, - // Balancer.UpdateSubConnState will be called instead. Will never be - // invoked until after Connect() is called on the SubConn created with - // these options. - StateListener func(SubConnState) } // State contains the balancer's state relevant to the gRPC ClientConn. @@ -177,24 +150,16 @@ type ClientConn interface { // NewSubConn is called by balancer to create a new SubConn. // It doesn't block and wait for the connections to be established. // Behaviors of the SubConn can be controlled by options. - // - // Deprecated: please be aware that in a future version, SubConns will only - // support one address per SubConn. NewSubConn([]resolver.Address, NewSubConnOptions) (SubConn, error) // RemoveSubConn removes the SubConn from ClientConn. // The SubConn will be shutdown. - // - // Deprecated: use SubConn.Shutdown instead. RemoveSubConn(SubConn) // UpdateAddresses updates the addresses used in the passed in SubConn. // gRPC checks if the currently connected address is still in the new list. // If so, the connection will be kept. Else, the connection will be // gracefully closed, and a new connection will be created. // - // This may trigger a state transition for the SubConn. - // - // Deprecated: this method will be removed. Create new SubConns for new - // addresses instead. + // This will trigger a state transition for the SubConn. UpdateAddresses(SubConn, []resolver.Address) // UpdateState notifies gRPC that the balancer's internal state has @@ -285,7 +250,7 @@ type DoneInfo struct { // trailing metadata. // // The only supported type now is *orca_v3.LoadReport. - ServerLoad any + ServerLoad interface{} } var ( @@ -314,14 +279,6 @@ type PickResult struct { // type, Done may not be called. May be nil if the balancer does not wish // to be notified when the RPC completes. Done func(DoneInfo) - - // Metadata provides a way for LB policies to inject arbitrary per-call - // metadata. Any metadata returned here will be merged with existing - // metadata added by the client application. - // - // LB policies with child policies are responsible for propagating metadata - // injected by their children to the ClientConn, as part of Pick(). - Metadata metadata.MD } // TransientFailureError returns e. It exists for backward compatibility and @@ -378,13 +335,9 @@ type Balancer interface { ResolverError(error) // UpdateSubConnState is called by gRPC when the state of a SubConn // changes. - // - // Deprecated: Use NewSubConnOptions.StateListener when creating the - // SubConn instead. UpdateSubConnState(SubConn, SubConnState) - // Close closes the balancer. The balancer is not currently required to - // call SubConn.Shutdown for its existing SubConns; however, this will be - // required in a future release, so it is recommended. + // Close closes the balancer. The balancer is not required to call + // ClientConn.RemoveSubConn for its existing SubConns. Close() } @@ -429,14 +382,15 @@ var ErrBadResolverState = errors.New("bad resolver state") type ProducerBuilder interface { // Build creates a Producer. The first parameter is always a // grpc.ClientConnInterface (a type to allow creating RPCs/streams on the - // associated SubConn), but is declared as `any` to avoid a dependency - // cycle. Should also return a close function that will be called when all - // references to the Producer have been given up. - Build(grpcClientConnInterface any) (p Producer, close func()) + // associated SubConn), but is declared as interface{} to avoid a + // dependency cycle. 
Should also return a close function that will be + // called when all references to the Producer have been given up. + Build(grpcClientConnInterface interface{}) (p Producer, close func()) } // A Producer is a type shared among potentially many consumers. It is // associated with a SubConn, and an implementation will typically contain // other methods to provide additional functionality, e.g. configuration or // subscription registration. -type Producer any +type Producer interface { +} diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go index a7f1eeec..3929c26d 100644 --- a/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -105,12 +105,7 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { addrsSet.Set(a, nil) if _, ok := b.subConns.Get(a); !ok { // a is a new address (not existing in b.subConns). - var sc balancer.SubConn - opts := balancer.NewSubConnOptions{ - HealthCheckEnabled: b.config.HealthCheck, - StateListener: func(scs balancer.SubConnState) { b.updateSubConnState(sc, scs) }, - } - sc, err := b.cc.NewSubConn([]resolver.Address{a}, opts) + sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: b.config.HealthCheck}) if err != nil { logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err) continue @@ -126,10 +121,10 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { sc := sci.(balancer.SubConn) // a was removed by resolver. if _, ok := addrsSet.Get(a); !ok { - sc.Shutdown() + b.cc.RemoveSubConn(sc) b.subConns.Delete(a) // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. - // The entry will be deleted in updateSubConnState. + // The entry will be deleted in UpdateSubConnState. } } // If resolver state contains no addresses, return an error so ClientConn @@ -182,12 +177,7 @@ func (b *baseBalancer) regeneratePicker() { b.picker = b.pickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs}) } -// UpdateSubConnState is a nop because a StateListener is always set in NewSubConn. func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { - logger.Errorf("base.baseBalancer: UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) -} - -func (b *baseBalancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { s := state.ConnectivityState if logger.V(2) { logger.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s) @@ -214,8 +204,8 @@ func (b *baseBalancer) updateSubConnState(sc balancer.SubConn, state balancer.Su case connectivity.Idle: sc.Connect() case connectivity.Shutdown: - // When an address was removed by resolver, b called Shutdown but kept - // the sc's state in scStates. Remove state for this sc here. + // When an address was removed by resolver, b called RemoveSubConn but + // kept the sc's state in scStates. Remove state for this sc here. delete(b.scStates, sc) case connectivity.TransientFailure: // Save error to be reported via picker. @@ -236,7 +226,7 @@ func (b *baseBalancer) updateSubConnState(sc balancer.SubConn, state balancer.Su } // Close is a nop because base balancer doesn't have internal state to clean up, -// and it doesn't need to call Shutdown for the SubConns. +// and it doesn't need to call RemoveSubConn for the SubConns. 
func (b *baseBalancer) Close() { } diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go new file mode 100644 index 00000000..0359956d --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go @@ -0,0 +1,481 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "fmt" + "strings" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/balancer/gracefulswitch" + "google.golang.org/grpc/internal/buffer" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/status" +) + +// ccBalancerWrapper sits between the ClientConn and the Balancer. +// +// ccBalancerWrapper implements methods corresponding to the ones on the +// balancer.Balancer interface. The ClientConn is free to call these methods +// concurrently and the ccBalancerWrapper ensures that calls from the ClientConn +// to the Balancer happen synchronously and in order. +// +// ccBalancerWrapper also implements the balancer.ClientConn interface and is +// passed to the Balancer implementations. It invokes unexported methods on the +// ClientConn to handle these calls from the Balancer. +// +// It uses the gracefulswitch.Balancer internally to ensure that balancer +// switches happen in a graceful manner. +type ccBalancerWrapper struct { + cc *ClientConn + + // Since these fields are accessed only from handleXxx() methods which are + // synchronized by the watcher goroutine, we do not need a mutex to protect + // these fields. + balancer *gracefulswitch.Balancer + curBalancerName string + + updateCh *buffer.Unbounded // Updates written on this channel are processed by watcher(). + resultCh *buffer.Unbounded // Results of calls to UpdateClientConnState() are pushed here. + closed *grpcsync.Event // Indicates if close has been called. + done *grpcsync.Event // Indicates if close has completed its work. +} + +// newCCBalancerWrapper creates a new balancer wrapper. The underlying balancer +// is not created until the switchTo() method is invoked. +func newCCBalancerWrapper(cc *ClientConn, bopts balancer.BuildOptions) *ccBalancerWrapper { + ccb := &ccBalancerWrapper{ + cc: cc, + updateCh: buffer.NewUnbounded(), + resultCh: buffer.NewUnbounded(), + closed: grpcsync.NewEvent(), + done: grpcsync.NewEvent(), + } + go ccb.watcher() + ccb.balancer = gracefulswitch.NewBalancer(ccb, bopts) + return ccb +} + +// The following xxxUpdate structs wrap the arguments received as part of the +// corresponding update. The watcher goroutine uses the 'type' of the update to +// invoke the appropriate handler routine to handle the update. 
+ +type ccStateUpdate struct { + ccs *balancer.ClientConnState +} + +type scStateUpdate struct { + sc balancer.SubConn + state connectivity.State + err error +} + +type exitIdleUpdate struct{} + +type resolverErrorUpdate struct { + err error +} + +type switchToUpdate struct { + name string +} + +type subConnUpdate struct { + acbw *acBalancerWrapper +} + +// watcher is a long-running goroutine which reads updates from a channel and +// invokes corresponding methods on the underlying balancer. It ensures that +// these methods are invoked in a synchronous fashion. It also ensures that +// these methods are invoked in the order in which the updates were received. +func (ccb *ccBalancerWrapper) watcher() { + for { + select { + case u := <-ccb.updateCh.Get(): + ccb.updateCh.Load() + if ccb.closed.HasFired() { + break + } + switch update := u.(type) { + case *ccStateUpdate: + ccb.handleClientConnStateChange(update.ccs) + case *scStateUpdate: + ccb.handleSubConnStateChange(update) + case *exitIdleUpdate: + ccb.handleExitIdle() + case *resolverErrorUpdate: + ccb.handleResolverError(update.err) + case *switchToUpdate: + ccb.handleSwitchTo(update.name) + case *subConnUpdate: + ccb.handleRemoveSubConn(update.acbw) + default: + logger.Errorf("ccBalancerWrapper.watcher: unknown update %+v, type %T", update, update) + } + case <-ccb.closed.Done(): + } + + if ccb.closed.HasFired() { + ccb.handleClose() + return + } + } +} + +// updateClientConnState is invoked by grpc to push a ClientConnState update to +// the underlying balancer. +// +// Unlike other methods invoked by grpc to push updates to the underlying +// balancer, this method cannot simply push the update onto the update channel +// and return. It needs to return the error returned by the underlying balancer +// back to grpc which propagates that to the resolver. +func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { + ccb.updateCh.Put(&ccStateUpdate{ccs: ccs}) + + var res interface{} + select { + case res = <-ccb.resultCh.Get(): + ccb.resultCh.Load() + case <-ccb.closed.Done(): + // Return early if the balancer wrapper is closed while we are waiting for + // the underlying balancer to process a ClientConnState update. + return nil + } + // If the returned error is nil, attempting to type assert to error leads to + // panic. So, this needs to handled separately. + if res == nil { + return nil + } + return res.(error) +} + +// handleClientConnStateChange handles a ClientConnState update from the update +// channel and invokes the appropriate method on the underlying balancer. +// +// If the addresses specified in the update contain addresses of type "grpclb" +// and the selected LB policy is not "grpclb", these addresses will be filtered +// out and ccs will be modified with the updated address list. +func (ccb *ccBalancerWrapper) handleClientConnStateChange(ccs *balancer.ClientConnState) { + if ccb.curBalancerName != grpclbName { + // Filter any grpclb addresses since we don't have the grpclb balancer. + var addrs []resolver.Address + for _, addr := range ccs.ResolverState.Addresses { + if addr.Type == resolver.GRPCLB { + continue + } + addrs = append(addrs, addr) + } + ccs.ResolverState.Addresses = addrs + } + ccb.resultCh.Put(ccb.balancer.UpdateClientConnState(*ccs)) +} + +// updateSubConnState is invoked by grpc to push a subConn state update to the +// underlying balancer. 
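The re-added balancer_conn_wrappers.go above funnels every balancer callback through a single watcher goroutine that reads typed updates from a queue, so the wrapped Balancer never sees concurrent calls. The sketch below shows that serialization pattern in isolation, with a plain buffered channel standing in for the internal buffer.Unbounded type; all names are invented for the example:

```go
package main

import "fmt"

// update is the common type for everything pushed at the serializer; the
// watcher switches on the concrete type, much as ccBalancerWrapper.watcher
// does with its *ccStateUpdate and *scStateUpdate values.
type update interface{}

type stateUpdate struct{ state string }
type resolverErrorUpdate struct{ err error }

type serializer struct {
	updates chan update
	done    chan struct{}
}

func newSerializer() *serializer {
	s := &serializer{updates: make(chan update, 16), done: make(chan struct{})}
	// A single goroutine drains the channel, so handlers run one at a time
	// and in the order the updates were queued.
	go s.watch()
	return s
}

func (s *serializer) watch() {
	for u := range s.updates {
		switch v := u.(type) {
		case stateUpdate:
			fmt.Println("subconn state:", v.state)
		case resolverErrorUpdate:
			fmt.Println("resolver error:", v.err)
		default:
			fmt.Printf("unknown update type %T\n", v)
		}
	}
	close(s.done)
}

// put queues an update; closeAndWait stops the watcher after the queue drains.
func (s *serializer) put(u update) { s.updates <- u }

func (s *serializer) closeAndWait() {
	close(s.updates)
	<-s.done
}

func main() {
	s := newSerializer()
	s.put(stateUpdate{state: "CONNECTING"})
	s.put(stateUpdate{state: "READY"})
	s.closeAndWait()
}
```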
+func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connectivity.State, err error) { + // When updating addresses for a SubConn, if the address in use is not in + // the new addresses, the old ac will be tearDown() and a new ac will be + // created. tearDown() generates a state change with Shutdown state, we + // don't want the balancer to receive this state change. So before + // tearDown() on the old ac, ac.acbw (acWrapper) will be set to nil, and + // this function will be called with (nil, Shutdown). We don't need to call + // balancer method in this case. + if sc == nil { + return + } + ccb.updateCh.Put(&scStateUpdate{ + sc: sc, + state: s, + err: err, + }) +} + +// handleSubConnStateChange handles a SubConnState update from the update +// channel and invokes the appropriate method on the underlying balancer. +func (ccb *ccBalancerWrapper) handleSubConnStateChange(update *scStateUpdate) { + ccb.balancer.UpdateSubConnState(update.sc, balancer.SubConnState{ConnectivityState: update.state, ConnectionError: update.err}) +} + +func (ccb *ccBalancerWrapper) exitIdle() { + ccb.updateCh.Put(&exitIdleUpdate{}) +} + +func (ccb *ccBalancerWrapper) handleExitIdle() { + if ccb.cc.GetState() != connectivity.Idle { + return + } + ccb.balancer.ExitIdle() +} + +func (ccb *ccBalancerWrapper) resolverError(err error) { + ccb.updateCh.Put(&resolverErrorUpdate{err: err}) +} + +func (ccb *ccBalancerWrapper) handleResolverError(err error) { + ccb.balancer.ResolverError(err) +} + +// switchTo is invoked by grpc to instruct the balancer wrapper to switch to the +// LB policy identified by name. +// +// ClientConn calls newCCBalancerWrapper() at creation time. Upon receipt of the +// first good update from the name resolver, it determines the LB policy to use +// and invokes the switchTo() method. Upon receipt of every subsequent update +// from the name resolver, it invokes this method. +// +// the ccBalancerWrapper keeps track of the current LB policy name, and skips +// the graceful balancer switching process if the name does not change. +func (ccb *ccBalancerWrapper) switchTo(name string) { + ccb.updateCh.Put(&switchToUpdate{name: name}) +} + +// handleSwitchTo handles a balancer switch update from the update channel. It +// calls the SwitchTo() method on the gracefulswitch.Balancer with a +// balancer.Builder corresponding to name. If no balancer.Builder is registered +// for the given name, it uses the default LB policy which is "pick_first". +func (ccb *ccBalancerWrapper) handleSwitchTo(name string) { + // TODO: Other languages use case-insensitive balancer registries. We should + // switch as well. See: https://github.com/grpc/grpc-go/issues/5288. + if strings.EqualFold(ccb.curBalancerName, name) { + return + } + + // TODO: Ensure that name is a registered LB policy when we get here. + // We currently only validate the `loadBalancingConfig` field. We need to do + // the same for the `loadBalancingPolicy` field and reject the service config + // if the specified policy is not registered. 
+ builder := balancer.Get(name) + if builder == nil { + channelz.Warningf(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q, since the specified LB policy %q was not registered", PickFirstBalancerName, name) + builder = newPickfirstBuilder() + } else { + channelz.Infof(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q", name) + } + + if err := ccb.balancer.SwitchTo(builder); err != nil { + channelz.Errorf(logger, ccb.cc.channelzID, "Channel failed to build new LB policy %q: %v", name, err) + return + } + ccb.curBalancerName = builder.Name() +} + +// handleRemoveSucConn handles a request from the underlying balancer to remove +// a subConn. +// +// See comments in RemoveSubConn() for more details. +func (ccb *ccBalancerWrapper) handleRemoveSubConn(acbw *acBalancerWrapper) { + ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) +} + +func (ccb *ccBalancerWrapper) close() { + ccb.closed.Fire() + <-ccb.done.Done() +} + +func (ccb *ccBalancerWrapper) handleClose() { + ccb.balancer.Close() + ccb.done.Fire() +} + +func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + if len(addrs) <= 0 { + return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list") + } + ac, err := ccb.cc.newAddrConn(addrs, opts) + if err != nil { + channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err) + return nil, err + } + acbw := &acBalancerWrapper{ac: ac, producers: make(map[balancer.ProducerBuilder]*refCountedProducer)} + acbw.ac.mu.Lock() + ac.acbw = acbw + acbw.ac.mu.Unlock() + return acbw, nil +} + +func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { + // Before we switched the ccBalancerWrapper to use gracefulswitch.Balancer, it + // was required to handle the RemoveSubConn() method asynchronously by pushing + // the update onto the update channel. This was done to avoid a deadlock as + // switchBalancer() was holding cc.mu when calling Close() on the old + // balancer, which would in turn call RemoveSubConn(). + // + // With the use of gracefulswitch.Balancer in ccBalancerWrapper, handling this + // asynchronously is probably not required anymore since the switchTo() method + // handles the balancer switch by pushing the update onto the channel. + // TODO(easwars): Handle this inline. + acbw, ok := sc.(*acBalancerWrapper) + if !ok { + return + } + ccb.updateCh.Put(&subConnUpdate{acbw: acbw}) +} + +func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { + acbw, ok := sc.(*acBalancerWrapper) + if !ok { + return + } + acbw.UpdateAddresses(addrs) +} + +func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { + // Update picker before updating state. Even though the ordering here does + // not matter, it can lead to multiple calls of Pick in the common start-up + // case where we wait for ready and then perform an RPC. If the picker is + // updated later, we could call the "connecting" picker when the state is + // updated, and then call the "ready" picker after the picker gets updated. + ccb.cc.blockingpicker.updatePicker(s.Picker) + ccb.cc.csMgr.updateState(s.ConnectivityState) +} + +func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) { + ccb.cc.resolveNow(o) +} + +func (ccb *ccBalancerWrapper) Target() string { + return ccb.cc.target +} + +// acBalancerWrapper is a wrapper on top of ac for balancers. +// It implements balancer.SubConn interface. 
+type acBalancerWrapper struct { + mu sync.Mutex + ac *addrConn + producers map[balancer.ProducerBuilder]*refCountedProducer +} + +func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { + acbw.mu.Lock() + defer acbw.mu.Unlock() + if len(addrs) <= 0 { + acbw.ac.cc.removeAddrConn(acbw.ac, errConnDrain) + return + } + if !acbw.ac.tryUpdateAddrs(addrs) { + cc := acbw.ac.cc + opts := acbw.ac.scopts + acbw.ac.mu.Lock() + // Set old ac.acbw to nil so the Shutdown state update will be ignored + // by balancer. + // + // TODO(bar) the state transition could be wrong when tearDown() old ac + // and creating new ac, fix the transition. + acbw.ac.acbw = nil + acbw.ac.mu.Unlock() + acState := acbw.ac.getState() + acbw.ac.cc.removeAddrConn(acbw.ac, errConnDrain) + + if acState == connectivity.Shutdown { + return + } + + newAC, err := cc.newAddrConn(addrs, opts) + if err != nil { + channelz.Warningf(logger, acbw.ac.channelzID, "acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err) + return + } + acbw.ac = newAC + newAC.mu.Lock() + newAC.acbw = acbw + newAC.mu.Unlock() + if acState != connectivity.Idle { + go newAC.connect() + } + } +} + +func (acbw *acBalancerWrapper) Connect() { + acbw.mu.Lock() + defer acbw.mu.Unlock() + go acbw.ac.connect() +} + +func (acbw *acBalancerWrapper) getAddrConn() *addrConn { + acbw.mu.Lock() + defer acbw.mu.Unlock() + return acbw.ac +} + +var errSubConnNotReady = status.Error(codes.Unavailable, "SubConn not currently connected") + +// NewStream begins a streaming RPC on the addrConn. If the addrConn is not +// ready, returns errSubConnNotReady. +func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { + transport := acbw.ac.getReadyTransport() + if transport == nil { + return nil, errSubConnNotReady + } + return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...) +} + +// Invoke performs a unary RPC. If the addrConn is not ready, returns +// errSubConnNotReady. +func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error { + cs, err := acbw.NewStream(ctx, unaryStreamDesc, method, opts...) + if err != nil { + return err + } + if err := cs.SendMsg(args); err != nil { + return err + } + return cs.RecvMsg(reply) +} + +type refCountedProducer struct { + producer balancer.Producer + refs int // number of current refs to the producer + close func() // underlying producer's close function +} + +func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (balancer.Producer, func()) { + acbw.mu.Lock() + defer acbw.mu.Unlock() + + // Look up existing producer from this builder. + pData := acbw.producers[pb] + if pData == nil { + // Not found; create a new one and add it to the producers map. + p, close := pb.Build(acbw) + pData = &refCountedProducer{producer: p, close: close} + acbw.producers[pb] = pData + } + // Account for this new reference. + pData.refs++ + + // Return a cleanup function wrapped in a OnceFunc to remove this reference + // and delete the refCountedProducer from the map if the total reference + // count goes to zero. 
+ unref := func() { + acbw.mu.Lock() + pData.refs-- + if pData.refs == 0 { + defer pData.close() // Run outside the acbw mutex + delete(acbw.producers, pb) + } + acbw.mu.Unlock() + } + return pData.producer, grpcsync.OnceFunc(unref) +} diff --git a/vendor/google.golang.org/grpc/balancer_wrapper.go b/vendor/google.golang.org/grpc/balancer_wrapper.go deleted file mode 100644 index b5e30cff..00000000 --- a/vendor/google.golang.org/grpc/balancer_wrapper.go +++ /dev/null @@ -1,380 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "context" - "fmt" - "strings" - "sync" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/internal/balancer/gracefulswitch" - "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/grpcsync" - "google.golang.org/grpc/resolver" -) - -// ccBalancerWrapper sits between the ClientConn and the Balancer. -// -// ccBalancerWrapper implements methods corresponding to the ones on the -// balancer.Balancer interface. The ClientConn is free to call these methods -// concurrently and the ccBalancerWrapper ensures that calls from the ClientConn -// to the Balancer happen in order by performing them in the serializer, without -// any mutexes held. -// -// ccBalancerWrapper also implements the balancer.ClientConn interface and is -// passed to the Balancer implementations. It invokes unexported methods on the -// ClientConn to handle these calls from the Balancer. -// -// It uses the gracefulswitch.Balancer internally to ensure that balancer -// switches happen in a graceful manner. -type ccBalancerWrapper struct { - // The following fields are initialized when the wrapper is created and are - // read-only afterwards, and therefore can be accessed without a mutex. - cc *ClientConn - opts balancer.BuildOptions - serializer *grpcsync.CallbackSerializer - serializerCancel context.CancelFunc - - // The following fields are only accessed within the serializer or during - // initialization. - curBalancerName string - balancer *gracefulswitch.Balancer - - // The following field is protected by mu. Caller must take cc.mu before - // taking mu. - mu sync.Mutex - closed bool -} - -// newCCBalancerWrapper creates a new balancer wrapper in idle state. The -// underlying balancer is not created until the switchTo() method is invoked. 
-func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper { - ctx, cancel := context.WithCancel(cc.ctx) - ccb := &ccBalancerWrapper{ - cc: cc, - opts: balancer.BuildOptions{ - DialCreds: cc.dopts.copts.TransportCredentials, - CredsBundle: cc.dopts.copts.CredsBundle, - Dialer: cc.dopts.copts.Dialer, - Authority: cc.authority, - CustomUserAgent: cc.dopts.copts.UserAgent, - ChannelzParentID: cc.channelzID, - Target: cc.parsedTarget, - }, - serializer: grpcsync.NewCallbackSerializer(ctx), - serializerCancel: cancel, - } - ccb.balancer = gracefulswitch.NewBalancer(ccb, ccb.opts) - return ccb -} - -// updateClientConnState is invoked by grpc to push a ClientConnState update to -// the underlying balancer. This is always executed from the serializer, so -// it is safe to call into the balancer here. -func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { - errCh := make(chan error) - ok := ccb.serializer.Schedule(func(ctx context.Context) { - defer close(errCh) - if ctx.Err() != nil || ccb.balancer == nil { - return - } - err := ccb.balancer.UpdateClientConnState(*ccs) - if logger.V(2) && err != nil { - logger.Infof("error from balancer.UpdateClientConnState: %v", err) - } - errCh <- err - }) - if !ok { - return nil - } - return <-errCh -} - -// resolverError is invoked by grpc to push a resolver error to the underlying -// balancer. The call to the balancer is executed from the serializer. -func (ccb *ccBalancerWrapper) resolverError(err error) { - ccb.serializer.Schedule(func(ctx context.Context) { - if ctx.Err() != nil || ccb.balancer == nil { - return - } - ccb.balancer.ResolverError(err) - }) -} - -// switchTo is invoked by grpc to instruct the balancer wrapper to switch to the -// LB policy identified by name. -// -// ClientConn calls newCCBalancerWrapper() at creation time. Upon receipt of the -// first good update from the name resolver, it determines the LB policy to use -// and invokes the switchTo() method. Upon receipt of every subsequent update -// from the name resolver, it invokes this method. -// -// the ccBalancerWrapper keeps track of the current LB policy name, and skips -// the graceful balancer switching process if the name does not change. -func (ccb *ccBalancerWrapper) switchTo(name string) { - ccb.serializer.Schedule(func(ctx context.Context) { - if ctx.Err() != nil || ccb.balancer == nil { - return - } - // TODO: Other languages use case-sensitive balancer registries. We should - // switch as well. See: https://github.com/grpc/grpc-go/issues/5288. - if strings.EqualFold(ccb.curBalancerName, name) { - return - } - ccb.buildLoadBalancingPolicy(name) - }) -} - -// buildLoadBalancingPolicy performs the following: -// - retrieve a balancer builder for the given name. Use the default LB -// policy, pick_first, if no LB policy with name is found in the registry. -// - instruct the gracefulswitch balancer to switch to the above builder. This -// will actually build the new balancer. -// - update the `curBalancerName` field -// -// Must be called from a serializer callback. 
-func (ccb *ccBalancerWrapper) buildLoadBalancingPolicy(name string) { - builder := balancer.Get(name) - if builder == nil { - channelz.Warningf(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q, since the specified LB policy %q was not registered", PickFirstBalancerName, name) - builder = newPickfirstBuilder() - } else { - channelz.Infof(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q", name) - } - - if err := ccb.balancer.SwitchTo(builder); err != nil { - channelz.Errorf(logger, ccb.cc.channelzID, "Channel failed to build new LB policy %q: %v", name, err) - return - } - ccb.curBalancerName = builder.Name() -} - -// close initiates async shutdown of the wrapper. cc.mu must be held when -// calling this function. To determine the wrapper has finished shutting down, -// the channel should block on ccb.serializer.Done() without cc.mu held. -func (ccb *ccBalancerWrapper) close() { - ccb.mu.Lock() - ccb.closed = true - ccb.mu.Unlock() - channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: closing") - ccb.serializer.Schedule(func(context.Context) { - if ccb.balancer == nil { - return - } - ccb.balancer.Close() - ccb.balancer = nil - }) - ccb.serializerCancel() -} - -// exitIdle invokes the balancer's exitIdle method in the serializer. -func (ccb *ccBalancerWrapper) exitIdle() { - ccb.serializer.Schedule(func(ctx context.Context) { - if ctx.Err() != nil || ccb.balancer == nil { - return - } - ccb.balancer.ExitIdle() - }) -} - -func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { - ccb.cc.mu.Lock() - defer ccb.cc.mu.Unlock() - - ccb.mu.Lock() - if ccb.closed { - ccb.mu.Unlock() - return nil, fmt.Errorf("balancer is being closed; no new SubConns allowed") - } - ccb.mu.Unlock() - - if len(addrs) == 0 { - return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list") - } - ac, err := ccb.cc.newAddrConnLocked(addrs, opts) - if err != nil { - channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err) - return nil, err - } - acbw := &acBalancerWrapper{ - ccb: ccb, - ac: ac, - producers: make(map[balancer.ProducerBuilder]*refCountedProducer), - stateListener: opts.StateListener, - } - ac.acbw = acbw - return acbw, nil -} - -func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { - // The graceful switch balancer will never call this. - logger.Errorf("ccb RemoveSubConn(%v) called unexpectedly, sc") -} - -func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { - acbw, ok := sc.(*acBalancerWrapper) - if !ok { - return - } - acbw.UpdateAddresses(addrs) -} - -func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { - ccb.cc.mu.Lock() - defer ccb.cc.mu.Unlock() - - ccb.mu.Lock() - if ccb.closed { - ccb.mu.Unlock() - return - } - ccb.mu.Unlock() - // Update picker before updating state. Even though the ordering here does - // not matter, it can lead to multiple calls of Pick in the common start-up - // case where we wait for ready and then perform an RPC. If the picker is - // updated later, we could call the "connecting" picker when the state is - // updated, and then call the "ready" picker after the picker gets updated. - - // Note that there is no need to check if the balancer wrapper was closed, - // as we know the graceful switch LB policy will not call cc if it has been - // closed. 
- ccb.cc.pickerWrapper.updatePicker(s.Picker) - ccb.cc.csMgr.updateState(s.ConnectivityState) -} - -func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) { - ccb.cc.mu.RLock() - defer ccb.cc.mu.RUnlock() - - ccb.mu.Lock() - if ccb.closed { - ccb.mu.Unlock() - return - } - ccb.mu.Unlock() - ccb.cc.resolveNowLocked(o) -} - -func (ccb *ccBalancerWrapper) Target() string { - return ccb.cc.target -} - -// acBalancerWrapper is a wrapper on top of ac for balancers. -// It implements balancer.SubConn interface. -type acBalancerWrapper struct { - ac *addrConn // read-only - ccb *ccBalancerWrapper // read-only - stateListener func(balancer.SubConnState) - - mu sync.Mutex - producers map[balancer.ProducerBuilder]*refCountedProducer -} - -// updateState is invoked by grpc to push a subConn state update to the -// underlying balancer. -func (acbw *acBalancerWrapper) updateState(s connectivity.State, err error) { - acbw.ccb.serializer.Schedule(func(ctx context.Context) { - if ctx.Err() != nil || acbw.ccb.balancer == nil { - return - } - // Even though it is optional for balancers, gracefulswitch ensures - // opts.StateListener is set, so this cannot ever be nil. - // TODO: delete this comment when UpdateSubConnState is removed. - acbw.stateListener(balancer.SubConnState{ConnectivityState: s, ConnectionError: err}) - }) -} - -func (acbw *acBalancerWrapper) String() string { - return fmt.Sprintf("SubConn(id:%d)", acbw.ac.channelzID.Int()) -} - -func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { - acbw.ac.updateAddrs(addrs) -} - -func (acbw *acBalancerWrapper) Connect() { - go acbw.ac.connect() -} - -func (acbw *acBalancerWrapper) Shutdown() { - acbw.ccb.cc.removeAddrConn(acbw.ac, errConnDrain) -} - -// NewStream begins a streaming RPC on the addrConn. If the addrConn is not -// ready, blocks until it is or ctx expires. Returns an error when the context -// expires or the addrConn is shut down. -func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { - transport, err := acbw.ac.getTransport(ctx) - if err != nil { - return nil, err - } - return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...) -} - -// Invoke performs a unary RPC. If the addrConn is not ready, returns -// errSubConnNotReady. -func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args any, reply any, opts ...CallOption) error { - cs, err := acbw.NewStream(ctx, unaryStreamDesc, method, opts...) - if err != nil { - return err - } - if err := cs.SendMsg(args); err != nil { - return err - } - return cs.RecvMsg(reply) -} - -type refCountedProducer struct { - producer balancer.Producer - refs int // number of current refs to the producer - close func() // underlying producer's close function -} - -func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (balancer.Producer, func()) { - acbw.mu.Lock() - defer acbw.mu.Unlock() - - // Look up existing producer from this builder. - pData := acbw.producers[pb] - if pData == nil { - // Not found; create a new one and add it to the producers map. - p, close := pb.Build(acbw) - pData = &refCountedProducer{producer: p, close: close} - acbw.producers[pb] = pData - } - // Account for this new reference. - pData.refs++ - - // Return a cleanup function wrapped in a OnceFunc to remove this reference - // and delete the refCountedProducer from the map if the total reference - // count goes to zero. 
- unref := func() { - acbw.mu.Lock() - pData.refs-- - if pData.refs == 0 { - defer pData.close() // Run outside the acbw mutex - delete(acbw.producers, pb) - } - acbw.mu.Unlock() - } - return pData.producer, grpcsync.OnceFunc(unref) -} diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index e9e97d45..64a232f2 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,13 +18,14 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.31.0 -// protoc v4.22.0 +// protoc-gen-go v1.25.0 +// protoc v3.14.0 // source: grpc/binlog/v1/binarylog.proto package grpc_binarylog_v1 import ( + proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" durationpb "google.golang.org/protobuf/types/known/durationpb" @@ -40,6 +41,10 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 + // Enumerates the type of event // Note the terminology is different from the RPC semantics // definition, but the same meaning is expressed here. @@ -430,7 +435,7 @@ type ClientHeader struct { MethodName string `protobuf:"bytes,2,opt,name=method_name,json=methodName,proto3" json:"method_name,omitempty"` // A single process may be used to run multiple virtual // servers with different identities. - // The authority is the name of such a server identity. + // The authority is the name of such a server identitiy. // It is typically a portion of the URI in the form of // or : . Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"` diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go index 788c89c1..9e20e4d3 100644 --- a/vendor/google.golang.org/grpc/call.go +++ b/vendor/google.golang.org/grpc/call.go @@ -26,7 +26,7 @@ import ( // received. This is typically called by generated code. // // All errors returned by Invoke are compatible with the status package. -func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply any, opts ...CallOption) error { +func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error { // allow interceptor to see all applicable call options, which means those // configured as defaults from dial option as well as per-call options opts = combine(cc.dopts.callOptions, opts) @@ -56,13 +56,13 @@ func combine(o1 []CallOption, o2 []CallOption) []CallOption { // received. This is typically called by generated code. // // DEPRECATED: Use ClientConn.Invoke instead. -func Invoke(ctx context.Context, method string, args, reply any, cc *ClientConn, opts ...CallOption) error { +func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error { return cc.Invoke(ctx, method, args, reply, opts...) 
} var unaryStreamDesc = &StreamDesc{ServerStreams: false, ClientStreams: false} -func invoke(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error { +func invoke(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error { cs, err := newClientStream(ctx, unaryStreamDesc, cc, method, opts...) if err != nil { return err diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index f6e815e6..422639c7 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -24,6 +24,7 @@ import ( "fmt" "math" "net/url" + "reflect" "strings" "sync" "sync/atomic" @@ -33,11 +34,10 @@ import ( "google.golang.org/grpc/balancer/base" "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/internal" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" - "google.golang.org/grpc/internal/idle" - "google.golang.org/grpc/internal/pretty" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" @@ -46,14 +46,16 @@ import ( "google.golang.org/grpc/status" _ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin. + _ "google.golang.org/grpc/internal/resolver/dns" // To register dns resolver. _ "google.golang.org/grpc/internal/resolver/passthrough" // To register passthrough resolver. _ "google.golang.org/grpc/internal/resolver/unix" // To register unix resolver. - _ "google.golang.org/grpc/resolver/dns" // To register dns resolver. ) const ( // minimum time to give a connection to complete minConnectTimeout = 20 * time.Second + // must match grpclbName in grpclb/grpclb.go + grpclbName = "grpclb" ) var ( @@ -67,9 +69,6 @@ var ( errConnDrain = errors.New("grpc: the connection is drained") // errConnClosing indicates that the connection is closing. errConnClosing = errors.New("grpc: the connection is closing") - // errConnIdling indicates the the connection is being closed as the channel - // is moving to an idle mode due to inactivity. - errConnIdling = errors.New("grpc: the connection is closing due to channel idleness") // invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default // service config. invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid" @@ -117,80 +116,6 @@ func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*ires }, nil } -// newClient returns a new client in idle mode. -func newClient(target string, opts ...DialOption) (conn *ClientConn, err error) { - cc := &ClientConn{ - target: target, - conns: make(map[*addrConn]struct{}), - dopts: defaultDialOptions(), - czData: new(channelzData), - } - - cc.retryThrottler.Store((*retryThrottler)(nil)) - cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) - cc.ctx, cc.cancel = context.WithCancel(context.Background()) - - // Apply dial options. 
- disableGlobalOpts := false - for _, opt := range opts { - if _, ok := opt.(*disableGlobalDialOptions); ok { - disableGlobalOpts = true - break - } - } - - if !disableGlobalOpts { - for _, opt := range globalDialOptions { - opt.apply(&cc.dopts) - } - } - - for _, opt := range opts { - opt.apply(&cc.dopts) - } - chainUnaryClientInterceptors(cc) - chainStreamClientInterceptors(cc) - - if err := cc.validateTransportCredentials(); err != nil { - return nil, err - } - - if cc.dopts.defaultServiceConfigRawJSON != nil { - scpr := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON) - if scpr.Err != nil { - return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, scpr.Err) - } - cc.dopts.defaultServiceConfig, _ = scpr.Config.(*ServiceConfig) - } - cc.mkp = cc.dopts.copts.KeepaliveParams - - // Register ClientConn with channelz. - cc.channelzRegistration(target) - - // TODO: Ideally it should be impossible to error from this function after - // channelz registration. This will require removing some channelz logs - // from the following functions that can error. Errors can be returned to - // the user, and successful logs can be emitted here, after the checks have - // passed and channelz is subsequently registered. - - // Determine the resolver to use. - if err := cc.parseTargetAndFindResolver(); err != nil { - channelz.RemoveEntry(cc.channelzID) - return nil, err - } - if err = cc.determineAuthority(); err != nil { - channelz.RemoveEntry(cc.channelzID) - return nil, err - } - - cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelzID) - cc.pickerWrapper = newPickerWrapper(cc.dopts.copts.StatsHandlers) - - cc.initIdleStateLocked() // Safe to call without the lock, since nothing else has a reference to cc. - cc.idlenessMgr = idle.NewManager((*idler)(cc), cc.dopts.idleTimeout) - return cc, nil -} - // DialContext creates a client connection to the given target. By default, it's // a non-blocking dial (the function won't wait for connections to be // established, and connecting happens in the background). To make it a blocking @@ -208,21 +133,29 @@ func newClient(target string, opts ...DialOption) (conn *ClientConn, err error) // https://github.com/grpc/grpc/blob/master/doc/naming.md. // e.g. to use dns resolver, a "dns:///" prefix should be applied to the target. func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { - cc, err := newClient(target, opts...) - if err != nil { - return nil, err + cc := &ClientConn{ + target: target, + csMgr: &connectivityStateManager{}, + conns: make(map[*addrConn]struct{}), + dopts: defaultDialOptions(), + blockingpicker: newPickerWrapper(), + czData: new(channelzData), + firstResolveEvent: grpcsync.NewEvent(), } + cc.retryThrottler.Store((*retryThrottler)(nil)) + cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) + cc.ctx, cc.cancel = context.WithCancel(context.Background()) - // We start the channel off in idle mode, but kick it out of idle now, - // instead of waiting for the first RPC. Other gRPC implementations do wait - // for the first RPC to kick the channel out of idle. But doing so would be - // a major behavior change for our users who are used to seeing the channel - // active after Dial. - // - // Taking this approach of kicking it out of idle at the end of this method - // allows us to share the code between channel creation and exiting idle - // mode. This will also make it easy for us to switch to starting the - // channel off in idle, i.e. 
by making newClient exported. + for _, opt := range extraDialOptions { + opt.apply(&cc.dopts) + } + + for _, opt := range opts { + opt.apply(&cc.dopts) + } + + chainUnaryClientInterceptors(cc) + chainStreamClientInterceptors(cc) defer func() { if err != nil { @@ -230,14 +163,55 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } }() - // This creates the name resolver, load balancer, etc. - if err := cc.idlenessMgr.ExitIdleMode(); err != nil { - return nil, err + pid := cc.dopts.channelzParentID + cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, pid, target) + ted := &channelz.TraceEventDesc{ + Desc: "Channel created", + Severity: channelz.CtInfo, + } + if cc.dopts.channelzParentID != nil { + ted.Parent = &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID.Int()), + Severity: channelz.CtInfo, + } + } + channelz.AddTraceEvent(logger, cc.channelzID, 1, ted) + cc.csMgr.channelzID = cc.channelzID + + if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil { + return nil, errNoTransportSecurity + } + if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil { + return nil, errTransportCredsAndBundle + } + if cc.dopts.copts.CredsBundle != nil && cc.dopts.copts.CredsBundle.TransportCredentials() == nil { + return nil, errNoTransportCredsInBundle + } + transportCreds := cc.dopts.copts.TransportCredentials + if transportCreds == nil { + transportCreds = cc.dopts.copts.CredsBundle.TransportCredentials() + } + if transportCreds.Info().SecurityProtocol == "insecure" { + for _, cd := range cc.dopts.copts.PerRPCCredentials { + if cd.RequireTransportSecurity() { + return nil, errTransportCredentialsMissing + } + } + } + + if cc.dopts.defaultServiceConfigRawJSON != nil { + scpr := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON) + if scpr.Err != nil { + return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, scpr.Err) + } + cc.dopts.defaultServiceConfig, _ = scpr.Config.(*ServiceConfig) } + cc.mkp = cc.dopts.copts.KeepaliveParams - // Return now for non-blocking dials. - if !cc.dopts.block { - return cc, nil + if cc.dopts.copts.UserAgent != "" { + cc.dopts.copts.UserAgent += " " + grpcUA + } else { + cc.dopts.copts.UserAgent = grpcUA } if cc.dopts.timeout > 0 { @@ -260,173 +234,101 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } }() - // A blocking dial blocks until the clientConn is ready. - for { - s := cc.GetState() - if s == connectivity.Idle { - cc.Connect() - } - if s == connectivity.Ready { - return cc, nil - } else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure { - if err = cc.connectionError(); err != nil { - terr, ok := err.(interface { - Temporary() bool - }) - if ok && !terr.Temporary() { - return nil, err - } - } - } - if !cc.WaitForStateChange(ctx, s) { - // ctx got timeout or canceled. - if err = cc.connectionError(); err != nil && cc.dopts.returnLastError { - return nil, err + scSet := false + if cc.dopts.scChan != nil { + // Try to get an initial service config. + select { + case sc, ok := <-cc.dopts.scChan: + if ok { + cc.sc = &sc + cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc}) + scSet = true } - return nil, ctx.Err() + default: } } -} + if cc.dopts.bs == nil { + cc.dopts.bs = backoff.DefaultExponential + } -// addTraceEvent is a helper method to add a trace event on the channel. 
If the -// channel is a nested one, the same event is also added on the parent channel. -func (cc *ClientConn) addTraceEvent(msg string) { - ted := &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Channel %s", msg), - Severity: channelz.CtInfo, + // Determine the resolver to use. + resolverBuilder, err := cc.parseTargetAndFindResolver() + if err != nil { + return nil, err } - if cc.dopts.channelzParentID != nil { - ted.Parent = &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested channel(id:%d) %s", cc.channelzID.Int(), msg), - Severity: channelz.CtInfo, - } + cc.authority, err = determineAuthority(cc.parsedTarget.Endpoint, cc.target, cc.dopts) + if err != nil { + return nil, err } - channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) -} - -type idler ClientConn - -func (i *idler) EnterIdleMode() { - (*ClientConn)(i).enterIdleMode() -} - -func (i *idler) ExitIdleMode() error { - return (*ClientConn)(i).exitIdleMode() -} + channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority) -// exitIdleMode moves the channel out of idle mode by recreating the name -// resolver and load balancer. This should never be called directly; use -// cc.idlenessMgr.ExitIdleMode instead. -func (cc *ClientConn) exitIdleMode() (err error) { - cc.mu.Lock() - if cc.conns == nil { - cc.mu.Unlock() - return errConnClosing + if cc.dopts.scChan != nil && !scSet { + // Blocking wait for the initial service config. + select { + case sc, ok := <-cc.dopts.scChan: + if ok { + cc.sc = &sc + cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc}) + } + case <-ctx.Done(): + return nil, ctx.Err() + } } - cc.mu.Unlock() - - // This needs to be called without cc.mu because this builds a new resolver - // which might update state or report error inline, which would then need to - // acquire cc.mu. - if err := cc.resolverWrapper.start(); err != nil { - return err + if cc.dopts.scChan != nil { + go cc.scWatcher() } - cc.addTraceEvent("exiting idle mode") - return nil -} - -// initIdleStateLocked initializes common state to how it should be while idle. -func (cc *ClientConn) initIdleStateLocked() { - cc.resolverWrapper = newCCResolverWrapper(cc) - cc.balancerWrapper = newCCBalancerWrapper(cc) - cc.firstResolveEvent = grpcsync.NewEvent() - // cc.conns == nil is a proxy for the ClientConn being closed. So, instead - // of setting it to nil here, we recreate the map. This also means that we - // don't have to do this when exiting idle mode. - cc.conns = make(map[*addrConn]struct{}) -} - -// enterIdleMode puts the channel in idle mode, and as part of it shuts down the -// name resolver, load balancer, and any subchannels. This should never be -// called directly; use cc.idlenessMgr.EnterIdleMode instead. 
-func (cc *ClientConn) enterIdleMode() { - cc.mu.Lock() - - if cc.conns == nil { - cc.mu.Unlock() - return + var credsClone credentials.TransportCredentials + if creds := cc.dopts.copts.TransportCredentials; creds != nil { + credsClone = creds.Clone() } + cc.balancerWrapper = newCCBalancerWrapper(cc, balancer.BuildOptions{ + DialCreds: credsClone, + CredsBundle: cc.dopts.copts.CredsBundle, + Dialer: cc.dopts.copts.Dialer, + Authority: cc.authority, + CustomUserAgent: cc.dopts.copts.UserAgent, + ChannelzParentID: cc.channelzID, + Target: cc.parsedTarget, + }) - conns := cc.conns - - rWrapper := cc.resolverWrapper - rWrapper.close() - cc.pickerWrapper.reset() - bWrapper := cc.balancerWrapper - bWrapper.close() - cc.csMgr.updateState(connectivity.Idle) - cc.addTraceEvent("entering idle mode") - - cc.initIdleStateLocked() - - cc.mu.Unlock() - - // Block until the name resolver and LB policy are closed. - <-rWrapper.serializer.Done() - <-bWrapper.serializer.Done() - - // Close all subchannels after the LB policy is closed. - for ac := range conns { - ac.tearDown(errConnIdling) + // Build the resolver. + rWrapper, err := newCCResolverWrapper(cc, resolverBuilder) + if err != nil { + return nil, fmt.Errorf("failed to build resolver: %v", err) } -} + cc.mu.Lock() + cc.resolverWrapper = rWrapper + cc.mu.Unlock() -// validateTransportCredentials performs a series of checks on the configured -// transport credentials. It returns a non-nil error if any of these conditions -// are met: -// - no transport creds and no creds bundle is configured -// - both transport creds and creds bundle are configured -// - creds bundle is configured, but it lacks a transport credentials -// - insecure transport creds configured alongside call creds that require -// transport level security -// -// If none of the above conditions are met, the configured credentials are -// deemed valid and a nil error is returned. -func (cc *ClientConn) validateTransportCredentials() error { - if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil { - return errNoTransportSecurity - } - if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil { - return errTransportCredsAndBundle - } - if cc.dopts.copts.CredsBundle != nil && cc.dopts.copts.CredsBundle.TransportCredentials() == nil { - return errNoTransportCredsInBundle - } - transportCreds := cc.dopts.copts.TransportCredentials - if transportCreds == nil { - transportCreds = cc.dopts.copts.CredsBundle.TransportCredentials() - } - if transportCreds.Info().SecurityProtocol == "insecure" { - for _, cd := range cc.dopts.copts.PerRPCCredentials { - if cd.RequireTransportSecurity() { - return errTransportCredentialsMissing + // A blocking dial blocks until the clientConn is ready. + if cc.dopts.block { + for { + cc.Connect() + s := cc.GetState() + if s == connectivity.Ready { + break + } else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure { + if err = cc.connectionError(); err != nil { + terr, ok := err.(interface { + Temporary() bool + }) + if ok && !terr.Temporary() { + return nil, err + } + } + } + if !cc.WaitForStateChange(ctx, s) { + // ctx got timeout or canceled. + if err = cc.connectionError(); err != nil && cc.dopts.returnLastError { + return nil, err + } + return nil, ctx.Err() } } } - return nil -} -// channelzRegistration registers the newly created ClientConn with channelz and -// stores the returned identifier in `cc.channelzID` and `cc.csMgr.channelzID`. 
-// A channelz trace event is emitted for ClientConn creation. If the newly -// created ClientConn is a nested one, i.e a valid parent ClientConn ID is -// specified via a dial option, the trace event is also added to the parent. -// -// Doesn't grab cc.mu as this method is expected to be called only at Dial time. -func (cc *ClientConn) channelzRegistration(target string) { - cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) - cc.addTraceEvent("created") + return cc, nil } // chainUnaryClientInterceptors chains all unary client interceptors into one. @@ -443,7 +345,7 @@ func chainUnaryClientInterceptors(cc *ClientConn) { } else if len(interceptors) == 1 { chainedInt = interceptors[0] } else { - chainedInt = func(ctx context.Context, method string, req, reply any, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error { + chainedInt = func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error { return interceptors[0](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, 0, invoker), opts...) } } @@ -455,7 +357,7 @@ func getChainUnaryInvoker(interceptors []UnaryClientInterceptor, curr int, final if curr == len(interceptors)-1 { return finalInvoker } - return func(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error { + return func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error { return interceptors[curr+1](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, curr+1, finalInvoker), opts...) } } @@ -491,27 +393,13 @@ func getChainStreamer(interceptors []StreamClientInterceptor, curr int, finalStr } } -// newConnectivityStateManager creates an connectivityStateManager with -// the specified id. -func newConnectivityStateManager(ctx context.Context, id *channelz.Identifier) *connectivityStateManager { - return &connectivityStateManager{ - channelzID: id, - pubSub: grpcsync.NewPubSub(ctx), - } -} - // connectivityStateManager keeps the connectivity.State of ClientConn. // This struct will eventually be exported so the balancers can access it. -// -// TODO: If possible, get rid of the `connectivityStateManager` type, and -// provide this functionality using the `PubSub`, to avoid keeping track of -// the connectivity state at two places. type connectivityStateManager struct { mu sync.Mutex state connectivity.State notifyChan chan struct{} channelzID *channelz.Identifier - pubSub *grpcsync.PubSub } // updateState updates the connectivity.State of ClientConn. @@ -527,8 +415,6 @@ func (csm *connectivityStateManager) updateState(state connectivity.State) { return } csm.state = state - csm.pubSub.Publish(state) - channelz.Infof(logger, csm.channelzID, "Channel Connectivity change to %v", state) if csm.notifyChan != nil { // There are other goroutines waiting on this channel. @@ -558,7 +444,7 @@ func (csm *connectivityStateManager) getNotifyChan() <-chan struct{} { type ClientConnInterface interface { // Invoke performs a unary RPC and returns after the response is received // into reply. - Invoke(ctx context.Context, method string, args any, reply any, opts ...CallOption) error + Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error // NewStream begins a streaming RPC. 
NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) } @@ -588,31 +474,27 @@ type ClientConn struct { authority string // See determineAuthority(). dopts dialOptions // Default and user specified dial options. channelzID *channelz.Identifier // Channelz identifier for the channel. - resolverBuilder resolver.Builder // See parseTargetAndFindResolver(). - idlenessMgr *idle.Manager + balancerWrapper *ccBalancerWrapper // Uses gracefulswitch.balancer underneath. // The following provide their own synchronization, and therefore don't // require cc.mu to be held to access them. csMgr *connectivityStateManager - pickerWrapper *pickerWrapper + blockingpicker *pickerWrapper safeConfigSelector iresolver.SafeConfigSelector czData *channelzData retryThrottler atomic.Value // Updated from service config. + // firstResolveEvent is used to track whether the name resolver sent us at + // least one update. RPCs block on this event. + firstResolveEvent *grpcsync.Event + // mu protects the following fields. // TODO: split mu so the same mutex isn't used for everything. mu sync.RWMutex - resolverWrapper *ccResolverWrapper // Always recreated whenever entering idle to simplify Close. - balancerWrapper *ccBalancerWrapper // Always recreated whenever entering idle to simplify Close. + resolverWrapper *ccResolverWrapper // Initialized in Dial; cleared in Close. sc *ServiceConfig // Latest service config received from the resolver. conns map[*addrConn]struct{} // Set to nil on close. mkp keepalive.ClientParameters // May be updated upon receipt of a GoAway. - // firstResolveEvent is used to track whether the name resolver sent us at - // least one update. RPCs block on this event. May be accessed without mu - // if we know we cannot be asked to enter idle mode while accessing it (e.g. - // when the idle manager has already been closed, or if we are already - // entering idle mode). - firstResolveEvent *grpcsync.Event lceMu sync.Mutex // protects lastConnectionError lastConnectionError error @@ -657,15 +539,26 @@ func (cc *ClientConn) GetState() connectivity.State { // Notice: This API is EXPERIMENTAL and may be changed or removed in a later // release. func (cc *ClientConn) Connect() { - if err := cc.idlenessMgr.ExitIdleMode(); err != nil { - cc.addTraceEvent(err.Error()) - return - } - // If the ClientConn was not in idle mode, we need to call ExitIdle on the - // LB policy so that connections can be created. - cc.mu.Lock() cc.balancerWrapper.exitIdle() - cc.mu.Unlock() +} + +func (cc *ClientConn) scWatcher() { + for { + select { + case sc, ok := <-cc.dopts.scChan: + if !ok { + return + } + cc.mu.Lock() + // TODO: load balance policy runtime change is ignored. + // We may revisit this decision in the future. 
+ cc.sc = &sc + cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc}) + cc.mu.Unlock() + case <-cc.ctx.Done(): + return + } + } } // waitForResolvedAddrs blocks until the resolver has provided addresses or the @@ -695,16 +588,6 @@ func init() { panic(fmt.Sprintf("impossible error parsing empty service config: %v", cfg.Err)) } emptyServiceConfig = cfg.Config.(*ServiceConfig) - - internal.SubscribeToConnectivityStateChanges = func(cc *ClientConn, s grpcsync.Subscriber) func() { - return cc.csMgr.pubSub.Subscribe(s) - } - internal.EnterIdleModeForTesting = func(cc *ClientConn) { - cc.idlenessMgr.EnterIdleModeForTesting() - } - internal.ExitIdleModeForTesting = func(cc *ClientConn) error { - return cc.idlenessMgr.ExitIdleMode() - } } func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) { @@ -719,8 +602,9 @@ func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) { } } -func (cc *ClientConn) updateResolverStateAndUnlock(s resolver.State, err error) error { +func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { defer cc.firstResolveEvent.Fire() + cc.mu.Lock() // Check if the ClientConn is already closed. Some fields (e.g. // balancerWrapper) are set to nil when closing the ClientConn, and could // cause nil pointer panic if we don't have this check. @@ -766,7 +650,7 @@ func (cc *ClientConn) updateResolverStateAndUnlock(s resolver.State, err error) if cc.sc == nil { // Apply the failing LB only if we haven't received valid service config // from the name resolver in the past. - cc.applyFailingLBLocked(s.ServiceConfig) + cc.applyFailingLB(s.ServiceConfig) cc.mu.Unlock() return ret } @@ -788,13 +672,15 @@ func (cc *ClientConn) updateResolverStateAndUnlock(s resolver.State, err error) return ret } -// applyFailingLBLocked is akin to configuring an LB policy on the channel which +// applyFailingLB is akin to configuring an LB policy on the channel which // always fails RPCs. Here, an actual LB policy is not configured, but an always // erroring picker is configured, which returns errors with information about // what was invalid in the received service config. A config selector with no // service config is configured, and the connectivity state of the channel is // set to TransientFailure. -func (cc *ClientConn) applyFailingLBLocked(sc *serviceconfig.ParseResult) { +// +// Caller must hold cc.mu. +func (cc *ClientConn) applyFailingLB(sc *serviceconfig.ParseResult) { var err error if sc.Err != nil { err = status.Errorf(codes.Unavailable, "error parsing service config: %v", sc.Err) @@ -802,43 +688,34 @@ func (cc *ClientConn) applyFailingLBLocked(sc *serviceconfig.ParseResult) { err = status.Errorf(codes.Unavailable, "illegal service config type: %T", sc.Config) } cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) - cc.pickerWrapper.updatePicker(base.NewErrPicker(err)) + cc.blockingpicker.updatePicker(base.NewErrPicker(err)) cc.csMgr.updateState(connectivity.TransientFailure) } -// Makes a copy of the input addresses slice and clears out the balancer -// attributes field. Addresses are passed during subconn creation and address -// update operations. In both cases, we will clear the balancer attributes by -// calling this function, and therefore we will be able to use the Equal method -// provided by the resolver.Address type for comparison. 
-func copyAddressesWithoutBalancerAttributes(in []resolver.Address) []resolver.Address { - out := make([]resolver.Address, len(in)) - for i := range in { - out[i] = in[i] - out[i].BalancerAttributes = nil - } - return out +func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { + cc.balancerWrapper.updateSubConnState(sc, s, err) } -// newAddrConnLocked creates an addrConn for addrs and adds it to cc.conns. +// newAddrConn creates an addrConn for addrs and adds it to cc.conns. // // Caller needs to make sure len(addrs) > 0. -func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) { - if cc.conns == nil { - return nil, ErrClientConnClosing - } - +func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) { ac := &addrConn{ state: connectivity.Idle, cc: cc, - addrs: copyAddressesWithoutBalancerAttributes(addrs), + addrs: addrs, scopts: opts, dopts: cc.dopts, czData: new(channelzData), resetBackoff: make(chan struct{}), - stateChan: make(chan struct{}), } ac.ctx, ac.cancel = context.WithCancel(cc.ctx) + // Track ac in cc. This needs to be done before any getTransport(...) is called. + cc.mu.Lock() + defer cc.mu.Unlock() + if cc.conns == nil { + return nil, ErrClientConnClosing + } var err error ac.channelzID, err = channelz.RegisterSubChannel(ac, cc.channelzID, "") @@ -854,7 +731,6 @@ func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer. }, }) - // Track ac in cc. This needs to be done before any getTransport(...) is called. cc.conns[ac] = struct{}{} return ac, nil } @@ -912,19 +788,16 @@ func (cc *ClientConn) incrCallsFailed() { func (ac *addrConn) connect() error { ac.mu.Lock() if ac.state == connectivity.Shutdown { - if logger.V(2) { - logger.Infof("connect called on shutdown addrConn; ignoring.") - } ac.mu.Unlock() return errConnClosing } if ac.state != connectivity.Idle { - if logger.V(2) { - logger.Infof("connect called on addrConn in non-idle state (%v); ignoring.", ac.state) - } ac.mu.Unlock() return nil } + // Update connectivity state within the lock to prevent subsequent or + // concurrent calls from resetting the transport more than once. + ac.updateConnectivityState(connectivity.Connecting, nil) ac.mu.Unlock() ac.resetTransport() @@ -943,63 +816,58 @@ func equalAddresses(a, b []resolver.Address) bool { return true } -// updateAddrs updates ac.addrs with the new addresses list and handles active -// connections or connection attempts. -func (ac *addrConn) updateAddrs(addrs []resolver.Address) { +// tryUpdateAddrs tries to update ac.addrs with the new addresses list. +// +// If ac is TransientFailure, it updates ac.addrs and returns true. The updated +// addresses will be picked up by retry in the next iteration after backoff. +// +// If ac is Shutdown or Idle, it updates ac.addrs and returns true. +// +// If the addresses is the same as the old list, it does nothing and returns +// true. +// +// If ac is Connecting, it returns false. The caller should tear down the ac and +// create a new one. Note that the backoff will be reset when this happens. +// +// If ac is Ready, it checks whether current connected address of ac is in the +// new addrs list. +// - If true, it updates ac.addrs and returns true. The ac will keep using +// the existing connection. +// - If false, it does nothing and returns false. 
+func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { ac.mu.Lock() - channelz.Infof(logger, ac.channelzID, "addrConn: updateAddrs curAddr: %v, addrs: %v", pretty.ToJSON(ac.curAddr), pretty.ToJSON(addrs)) - - addrs = copyAddressesWithoutBalancerAttributes(addrs) - if equalAddresses(ac.addrs, addrs) { - ac.mu.Unlock() - return - } - - ac.addrs = addrs - + defer ac.mu.Unlock() + channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs) if ac.state == connectivity.Shutdown || ac.state == connectivity.TransientFailure || ac.state == connectivity.Idle { - // We were not connecting, so do nothing but update the addresses. - ac.mu.Unlock() - return + ac.addrs = addrs + return true } - if ac.state == connectivity.Ready { - // Try to find the connected address. - for _, a := range addrs { - a.ServerName = ac.cc.getServerName(a) - if a.Equal(ac.curAddr) { - // We are connected to a valid address, so do nothing but - // update the addresses. - ac.mu.Unlock() - return - } - } + if equalAddresses(ac.addrs, addrs) { + return true } - // We are either connected to the wrong address or currently connecting. - // Stop the current iteration and restart. - - ac.cancel() - ac.ctx, ac.cancel = context.WithCancel(ac.cc.ctx) - - // We have to defer here because GracefulClose => onClose, which requires - // locking ac.mu. - if ac.transport != nil { - defer ac.transport.GracefulClose() - ac.transport = nil + if ac.state == connectivity.Connecting { + return false } - if len(addrs) == 0 { - ac.updateConnectivityState(connectivity.Idle, nil) + // ac.state is Ready, try to find the connected address. + var curAddrFound bool + for _, a := range addrs { + a.ServerName = ac.cc.getServerName(a) + if reflect.DeepEqual(ac.curAddr, a) { + curAddrFound = true + break + } + } + channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound) + if curAddrFound { + ac.addrs = addrs } - ac.mu.Unlock() - - // Since we were connecting/connected, we should start a new connection - // attempt. - go ac.resetTransport() + return curAddrFound } // getServerName determines the serverName to be used in the connection @@ -1060,8 +928,8 @@ func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { return cc.sc.healthCheckConfig } -func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, balancer.PickResult, error) { - return cc.pickerWrapper.pick(ctx, failfast, balancer.PickInfo{ +func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) { + return cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{ Ctx: ctx, FullMethodName: method, }) @@ -1090,25 +958,35 @@ func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSel } var newBalancerName string - if cc.sc == nil || (cc.sc.lbConfig == nil && cc.sc.LB == nil) { - // No service config or no LB policy specified in config. 
- newBalancerName = PickFirstBalancerName - } else if cc.sc.lbConfig != nil { + if cc.sc != nil && cc.sc.lbConfig != nil { newBalancerName = cc.sc.lbConfig.name - } else { // cc.sc.LB != nil - newBalancerName = *cc.sc.LB + } else { + var isGRPCLB bool + for _, a := range addrs { + if a.Type == resolver.GRPCLB { + isGRPCLB = true + break + } + } + if isGRPCLB { + newBalancerName = grpclbName + } else if cc.sc != nil && cc.sc.LB != nil { + newBalancerName = *cc.sc.LB + } else { + newBalancerName = PickFirstBalancerName + } } cc.balancerWrapper.switchTo(newBalancerName) } func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) { cc.mu.RLock() - cc.resolverWrapper.resolveNow(o) + r := cc.resolverWrapper cc.mu.RUnlock() -} - -func (cc *ClientConn) resolveNowLocked(o resolver.ResolveNowOptions) { - cc.resolverWrapper.resolveNow(o) + if r == nil { + return + } + go r.resolveNow(o) } // ResetConnectBackoff wakes up all subchannels in transient failure and causes @@ -1135,42 +1013,46 @@ func (cc *ClientConn) ResetConnectBackoff() { // Close tears down the ClientConn and all underlying connections. func (cc *ClientConn) Close() error { - defer func() { - cc.cancel() - <-cc.csMgr.pubSub.Done() - }() - - // Prevent calls to enter/exit idle immediately, and ensure we are not - // currently entering/exiting idle mode. - cc.idlenessMgr.Close() + defer cc.cancel() cc.mu.Lock() if cc.conns == nil { cc.mu.Unlock() return ErrClientConnClosing } - conns := cc.conns cc.conns = nil cc.csMgr.updateState(connectivity.Shutdown) - // We can safely unlock and continue to access all fields now as - // cc.conns==nil, preventing any further operations on cc. + rWrapper := cc.resolverWrapper + cc.resolverWrapper = nil + bWrapper := cc.balancerWrapper cc.mu.Unlock() - cc.resolverWrapper.close() // The order of closing matters here since the balancer wrapper assumes the // picker is closed before it is closed. - cc.pickerWrapper.close() - cc.balancerWrapper.close() - - <-cc.resolverWrapper.serializer.Done() - <-cc.balancerWrapper.serializer.Done() + cc.blockingpicker.close() + if bWrapper != nil { + bWrapper.close() + } + if rWrapper != nil { + rWrapper.close() + } for ac := range conns { ac.tearDown(ErrClientConnClosing) } - cc.addTraceEvent("deleted") + ted := &channelz.TraceEventDesc{ + Desc: "Channel deleted", + Severity: channelz.CtInfo, + } + if cc.dopts.channelzParentID != nil { + ted.Parent = &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID.Int()), + Severity: channelz.CtInfo, + } + } + channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add // trace reference to the entity being deleted, and thus prevent it from being // deleted right away. @@ -1186,7 +1068,7 @@ type addrConn struct { cc *ClientConn dopts dialOptions - acbw *acBalancerWrapper + acbw balancer.SubConn scopts balancer.NewSubConnOptions // transport is set when there's a viable transport (note: ac state may not be READY as LB channel @@ -1200,8 +1082,7 @@ type addrConn struct { addrs []resolver.Address // All addresses that the resolver resolved to. // Use updateConnectivityState for updating addrConn's connectivity state. - state connectivity.State - stateChan chan struct{} // closed and recreated on every state change. + state connectivity.State backoffIdx int // Needs to be stateful for resetConnectBackoff. 
resetBackoff chan struct{} @@ -1215,16 +1096,9 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) if ac.state == s { return } - // When changing states, reset the state change channel. - close(ac.stateChan) - ac.stateChan = make(chan struct{}) ac.state = s - if lastErr == nil { - channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v", s) - } else { - channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v, last error: %s", s, lastErr) - } - ac.acbw.updateState(s, lastErr) + channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v", s) + ac.cc.handleSubConnStateChange(ac.acbw, s, lastErr) } // adjustParams updates parameters used to create transports upon @@ -1243,8 +1117,7 @@ func (ac *addrConn) adjustParams(r transport.GoAwayReason) { func (ac *addrConn) resetTransport() { ac.mu.Lock() - acCtx := ac.ctx - if acCtx.Err() != nil { + if ac.state == connectivity.Shutdown { ac.mu.Unlock() return } @@ -1272,16 +1145,15 @@ func (ac *addrConn) resetTransport() { ac.updateConnectivityState(connectivity.Connecting, nil) ac.mu.Unlock() - if err := ac.tryAllAddrs(acCtx, addrs, connectDeadline); err != nil { + if err := ac.tryAllAddrs(addrs, connectDeadline); err != nil { ac.cc.resolveNow(resolver.ResolveNowOptions{}) + // After exhausting all addresses, the addrConn enters + // TRANSIENT_FAILURE. ac.mu.Lock() - if acCtx.Err() != nil { - // addrConn was torn down. + if ac.state == connectivity.Shutdown { ac.mu.Unlock() return } - // After exhausting all addresses, the addrConn enters - // TRANSIENT_FAILURE. ac.updateConnectivityState(connectivity.TransientFailure, err) // Backoff. @@ -1296,13 +1168,13 @@ func (ac *addrConn) resetTransport() { ac.mu.Unlock() case <-b: timer.Stop() - case <-acCtx.Done(): + case <-ac.ctx.Done(): timer.Stop() return } ac.mu.Lock() - if acCtx.Err() == nil { + if ac.state != connectivity.Shutdown { ac.updateConnectivityState(connectivity.Idle, err) } ac.mu.Unlock() @@ -1317,13 +1189,14 @@ func (ac *addrConn) resetTransport() { // tryAllAddrs tries to creates a connection to the addresses, and stop when at // the first successful one. It returns an error if no address was successfully // connected, or updates ac appropriately with the new transport. -func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, connectDeadline time.Time) error { +func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.Time) error { var firstConnErr error for _, addr := range addrs { - if ctx.Err() != nil { + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() return errConnClosing } - ac.mu.Lock() ac.cc.mu.RLock() ac.dopts.copts.KeepaliveParams = ac.cc.mkp @@ -1337,7 +1210,7 @@ func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, c channelz.Infof(logger, ac.channelzID, "Subchannel picks a new address %q to connect", addr.Addr) - err := ac.createTransport(ctx, addr, copts, connectDeadline) + err := ac.createTransport(addr, copts, connectDeadline) if err == nil { return nil } @@ -1354,20 +1227,17 @@ func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, c // createTransport creates a connection to addr. It returns an error if the // address was not successfully connected, or updates ac appropriately with the // new transport. 
-func (ac *addrConn) createTransport(ctx context.Context, addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error { +func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error { addr.ServerName = ac.cc.getServerName(addr) - hctx, hcancel := context.WithCancel(ctx) + hctx, hcancel := context.WithCancel(ac.ctx) - onClose := func(r transport.GoAwayReason) { + onClose := grpcsync.OnceFunc(func() { ac.mu.Lock() defer ac.mu.Unlock() - // adjust params based on GoAwayReason - ac.adjustParams(r) - if ctx.Err() != nil { - // Already shut down or connection attempt canceled. tearDown() or - // updateAddrs() already cleared the transport and canceled hctx - // via ac.ctx, and we expected this connection to be closed, so do - // nothing here. + if ac.state == connectivity.Shutdown { + // Already shut down. tearDown() already cleared the transport and + // canceled hctx via ac.ctx, and we expected this connection to be + // closed, so do nothing here. return } hcancel() @@ -1384,17 +1254,20 @@ func (ac *addrConn) createTransport(ctx context.Context, addr resolver.Address, // Always go idle and wait for the LB policy to initiate a new // connection attempt. ac.updateConnectivityState(connectivity.Idle, nil) + }) + onGoAway := func(r transport.GoAwayReason) { + ac.mu.Lock() + ac.adjustParams(r) + ac.mu.Unlock() + onClose() } - connectCtx, cancel := context.WithDeadline(ctx, connectDeadline) + connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline) defer cancel() copts.ChannelzParentID = ac.channelzID - newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onClose) + newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onGoAway, onClose) if err != nil { - if logger.V(2) { - logger.Infof("Creating new client transport to %q: %v", addr, err) - } // newTr is either nil, or closed. hcancel() channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s. Err: %v", addr, err) @@ -1403,7 +1276,7 @@ func (ac *addrConn) createTransport(ctx context.Context, addr resolver.Address, ac.mu.Lock() defer ac.mu.Unlock() - if ctx.Err() != nil { + if ac.state == connectivity.Shutdown { // This can happen if the subConn was removed while in `Connecting` // state. tearDown() would have set the state to `Shutdown`, but // would not have closed the transport since ac.transport would not @@ -1415,9 +1288,6 @@ func (ac *addrConn) createTransport(ctx context.Context, addr resolver.Address, // The error we pass to Close() is immaterial since there are no open // streams at this point, so no trailers with error details will be sent // out. We just need to pass a non-nil error. - // - // This can also happen when updateAddrs is called during a connection - // attempt. go newTr.Close(transport.ErrConnClosing) return nil } @@ -1477,7 +1347,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { // Set up the health check helper functions. 
currentTr := ac.transport - newStream := func(method string) (any, error) { + newStream := func(method string) (interface{}, error) { ac.mu.Lock() if ac.transport != currentTr { ac.mu.Unlock() @@ -1501,7 +1371,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { if status.Code(err) == codes.Unimplemented { channelz.Error(logger, ac.channelzID, "Subchannel health check is unimplemented at server side, thus health check is disabled") } else { - channelz.Errorf(logger, ac.channelzID, "Health checking failed: %v", err) + channelz.Errorf(logger, ac.channelzID, "HealthCheckFunc exits with unexpected error %v", err) } } }() @@ -1525,29 +1395,6 @@ func (ac *addrConn) getReadyTransport() transport.ClientTransport { return nil } -// getTransport waits until the addrconn is ready and returns the transport. -// If the context expires first, returns an appropriate status. If the -// addrConn is stopped first, returns an Unavailable status error. -func (ac *addrConn) getTransport(ctx context.Context) (transport.ClientTransport, error) { - for ctx.Err() == nil { - ac.mu.Lock() - t, state, sc := ac.transport, ac.state, ac.stateChan - ac.mu.Unlock() - if state == connectivity.Ready { - return t, nil - } - if state == connectivity.Shutdown { - return nil, status.Errorf(codes.Unavailable, "SubConn shutting down") - } - - select { - case <-ctx.Done(): - case <-sc: - } - } - return nil, status.FromContextError(ctx.Err()).Err() -} - // tearDown starts to tear down the addrConn. // // Note that tearDown doesn't remove ac from ac.cc.conns, so the addrConn struct @@ -1565,7 +1412,16 @@ func (ac *addrConn) tearDown(err error) { ac.updateConnectivityState(connectivity.Shutdown, nil) ac.cancel() ac.curAddr = resolver.Address{} - + if err == errConnDrain && curTr != nil { + // GracefulClose(...) may be executed multiple times when + // i) receiving multiple GoAway frames from the server; or + // ii) there are concurrent name resolver/Balancer triggered + // address removal and GoAway. + // We have to unlock and re-lock here because GracefulClose => Close => onClose, which requires locking ac.mu. + ac.mu.Unlock() + curTr.GracefulClose() + ac.mu.Lock() + } channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ Desc: "Subchannel deleted", Severity: channelz.CtInfo, @@ -1579,29 +1435,6 @@ func (ac *addrConn) tearDown(err error) { // being deleted right away. channelz.RemoveEntry(ac.channelzID) ac.mu.Unlock() - - // We have to release the lock before the call to GracefulClose/Close here - // because both of them call onClose(), which requires locking ac.mu. - if curTr != nil { - if err == errConnDrain { - // Close the transport gracefully when the subConn is being shutdown. - // - // GracefulClose() may be executed multiple times if: - // - multiple GoAway frames are received from the server - // - there are concurrent name resolver or balancer triggered - // address removal and GoAway - curTr.GracefulClose() - } else { - // Hard close the transport when the channel is entering idle or is - // being shutdown. In the case where the channel is being shutdown, - // closing of transports is also taken care of by cancelation of cc.ctx. - // But in the case where the channel is entering idle, we need to - // explicitly close the transports here. Instead of distinguishing - // between these two cases, it is simpler to close the transport - // unconditionally here. 
- curTr.Close(err) - } - } } func (ac *addrConn) getState() connectivity.State { @@ -1689,9 +1522,6 @@ func (c *channelzChannel) ChannelzMetric() *channelz.ChannelInternalMetric { // referenced by users. var ErrClientConnTimeout = errors.New("grpc: timed out when dialing") -// getResolver finds the scheme in the cc's resolvers or the global registry. -// scheme should always be lowercase (typically by virtue of url.Parse() -// performing proper RFC3986 behavior). func (cc *ClientConn) getResolver(scheme string) resolver.Builder { for _, rb := range cc.dopts.resolvers { if scheme == rb.Scheme() { @@ -1713,14 +1543,7 @@ func (cc *ClientConn) connectionError() error { return cc.lastConnectionError } -// parseTargetAndFindResolver parses the user's dial target and stores the -// parsed target in `cc.parsedTarget`. -// -// The resolver to use is determined based on the scheme in the parsed target -// and the same is stored in `cc.resolverBuilder`. -// -// Doesn't grab cc.mu as this method is expected to be called only at Dial time. -func (cc *ClientConn) parseTargetAndFindResolver() error { +func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) { channelz.Infof(logger, cc.channelzID, "original dial target is: %q", cc.target) var rb resolver.Builder @@ -1728,12 +1551,11 @@ func (cc *ClientConn) parseTargetAndFindResolver() error { if err != nil { channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", cc.target, err) } else { - channelz.Infof(logger, cc.channelzID, "parsed dial target is: %#v", parsedTarget) + channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) rb = cc.getResolver(parsedTarget.URL.Scheme) if rb != nil { cc.parsedTarget = parsedTarget - cc.resolverBuilder = rb - return nil + return rb, nil } } @@ -1748,98 +1570,51 @@ func (cc *ClientConn) parseTargetAndFindResolver() error { parsedTarget, err = parseTarget(canonicalTarget) if err != nil { channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", canonicalTarget, err) - return err + return nil, err } channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) rb = cc.getResolver(parsedTarget.URL.Scheme) if rb == nil { - return fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme) + return nil, fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme) } cc.parsedTarget = parsedTarget - cc.resolverBuilder = rb - return nil + return rb, nil } // parseTarget uses RFC 3986 semantics to parse the given target into a -// resolver.Target struct containing url. Query params are stripped from the -// endpoint. +// resolver.Target struct containing scheme, authority and endpoint. Query +// params are stripped from the endpoint. func parseTarget(target string) (resolver.Target, error) { u, err := url.Parse(target) if err != nil { return resolver.Target{}, err } - - return resolver.Target{URL: *u}, nil -} - -func encodeAuthority(authority string) string { - const upperhex = "0123456789ABCDEF" - - // Return for characters that must be escaped as per - // Valid chars are mentioned here: - // https://datatracker.ietf.org/doc/html/rfc3986#section-3.2 - shouldEscape := func(c byte) bool { - // Alphanum are always allowed. 
- if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' { - return false - } - switch c { - case '-', '_', '.', '~': // Unreserved characters - return false - case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // Subdelim characters - return false - case ':', '[', ']', '@': // Authority related delimeters - return false - } - // Everything else must be escaped. - return true - } - - hexCount := 0 - for i := 0; i < len(authority); i++ { - c := authority[i] - if shouldEscape(c) { - hexCount++ - } - } - - if hexCount == 0 { - return authority - } - - required := len(authority) + 2*hexCount - t := make([]byte, required) - - j := 0 - // This logic is a barebones version of escape in the go net/url library. - for i := 0; i < len(authority); i++ { - switch c := authority[i]; { - case shouldEscape(c): - t[j] = '%' - t[j+1] = upperhex[c>>4] - t[j+2] = upperhex[c&15] - j += 3 - default: - t[j] = authority[i] - j++ - } - } - return string(t) + // For targets of the form "[scheme]://[authority]/endpoint, the endpoint + // value returned from url.Parse() contains a leading "/". Although this is + // in accordance with RFC 3986, we do not want to break existing resolver + // implementations which expect the endpoint without the leading "/". So, we + // end up stripping the leading "/" here. But this will result in an + // incorrect parsing for something like "unix:///path/to/socket". Since we + // own the "unix" resolver, we can workaround in the unix resolver by using + // the `URL` field instead of the `Endpoint` field. + endpoint := u.Path + if endpoint == "" { + endpoint = u.Opaque + } + endpoint = strings.TrimPrefix(endpoint, "/") + return resolver.Target{ + Scheme: u.Scheme, + Authority: u.Host, + Endpoint: endpoint, + URL: *u, + }, nil } // Determine channel authority. The order of precedence is as follows: // - user specified authority override using `WithAuthority` dial option // - creds' notion of server name for the authentication handshake // - endpoint from dial target of the form "scheme://[authority]/endpoint" -// -// Stores the determined authority in `cc.authority`. -// -// Returns a non-nil error if the authority returned by the transport -// credentials do not match the authority configured through the dial option. -// -// Doesn't grab cc.mu as this method is expected to be called only at Dial time. -func (cc *ClientConn) determineAuthority() error { - dopts := cc.dopts +func determineAuthority(endpoint, target string, dopts dialOptions) (string, error) { // Historically, we had two options for users to specify the serverName or // authority for a channel. One was through the transport credentials // (either in its constructor, or through the OverrideServerName() method). 
@@ -1856,21 +1631,25 @@ func (cc *ClientConn) determineAuthority() error { } authorityFromDialOption := dopts.authority if (authorityFromCreds != "" && authorityFromDialOption != "") && authorityFromCreds != authorityFromDialOption { - return fmt.Errorf("ClientConn's authority from transport creds %q and dial option %q don't match", authorityFromCreds, authorityFromDialOption) - } - - endpoint := cc.parsedTarget.Endpoint() - if authorityFromDialOption != "" { - cc.authority = authorityFromDialOption - } else if authorityFromCreds != "" { - cc.authority = authorityFromCreds - } else if auth, ok := cc.resolverBuilder.(resolver.AuthorityOverrider); ok { - cc.authority = auth.OverrideAuthority(cc.parsedTarget) - } else if strings.HasPrefix(endpoint, ":") { - cc.authority = "localhost" + endpoint - } else { - cc.authority = encodeAuthority(endpoint) + return "", fmt.Errorf("ClientConn's authority from transport creds %q and dial option %q don't match", authorityFromCreds, authorityFromDialOption) + } + + switch { + case authorityFromDialOption != "": + return authorityFromDialOption, nil + case authorityFromCreds != "": + return authorityFromCreds, nil + case strings.HasPrefix(target, "unix:") || strings.HasPrefix(target, "unix-abstract:"): + // TODO: remove when the unix resolver implements optional interface to + // return channel authority. + return "localhost", nil + case strings.HasPrefix(endpoint, ":"): + return "localhost" + endpoint, nil + default: + // TODO: Define an optional interface on the resolver builder to return + // the channel authority given the user's dial target. For resolvers + // which don't implement this interface, we will use the endpoint from + // "scheme://authority/endpoint" as the default authority. + return endpoint, nil } - channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority) - return nil } diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go index 411e3dfd..12977654 100644 --- a/vendor/google.golang.org/grpc/codec.go +++ b/vendor/google.golang.org/grpc/codec.go @@ -27,8 +27,8 @@ import ( // omits the name/string, which vary between the two and are not needed for // anything besides the registry in the encoding package. type baseCodec interface { - Marshal(v any) ([]byte, error) - Unmarshal(data []byte, v any) error + Marshal(v interface{}) ([]byte, error) + Unmarshal(data []byte, v interface{}) error } var _ baseCodec = Codec(nil) @@ -41,9 +41,9 @@ var _ baseCodec = encoding.Codec(nil) // Deprecated: use encoding.Codec instead. type Codec interface { // Marshal returns the wire format of v. - Marshal(v any) ([]byte, error) + Marshal(v interface{}) ([]byte, error) // Unmarshal parses the wire format into v. - Unmarshal(data []byte, v any) error + Unmarshal(data []byte, v interface{}) error // String returns the name of the Codec implementation. This is unused by // gRPC. 
String() string diff --git a/vendor/google.golang.org/grpc/codes/code_string.go b/vendor/google.golang.org/grpc/codes/code_string.go index 934fac2b..0b206a57 100644 --- a/vendor/google.golang.org/grpc/codes/code_string.go +++ b/vendor/google.golang.org/grpc/codes/code_string.go @@ -18,15 +18,7 @@ package codes -import ( - "strconv" - - "google.golang.org/grpc/internal" -) - -func init() { - internal.CanonicalString = canonicalString -} +import "strconv" func (c Code) String() string { switch c { @@ -68,44 +60,3 @@ func (c Code) String() string { return "Code(" + strconv.FormatInt(int64(c), 10) + ")" } } - -func canonicalString(c Code) string { - switch c { - case OK: - return "OK" - case Canceled: - return "CANCELLED" - case Unknown: - return "UNKNOWN" - case InvalidArgument: - return "INVALID_ARGUMENT" - case DeadlineExceeded: - return "DEADLINE_EXCEEDED" - case NotFound: - return "NOT_FOUND" - case AlreadyExists: - return "ALREADY_EXISTS" - case PermissionDenied: - return "PERMISSION_DENIED" - case ResourceExhausted: - return "RESOURCE_EXHAUSTED" - case FailedPrecondition: - return "FAILED_PRECONDITION" - case Aborted: - return "ABORTED" - case OutOfRange: - return "OUT_OF_RANGE" - case Unimplemented: - return "UNIMPLEMENTED" - case Internal: - return "INTERNAL" - case Unavailable: - return "UNAVAILABLE" - case DataLoss: - return "DATA_LOSS" - case Unauthenticated: - return "UNAUTHENTICATED" - default: - return "CODE(" + strconv.FormatInt(int64(c), 10) + ")" - } -} diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go index 08476ad1..11b10618 100644 --- a/vendor/google.golang.org/grpc/codes/codes.go +++ b/vendor/google.golang.org/grpc/codes/codes.go @@ -25,13 +25,7 @@ import ( "strconv" ) -// A Code is a status code defined according to the [gRPC documentation]. -// -// Only the codes defined as consts in this package are valid codes. Do not use -// other code values. Behavior of other codes is implementation-specific and -// interoperability between implementations is not guaranteed. -// -// [gRPC documentation]: https://github.com/grpc/grpc/blob/master/doc/statuscodes.md +// A Code is an unsigned 32-bit error code as defined in the gRPC spec. type Code uint32 const ( diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go index 5dafd34e..ce2bbc10 100644 --- a/vendor/google.golang.org/grpc/credentials/tls.go +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -23,9 +23,9 @@ import ( "crypto/tls" "crypto/x509" "fmt" + "io/ioutil" "net" "net/url" - "os" credinternal "google.golang.org/grpc/internal/credentials" ) @@ -44,25 +44,10 @@ func (t TLSInfo) AuthType() string { return "tls" } -// cipherSuiteLookup returns the string version of a TLS cipher suite ID. -func cipherSuiteLookup(cipherSuiteID uint16) string { - for _, s := range tls.CipherSuites() { - if s.ID == cipherSuiteID { - return s.Name - } - } - for _, s := range tls.InsecureCipherSuites() { - if s.ID == cipherSuiteID { - return s.Name - } - } - return fmt.Sprintf("unknown ID: %v", cipherSuiteID) -} - // GetSecurityValue returns security info requested by channelz. func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue { v := &TLSChannelzSecurityValue{ - StandardName: cipherSuiteLookup(t.State.CipherSuite), + StandardName: cipherSuiteLookup[t.State.CipherSuite], } // Currently there's no way to get LocalCertificate info from tls package. 
if len(t.State.PeerCertificates) > 0 { @@ -153,39 +138,10 @@ func (c *tlsCreds) OverrideServerName(serverNameOverride string) error { return nil } -// The following cipher suites are forbidden for use with HTTP/2 by -// https://datatracker.ietf.org/doc/html/rfc7540#appendix-A -var tls12ForbiddenCipherSuites = map[uint16]struct{}{ - tls.TLS_RSA_WITH_AES_128_CBC_SHA: {}, - tls.TLS_RSA_WITH_AES_256_CBC_SHA: {}, - tls.TLS_RSA_WITH_AES_128_GCM_SHA256: {}, - tls.TLS_RSA_WITH_AES_256_GCM_SHA384: {}, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: {}, - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: {}, - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: {}, - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: {}, -} - // NewTLS uses c to construct a TransportCredentials based on TLS. func NewTLS(c *tls.Config) TransportCredentials { tc := &tlsCreds{credinternal.CloneTLSConfig(c)} tc.config.NextProtos = credinternal.AppendH2ToNextProtos(tc.config.NextProtos) - // If the user did not configure a MinVersion and did not configure a - // MaxVersion < 1.2, use MinVersion=1.2, which is required by - // https://datatracker.ietf.org/doc/html/rfc7540#section-9.2 - if tc.config.MinVersion == 0 && (tc.config.MaxVersion == 0 || tc.config.MaxVersion >= tls.VersionTLS12) { - tc.config.MinVersion = tls.VersionTLS12 - } - // If the user did not configure CipherSuites, use all "secure" cipher - // suites reported by the TLS package, but remove some explicitly forbidden - // by https://datatracker.ietf.org/doc/html/rfc7540#appendix-A - if tc.config.CipherSuites == nil { - for _, cs := range tls.CipherSuites() { - if _, ok := tls12ForbiddenCipherSuites[cs.ID]; !ok { - tc.config.CipherSuites = append(tc.config.CipherSuites, cs.ID) - } - } - } return tc } @@ -210,7 +166,7 @@ func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) Transpor // it will override the virtual host name of authority (e.g. :authority header // field) in requests. 
func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) { - b, err := os.ReadFile(certFile) + b, err := ioutil.ReadFile(certFile) if err != nil { return nil, err } @@ -249,3 +205,32 @@ type TLSChannelzSecurityValue struct { LocalCertificate []byte RemoteCertificate []byte } + +var cipherSuiteLookup = map[uint16]string{ + tls.TLS_RSA_WITH_RC4_128_SHA: "TLS_RSA_WITH_RC4_128_SHA", + tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_RSA_WITH_3DES_EDE_CBC_SHA", + tls.TLS_RSA_WITH_AES_128_CBC_SHA: "TLS_RSA_WITH_AES_128_CBC_SHA", + tls.TLS_RSA_WITH_AES_256_CBC_SHA: "TLS_RSA_WITH_AES_256_CBC_SHA", + tls.TLS_RSA_WITH_AES_128_GCM_SHA256: "TLS_RSA_WITH_AES_128_GCM_SHA256", + tls.TLS_RSA_WITH_AES_256_GCM_SHA384: "TLS_RSA_WITH_AES_256_GCM_SHA384", + tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA: "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA: "TLS_ECDHE_RSA_WITH_RC4_128_SHA", + tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", + tls.TLS_FALLBACK_SCSV: "TLS_FALLBACK_SCSV", + tls.TLS_RSA_WITH_AES_128_CBC_SHA256: "TLS_RSA_WITH_AES_128_CBC_SHA256", + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", + tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", + tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", + tls.TLS_AES_128_GCM_SHA256: "TLS_AES_128_GCM_SHA256", + tls.TLS_AES_256_GCM_SHA384: "TLS_AES_256_GCM_SHA384", + tls.TLS_CHACHA20_POLY1305_SHA256: "TLS_CHACHA20_POLY1305_SHA256", +} diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index ba242618..9372dc32 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -38,15 +38,12 @@ import ( func init() { internal.AddGlobalDialOptions = func(opt ...DialOption) { - globalDialOptions = append(globalDialOptions, opt...) + extraDialOptions = append(extraDialOptions, opt...) } internal.ClearGlobalDialOptions = func() { - globalDialOptions = nil + extraDialOptions = nil } internal.WithBinaryLogger = withBinaryLogger - internal.JoinDialOptions = newJoinDialOption - internal.DisableGlobalDialOptions = newDisableGlobalDialOptions - internal.WithRecvBufferPool = withRecvBufferPool } // dialOptions configure a Dial call. dialOptions are set by the DialOption @@ -64,6 +61,7 @@ type dialOptions struct { block bool returnLastError bool timeout time.Duration + scChan <-chan ServiceConfig authority string binaryLogger binarylog.Logger copts transport.ConnectOptions @@ -77,8 +75,6 @@ type dialOptions struct { defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON. 
defaultServiceConfigRawJSON *string resolvers []resolver.Builder - idleTimeout time.Duration - recvBufferPool SharedBufferPool } // DialOption configures how we set up the connection. @@ -86,7 +82,7 @@ type DialOption interface { apply(*dialOptions) } -var globalDialOptions []DialOption +var extraDialOptions []DialOption // EmptyDialOption does not alter the dial configuration. It can be embedded in // another structure to build custom dial options. @@ -99,16 +95,6 @@ type EmptyDialOption struct{} func (EmptyDialOption) apply(*dialOptions) {} -type disableGlobalDialOptions struct{} - -func (disableGlobalDialOptions) apply(*dialOptions) {} - -// newDisableGlobalDialOptions returns a DialOption that prevents the ClientConn -// from applying the global DialOptions (set via AddGlobalDialOptions). -func newDisableGlobalDialOptions() DialOption { - return &disableGlobalDialOptions{} -} - // funcDialOption wraps a function that modifies dialOptions into an // implementation of the DialOption interface. type funcDialOption struct { @@ -125,42 +111,13 @@ func newFuncDialOption(f func(*dialOptions)) *funcDialOption { } } -type joinDialOption struct { - opts []DialOption -} - -func (jdo *joinDialOption) apply(do *dialOptions) { - for _, opt := range jdo.opts { - opt.apply(do) - } -} - -func newJoinDialOption(opts ...DialOption) DialOption { - return &joinDialOption{opts: opts} -} - -// WithSharedWriteBuffer allows reusing per-connection transport write buffer. -// If this option is set to true every connection will release the buffer after -// flushing the data on the wire. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. -func WithSharedWriteBuffer(val bool) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.copts.SharedWriteBuffer = val - }) -} - // WithWriteBufferSize determines how much data can be batched before doing a // write on the wire. The corresponding memory allocation for this buffer will // be twice the size to keep syscalls low. The default value for this buffer is // 32KB. // -// Zero or negative values will disable the write buffer such that each write -// will be on underlying connection. Note: A Send call may not directly -// translate to a write. +// Zero will disable the write buffer such that each write will be on underlying +// connection. Note: A Send call may not directly translate to a write. func WithWriteBufferSize(s int) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.WriteBufferSize = s @@ -170,9 +127,8 @@ func WithWriteBufferSize(s int) DialOption { // WithReadBufferSize lets you set the size of read buffer, this determines how // much data can be read at most for each read syscall. // -// The default value for this buffer is 32KB. Zero or negative values will -// disable read buffer for a connection so data framer can access the -// underlying conn directly. +// The default value for this buffer is 32KB. Zero will disable read buffer for +// a connection so data framer can access the underlying conn directly. func WithReadBufferSize(s int) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.ReadBufferSize = s @@ -250,6 +206,19 @@ func WithDecompressor(dc Decompressor) DialOption { }) } +// WithServiceConfig returns a DialOption which has a channel to read the +// service configuration. 
+// +// Deprecated: service config should be received through name resolver or via +// WithDefaultServiceConfig, as specified at +// https://github.com/grpc/grpc/blob/master/doc/service_config.md. Will be +// removed in a future 1.x release. +func WithServiceConfig(c <-chan ServiceConfig) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.scChan = c + }) +} + // WithConnectParams configures the ClientConn to use the provided ConnectParams // for creating and maintaining connections to servers. // @@ -298,9 +267,6 @@ func withBackoff(bs internalbackoff.Strategy) DialOption { // WithBlock returns a DialOption which makes callers of Dial block until the // underlying connection is up. Without this, Dial returns immediately and // connecting the server happens in background. -// -// Use of this feature is not recommended. For more information, please see: -// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md func WithBlock() DialOption { return newFuncDialOption(func(o *dialOptions) { o.block = true @@ -312,9 +278,6 @@ func WithBlock() DialOption { // the context.DeadlineExceeded error. // Implies WithBlock() // -// Use of this feature is not recommended. For more information, please see: -// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md -// // # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a @@ -400,17 +363,6 @@ func WithTimeout(d time.Duration) DialOption { // connections. If FailOnNonTempDialError() is set to true, and an error is // returned by f, gRPC checks the error's Temporary() method to decide if it // should try to reconnect to the network address. -// -// Note: All supported releases of Go (as of December 2023) override the OS -// defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive -// with OS defaults for keepalive time and interval, use a net.Dialer that sets -// the KeepAlive field to a negative value, and sets the SO_KEEPALIVE socket -// option to true from the Control field. For a concrete example of how to do -// this, see internal.NetDialerWithTCPKeepalive(). -// -// For more information, please see [issue 23459] in the Go github repo. -// -// [issue 23459]: https://github.com/golang/go/issues/23459 func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.Dialer = f @@ -468,9 +420,6 @@ func withBinaryLogger(bl binarylog.Logger) DialOption { // FailOnNonTempDialError only affects the initial dial, and does not do // anything useful unless you are also using WithBlock(). // -// Use of this feature is not recommended. For more information, please see: -// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md -// // # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a @@ -485,7 +434,7 @@ func FailOnNonTempDialError(f bool) DialOption { // the RPCs. 
func WithUserAgent(s string) DialOption { return newFuncDialOption(func(o *dialOptions) { - o.copts.UserAgent = s + " " + grpcUA + o.copts.UserAgent = s }) } @@ -635,16 +584,12 @@ func withHealthCheckFunc(f internal.HealthChecker) DialOption { func defaultDialOptions() dialOptions { return dialOptions{ + healthCheckFunc: internal.HealthCheckFunc, copts: transport.ConnectOptions{ - ReadBufferSize: defaultReadBufSize, WriteBufferSize: defaultWriteBufSize, + ReadBufferSize: defaultReadBufSize, UseProxy: true, - UserAgent: grpcUA, }, - bs: internalbackoff.DefaultExponential, - healthCheckFunc: internal.HealthCheckFunc, - idleTimeout: 30 * time.Minute, - recvBufferPool: nopBufferPool{}, } } @@ -673,46 +618,3 @@ func WithResolvers(rs ...resolver.Builder) DialOption { o.resolvers = append(o.resolvers, rs...) }) } - -// WithIdleTimeout returns a DialOption that configures an idle timeout for the -// channel. If the channel is idle for the configured timeout, i.e there are no -// ongoing RPCs and no new RPCs are initiated, the channel will enter idle mode -// and as a result the name resolver and load balancer will be shut down. The -// channel will exit idle mode when the Connect() method is called or when an -// RPC is initiated. -// -// A default timeout of 30 minutes will be used if this dial option is not set -// at dial time and idleness can be disabled by passing a timeout of zero. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. -func WithIdleTimeout(d time.Duration) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.idleTimeout = d - }) -} - -// WithRecvBufferPool returns a DialOption that configures the ClientConn -// to use the provided shared buffer pool for parsing incoming messages. Depending -// on the application's workload, this could result in reduced memory allocation. -// -// If you are unsure about how to implement a memory pool but want to utilize one, -// begin with grpc.NewSharedBufferPool. -// -// Note: The shared buffer pool feature will not be active if any of the following -// options are used: WithStatsHandler, EnableTracing, or binary logging. In such -// cases, the shared buffer pool will be ignored. -// -// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in -// v1.60.0 or later. -func WithRecvBufferPool(bufferPool SharedBufferPool) DialOption { - return withRecvBufferPool(bufferPool) -} - -func withRecvBufferPool(bufferPool SharedBufferPool) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.recvBufferPool = bufferPool - }) -} diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go index 5ebf88d7..711763d5 100644 --- a/vendor/google.golang.org/grpc/encoding/encoding.go +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -38,10 +38,6 @@ const Identity = "identity" // Compressor is used for compressing and decompressing when sending or // receiving messages. -// -// If a Compressor implements `DecompressedSize(compressedBytes []byte) int`, -// gRPC will invoke it to determine the size of the buffer allocated for the -// result of decompression. A return value of -1 indicates unknown size. type Compressor interface { // Compress writes the data written to wc to w after compressing it. If an // error occurs while initializing the compressor, that error is returned @@ -55,6 +51,15 @@ type Compressor interface { // coding header. 
The result must be static; the result cannot change // between calls. Name() string + // If a Compressor implements + // DecompressedSize(compressedBytes []byte) int, gRPC will call it + // to determine the size of the buffer allocated for the result of decompression. + // Return -1 to indicate unknown size. + // + // Experimental + // + // Notice: This API is EXPERIMENTAL and may be changed or removed in a + // later release. } var registeredCompressor = make(map[string]Compressor) @@ -70,9 +75,7 @@ var registeredCompressor = make(map[string]Compressor) // registered with the same name, the one registered last will take effect. func RegisterCompressor(c Compressor) { registeredCompressor[c.Name()] = c - if !grpcutil.IsCompressorNameRegistered(c.Name()) { - grpcutil.RegisteredCompressorNames = append(grpcutil.RegisteredCompressorNames, c.Name()) - } + grpcutil.RegisteredCompressorNames = append(grpcutil.RegisteredCompressorNames, c.Name()) } // GetCompressor returns Compressor for the given compressor name. @@ -85,9 +88,9 @@ func GetCompressor(name string) Compressor { // methods can be called from concurrent goroutines. type Codec interface { // Marshal returns the wire format of v. - Marshal(v any) ([]byte, error) + Marshal(v interface{}) ([]byte, error) // Unmarshal parses the wire format into v. - Unmarshal(data []byte, v any) error + Unmarshal(data []byte, v interface{}) error // Name returns the name of the Codec implementation. The returned string // will be used as part of content type in transmission. The result must be // static; the result cannot change between calls. diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go index 0ee3d3ba..3009b35a 100644 --- a/vendor/google.golang.org/grpc/encoding/proto/proto.go +++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go @@ -37,7 +37,7 @@ func init() { // codec is a Codec implementation with protobuf. It is the default codec for gRPC. type codec struct{} -func (codec) Marshal(v any) ([]byte, error) { +func (codec) Marshal(v interface{}) ([]byte, error) { vv, ok := v.(proto.Message) if !ok { return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v) @@ -45,7 +45,7 @@ func (codec) Marshal(v any) ([]byte, error) { return proto.Marshal(vv) } -func (codec) Unmarshal(data []byte, v any) error { +func (codec) Unmarshal(data []byte, v interface{}) error { vv, ok := v.(proto.Message) if !ok { return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v) diff --git a/vendor/google.golang.org/grpc/grpclog/component.go b/vendor/google.golang.org/grpc/grpclog/component.go index ac73c9ce..8358dd6e 100644 --- a/vendor/google.golang.org/grpc/grpclog/component.go +++ b/vendor/google.golang.org/grpc/grpclog/component.go @@ -31,71 +31,71 @@ type componentData struct { var cache = map[string]*componentData{} -func (c *componentData) InfoDepth(depth int, args ...any) { - args = append([]any{"[" + string(c.name) + "]"}, args...) +func (c *componentData) InfoDepth(depth int, args ...interface{}) { + args = append([]interface{}{"[" + string(c.name) + "]"}, args...) grpclog.InfoDepth(depth+1, args...) } -func (c *componentData) WarningDepth(depth int, args ...any) { - args = append([]any{"[" + string(c.name) + "]"}, args...) +func (c *componentData) WarningDepth(depth int, args ...interface{}) { + args = append([]interface{}{"[" + string(c.name) + "]"}, args...) grpclog.WarningDepth(depth+1, args...) 
} -func (c *componentData) ErrorDepth(depth int, args ...any) { - args = append([]any{"[" + string(c.name) + "]"}, args...) +func (c *componentData) ErrorDepth(depth int, args ...interface{}) { + args = append([]interface{}{"[" + string(c.name) + "]"}, args...) grpclog.ErrorDepth(depth+1, args...) } -func (c *componentData) FatalDepth(depth int, args ...any) { - args = append([]any{"[" + string(c.name) + "]"}, args...) +func (c *componentData) FatalDepth(depth int, args ...interface{}) { + args = append([]interface{}{"[" + string(c.name) + "]"}, args...) grpclog.FatalDepth(depth+1, args...) } -func (c *componentData) Info(args ...any) { +func (c *componentData) Info(args ...interface{}) { c.InfoDepth(1, args...) } -func (c *componentData) Warning(args ...any) { +func (c *componentData) Warning(args ...interface{}) { c.WarningDepth(1, args...) } -func (c *componentData) Error(args ...any) { +func (c *componentData) Error(args ...interface{}) { c.ErrorDepth(1, args...) } -func (c *componentData) Fatal(args ...any) { +func (c *componentData) Fatal(args ...interface{}) { c.FatalDepth(1, args...) } -func (c *componentData) Infof(format string, args ...any) { +func (c *componentData) Infof(format string, args ...interface{}) { c.InfoDepth(1, fmt.Sprintf(format, args...)) } -func (c *componentData) Warningf(format string, args ...any) { +func (c *componentData) Warningf(format string, args ...interface{}) { c.WarningDepth(1, fmt.Sprintf(format, args...)) } -func (c *componentData) Errorf(format string, args ...any) { +func (c *componentData) Errorf(format string, args ...interface{}) { c.ErrorDepth(1, fmt.Sprintf(format, args...)) } -func (c *componentData) Fatalf(format string, args ...any) { +func (c *componentData) Fatalf(format string, args ...interface{}) { c.FatalDepth(1, fmt.Sprintf(format, args...)) } -func (c *componentData) Infoln(args ...any) { +func (c *componentData) Infoln(args ...interface{}) { c.InfoDepth(1, args...) } -func (c *componentData) Warningln(args ...any) { +func (c *componentData) Warningln(args ...interface{}) { c.WarningDepth(1, args...) } -func (c *componentData) Errorln(args ...any) { +func (c *componentData) Errorln(args ...interface{}) { c.ErrorDepth(1, args...) } -func (c *componentData) Fatalln(args ...any) { +func (c *componentData) Fatalln(args ...interface{}) { c.FatalDepth(1, args...) } diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go index 16928c9c..c8bb2be3 100644 --- a/vendor/google.golang.org/grpc/grpclog/grpclog.go +++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go @@ -42,53 +42,53 @@ func V(l int) bool { } // Info logs to the INFO log. -func Info(args ...any) { +func Info(args ...interface{}) { grpclog.Logger.Info(args...) } // Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf. -func Infof(format string, args ...any) { +func Infof(format string, args ...interface{}) { grpclog.Logger.Infof(format, args...) } // Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println. -func Infoln(args ...any) { +func Infoln(args ...interface{}) { grpclog.Logger.Infoln(args...) } // Warning logs to the WARNING log. -func Warning(args ...any) { +func Warning(args ...interface{}) { grpclog.Logger.Warning(args...) } // Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf. -func Warningf(format string, args ...any) { +func Warningf(format string, args ...interface{}) { grpclog.Logger.Warningf(format, args...) 
} // Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println. -func Warningln(args ...any) { +func Warningln(args ...interface{}) { grpclog.Logger.Warningln(args...) } // Error logs to the ERROR log. -func Error(args ...any) { +func Error(args ...interface{}) { grpclog.Logger.Error(args...) } // Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf. -func Errorf(format string, args ...any) { +func Errorf(format string, args ...interface{}) { grpclog.Logger.Errorf(format, args...) } // Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println. -func Errorln(args ...any) { +func Errorln(args ...interface{}) { grpclog.Logger.Errorln(args...) } // Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print. // It calls os.Exit() with exit code 1. -func Fatal(args ...any) { +func Fatal(args ...interface{}) { grpclog.Logger.Fatal(args...) // Make sure fatal logs will exit. os.Exit(1) @@ -96,7 +96,7 @@ func Fatal(args ...any) { // Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf. // It calls os.Exit() with exit code 1. -func Fatalf(format string, args ...any) { +func Fatalf(format string, args ...interface{}) { grpclog.Logger.Fatalf(format, args...) // Make sure fatal logs will exit. os.Exit(1) @@ -104,7 +104,7 @@ func Fatalf(format string, args ...any) { // Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println. // It calle os.Exit()) with exit code 1. -func Fatalln(args ...any) { +func Fatalln(args ...interface{}) { grpclog.Logger.Fatalln(args...) // Make sure fatal logs will exit. os.Exit(1) @@ -113,20 +113,20 @@ func Fatalln(args ...any) { // Print prints to the logger. Arguments are handled in the manner of fmt.Print. // // Deprecated: use Info. -func Print(args ...any) { +func Print(args ...interface{}) { grpclog.Logger.Info(args...) } // Printf prints to the logger. Arguments are handled in the manner of fmt.Printf. // // Deprecated: use Infof. -func Printf(format string, args ...any) { +func Printf(format string, args ...interface{}) { grpclog.Logger.Infof(format, args...) } // Println prints to the logger. Arguments are handled in the manner of fmt.Println. // // Deprecated: use Infoln. -func Println(args ...any) { +func Println(args ...interface{}) { grpclog.Logger.Infoln(args...) } diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go index b1674d82..ef06a482 100644 --- a/vendor/google.golang.org/grpc/grpclog/logger.go +++ b/vendor/google.golang.org/grpc/grpclog/logger.go @@ -24,12 +24,12 @@ import "google.golang.org/grpc/internal/grpclog" // // Deprecated: use LoggerV2. type Logger interface { - Fatal(args ...any) - Fatalf(format string, args ...any) - Fatalln(args ...any) - Print(args ...any) - Printf(format string, args ...any) - Println(args ...any) + Fatal(args ...interface{}) + Fatalf(format string, args ...interface{}) + Fatalln(args ...interface{}) + Print(args ...interface{}) + Printf(format string, args ...interface{}) + Println(args ...interface{}) } // SetLogger sets the logger that is used in grpc. Call only from @@ -45,39 +45,39 @@ type loggerWrapper struct { Logger } -func (g *loggerWrapper) Info(args ...any) { +func (g *loggerWrapper) Info(args ...interface{}) { g.Logger.Print(args...) } -func (g *loggerWrapper) Infoln(args ...any) { +func (g *loggerWrapper) Infoln(args ...interface{}) { g.Logger.Println(args...) 
} -func (g *loggerWrapper) Infof(format string, args ...any) { +func (g *loggerWrapper) Infof(format string, args ...interface{}) { g.Logger.Printf(format, args...) } -func (g *loggerWrapper) Warning(args ...any) { +func (g *loggerWrapper) Warning(args ...interface{}) { g.Logger.Print(args...) } -func (g *loggerWrapper) Warningln(args ...any) { +func (g *loggerWrapper) Warningln(args ...interface{}) { g.Logger.Println(args...) } -func (g *loggerWrapper) Warningf(format string, args ...any) { +func (g *loggerWrapper) Warningf(format string, args ...interface{}) { g.Logger.Printf(format, args...) } -func (g *loggerWrapper) Error(args ...any) { +func (g *loggerWrapper) Error(args ...interface{}) { g.Logger.Print(args...) } -func (g *loggerWrapper) Errorln(args ...any) { +func (g *loggerWrapper) Errorln(args ...interface{}) { g.Logger.Println(args...) } -func (g *loggerWrapper) Errorf(format string, args ...any) { +func (g *loggerWrapper) Errorf(format string, args ...interface{}) { g.Logger.Printf(format, args...) } diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go index ecfd36d7..b5560b47 100644 --- a/vendor/google.golang.org/grpc/grpclog/loggerv2.go +++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -22,6 +22,7 @@ import ( "encoding/json" "fmt" "io" + "io/ioutil" "log" "os" "strconv" @@ -33,35 +34,35 @@ import ( // LoggerV2 does underlying logging work for grpclog. type LoggerV2 interface { // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. - Info(args ...any) + Info(args ...interface{}) // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. - Infoln(args ...any) + Infoln(args ...interface{}) // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. - Infof(format string, args ...any) + Infof(format string, args ...interface{}) // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. - Warning(args ...any) + Warning(args ...interface{}) // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. - Warningln(args ...any) + Warningln(args ...interface{}) // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. - Warningf(format string, args ...any) + Warningf(format string, args ...interface{}) // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. - Error(args ...any) + Error(args ...interface{}) // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. - Errorln(args ...any) + Errorln(args ...interface{}) // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. - Errorf(format string, args ...any) + Errorf(format string, args ...interface{}) // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatal(args ...any) + Fatal(args ...interface{}) // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatalln(args ...any) + Fatalln(args ...interface{}) // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. 
- Fatalf(format string, args ...any) + Fatalf(format string, args ...interface{}) // V reports whether verbosity level l is at least the requested verbose level. V(l int) bool } @@ -139,9 +140,9 @@ func newLoggerV2WithConfig(infoW, warningW, errorW io.Writer, c loggerV2Config) // newLoggerV2 creates a loggerV2 to be used as default logger. // All logs are written to stderr. func newLoggerV2() LoggerV2 { - errorW := io.Discard - warningW := io.Discard - infoW := io.Discard + errorW := ioutil.Discard + warningW := ioutil.Discard + infoW := ioutil.Discard logLevel := os.Getenv("GRPC_GO_LOG_SEVERITY_LEVEL") switch logLevel { @@ -182,53 +183,53 @@ func (g *loggerT) output(severity int, s string) { g.m[severity].Output(2, string(b)) } -func (g *loggerT) Info(args ...any) { +func (g *loggerT) Info(args ...interface{}) { g.output(infoLog, fmt.Sprint(args...)) } -func (g *loggerT) Infoln(args ...any) { +func (g *loggerT) Infoln(args ...interface{}) { g.output(infoLog, fmt.Sprintln(args...)) } -func (g *loggerT) Infof(format string, args ...any) { +func (g *loggerT) Infof(format string, args ...interface{}) { g.output(infoLog, fmt.Sprintf(format, args...)) } -func (g *loggerT) Warning(args ...any) { +func (g *loggerT) Warning(args ...interface{}) { g.output(warningLog, fmt.Sprint(args...)) } -func (g *loggerT) Warningln(args ...any) { +func (g *loggerT) Warningln(args ...interface{}) { g.output(warningLog, fmt.Sprintln(args...)) } -func (g *loggerT) Warningf(format string, args ...any) { +func (g *loggerT) Warningf(format string, args ...interface{}) { g.output(warningLog, fmt.Sprintf(format, args...)) } -func (g *loggerT) Error(args ...any) { +func (g *loggerT) Error(args ...interface{}) { g.output(errorLog, fmt.Sprint(args...)) } -func (g *loggerT) Errorln(args ...any) { +func (g *loggerT) Errorln(args ...interface{}) { g.output(errorLog, fmt.Sprintln(args...)) } -func (g *loggerT) Errorf(format string, args ...any) { +func (g *loggerT) Errorf(format string, args ...interface{}) { g.output(errorLog, fmt.Sprintf(format, args...)) } -func (g *loggerT) Fatal(args ...any) { +func (g *loggerT) Fatal(args ...interface{}) { g.output(fatalLog, fmt.Sprint(args...)) os.Exit(1) } -func (g *loggerT) Fatalln(args ...any) { +func (g *loggerT) Fatalln(args ...interface{}) { g.output(fatalLog, fmt.Sprintln(args...)) os.Exit(1) } -func (g *loggerT) Fatalf(format string, args ...any) { +func (g *loggerT) Fatalf(format string, args ...interface{}) { g.output(fatalLog, fmt.Sprintf(format, args...)) os.Exit(1) } @@ -248,11 +249,11 @@ func (g *loggerT) V(l int) bool { type DepthLoggerV2 interface { LoggerV2 // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. - InfoDepth(depth int, args ...any) + InfoDepth(depth int, args ...interface{}) // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. - WarningDepth(depth int, args ...any) + WarningDepth(depth int, args ...interface{}) // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. - ErrorDepth(depth int, args ...any) + ErrorDepth(depth int, args ...interface{}) // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. 
- FatalDepth(depth int, args ...any) + FatalDepth(depth int, args ...interface{}) } diff --git a/vendor/google.golang.org/grpc/health/client.go b/vendor/google.golang.org/grpc/health/client.go deleted file mode 100644 index 740745c4..00000000 --- a/vendor/google.golang.org/grpc/health/client.go +++ /dev/null @@ -1,117 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package health - -import ( - "context" - "fmt" - "io" - "time" - - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/connectivity" - healthpb "google.golang.org/grpc/health/grpc_health_v1" - "google.golang.org/grpc/internal" - "google.golang.org/grpc/internal/backoff" - "google.golang.org/grpc/status" -) - -var ( - backoffStrategy = backoff.DefaultExponential - backoffFunc = func(ctx context.Context, retries int) bool { - d := backoffStrategy.Backoff(retries) - timer := time.NewTimer(d) - select { - case <-timer.C: - return true - case <-ctx.Done(): - timer.Stop() - return false - } - } -) - -func init() { - internal.HealthCheckFunc = clientHealthCheck -} - -const healthCheckMethod = "/grpc.health.v1.Health/Watch" - -// This function implements the protocol defined at: -// https://github.com/grpc/grpc/blob/master/doc/health-checking.md -func clientHealthCheck(ctx context.Context, newStream func(string) (any, error), setConnectivityState func(connectivity.State, error), service string) error { - tryCnt := 0 - -retryConnection: - for { - // Backs off if the connection has failed in some way without receiving a message in the previous retry. - if tryCnt > 0 && !backoffFunc(ctx, tryCnt-1) { - return nil - } - tryCnt++ - - if ctx.Err() != nil { - return nil - } - setConnectivityState(connectivity.Connecting, nil) - rawS, err := newStream(healthCheckMethod) - if err != nil { - continue retryConnection - } - - s, ok := rawS.(grpc.ClientStream) - // Ideally, this should never happen. But if it happens, the server is marked as healthy for LBing purposes. - if !ok { - setConnectivityState(connectivity.Ready, nil) - return fmt.Errorf("newStream returned %v (type %T); want grpc.ClientStream", rawS, rawS) - } - - if err = s.SendMsg(&healthpb.HealthCheckRequest{Service: service}); err != nil && err != io.EOF { - // Stream should have been closed, so we can safely continue to create a new stream. - continue retryConnection - } - s.CloseSend() - - resp := new(healthpb.HealthCheckResponse) - for { - err = s.RecvMsg(resp) - - // Reports healthy for the LBing purposes if health check is not implemented in the server. - if status.Code(err) == codes.Unimplemented { - setConnectivityState(connectivity.Ready, nil) - return err - } - - // Reports unhealthy if server's Watch method gives an error other than UNIMPLEMENTED. 
- if err != nil { - setConnectivityState(connectivity.TransientFailure, fmt.Errorf("connection active but received health check RPC error: %v", err)) - continue retryConnection - } - - // As a message has been received, removes the need for backoff for the next retry by resetting the try count. - tryCnt = 0 - if resp.Status == healthpb.HealthCheckResponse_SERVING { - setConnectivityState(connectivity.Ready, nil) - } else { - setConnectivityState(connectivity.TransientFailure, fmt.Errorf("connection active but health check failed. status=%s", resp.Status)) - } - } - } -} diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go index 24299efd..a66024d2 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -17,13 +17,14 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.31.0 -// protoc v4.22.0 +// protoc-gen-go v1.25.0 +// protoc v3.14.0 // source: grpc/health/v1/health.proto package grpc_health_v1 import ( + proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -37,6 +38,10 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 + type HealthCheckResponse_ServingStatus int32 const ( diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go index 4439cda0..a332dfd7 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.3.0 -// - protoc v4.22.0 +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.14.0 // source: grpc/health/v1/health.proto package grpc_health_v1 @@ -35,24 +35,12 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 -const ( - Health_Check_FullMethodName = "/grpc.health.v1.Health/Check" - Health_Watch_FullMethodName = "/grpc.health.v1.Health/Watch" -) - // HealthClient is the client API for Health service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. type HealthClient interface { - // Check gets the health of the specified service. If the requested service - // is unknown, the call will fail with status NOT_FOUND. If the caller does - // not specify a service name, the server should respond with its overall - // health status. - // - // Clients should set a deadline when calling Check, and can declare the - // server unhealthy if they do not receive a timely response. - // - // Check implementations should be idempotent and side effect free. + // If the requested service is unknown, the call will fail with status + // NOT_FOUND. Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) // Performs a watch for the serving status of the requested service. 
// The server will immediately send back a message indicating the current @@ -82,7 +70,7 @@ func NewHealthClient(cc grpc.ClientConnInterface) HealthClient { func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) { out := new(HealthCheckResponse) - err := c.cc.Invoke(ctx, Health_Check_FullMethodName, in, out, opts...) + err := c.cc.Invoke(ctx, "/grpc.health.v1.Health/Check", in, out, opts...) if err != nil { return nil, err } @@ -90,7 +78,7 @@ func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts . } func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) { - stream, err := c.cc.NewStream(ctx, &Health_ServiceDesc.Streams[0], Health_Watch_FullMethodName, opts...) + stream, err := c.cc.NewStream(ctx, &Health_ServiceDesc.Streams[0], "/grpc.health.v1.Health/Watch", opts...) if err != nil { return nil, err } @@ -125,15 +113,8 @@ func (x *healthWatchClient) Recv() (*HealthCheckResponse, error) { // All implementations should embed UnimplementedHealthServer // for forward compatibility type HealthServer interface { - // Check gets the health of the specified service. If the requested service - // is unknown, the call will fail with status NOT_FOUND. If the caller does - // not specify a service name, the server should respond with its overall - // health status. - // - // Clients should set a deadline when calling Check, and can declare the - // server unhealthy if they do not receive a timely response. - // - // Check implementations should be idempotent and side effect free. + // If the requested service is unknown, the call will fail with status + // NOT_FOUND. Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) // Performs a watch for the serving status of the requested service. // The server will immediately send back a message indicating the current @@ -185,7 +166,7 @@ func _Health_Check_Handler(srv interface{}, ctx context.Context, dec func(interf } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: Health_Check_FullMethodName, + FullMethod: "/grpc.health.v1.Health/Check", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(HealthServer).Check(ctx, req.(*HealthCheckRequest)) diff --git a/vendor/google.golang.org/grpc/health/logging.go b/vendor/google.golang.org/grpc/health/logging.go deleted file mode 100644 index 83c6acf5..00000000 --- a/vendor/google.golang.org/grpc/health/logging.go +++ /dev/null @@ -1,23 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package health - -import "google.golang.org/grpc/grpclog" - -var logger = grpclog.Component("health_service") diff --git a/vendor/google.golang.org/grpc/health/server.go b/vendor/google.golang.org/grpc/health/server.go deleted file mode 100644 index cce6312d..00000000 --- a/vendor/google.golang.org/grpc/health/server.go +++ /dev/null @@ -1,163 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package health provides a service that exposes server's health and it must be -// imported to enable support for client-side health checks. -package health - -import ( - "context" - "sync" - - "google.golang.org/grpc/codes" - healthgrpc "google.golang.org/grpc/health/grpc_health_v1" - healthpb "google.golang.org/grpc/health/grpc_health_v1" - "google.golang.org/grpc/status" -) - -// Server implements `service Health`. -type Server struct { - healthgrpc.UnimplementedHealthServer - mu sync.RWMutex - // If shutdown is true, it's expected all serving status is NOT_SERVING, and - // will stay in NOT_SERVING. - shutdown bool - // statusMap stores the serving status of the services this Server monitors. - statusMap map[string]healthpb.HealthCheckResponse_ServingStatus - updates map[string]map[healthgrpc.Health_WatchServer]chan healthpb.HealthCheckResponse_ServingStatus -} - -// NewServer returns a new Server. -func NewServer() *Server { - return &Server{ - statusMap: map[string]healthpb.HealthCheckResponse_ServingStatus{"": healthpb.HealthCheckResponse_SERVING}, - updates: make(map[string]map[healthgrpc.Health_WatchServer]chan healthpb.HealthCheckResponse_ServingStatus), - } -} - -// Check implements `service Health`. -func (s *Server) Check(ctx context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) { - s.mu.RLock() - defer s.mu.RUnlock() - if servingStatus, ok := s.statusMap[in.Service]; ok { - return &healthpb.HealthCheckResponse{ - Status: servingStatus, - }, nil - } - return nil, status.Error(codes.NotFound, "unknown service") -} - -// Watch implements `service Health`. -func (s *Server) Watch(in *healthpb.HealthCheckRequest, stream healthgrpc.Health_WatchServer) error { - service := in.Service - // update channel is used for getting service status updates. - update := make(chan healthpb.HealthCheckResponse_ServingStatus, 1) - s.mu.Lock() - // Puts the initial status to the channel. - if servingStatus, ok := s.statusMap[service]; ok { - update <- servingStatus - } else { - update <- healthpb.HealthCheckResponse_SERVICE_UNKNOWN - } - - // Registers the update channel to the correct place in the updates map. 
- if _, ok := s.updates[service]; !ok { - s.updates[service] = make(map[healthgrpc.Health_WatchServer]chan healthpb.HealthCheckResponse_ServingStatus) - } - s.updates[service][stream] = update - defer func() { - s.mu.Lock() - delete(s.updates[service], stream) - s.mu.Unlock() - }() - s.mu.Unlock() - - var lastSentStatus healthpb.HealthCheckResponse_ServingStatus = -1 - for { - select { - // Status updated. Sends the up-to-date status to the client. - case servingStatus := <-update: - if lastSentStatus == servingStatus { - continue - } - lastSentStatus = servingStatus - err := stream.Send(&healthpb.HealthCheckResponse{Status: servingStatus}) - if err != nil { - return status.Error(codes.Canceled, "Stream has ended.") - } - // Context done. Removes the update channel from the updates map. - case <-stream.Context().Done(): - return status.Error(codes.Canceled, "Stream has ended.") - } - } -} - -// SetServingStatus is called when need to reset the serving status of a service -// or insert a new service entry into the statusMap. -func (s *Server) SetServingStatus(service string, servingStatus healthpb.HealthCheckResponse_ServingStatus) { - s.mu.Lock() - defer s.mu.Unlock() - if s.shutdown { - logger.Infof("health: status changing for %s to %v is ignored because health service is shutdown", service, servingStatus) - return - } - - s.setServingStatusLocked(service, servingStatus) -} - -func (s *Server) setServingStatusLocked(service string, servingStatus healthpb.HealthCheckResponse_ServingStatus) { - s.statusMap[service] = servingStatus - for _, update := range s.updates[service] { - // Clears previous updates, that are not sent to the client, from the channel. - // This can happen if the client is not reading and the server gets flow control limited. - select { - case <-update: - default: - } - // Puts the most recent update to the channel. - update <- servingStatus - } -} - -// Shutdown sets all serving status to NOT_SERVING, and configures the server to -// ignore all future status changes. -// -// This changes serving status for all services. To set status for a particular -// services, call SetServingStatus(). -func (s *Server) Shutdown() { - s.mu.Lock() - defer s.mu.Unlock() - s.shutdown = true - for service := range s.statusMap { - s.setServingStatusLocked(service, healthpb.HealthCheckResponse_NOT_SERVING) - } -} - -// Resume sets all serving status to SERVING, and configures the server to -// accept all future status changes. -// -// This changes serving status for all services. To set status for a particular -// services, call SetServingStatus(). -func (s *Server) Resume() { - s.mu.Lock() - defer s.mu.Unlock() - s.shutdown = false - for service := range s.statusMap { - s.setServingStatusLocked(service, healthpb.HealthCheckResponse_SERVING) - } -} diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go index 877d78fc..bb96ef57 100644 --- a/vendor/google.golang.org/grpc/interceptor.go +++ b/vendor/google.golang.org/grpc/interceptor.go @@ -23,7 +23,7 @@ import ( ) // UnaryInvoker is called by UnaryClientInterceptor to complete RPCs. -type UnaryInvoker func(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error +type UnaryInvoker func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error // UnaryClientInterceptor intercepts the execution of a unary RPC on the client. 
// Unary interceptors can be specified as a DialOption, using @@ -40,7 +40,7 @@ type UnaryInvoker func(ctx context.Context, method string, req, reply any, cc *C // defaults from the ClientConn as well as per-call options. // // The returned error must be compatible with the status package. -type UnaryClientInterceptor func(ctx context.Context, method string, req, reply any, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error +type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error // Streamer is called by StreamClientInterceptor to create a ClientStream. type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) @@ -66,7 +66,7 @@ type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *Cli // server side. All per-rpc information may be mutated by the interceptor. type UnaryServerInfo struct { // Server is the service implementation the user provides. This is read-only. - Server any + Server interface{} // FullMethod is the full RPC method string, i.e., /package.service/method. FullMethod string } @@ -78,13 +78,13 @@ type UnaryServerInfo struct { // status package, or be one of the context errors. Otherwise, gRPC will use // codes.Unknown as the status code and err.Error() as the status message of the // RPC. -type UnaryHandler func(ctx context.Context, req any) (any, error) +type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error) // UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info // contains all the information of this RPC the interceptor can operate on. And handler is the wrapper // of the service method implementation. It is the responsibility of the interceptor to invoke handler // to complete the RPC. -type UnaryServerInterceptor func(ctx context.Context, req any, info *UnaryServerInfo, handler UnaryHandler) (resp any, err error) +type UnaryServerInterceptor func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (resp interface{}, err error) // StreamServerInfo consists of various information about a streaming RPC on // server side. All per-rpc information may be mutated by the interceptor. @@ -101,4 +101,4 @@ type StreamServerInfo struct { // info contains all the information of this RPC the interceptor can operate on. And handler is the // service method implementation. It is the responsibility of the interceptor to invoke handler to // complete the RPC. -type StreamServerInterceptor func(srv any, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error +type StreamServerInterceptor func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go index fed1c011..5fc0ee3d 100644 --- a/vendor/google.golang.org/grpc/internal/backoff/backoff.go +++ b/vendor/google.golang.org/grpc/internal/backoff/backoff.go @@ -23,8 +23,6 @@ package backoff import ( - "context" - "errors" "time" grpcbackoff "google.golang.org/grpc/backoff" @@ -73,37 +71,3 @@ func (bc Exponential) Backoff(retries int) time.Duration { } return time.Duration(backoff) } - -// ErrResetBackoff is the error to be returned by the function executed by RunF, -// to instruct the latter to reset its backoff state. 
-var ErrResetBackoff = errors.New("reset backoff state") - -// RunF provides a convenient way to run a function f repeatedly until the -// context expires or f returns a non-nil error that is not ErrResetBackoff. -// When f returns ErrResetBackoff, RunF continues to run f, but resets its -// backoff state before doing so. backoff accepts an integer representing the -// number of retries, and returns the amount of time to backoff. -func RunF(ctx context.Context, f func() error, backoff func(int) time.Duration) { - attempt := 0 - timer := time.NewTimer(0) - for ctx.Err() == nil { - select { - case <-timer.C: - case <-ctx.Done(): - timer.Stop() - return - } - - err := f() - if errors.Is(err, ErrResetBackoff) { - timer.Reset(0) - attempt = 0 - continue - } - if err != nil { - return - } - timer.Reset(backoff(attempt)) - attempt++ - } -} diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go index 3c594e6e..08666f62 100644 --- a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go +++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go @@ -200,8 +200,8 @@ func (gsb *Balancer) ExitIdle() { } } -// updateSubConnState forwards the update to the appropriate child. -func (gsb *Balancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState, cb func(balancer.SubConnState)) { +// UpdateSubConnState forwards the update to the appropriate child. +func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { gsb.currentMu.Lock() defer gsb.currentMu.Unlock() gsb.mu.Lock() @@ -214,26 +214,13 @@ func (gsb *Balancer) updateSubConnState(sc balancer.SubConn, state balancer.SubC } else if gsb.balancerPending != nil && gsb.balancerPending.subconns[sc] { balToUpdate = gsb.balancerPending } + gsb.mu.Unlock() if balToUpdate == nil { // SubConn belonged to a stale lb policy that has not yet fully closed, // or the balancer was already closed. - gsb.mu.Unlock() return } - if state.ConnectivityState == connectivity.Shutdown { - delete(balToUpdate.subconns, sc) - } - gsb.mu.Unlock() - if cb != nil { - cb(state) - } else { - balToUpdate.UpdateSubConnState(sc, state) - } -} - -// UpdateSubConnState forwards the update to the appropriate child. -func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { - gsb.updateSubConnState(sc, state, nil) + balToUpdate.UpdateSubConnState(sc, state) } // Close closes any active child balancers. @@ -255,7 +242,7 @@ func (gsb *Balancer) Close() { // // It implements the balancer.ClientConn interface and is passed down in that // capacity to the wrapped balancer. It maintains a set of subConns created by -// the wrapped balancer and calls from the latter to create/update/shutdown +// the wrapped balancer and calls from the latter to create/update/remove // SubConns update this set before being forwarded to the parent ClientConn. // State updates from the wrapped balancer can result in invocation of the // graceful switch logic. @@ -267,10 +254,21 @@ type balancerWrapper struct { subconns map[balancer.SubConn]bool // subconns created by this balancer } -// Close closes the underlying LB policy and shuts down the subconns it -// created. bw must not be referenced via balancerCurrent or balancerPending in -// gsb when called. gsb.mu must not be held. Does not panic with a nil -// receiver. 
+func (bw *balancerWrapper) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + if state.ConnectivityState == connectivity.Shutdown { + bw.gsb.mu.Lock() + delete(bw.subconns, sc) + bw.gsb.mu.Unlock() + } + // There is no need to protect this read with a mutex, as the write to the + // Balancer field happens in SwitchTo, which completes before this can be + // called. + bw.Balancer.UpdateSubConnState(sc, state) +} + +// Close closes the underlying LB policy and removes the subconns it created. bw +// must not be referenced via balancerCurrent or balancerPending in gsb when +// called. gsb.mu must not be held. Does not panic with a nil receiver. func (bw *balancerWrapper) Close() { // before Close is called. if bw == nil { @@ -283,7 +281,7 @@ func (bw *balancerWrapper) Close() { bw.Balancer.Close() bw.gsb.mu.Lock() for sc := range bw.subconns { - sc.Shutdown() + bw.gsb.cc.RemoveSubConn(sc) } bw.gsb.mu.Unlock() } @@ -337,16 +335,13 @@ func (bw *balancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.Ne } bw.gsb.mu.Unlock() - var sc balancer.SubConn - oldListener := opts.StateListener - opts.StateListener = func(state balancer.SubConnState) { bw.gsb.updateSubConnState(sc, state, oldListener) } sc, err := bw.gsb.cc.NewSubConn(addrs, opts) if err != nil { return nil, err } bw.gsb.mu.Lock() if !bw.gsb.balancerCurrentOrPending(bw) { // balancer was closed during this call - sc.Shutdown() + bw.gsb.cc.RemoveSubConn(sc) bw.gsb.mu.Unlock() return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw) } @@ -365,9 +360,13 @@ func (bw *balancerWrapper) ResolveNow(opts resolver.ResolveNowOptions) { } func (bw *balancerWrapper) RemoveSubConn(sc balancer.SubConn) { - // Note: existing third party balancers may call this, so it must remain - // until RemoveSubConn is fully removed. - sc.Shutdown() + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { + bw.gsb.mu.Unlock() + return + } + bw.gsb.mu.Unlock() + bw.gsb.cc.RemoveSubConn(sc) } func (bw *balancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { diff --git a/vendor/google.golang.org/grpc/internal/balancerload/load.go b/vendor/google.golang.org/grpc/internal/balancerload/load.go index 94a08d68..3a905d96 100644 --- a/vendor/google.golang.org/grpc/internal/balancerload/load.go +++ b/vendor/google.golang.org/grpc/internal/balancerload/load.go @@ -25,7 +25,7 @@ import ( // Parser converts loads from metadata into a concrete type. type Parser interface { // Parse parses loads from metadata. - Parse(md metadata.MD) any + Parse(md metadata.MD) interface{} } var parser Parser @@ -38,7 +38,7 @@ func SetParser(lr Parser) { } // Parse calls parser.Read(). -func Parse(md metadata.MD) any { +func Parse(md metadata.MD) interface{} { if parser == nil { return nil } diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go index 755fdebc..809d73cc 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go @@ -28,13 +28,8 @@ import ( "google.golang.org/grpc/internal/grpcutil" ) -var grpclogLogger = grpclog.Component("binarylog") - -// Logger specifies MethodLoggers for method names with a Log call that -// takes a context. -// -// This is used in the 1.0 release of gcp/observability, and thus must not be -// deleted or changed. +// Logger is the global binary logger. 
It can be used to get binary logger for +// each method. type Logger interface { GetMethodLogger(methodName string) MethodLogger } @@ -45,6 +40,8 @@ type Logger interface { // It is used to get a MethodLogger for each individual method. var binLogger Logger +var grpclogLogger = grpclog.Component("binarylog") + // SetLogger sets the binary logger. // // Only call this at init time. diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go index 0f31274a..179f4a26 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -19,7 +19,6 @@ package binarylog import ( - "context" "net" "strings" "sync/atomic" @@ -27,7 +26,7 @@ import ( "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" - binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" + pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" ) @@ -49,11 +48,8 @@ func (g *callIDGenerator) reset() { var idGen callIDGenerator // MethodLogger is the sub-logger for each method. -// -// This is used in the 1.0 release of gcp/observability, and thus must not be -// deleted or changed. type MethodLogger interface { - Log(context.Context, LogEntryConfig) + Log(LogEntryConfig) } // TruncatingMethodLogger is a method logger that truncates headers and messages @@ -68,9 +64,6 @@ type TruncatingMethodLogger struct { } // NewTruncatingMethodLogger returns a new truncating method logger. -// -// This is used in the 1.0 release of gcp/observability, and thus must not be -// deleted or changed. func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger { return &TruncatingMethodLogger{ headerMaxLen: h, @@ -86,7 +79,7 @@ func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger { // Build is an internal only method for building the proto message out of the // input event. It's made public to enable other library to reuse as much logic // in TruncatingMethodLogger as possible. -func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry { +func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *pb.GrpcLogEntry { m := c.toProto() timestamp, _ := ptypes.TimestampProto(time.Now()) m.Timestamp = timestamp @@ -94,22 +87,22 @@ func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry m.SequenceIdWithinCall = ml.idWithinCallGen.next() switch pay := m.Payload.(type) { - case *binlogpb.GrpcLogEntry_ClientHeader: + case *pb.GrpcLogEntry_ClientHeader: m.PayloadTruncated = ml.truncateMetadata(pay.ClientHeader.GetMetadata()) - case *binlogpb.GrpcLogEntry_ServerHeader: + case *pb.GrpcLogEntry_ServerHeader: m.PayloadTruncated = ml.truncateMetadata(pay.ServerHeader.GetMetadata()) - case *binlogpb.GrpcLogEntry_Message: + case *pb.GrpcLogEntry_Message: m.PayloadTruncated = ml.truncateMessage(pay.Message) } return m } // Log creates a proto binary log entry, and logs it to the sink. 
-func (ml *TruncatingMethodLogger) Log(ctx context.Context, c LogEntryConfig) { +func (ml *TruncatingMethodLogger) Log(c LogEntryConfig) { ml.sink.Write(ml.Build(c)) } -func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *binlogpb.Metadata) (truncated bool) { +func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { if ml.headerMaxLen == maxUInt { return false } @@ -128,7 +121,7 @@ func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *binlogpb.Metadata) (tru // but not counted towards the size limit. continue } - currentEntryLen := uint64(len(entry.GetKey())) + uint64(len(entry.GetValue())) + currentEntryLen := uint64(len(entry.Value)) if currentEntryLen > bytesLimit { break } @@ -139,7 +132,7 @@ func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *binlogpb.Metadata) (tru return truncated } -func (ml *TruncatingMethodLogger) truncateMessage(msgPb *binlogpb.Message) (truncated bool) { +func (ml *TruncatingMethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) { if ml.messageMaxLen == maxUInt { return false } @@ -151,11 +144,8 @@ func (ml *TruncatingMethodLogger) truncateMessage(msgPb *binlogpb.Message) (trun } // LogEntryConfig represents the configuration for binary log entry. -// -// This is used in the 1.0 release of gcp/observability, and thus must not be -// deleted or changed. type LogEntryConfig interface { - toProto() *binlogpb.GrpcLogEntry + toProto() *pb.GrpcLogEntry } // ClientHeader configs the binary log entry to be a ClientHeader entry. @@ -169,10 +159,10 @@ type ClientHeader struct { PeerAddr net.Addr } -func (c *ClientHeader) toProto() *binlogpb.GrpcLogEntry { +func (c *ClientHeader) toProto() *pb.GrpcLogEntry { // This function doesn't need to set all the fields (e.g. seq ID). The Log // function will set the fields when necessary. 
- clientHeader := &binlogpb.ClientHeader{ + clientHeader := &pb.ClientHeader{ Metadata: mdToMetadataProto(c.Header), MethodName: c.MethodName, Authority: c.Authority, @@ -180,16 +170,16 @@ func (c *ClientHeader) toProto() *binlogpb.GrpcLogEntry { if c.Timeout > 0 { clientHeader.Timeout = ptypes.DurationProto(c.Timeout) } - ret := &binlogpb.GrpcLogEntry{ - Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER, - Payload: &binlogpb.GrpcLogEntry_ClientHeader{ + ret := &pb.GrpcLogEntry{ + Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER, + Payload: &pb.GrpcLogEntry_ClientHeader{ ClientHeader: clientHeader, }, } if c.OnClientSide { - ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER } if c.PeerAddr != nil { ret.Peer = addrToProto(c.PeerAddr) @@ -205,19 +195,19 @@ type ServerHeader struct { PeerAddr net.Addr } -func (c *ServerHeader) toProto() *binlogpb.GrpcLogEntry { - ret := &binlogpb.GrpcLogEntry{ - Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER, - Payload: &binlogpb.GrpcLogEntry_ServerHeader{ - ServerHeader: &binlogpb.ServerHeader{ +func (c *ServerHeader) toProto() *pb.GrpcLogEntry { + ret := &pb.GrpcLogEntry{ + Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER, + Payload: &pb.GrpcLogEntry_ServerHeader{ + ServerHeader: &pb.ServerHeader{ Metadata: mdToMetadataProto(c.Header), }, }, } if c.OnClientSide { - ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER } if c.PeerAddr != nil { ret.Peer = addrToProto(c.PeerAddr) @@ -230,10 +220,10 @@ type ClientMessage struct { OnClientSide bool // Message can be a proto.Message or []byte. Other messages formats are not // supported. - Message any + Message interface{} } -func (c *ClientMessage) toProto() *binlogpb.GrpcLogEntry { +func (c *ClientMessage) toProto() *pb.GrpcLogEntry { var ( data []byte err error @@ -248,19 +238,19 @@ func (c *ClientMessage) toProto() *binlogpb.GrpcLogEntry { } else { grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte") } - ret := &binlogpb.GrpcLogEntry{ - Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE, - Payload: &binlogpb.GrpcLogEntry_Message{ - Message: &binlogpb.Message{ + ret := &pb.GrpcLogEntry{ + Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE, + Payload: &pb.GrpcLogEntry_Message{ + Message: &pb.Message{ Length: uint32(len(data)), Data: data, }, }, } if c.OnClientSide { - ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER } return ret } @@ -270,10 +260,10 @@ type ServerMessage struct { OnClientSide bool // Message can be a proto.Message or []byte. Other messages formats are not // supported. 
- Message any + Message interface{} } -func (c *ServerMessage) toProto() *binlogpb.GrpcLogEntry { +func (c *ServerMessage) toProto() *pb.GrpcLogEntry { var ( data []byte err error @@ -288,19 +278,19 @@ func (c *ServerMessage) toProto() *binlogpb.GrpcLogEntry { } else { grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte") } - ret := &binlogpb.GrpcLogEntry{ - Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE, - Payload: &binlogpb.GrpcLogEntry_Message{ - Message: &binlogpb.Message{ + ret := &pb.GrpcLogEntry{ + Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE, + Payload: &pb.GrpcLogEntry_Message{ + Message: &pb.Message{ Length: uint32(len(data)), Data: data, }, }, } if c.OnClientSide { - ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER } return ret } @@ -310,15 +300,15 @@ type ClientHalfClose struct { OnClientSide bool } -func (c *ClientHalfClose) toProto() *binlogpb.GrpcLogEntry { - ret := &binlogpb.GrpcLogEntry{ - Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE, +func (c *ClientHalfClose) toProto() *pb.GrpcLogEntry { + ret := &pb.GrpcLogEntry{ + Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE, Payload: nil, // No payload here. } if c.OnClientSide { - ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER } return ret } @@ -334,7 +324,7 @@ type ServerTrailer struct { PeerAddr net.Addr } -func (c *ServerTrailer) toProto() *binlogpb.GrpcLogEntry { +func (c *ServerTrailer) toProto() *pb.GrpcLogEntry { st, ok := status.FromError(c.Err) if !ok { grpclogLogger.Info("binarylogging: error in trailer is not a status error") @@ -350,10 +340,10 @@ func (c *ServerTrailer) toProto() *binlogpb.GrpcLogEntry { grpclogLogger.Infof("binarylogging: failed to marshal status proto: %v", err) } } - ret := &binlogpb.GrpcLogEntry{ - Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER, - Payload: &binlogpb.GrpcLogEntry_Trailer{ - Trailer: &binlogpb.Trailer{ + ret := &pb.GrpcLogEntry{ + Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER, + Payload: &pb.GrpcLogEntry_Trailer{ + Trailer: &pb.Trailer{ Metadata: mdToMetadataProto(c.Trailer), StatusCode: uint32(st.Code()), StatusMessage: st.Message(), @@ -362,9 +352,9 @@ func (c *ServerTrailer) toProto() *binlogpb.GrpcLogEntry { }, } if c.OnClientSide { - ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER } if c.PeerAddr != nil { ret.Peer = addrToProto(c.PeerAddr) @@ -377,15 +367,15 @@ type Cancel struct { OnClientSide bool } -func (c *Cancel) toProto() *binlogpb.GrpcLogEntry { - ret := &binlogpb.GrpcLogEntry{ - Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CANCEL, +func (c *Cancel) toProto() *pb.GrpcLogEntry { + ret := &pb.GrpcLogEntry{ + Type: pb.GrpcLogEntry_EVENT_TYPE_CANCEL, Payload: nil, } if c.OnClientSide { - ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER } return ret } @@ -402,15 +392,15 @@ func metadataKeyOmit(key string) bool { return strings.HasPrefix(key, "grpc-") } -func mdToMetadataProto(md metadata.MD) *binlogpb.Metadata { - ret := 
&binlogpb.Metadata{} +func mdToMetadataProto(md metadata.MD) *pb.Metadata { + ret := &pb.Metadata{} for k, vv := range md { if metadataKeyOmit(k) { continue } for _, v := range vv { ret.Entry = append(ret.Entry, - &binlogpb.MetadataEntry{ + &pb.MetadataEntry{ Key: k, Value: []byte(v), }, @@ -420,26 +410,26 @@ func mdToMetadataProto(md metadata.MD) *binlogpb.Metadata { return ret } -func addrToProto(addr net.Addr) *binlogpb.Address { - ret := &binlogpb.Address{} +func addrToProto(addr net.Addr) *pb.Address { + ret := &pb.Address{} switch a := addr.(type) { case *net.TCPAddr: if a.IP.To4() != nil { - ret.Type = binlogpb.Address_TYPE_IPV4 + ret.Type = pb.Address_TYPE_IPV4 } else if a.IP.To16() != nil { - ret.Type = binlogpb.Address_TYPE_IPV6 + ret.Type = pb.Address_TYPE_IPV6 } else { - ret.Type = binlogpb.Address_TYPE_UNKNOWN + ret.Type = pb.Address_TYPE_UNKNOWN // Do not set address and port fields. break } ret.Address = a.IP.String() ret.IpPort = uint32(a.Port) case *net.UnixAddr: - ret.Type = binlogpb.Address_TYPE_UNIX + ret.Type = pb.Address_TYPE_UNIX ret.Address = a.String() default: - ret.Type = binlogpb.Address_TYPE_UNKNOWN + ret.Type = pb.Address_TYPE_UNKNOWN } return ret } diff --git a/vendor/google.golang.org/grpc/internal/binarylog/sink.go b/vendor/google.golang.org/grpc/internal/binarylog/sink.go index 264de387..c2fdd58b 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/sink.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/sink.go @@ -26,7 +26,7 @@ import ( "time" "github.com/golang/protobuf/proto" - binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" + pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" ) var ( @@ -42,15 +42,15 @@ type Sink interface { // Write will be called to write the log entry into the sink. // // It should be thread-safe so it can be called in parallel. - Write(*binlogpb.GrpcLogEntry) error + Write(*pb.GrpcLogEntry) error // Close will be called when the Sink is replaced by a new Sink. Close() error } type noopSink struct{} -func (ns *noopSink) Write(*binlogpb.GrpcLogEntry) error { return nil } -func (ns *noopSink) Close() error { return nil } +func (ns *noopSink) Write(*pb.GrpcLogEntry) error { return nil } +func (ns *noopSink) Close() error { return nil } // newWriterSink creates a binary log sink with the given writer. // @@ -66,7 +66,7 @@ type writerSink struct { out io.Writer } -func (ws *writerSink) Write(e *binlogpb.GrpcLogEntry) error { +func (ws *writerSink) Write(e *pb.GrpcLogEntry) error { b, err := proto.Marshal(e) if err != nil { grpclogLogger.Errorf("binary logging: failed to marshal proto message: %v", err) @@ -96,7 +96,7 @@ type bufferedSink struct { done chan struct{} } -func (fs *bufferedSink) Write(e *binlogpb.GrpcLogEntry) error { +func (fs *bufferedSink) Write(e *pb.GrpcLogEntry) error { fs.mu.Lock() defer fs.mu.Unlock() if !fs.flusherStarted { diff --git a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go index 11f91668..9f6a0c12 100644 --- a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go +++ b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go @@ -18,10 +18,7 @@ // Package buffer provides an implementation of an unbounded buffer. package buffer -import ( - "errors" - "sync" -) +import "sync" // Unbounded is an implementation of an unbounded buffer which does not use // extra goroutines. 
This is typically used for passing updates from one entity @@ -31,50 +28,42 @@ import ( // the underlying mutex used for synchronization. // // Unbounded supports values of any type to be stored in it by using a channel -// of `any`. This means that a call to Put() incurs an extra memory allocation, -// and also that users need a type assertion while reading. For performance -// critical code paths, using Unbounded is strongly discouraged and defining a -// new type specific implementation of this buffer is preferred. See +// of `interface{}`. This means that a call to Put() incurs an extra memory +// allocation, and also that users need a type assertion while reading. For +// performance critical code paths, using Unbounded is strongly discouraged and +// defining a new type specific implementation of this buffer is preferred. See // internal/transport/transport.go for an example of this. type Unbounded struct { - c chan any - closed bool - closing bool + c chan interface{} mu sync.Mutex - backlog []any + backlog []interface{} } // NewUnbounded returns a new instance of Unbounded. func NewUnbounded() *Unbounded { - return &Unbounded{c: make(chan any, 1)} + return &Unbounded{c: make(chan interface{}, 1)} } -var errBufferClosed = errors.New("Put called on closed buffer.Unbounded") - // Put adds t to the unbounded buffer. -func (b *Unbounded) Put(t any) error { +func (b *Unbounded) Put(t interface{}) { b.mu.Lock() - defer b.mu.Unlock() - if b.closing { - return errBufferClosed - } if len(b.backlog) == 0 { select { case b.c <- t: - return nil + b.mu.Unlock() + return default: } } b.backlog = append(b.backlog, t) - return nil + b.mu.Unlock() } -// Load sends the earliest buffered data, if any, onto the read channel returned -// by Get(). Users are expected to call this every time they successfully read a +// Load sends the earliest buffered data, if any, onto the read channel +// returned by Get(). Users are expected to call this every time they read a // value from the read channel. func (b *Unbounded) Load() { b.mu.Lock() - defer b.mu.Unlock() if len(b.backlog) > 0 { select { case b.c <- b.backlog[0]: @@ -82,9 +71,8 @@ func (b *Unbounded) Load() { b.backlog = b.backlog[1:] default: } - } else if b.closing && !b.closed { - close(b.c) } + b.mu.Unlock() } // Get returns a read channel on which values added to the buffer, via Put(), @@ -92,25 +80,6 @@ func (b *Unbounded) Load() { // // Upon reading a value from this channel, users are expected to call Load() to // send the next buffered value onto the channel if there is any. -// -// If the unbounded buffer is closed, the read channel returned by this method -// is closed after all data is drained. -func (b *Unbounded) Get() <-chan any { +func (b *Unbounded) Get() <-chan interface{} { return b.c } - -// Close closes the unbounded buffer. No subsequent data may be Put(), and the -// channel returned from Get() will be closed after all the data is read and -// Load() is called for the final time. 
-func (b *Unbounded) Close() { - b.mu.Lock() - defer b.mu.Unlock() - if b.closing { - return - } - b.closing = true - if len(b.backlog) == 0 { - b.closed = true - close(b.c) - } -} diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go index fc094f34..777cbcd7 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -24,14 +24,15 @@ package channelz import ( + "context" "errors" + "fmt" "sort" "sync" "sync/atomic" "time" "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal" ) const ( @@ -39,11 +40,8 @@ const ( ) var ( - // IDGen is the global channelz entity ID generator. It should not be used - // outside this package except by tests. - IDGen IDGenerator - - db dbWrapper + db dbWrapper + idGen idGenerator // EntryPerPage defines the number of channelz entries to be shown on a web page. EntryPerPage = int64(50) curState int32 @@ -54,20 +52,14 @@ var ( func TurnOn() { if !IsOn() { db.set(newChannelMap()) - IDGen.Reset() + idGen.reset() atomic.StoreInt32(&curState, 1) } } -func init() { - internal.ChannelzTurnOffForTesting = func() { - atomic.StoreInt32(&curState, 0) - } -} - // IsOn returns whether channelz data collection is on. func IsOn() bool { - return atomic.LoadInt32(&curState) == 1 + return atomic.CompareAndSwapInt32(&curState, 1, 1) } // SetMaxTraceEntry sets maximum number of trace entry per entity (i.e. channel/subchannel). @@ -105,6 +97,43 @@ func (d *dbWrapper) get() *channelMap { return d.DB } +// NewChannelzStorageForTesting initializes channelz data storage and id +// generator for testing purposes. +// +// Returns a cleanup function to be invoked by the test, which waits for up to +// 10s for all channelz state to be reset by the grpc goroutines when those +// entities get closed. This cleanup function helps with ensuring that tests +// don't mess up each other. +func NewChannelzStorageForTesting() (cleanup func() error) { + db.set(newChannelMap()) + idGen.reset() + + return func() error { + cm := db.get() + if cm == nil { + return nil + } + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + ticker := time.NewTicker(10 * time.Millisecond) + defer ticker.Stop() + for { + cm.mu.RLock() + topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets := len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets) + cm.mu.RUnlock() + + if err := ctx.Err(); err != nil { + return fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets) + } + if topLevelChannels == 0 && servers == 0 && channels == 0 && subChannels == 0 && listenSockets == 0 && normalSockets == 0 { + return nil + } + <-ticker.C + } + } +} + // GetTopChannels returns a slice of top channel's ChannelMetric, along with a // boolean indicating whether there's more top channels to be queried for. // @@ -164,7 +193,7 @@ func GetServer(id int64) *ServerMetric { // // If channelz is not turned ON, the channelz database is not mutated. 
func RegisterChannel(c Channel, pid *Identifier, ref string) *Identifier { - id := IDGen.genID() + id := idGen.genID() var parent int64 isTopChannel := true if pid != nil { @@ -200,7 +229,7 @@ func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, er if pid == nil { return nil, errors.New("a SubChannel's parent id cannot be nil") } - id := IDGen.genID() + id := idGen.genID() if !IsOn() { return newIdentifer(RefSubChannel, id, pid), nil } @@ -222,7 +251,7 @@ func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, er // // If channelz is not turned ON, the channelz database is not mutated. func RegisterServer(s Server, ref string) *Identifier { - id := IDGen.genID() + id := idGen.genID() if !IsOn() { return newIdentifer(RefServer, id, nil) } @@ -248,7 +277,7 @@ func RegisterListenSocket(s Socket, pid *Identifier, ref string) (*Identifier, e if pid == nil { return nil, errors.New("a ListenSocket's parent id cannot be 0") } - id := IDGen.genID() + id := idGen.genID() if !IsOn() { return newIdentifer(RefListenSocket, id, pid), nil } @@ -268,7 +297,7 @@ func RegisterNormalSocket(s Socket, pid *Identifier, ref string) (*Identifier, e if pid == nil { return nil, errors.New("a NormalSocket's parent id cannot be 0") } - id := IDGen.genID() + id := idGen.genID() if !IsOn() { return newIdentifer(RefNormalSocket, id, pid), nil } @@ -747,17 +776,14 @@ func (c *channelMap) GetServer(id int64) *ServerMetric { return sm } -// IDGenerator is an incrementing atomic that tracks IDs for channelz entities. -type IDGenerator struct { +type idGenerator struct { id int64 } -// Reset resets the generated ID back to zero. Should only be used at -// initialization or by tests sensitive to the ID number. -func (i *IDGenerator) Reset() { +func (i *idGenerator) reset() { atomic.StoreInt64(&i.id, 0) } -func (i *IDGenerator) genID() int64 { +func (i *idGenerator) genID() int64 { return atomic.AddInt64(&i.id, 1) } diff --git a/vendor/google.golang.org/grpc/internal/channelz/logging.go b/vendor/google.golang.org/grpc/internal/channelz/logging.go index f89e6f77..8e13a3d2 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/logging.go +++ b/vendor/google.golang.org/grpc/internal/channelz/logging.go @@ -31,7 +31,7 @@ func withParens(id *Identifier) string { } // Info logs and adds a trace event if channelz is on. -func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...any) { +func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprint(args...), Severity: CtInfo, @@ -39,7 +39,7 @@ func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...any) { } // Infof logs and adds a trace event if channelz is on. -func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) { +func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprintf(format, args...), Severity: CtInfo, @@ -47,7 +47,7 @@ func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) } // Warning logs and adds a trace event if channelz is on. -func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...any) { +func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprint(args...), Severity: CtWarning, @@ -55,7 +55,7 @@ func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...any) { } // Warningf logs and adds a trace event if channelz is on. 
-func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) { +func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprintf(format, args...), Severity: CtWarning, @@ -63,7 +63,7 @@ func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...an } // Error logs and adds a trace event if channelz is on. -func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...any) { +func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprint(args...), Severity: CtError, @@ -71,7 +71,7 @@ func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...any) { } // Errorf logs and adds a trace event if channelz is on. -func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) { +func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprintf(format, args...), Severity: CtError, diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go index 1d4020f5..7b2f350e 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/types.go +++ b/vendor/google.golang.org/grpc/internal/channelz/types.go @@ -628,7 +628,6 @@ type tracedChannel interface { type channelTrace struct { cm *channelMap - clearCalled bool createdTime time.Time eventCount int64 mu sync.Mutex @@ -657,10 +656,6 @@ func (c *channelTrace) append(e *TraceEvent) { } func (c *channelTrace) clear() { - if c.clearCalled { - return - } - c.clearCalled = true c.mu.Lock() for _, e := range c.events { if e.RefID != 0 { diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go index 98288c3f..8d194e44 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go @@ -23,7 +23,7 @@ import ( ) // GetSocketOption gets the socket option info of the conn. -func GetSocketOption(socket any) *SocketOptionData { +func GetSocketOption(socket interface{}) *SocketOptionData { c, ok := socket.(syscall.Conn) if !ok { return nil diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go index b5568b22..837ddc40 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go @@ -22,6 +22,6 @@ package channelz // GetSocketOption gets the socket option info of the conn. -func GetSocketOption(c any) *SocketOptionData { +func GetSocketOption(c interface{}) *SocketOptionData { return nil } diff --git a/vendor/google.golang.org/grpc/internal/credentials/credentials.go b/vendor/google.golang.org/grpc/internal/credentials/credentials.go index 9deee7f6..32c9b590 100644 --- a/vendor/google.golang.org/grpc/internal/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/internal/credentials/credentials.go @@ -25,12 +25,12 @@ import ( type requestInfoKey struct{} // NewRequestInfoContext creates a context with ri. -func NewRequestInfoContext(ctx context.Context, ri any) context.Context { +func NewRequestInfoContext(ctx context.Context, ri interface{}) context.Context { return context.WithValue(ctx, requestInfoKey{}, ri) } // RequestInfoFromContext extracts the RequestInfo from ctx. 
-func RequestInfoFromContext(ctx context.Context) any { +func RequestInfoFromContext(ctx context.Context) interface{} { return ctx.Value(requestInfoKey{}) } @@ -39,11 +39,11 @@ func RequestInfoFromContext(ctx context.Context) any { type clientHandshakeInfoKey struct{} // ClientHandshakeInfoFromContext extracts the ClientHandshakeInfo from ctx. -func ClientHandshakeInfoFromContext(ctx context.Context) any { +func ClientHandshakeInfoFromContext(ctx context.Context) interface{} { return ctx.Value(clientHandshakeInfoKey{}) } // NewClientHandshakeInfoContext creates a context with chi. -func NewClientHandshakeInfoContext(ctx context.Context, chi any) context.Context { +func NewClientHandshakeInfoContext(ctx context.Context, chi interface{}) context.Context { return context.WithValue(ctx, clientHandshakeInfoKey{}, chi) } diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 685a3cb4..7edd196b 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -21,49 +21,19 @@ package envconfig import ( "os" - "strconv" "strings" ) +const ( + prefix = "GRPC_GO_" + txtErrIgnoreStr = prefix + "IGNORE_TXT_ERRORS" + advertiseCompressorsStr = prefix + "ADVERTISE_COMPRESSORS" +) + var ( // TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). - TXTErrIgnore = boolFromEnv("GRPC_GO_IGNORE_TXT_ERRORS", true) + TXTErrIgnore = !strings.EqualFold(os.Getenv(txtErrIgnoreStr), "false") // AdvertiseCompressors is set if registered compressor should be advertised // ("GRPC_GO_ADVERTISE_COMPRESSORS" is not "false"). - AdvertiseCompressors = boolFromEnv("GRPC_GO_ADVERTISE_COMPRESSORS", true) - // RingHashCap indicates the maximum ring size which defaults to 4096 - // entries but may be overridden by setting the environment variable - // "GRPC_RING_HASH_CAP". This does not override the default bounds - // checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M). - RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024) - // LeastRequestLB is set if we should support the least_request_experimental - // LB policy, which can be enabled by setting the environment variable - // "GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST" to "true". - LeastRequestLB = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST", false) - // ALTSMaxConcurrentHandshakes is the maximum number of concurrent ALTS - // handshakes that can be performed. - ALTSMaxConcurrentHandshakes = uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100) + AdvertiseCompressors = !strings.EqualFold(os.Getenv(advertiseCompressorsStr), "false") ) - -func boolFromEnv(envVar string, def bool) bool { - if def { - // The default is true; return true unless the variable is "false". - return !strings.EqualFold(os.Getenv(envVar), "false") - } - // The default is false; return false unless the variable is "true". 
- return strings.EqualFold(os.Getenv(envVar), "true") -} - -func uint64FromEnv(envVar string, def, min, max uint64) uint64 { - v, err := strconv.ParseUint(os.Getenv(envVar), 10, 64) - if err != nil { - return def - } - if v < min { - return min - } - if v > max { - return max - } - return v -} diff --git a/vendor/google.golang.org/grpc/internal/envconfig/observability.go b/vendor/google.golang.org/grpc/internal/envconfig/observability.go index dd314cfb..821dd0a7 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/observability.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/observability.go @@ -28,15 +28,9 @@ const ( var ( // ObservabilityConfig is the json configuration for the gcp/observability // package specified directly in the envObservabilityConfig env var. - // - // This is used in the 1.0 release of gcp/observability, and thus must not be - // deleted or changed. ObservabilityConfig = os.Getenv(envObservabilityConfig) // ObservabilityConfigFile is the json configuration for the // gcp/observability specified in a file with the location specified in // envObservabilityConfigFile env var. - // - // This is used in the 1.0 release of gcp/observability, and thus must not be - // deleted or changed. ObservabilityConfigFile = os.Getenv(envObservabilityConfigFile) ) diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go index 29f234ac..af09711a 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/xds.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go @@ -20,6 +20,7 @@ package envconfig import ( "os" + "strings" ) const ( @@ -35,6 +36,16 @@ const ( // // When both bootstrap FileName and FileContent are set, FileName is used. XDSBootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG" + + ringHashSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" + clientSideSecuritySupportEnv = "GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT" + aggregateAndDNSSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" + rbacSupportEnv = "GRPC_XDS_EXPERIMENTAL_RBAC" + outlierDetectionSupportEnv = "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" + federationEnv = "GRPC_EXPERIMENTAL_XDS_FEDERATION" + rlsInXDSEnv = "GRPC_EXPERIMENTAL_XDS_RLS_LB" + + c2pResolverTestOnlyTrafficDirectorURIEnv = "GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI" ) var ( @@ -50,7 +61,41 @@ var ( // // When both bootstrap FileName and FileContent are set, FileName is used. XDSBootstrapFileContent = os.Getenv(XDSBootstrapFileContentEnv) + // XDSRingHash indicates whether ring hash support is enabled, which can be + // disabled by setting the environment variable + // "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" to "false". + XDSRingHash = !strings.EqualFold(os.Getenv(ringHashSupportEnv), "false") + // XDSClientSideSecurity is used to control processing of security + // configuration on the client-side. + // + // Note that there is no env var protection for the server-side because we + // have a brand new API on the server-side and users explicitly need to use + // the new API to get security integration on the server. + XDSClientSideSecurity = !strings.EqualFold(os.Getenv(clientSideSecuritySupportEnv), "false") + // XDSAggregateAndDNS indicates whether processing of aggregated cluster + // and DNS cluster is enabled, which can be enabled by setting the + // environment variable + // "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" to + // "true". 
+ XDSAggregateAndDNS = !strings.EqualFold(os.Getenv(aggregateAndDNSSupportEnv), "false") + + // XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled, + // which can be disabled by setting the environment variable + // "GRPC_XDS_EXPERIMENTAL_RBAC" to "false". + XDSRBAC = !strings.EqualFold(os.Getenv(rbacSupportEnv), "false") + // XDSOutlierDetection indicates whether outlier detection support is + // enabled, which can be disabled by setting the environment variable + // "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "false". + XDSOutlierDetection = !strings.EqualFold(os.Getenv(outlierDetectionSupportEnv), "false") + // XDSFederation indicates whether federation support is enabled. + XDSFederation = strings.EqualFold(os.Getenv(federationEnv), "true") + + // XDSRLS indicates whether processing of Cluster Specifier plugins and + // support for the RLS CLuster Specifier is enabled, which can be enabled by + // setting the environment variable "GRPC_EXPERIMENTAL_XDS_RLS_LB" to + // "true". + XDSRLS = strings.EqualFold(os.Getenv(rlsInXDSEnv), "true") // C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing. - C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI") + C2PResolverTestOnlyTrafficDirectorURI = os.Getenv(c2pResolverTestOnlyTrafficDirectorURIEnv) ) diff --git a/vendor/google.golang.org/grpc/internal/experimental.go b/vendor/google.golang.org/grpc/internal/experimental.go deleted file mode 100644 index 7f7044e1..00000000 --- a/vendor/google.golang.org/grpc/internal/experimental.go +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright 2023 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package internal - -var ( - // WithRecvBufferPool is implemented by the grpc package and returns a dial - // option to configure a shared buffer pool for a grpc.ClientConn. - WithRecvBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption - - // RecvBufferPool is implemented by the grpc package and returns a server - // option to configure a shared buffer pool for a grpc.Server. - RecvBufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption -) diff --git a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go index bfc45102..b68e26a3 100644 --- a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go +++ b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go @@ -30,7 +30,7 @@ var Logger LoggerV2 var DepthLogger DepthLoggerV2 // InfoDepth logs to the INFO log at the specified depth. -func InfoDepth(depth int, args ...any) { +func InfoDepth(depth int, args ...interface{}) { if DepthLogger != nil { DepthLogger.InfoDepth(depth, args...) } else { @@ -39,7 +39,7 @@ func InfoDepth(depth int, args ...any) { } // WarningDepth logs to the WARNING log at the specified depth. 
-func WarningDepth(depth int, args ...any) { +func WarningDepth(depth int, args ...interface{}) { if DepthLogger != nil { DepthLogger.WarningDepth(depth, args...) } else { @@ -48,7 +48,7 @@ func WarningDepth(depth int, args ...any) { } // ErrorDepth logs to the ERROR log at the specified depth. -func ErrorDepth(depth int, args ...any) { +func ErrorDepth(depth int, args ...interface{}) { if DepthLogger != nil { DepthLogger.ErrorDepth(depth, args...) } else { @@ -57,7 +57,7 @@ func ErrorDepth(depth int, args ...any) { } // FatalDepth logs to the FATAL log at the specified depth. -func FatalDepth(depth int, args ...any) { +func FatalDepth(depth int, args ...interface{}) { if DepthLogger != nil { DepthLogger.FatalDepth(depth, args...) } else { @@ -71,35 +71,35 @@ func FatalDepth(depth int, args ...any) { // is defined here to avoid a circular dependency. type LoggerV2 interface { // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. - Info(args ...any) + Info(args ...interface{}) // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. - Infoln(args ...any) + Infoln(args ...interface{}) // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. - Infof(format string, args ...any) + Infof(format string, args ...interface{}) // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. - Warning(args ...any) + Warning(args ...interface{}) // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. - Warningln(args ...any) + Warningln(args ...interface{}) // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. - Warningf(format string, args ...any) + Warningf(format string, args ...interface{}) // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. - Error(args ...any) + Error(args ...interface{}) // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. - Errorln(args ...any) + Errorln(args ...interface{}) // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. - Errorf(format string, args ...any) + Errorf(format string, args ...interface{}) // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatal(args ...any) + Fatal(args ...interface{}) // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatalln(args ...any) + Fatalln(args ...interface{}) // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatalf(format string, args ...any) + Fatalf(format string, args ...interface{}) // V reports whether verbosity level l is at least the requested verbose level. V(l int) bool } @@ -116,11 +116,11 @@ type LoggerV2 interface { // later release. type DepthLoggerV2 interface { // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. - InfoDepth(depth int, args ...any) + InfoDepth(depth int, args ...interface{}) // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. 
- WarningDepth(depth int, args ...any) + WarningDepth(depth int, args ...interface{}) // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. - ErrorDepth(depth int, args ...any) + ErrorDepth(depth int, args ...interface{}) // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. - FatalDepth(depth int, args ...any) + FatalDepth(depth int, args ...interface{}) } diff --git a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go index faa998de..82af70e9 100644 --- a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go +++ b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go @@ -31,7 +31,7 @@ type PrefixLogger struct { } // Infof does info logging. -func (pl *PrefixLogger) Infof(format string, args ...any) { +func (pl *PrefixLogger) Infof(format string, args ...interface{}) { if pl != nil { // Handle nil, so the tests can pass in a nil logger. format = pl.prefix + format @@ -42,7 +42,7 @@ func (pl *PrefixLogger) Infof(format string, args ...any) { } // Warningf does warning logging. -func (pl *PrefixLogger) Warningf(format string, args ...any) { +func (pl *PrefixLogger) Warningf(format string, args ...interface{}) { if pl != nil { format = pl.prefix + format pl.logger.WarningDepth(1, fmt.Sprintf(format, args...)) @@ -52,7 +52,7 @@ func (pl *PrefixLogger) Warningf(format string, args ...any) { } // Errorf does error logging. -func (pl *PrefixLogger) Errorf(format string, args ...any) { +func (pl *PrefixLogger) Errorf(format string, args ...interface{}) { if pl != nil { format = pl.prefix + format pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...)) @@ -62,10 +62,7 @@ func (pl *PrefixLogger) Errorf(format string, args ...any) { } // Debugf does info logging at verbose level 2. -func (pl *PrefixLogger) Debugf(format string, args ...any) { - // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe - // rewrite PrefixLogger a little to ensure that we don't use the global - // `Logger` here, and instead use the `logger` field. +func (pl *PrefixLogger) Debugf(format string, args ...interface{}) { if !Logger.V(2) { return } @@ -76,15 +73,6 @@ func (pl *PrefixLogger) Debugf(format string, args ...any) { return } InfoDepth(1, fmt.Sprintf(format, args...)) - -} - -// V reports whether verbosity level l is at least the requested verbose level. -func (pl *PrefixLogger) V(l int) bool { - // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe - // rewrite PrefixLogger a little to ensure that we don't use the global - // `Logger` here, and instead use the `logger` field. - return Logger.V(l) } // NewPrefixLogger creates a prefix logger with the given prefix. diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go index aa97273e..517ea706 100644 --- a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go +++ b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go @@ -72,24 +72,3 @@ func Uint64() uint64 { defer mu.Unlock() return r.Uint64() } - -// Uint32 implements rand.Uint32 on the grpcrand global source. -func Uint32() uint32 { - mu.Lock() - defer mu.Unlock() - return r.Uint32() -} - -// ExpFloat64 implements rand.ExpFloat64 on the grpcrand global source. 
-func ExpFloat64() float64 { - mu.Lock() - defer mu.Unlock() - return r.ExpFloat64() -} - -// Shuffle implements rand.Shuffle on the grpcrand global source. -var Shuffle = func(n int, f func(int, int)) { - mu.Lock() - defer mu.Unlock() - r.Shuffle(n, f) -} diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go deleted file mode 100644 index f7f40a16..00000000 --- a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go +++ /dev/null @@ -1,100 +0,0 @@ -/* - * - * Copyright 2022 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpcsync - -import ( - "context" - - "google.golang.org/grpc/internal/buffer" -) - -// CallbackSerializer provides a mechanism to schedule callbacks in a -// synchronized manner. It provides a FIFO guarantee on the order of execution -// of scheduled callbacks. New callbacks can be scheduled by invoking the -// Schedule() method. -// -// This type is safe for concurrent access. -type CallbackSerializer struct { - // done is closed once the serializer is shut down completely, i.e all - // scheduled callbacks are executed and the serializer has deallocated all - // its resources. - done chan struct{} - - callbacks *buffer.Unbounded -} - -// NewCallbackSerializer returns a new CallbackSerializer instance. The provided -// context will be passed to the scheduled callbacks. Users should cancel the -// provided context to shutdown the CallbackSerializer. It is guaranteed that no -// callbacks will be added once this context is canceled, and any pending un-run -// callbacks will be executed before the serializer is shut down. -func NewCallbackSerializer(ctx context.Context) *CallbackSerializer { - cs := &CallbackSerializer{ - done: make(chan struct{}), - callbacks: buffer.NewUnbounded(), - } - go cs.run(ctx) - return cs -} - -// Schedule adds a callback to be scheduled after existing callbacks are run. -// -// Callbacks are expected to honor the context when performing any blocking -// operations, and should return early when the context is canceled. -// -// Return value indicates if the callback was successfully added to the list of -// callbacks to be executed by the serializer. It is not possible to add -// callbacks once the context passed to NewCallbackSerializer is cancelled. -func (cs *CallbackSerializer) Schedule(f func(ctx context.Context)) bool { - return cs.callbacks.Put(f) == nil -} - -func (cs *CallbackSerializer) run(ctx context.Context) { - defer close(cs.done) - - // TODO: when Go 1.21 is the oldest supported version, this loop and Close - // can be replaced with: - // - // context.AfterFunc(ctx, cs.callbacks.Close) - for ctx.Err() == nil { - select { - case <-ctx.Done(): - // Do nothing here. Next iteration of the for loop will not happen, - // since ctx.Err() would be non-nil. 
- case cb := <-cs.callbacks.Get(): - cs.callbacks.Load() - cb.(func(context.Context))(ctx) - } - } - - // Close the buffer to prevent new callbacks from being added. - cs.callbacks.Close() - - // Run all pending callbacks. - for cb := range cs.callbacks.Get() { - cs.callbacks.Load() - cb.(func(context.Context))(ctx) - } -} - -// Done returns a channel that is closed after the context passed to -// NewCallbackSerializer is canceled and all callbacks have been executed. -func (cs *CallbackSerializer) Done() <-chan struct{} { - return cs.done -} diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go deleted file mode 100644 index aef8cec1..00000000 --- a/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go +++ /dev/null @@ -1,121 +0,0 @@ -/* - * - * Copyright 2023 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpcsync - -import ( - "context" - "sync" -) - -// Subscriber represents an entity that is subscribed to messages published on -// a PubSub. It wraps the callback to be invoked by the PubSub when a new -// message is published. -type Subscriber interface { - // OnMessage is invoked when a new message is published. Implementations - // must not block in this method. - OnMessage(msg any) -} - -// PubSub is a simple one-to-many publish-subscribe system that supports -// messages of arbitrary type. It guarantees that messages are delivered in -// the same order in which they were published. -// -// Publisher invokes the Publish() method to publish new messages, while -// subscribers interested in receiving these messages register a callback -// via the Subscribe() method. -// -// Once a PubSub is stopped, no more messages can be published, but any pending -// published messages will be delivered to the subscribers. Done may be used -// to determine when all published messages have been delivered. -type PubSub struct { - cs *CallbackSerializer - - // Access to the below fields are guarded by this mutex. - mu sync.Mutex - msg any - subscribers map[Subscriber]bool -} - -// NewPubSub returns a new PubSub instance. Users should cancel the -// provided context to shutdown the PubSub. -func NewPubSub(ctx context.Context) *PubSub { - return &PubSub{ - cs: NewCallbackSerializer(ctx), - subscribers: map[Subscriber]bool{}, - } -} - -// Subscribe registers the provided Subscriber to the PubSub. -// -// If the PubSub contains a previously published message, the Subscriber's -// OnMessage() callback will be invoked asynchronously with the existing -// message to begin with, and subsequently for every newly published message. -// -// The caller is responsible for invoking the returned cancel function to -// unsubscribe itself from the PubSub. 
-func (ps *PubSub) Subscribe(sub Subscriber) (cancel func()) { - ps.mu.Lock() - defer ps.mu.Unlock() - - ps.subscribers[sub] = true - - if ps.msg != nil { - msg := ps.msg - ps.cs.Schedule(func(context.Context) { - ps.mu.Lock() - defer ps.mu.Unlock() - if !ps.subscribers[sub] { - return - } - sub.OnMessage(msg) - }) - } - - return func() { - ps.mu.Lock() - defer ps.mu.Unlock() - delete(ps.subscribers, sub) - } -} - -// Publish publishes the provided message to the PubSub, and invokes -// callbacks registered by subscribers asynchronously. -func (ps *PubSub) Publish(msg any) { - ps.mu.Lock() - defer ps.mu.Unlock() - - ps.msg = msg - for sub := range ps.subscribers { - s := sub - ps.cs.Schedule(func(context.Context) { - ps.mu.Lock() - defer ps.mu.Unlock() - if !ps.subscribers[s] { - return - } - s.OnMessage(msg) - }) - } -} - -// Done returns a channel that is closed after the context passed to NewPubSub -// is canceled and all updates have been sent to subscribers. -func (ps *PubSub) Done() <-chan struct{} { - return ps.cs.Done() -} diff --git a/vendor/google.golang.org/grpc/internal/idle/idle.go b/vendor/google.golang.org/grpc/internal/idle/idle.go deleted file mode 100644 index fe49cb74..00000000 --- a/vendor/google.golang.org/grpc/internal/idle/idle.go +++ /dev/null @@ -1,278 +0,0 @@ -/* - * - * Copyright 2023 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package idle contains a component for managing idleness (entering and exiting) -// based on RPC activity. -package idle - -import ( - "fmt" - "math" - "sync" - "sync/atomic" - "time" -) - -// For overriding in unit tests. -var timeAfterFunc = func(d time.Duration, f func()) *time.Timer { - return time.AfterFunc(d, f) -} - -// Enforcer is the functionality provided by grpc.ClientConn to enter -// and exit from idle mode. -type Enforcer interface { - ExitIdleMode() error - EnterIdleMode() -} - -// Manager implements idleness detection and calls the configured Enforcer to -// enter/exit idle mode when appropriate. Must be created by NewManager. -type Manager struct { - // State accessed atomically. - lastCallEndTime int64 // Unix timestamp in nanos; time when the most recent RPC completed. - activeCallsCount int32 // Count of active RPCs; -math.MaxInt32 means channel is idle or is trying to get there. - activeSinceLastTimerCheck int32 // Boolean; True if there was an RPC since the last timer callback. - closed int32 // Boolean; True when the manager is closed. - - // Can be accessed without atomics or mutex since these are set at creation - // time and read-only after that. - enforcer Enforcer // Functionality provided by grpc.ClientConn. - timeout time.Duration - - // idleMu is used to guarantee mutual exclusion in two scenarios: - // - Opposing intentions: - // - a: Idle timeout has fired and handleIdleTimeout() is trying to put - // the channel in idle mode because the channel has been inactive. 
- // - b: At the same time an RPC is made on the channel, and OnCallBegin() - // is trying to prevent the channel from going idle. - // - Competing intentions: - // - The channel is in idle mode and there are multiple RPCs starting at - // the same time, all trying to move the channel out of idle. Only one - // of them should succeed in doing so, while the other RPCs should - // piggyback on the first one and be successfully handled. - idleMu sync.RWMutex - actuallyIdle bool - timer *time.Timer -} - -// NewManager creates a new idleness manager implementation for the -// given idle timeout. It begins in idle mode. -func NewManager(enforcer Enforcer, timeout time.Duration) *Manager { - return &Manager{ - enforcer: enforcer, - timeout: timeout, - actuallyIdle: true, - activeCallsCount: -math.MaxInt32, - } -} - -// resetIdleTimerLocked resets the idle timer to the given duration. Called -// when exiting idle mode or when the timer fires and we need to reset it. -func (m *Manager) resetIdleTimerLocked(d time.Duration) { - if m.isClosed() || m.timeout == 0 || m.actuallyIdle { - return - } - - // It is safe to ignore the return value from Reset() because this method is - // only ever called from the timer callback or when exiting idle mode. - if m.timer != nil { - m.timer.Stop() - } - m.timer = timeAfterFunc(d, m.handleIdleTimeout) -} - -func (m *Manager) resetIdleTimer(d time.Duration) { - m.idleMu.Lock() - defer m.idleMu.Unlock() - m.resetIdleTimerLocked(d) -} - -// handleIdleTimeout is the timer callback that is invoked upon expiry of the -// configured idle timeout. The channel is considered inactive if there are no -// ongoing calls and no RPC activity since the last time the timer fired. -func (m *Manager) handleIdleTimeout() { - if m.isClosed() { - return - } - - if atomic.LoadInt32(&m.activeCallsCount) > 0 { - m.resetIdleTimer(m.timeout) - return - } - - // There has been activity on the channel since we last got here. Reset the - // timer and return. - if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 { - // Set the timer to fire after a duration of idle timeout, calculated - // from the time the most recent RPC completed. - atomic.StoreInt32(&m.activeSinceLastTimerCheck, 0) - m.resetIdleTimer(time.Duration(atomic.LoadInt64(&m.lastCallEndTime)-time.Now().UnixNano()) + m.timeout) - return - } - - // Now that we've checked that there has been no activity, attempt to enter - // idle mode, which is very likely to succeed. - if m.tryEnterIdleMode() { - // Successfully entered idle mode. No timer needed until we exit idle. - return - } - - // Failed to enter idle mode due to a concurrent RPC that kept the channel - // active, or because of an error from the channel. Undo the attempt to - // enter idle, and reset the timer to try again later. - m.resetIdleTimer(m.timeout) -} - -// tryEnterIdleMode instructs the channel to enter idle mode. But before -// that, it performs a last minute check to ensure that no new RPC has come in, -// making the channel active. -// -// Return value indicates whether or not the channel moved to idle mode. -// -// Holds idleMu which ensures mutual exclusion with exitIdleMode. -func (m *Manager) tryEnterIdleMode() bool { - // Setting the activeCallsCount to -math.MaxInt32 indicates to OnCallBegin() - // that the channel is either in idle mode or is trying to get there. 
- if !atomic.CompareAndSwapInt32(&m.activeCallsCount, 0, -math.MaxInt32) { - // This CAS operation can fail if an RPC started after we checked for - // activity in the timer handler, or one was ongoing from before the - // last time the timer fired, or if a test is attempting to enter idle - // mode without checking. In all cases, abort going into idle mode. - return false - } - // N.B. if we fail to enter idle mode after this, we must re-add - // math.MaxInt32 to m.activeCallsCount. - - m.idleMu.Lock() - defer m.idleMu.Unlock() - - if atomic.LoadInt32(&m.activeCallsCount) != -math.MaxInt32 { - // We raced and lost to a new RPC. Very rare, but stop entering idle. - atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) - return false - } - if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 { - // A very short RPC could have come in (and also finished) after we - // checked for calls count and activity in handleIdleTimeout(), but - // before the CAS operation. So, we need to check for activity again. - atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) - return false - } - - // No new RPCs have come in since we set the active calls count value to - // -math.MaxInt32. And since we have the lock, it is safe to enter idle mode - // unconditionally now. - m.enforcer.EnterIdleMode() - m.actuallyIdle = true - return true -} - -func (m *Manager) EnterIdleModeForTesting() { - m.tryEnterIdleMode() -} - -// OnCallBegin is invoked at the start of every RPC. -func (m *Manager) OnCallBegin() error { - if m.isClosed() { - return nil - } - - if atomic.AddInt32(&m.activeCallsCount, 1) > 0 { - // Channel is not idle now. Set the activity bit and allow the call. - atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1) - return nil - } - - // Channel is either in idle mode or is in the process of moving to idle - // mode. Attempt to exit idle mode to allow this RPC. - if err := m.ExitIdleMode(); err != nil { - // Undo the increment to calls count, and return an error causing the - // RPC to fail. - atomic.AddInt32(&m.activeCallsCount, -1) - return err - } - - atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1) - return nil -} - -// ExitIdleMode instructs m to call the enforcer's ExitIdleMode and update m's -// internal state. -func (m *Manager) ExitIdleMode() error { - // Holds idleMu which ensures mutual exclusion with tryEnterIdleMode. - m.idleMu.Lock() - defer m.idleMu.Unlock() - - if m.isClosed() || !m.actuallyIdle { - // This can happen in three scenarios: - // - handleIdleTimeout() set the calls count to -math.MaxInt32 and called - // tryEnterIdleMode(). But before the latter could grab the lock, an RPC - // came in and OnCallBegin() noticed that the calls count is negative. - // - Channel is in idle mode, and multiple new RPCs come in at the same - // time, all of them notice a negative calls count in OnCallBegin and get - // here. The first one to get the lock would got the channel to exit idle. - // - Channel is not in idle mode, and the user calls Connect which calls - // m.ExitIdleMode. - // - // In any case, there is nothing to do here. - return nil - } - - if err := m.enforcer.ExitIdleMode(); err != nil { - return fmt.Errorf("failed to exit idle mode: %w", err) - } - - // Undo the idle entry process. This also respects any new RPC attempts. - atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) - m.actuallyIdle = false - - // Start a new timer to fire after the configured idle timeout. - m.resetIdleTimerLocked(m.timeout) - return nil -} - -// OnCallEnd is invoked at the end of every RPC. 
-func (m *Manager) OnCallEnd() { - if m.isClosed() { - return - } - - // Record the time at which the most recent call finished. - atomic.StoreInt64(&m.lastCallEndTime, time.Now().UnixNano()) - - // Decrement the active calls count. This count can temporarily go negative - // when the timer callback is in the process of moving the channel to idle - // mode, but one or more RPCs come in and complete before the timer callback - // can get done with the process of moving to idle mode. - atomic.AddInt32(&m.activeCallsCount, -1) -} - -func (m *Manager) isClosed() bool { - return atomic.LoadInt32(&m.closed) == 1 -} - -func (m *Manager) Close() { - atomic.StoreInt32(&m.closed, 1) - - m.idleMu.Lock() - if m.timer != nil { - m.timer.Stop() - m.timer = nil - } - m.idleMu.Unlock() -} diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 6c7ea6a5..fd0ee3dc 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -30,7 +30,7 @@ import ( var ( // WithHealthCheckFunc is set by dialoptions.go - WithHealthCheckFunc any // func (HealthChecker) DialOption + WithHealthCheckFunc interface{} // func (HealthChecker) DialOption // HealthCheckFunc is used to provide client-side LB channel health checking HealthCheckFunc HealthChecker // BalancerUnregister is exported by package balancer to unregister a balancer. @@ -38,12 +38,8 @@ var ( // KeepaliveMinPingTime is the minimum ping interval. This must be 10s by // default, but tests may wish to set it lower for convenience. KeepaliveMinPingTime = 10 * time.Second - // KeepaliveMinServerPingTime is the minimum ping interval for servers. - // This must be 1s by default, but tests may wish to set it lower for - // convenience. - KeepaliveMinServerPingTime = time.Second // ParseServiceConfig parses a JSON representation of the service config. - ParseServiceConfig any // func(string) *serviceconfig.ParseResult + ParseServiceConfig interface{} // func(string) *serviceconfig.ParseResult // EqualServiceConfigForTesting is for testing service config generation and // parsing. Both a and b should be returned by ParseServiceConfig. // This function compares the config without rawJSON stripped, in case the @@ -53,81 +49,44 @@ var ( // given name. This is set by package certprovider for use from xDS // bootstrap code while parsing certificate provider configs in the // bootstrap file. - GetCertificateProviderBuilder any // func(string) certprovider.Builder + GetCertificateProviderBuilder interface{} // func(string) certprovider.Builder // GetXDSHandshakeInfoForTesting returns a pointer to the xds.HandshakeInfo // stored in the passed in attributes. This is set by // credentials/xds/xds.go. - GetXDSHandshakeInfoForTesting any // func (*attributes.Attributes) *unsafe.Pointer + GetXDSHandshakeInfoForTesting interface{} // func (*attributes.Attributes) *xds.HandshakeInfo // GetServerCredentials returns the transport credentials configured on a // gRPC server. An xDS-enabled server needs to know what type of credentials // is configured on the underlying gRPC server. This is set by server.go. - GetServerCredentials any // func (*grpc.Server) credentials.TransportCredentials - // CanonicalString returns the canonical string of the code defined here: - // https://github.com/grpc/grpc/blob/master/doc/statuscodes.md. - // - // This is used in the 1.0 release of gcp/observability, and thus must not be - // deleted or changed. 
- CanonicalString any // func (codes.Code) string - // IsRegisteredMethod returns whether the passed in method is registered as - // a method on the server. - IsRegisteredMethod any // func(*grpc.Server, string) bool - // ServerFromContext returns the server from the context. - ServerFromContext any // func(context.Context) *grpc.Server + GetServerCredentials interface{} // func (*grpc.Server) credentials.TransportCredentials + // DrainServerTransports initiates a graceful close of existing connections + // on a gRPC server accepted on the provided listener address. An + // xDS-enabled server invokes this method on a grpc.Server when a particular + // listener moves to "not-serving" mode. + DrainServerTransports interface{} // func(*grpc.Server, string) // AddGlobalServerOptions adds an array of ServerOption that will be // effective globally for newly created servers. The priority will be: 1. // user-provided; 2. this method; 3. default values. - // - // This is used in the 1.0 release of gcp/observability, and thus must not be - // deleted or changed. - AddGlobalServerOptions any // func(opt ...ServerOption) + AddGlobalServerOptions interface{} // func(opt ...ServerOption) // ClearGlobalServerOptions clears the array of extra ServerOption. This // method is useful in testing and benchmarking. - // - // This is used in the 1.0 release of gcp/observability, and thus must not be - // deleted or changed. ClearGlobalServerOptions func() // AddGlobalDialOptions adds an array of DialOption that will be effective // globally for newly created client channels. The priority will be: 1. // user-provided; 2. this method; 3. default values. - // - // This is used in the 1.0 release of gcp/observability, and thus must not be - // deleted or changed. - AddGlobalDialOptions any // func(opt ...DialOption) - // DisableGlobalDialOptions returns a DialOption that prevents the - // ClientConn from applying the global DialOptions (set via - // AddGlobalDialOptions). - // - // This is used in the 1.0 release of gcp/observability, and thus must not be - // deleted or changed. - DisableGlobalDialOptions any // func() grpc.DialOption + AddGlobalDialOptions interface{} // func(opt ...DialOption) // ClearGlobalDialOptions clears the array of extra DialOption. This // method is useful in testing and benchmarking. - // - // This is used in the 1.0 release of gcp/observability, and thus must not be - // deleted or changed. ClearGlobalDialOptions func() - // JoinDialOptions combines the dial options passed as arguments into a - // single dial option. - JoinDialOptions any // func(...grpc.DialOption) grpc.DialOption // JoinServerOptions combines the server options passed as arguments into a // single server option. - JoinServerOptions any // func(...grpc.ServerOption) grpc.ServerOption + JoinServerOptions interface{} // func(...grpc.ServerOption) grpc.ServerOption // WithBinaryLogger returns a DialOption that specifies the binary logger // for a ClientConn. - // - // This is used in the 1.0 release of gcp/observability, and thus must not be - // deleted or changed. - WithBinaryLogger any // func(binarylog.Logger) grpc.DialOption + WithBinaryLogger interface{} // func(binarylog.Logger) grpc.DialOption // BinaryLogger returns a ServerOption that can set the binary logger for a // server. - // - // This is used in the 1.0 release of gcp/observability, and thus must not be - // deleted or changed. 
- BinaryLogger any // func(binarylog.Logger) grpc.ServerOption - - // SubscribeToConnectivityStateChanges adds a grpcsync.Subscriber to a provided grpc.ClientConn - SubscribeToConnectivityStateChanges any // func(*grpc.ClientConn, grpcsync.Subscriber) + BinaryLogger interface{} // func(binarylog.Logger) grpc.ServerOption // NewXDSResolverWithConfigForTesting creates a new xds resolver builder using // the provided xds bootstrap config instead of the global configuration from @@ -138,7 +97,7 @@ var ( // // This function should ONLY be used for testing and may not work with some // other features, including the CSDS service. - NewXDSResolverWithConfigForTesting any // func([]byte) (resolver.Builder, error) + NewXDSResolverWithConfigForTesting interface{} // func([]byte) (resolver.Builder, error) // RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster // Specifier Plugin for testing purposes, regardless of the XDSRLS environment @@ -168,34 +127,6 @@ var ( // // TODO: Remove this function once the RBAC env var is removed. UnregisterRBACHTTPFilterForTesting func() - - // ORCAAllowAnyMinReportingInterval is for examples/orca use ONLY. - ORCAAllowAnyMinReportingInterval any // func(so *orca.ServiceOptions) - - // GRPCResolverSchemeExtraMetadata determines when gRPC will add extra - // metadata to RPCs. - GRPCResolverSchemeExtraMetadata string = "xds" - - // EnterIdleModeForTesting gets the ClientConn to enter IDLE mode. - EnterIdleModeForTesting any // func(*grpc.ClientConn) - - // ExitIdleModeForTesting gets the ClientConn to exit IDLE mode. - ExitIdleModeForTesting any // func(*grpc.ClientConn) error - - ChannelzTurnOffForTesting func() - - // TriggerXDSResourceNameNotFoundForTesting triggers the resource-not-found - // error for a given resource type and name. This is usually triggered when - // the associated watch timer fires. For testing purposes, having this - // function makes events more predictable than relying on timer events. - TriggerXDSResourceNameNotFoundForTesting any // func(func(xdsresource.Type, string), string, string) error - - // TriggerXDSResourceNotFoundClient invokes the testing xDS Client singleton - // to invoke resource not found for a resource type name and resource name. - TriggerXDSResourceNameNotFoundClient any // func(string, string) error - - // FromOutgoingContextRaw returns the un-merged, intermediary contents of metadata.rawMD. - FromOutgoingContextRaw any // func(context.Context) (metadata.MD, [][]string, bool) ) // HealthChecker defines the signature of the client-side LB channel health checking function. @@ -206,7 +137,7 @@ var ( // // The health checking protocol is defined at: // https://github.com/grpc/grpc/blob/master/doc/health-checking.md -type HealthChecker func(ctx context.Context, newStream func(string) (any, error), setConnectivityState func(connectivity.State, error), serviceName string) error +type HealthChecker func(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State, error), serviceName string) error const ( // CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode. 
diff --git a/vendor/google.golang.org/grpc/internal/metadata/metadata.go b/vendor/google.golang.org/grpc/internal/metadata/metadata.go index 900bfb71..b2980f8a 100644 --- a/vendor/google.golang.org/grpc/internal/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/internal/metadata/metadata.go @@ -35,7 +35,7 @@ const mdKey = mdKeyType("grpc.internal.address.metadata") type mdValue metadata.MD -func (m mdValue) Equal(o any) bool { +func (m mdValue) Equal(o interface{}) bool { om, ok := o.(mdValue) if !ok { return false @@ -76,11 +76,33 @@ func Set(addr resolver.Address, md metadata.MD) resolver.Address { return addr } -// Validate validates every pair in md with ValidatePair. +// Validate returns an error if the input md contains invalid keys or values. +// +// If the header is not a pseudo-header, the following items are checked: +// - header names must contain one or more characters from this set [0-9 a-z _ - .]. +// - if the header-name ends with a "-bin" suffix, no validation of the header value is performed. +// - otherwise, the header value must contain one or more characters from the set [%x20-%x7E]. func Validate(md metadata.MD) error { for k, vals := range md { - if err := ValidatePair(k, vals...); err != nil { - return err + // pseudo-header will be ignored + if k[0] == ':' { + continue + } + // check key, for i that saving a conversion if not using for range + for i := 0; i < len(k); i++ { + r := k[i] + if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.' && r != '-' && r != '_' { + return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", k) + } + } + if strings.HasSuffix(k, "-bin") { + continue + } + // check value + for _, val := range vals { + if hasNotPrintable(val) { + return fmt.Errorf("header key %q contains value with non-printable ASCII characters", k) + } } } return nil @@ -96,37 +118,3 @@ func hasNotPrintable(msg string) bool { } return false } - -// ValidatePair validate a key-value pair with the following rules (the pseudo-header will be skipped) : -// -// - key must contain one or more characters. -// - the characters in the key must be contained in [0-9 a-z _ - .]. -// - if the key ends with a "-bin" suffix, no validation of the corresponding value is performed. -// - the characters in the every value must be printable (in [%x20-%x7E]). -func ValidatePair(key string, vals ...string) error { - // key should not be empty - if key == "" { - return fmt.Errorf("there is an empty key in the header") - } - // pseudo-header will be ignored - if key[0] == ':' { - return nil - } - // check key, for i that saving a conversion if not using for range - for i := 0; i < len(key); i++ { - r := key[i] - if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.' && r != '-' && r != '_' { - return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", key) - } - } - if strings.HasSuffix(key, "-bin") { - return nil - } - // check value - for _, val := range vals { - if hasNotPrintable(val) { - return fmt.Errorf("header key %q contains value with non-printable ASCII characters", key) - } - } - return nil -} diff --git a/vendor/google.golang.org/grpc/internal/pretty/pretty.go b/vendor/google.golang.org/grpc/internal/pretty/pretty.go index 70331913..0177af4b 100644 --- a/vendor/google.golang.org/grpc/internal/pretty/pretty.go +++ b/vendor/google.golang.org/grpc/internal/pretty/pretty.go @@ -35,7 +35,7 @@ const jsonIndent = " " // ToJSON marshals the input into a json string. 
// // If marshal fails, it falls back to fmt.Sprintf("%+v"). -func ToJSON(e any) string { +func ToJSON(e interface{}) string { switch ee := e.(type) { case protov1.Message: mm := jsonpb.Marshaler{Indent: jsonIndent} diff --git a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go index f0603871..c7a18a94 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go +++ b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go @@ -92,7 +92,7 @@ type ClientStream interface { // calling RecvMsg on the same stream at the same time, but it is not safe // to call SendMsg on the same stream in different goroutines. It is also // not safe to call CloseSend concurrently with SendMsg. - SendMsg(m any) error + SendMsg(m interface{}) error // RecvMsg blocks until it receives a message into m or the stream is // done. It returns io.EOF when the stream completes successfully. On // any other error, the stream is aborted and the error contains the RPC @@ -101,7 +101,7 @@ type ClientStream interface { // It is safe to have a goroutine calling SendMsg and another goroutine // calling RecvMsg on the same stream at the same time, but it is not // safe to call RecvMsg on the same stream in different goroutines. - RecvMsg(m any) error + RecvMsg(m interface{}) error } // ClientInterceptor is an interceptor for gRPC client streams. diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index b66dcb21..75301c51 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -23,6 +23,7 @@ package dns import ( "context" "encoding/json" + "errors" "fmt" "net" "os" @@ -36,7 +37,6 @@ import ( "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpcrand" - "google.golang.org/grpc/internal/resolver/dns/internal" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) @@ -47,37 +47,53 @@ var EnableSRVLookups = false var logger = grpclog.Component("dns") +// Globals to stub out in tests. TODO: Perhaps these two can be combined into a +// single variable for testing the resolver? +var ( + newTimer = time.NewTimer + newTimerDNSResRate = time.NewTimer +) + func init() { resolver.Register(NewBuilder()) - internal.TimeAfterFunc = time.After - internal.NewNetResolver = newNetResolver - internal.AddressDialer = addressDialer } const ( defaultPort = "443" defaultDNSSvrPort = "53" golang = "GO" - // txtPrefix is the prefix string to be prepended to the host name for txt - // record lookup. + // txtPrefix is the prefix string to be prepended to the host name for txt record lookup. txtPrefix = "_grpc_config." // In DNS, service config is encoded in a TXT record via the mechanism // described in RFC-1464 using the attribute name grpc_config. txtAttribute = "grpc_config=" ) -var addressDialer = func(address string) func(context.Context, string, string) (net.Conn, error) { - return func(ctx context.Context, network, _ string) (net.Conn, error) { +var ( + errMissingAddr = errors.New("dns resolver: missing address") + + // Addresses ending with a colon that is supposed to be the separator + // between host and port is not allowed. E.g. 
"::" is a valid address as + // it is an IPv6 address (host only) and "[::]:" is invalid as it ends with + // a colon as the host and port separator + errEndsWithColon = errors.New("dns resolver: missing port after port-separator colon") +) + +var ( + defaultResolver netResolver = net.DefaultResolver + // To prevent excessive re-resolution, we enforce a rate limit on DNS + // resolution requests. + minDNSResRate = 30 * time.Second +) + +var customAuthorityDialler = func(authority string) func(ctx context.Context, network, address string) (net.Conn, error) { + return func(ctx context.Context, network, address string) (net.Conn, error) { var dialer net.Dialer - return dialer.DialContext(ctx, network, address) + return dialer.DialContext(ctx, network, authority) } } -var newNetResolver = func(authority string) (internal.NetResolver, error) { - if authority == "" { - return net.DefaultResolver, nil - } - +var customAuthorityResolver = func(authority string) (netResolver, error) { host, port, err := parseTarget(authority, defaultDNSSvrPort) if err != nil { return nil, err @@ -87,7 +103,7 @@ var newNetResolver = func(authority string) (internal.NetResolver, error) { return &net.Resolver{ PreferGo: true, - Dial: internal.AddressDialer(authorityWithPort), + Dial: customAuthorityDialler(authorityWithPort), }, nil } @@ -98,10 +114,9 @@ func NewBuilder() resolver.Builder { type dnsBuilder struct{} -// Build creates and starts a DNS resolver that watches the name resolution of -// the target. +// Build creates and starts a DNS resolver that watches the name resolution of the target. func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { - host, port, err := parseTarget(target.Endpoint(), defaultPort) + host, port, err := parseTarget(target.Endpoint, defaultPort) if err != nil { return nil, err } @@ -125,9 +140,13 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts disableServiceConfig: opts.DisableServiceConfig, } - d.resolver, err = internal.NewNetResolver(target.URL.Host) - if err != nil { - return nil, err + if target.Authority == "" { + d.resolver = defaultResolver + } else { + d.resolver, err = customAuthorityResolver(target.Authority) + if err != nil { + return nil, err + } } d.wg.Add(1) @@ -140,6 +159,12 @@ func (b *dnsBuilder) Scheme() string { return "dns" } +type netResolver interface { + LookupHost(ctx context.Context, host string) (addrs []string, err error) + LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error) + LookupTXT(ctx context.Context, name string) (txts []string, err error) +} + // deadResolver is a resolver that does nothing. type deadResolver struct{} @@ -151,26 +176,23 @@ func (deadResolver) Close() {} type dnsResolver struct { host string port string - resolver internal.NetResolver + resolver netResolver ctx context.Context cancel context.CancelFunc cc resolver.ClientConn - // rn channel is used by ResolveNow() to force an immediate resolution of the - // target. + // rn channel is used by ResolveNow() to force an immediate resolution of the target. rn chan struct{} - // wg is used to enforce Close() to return after the watcher() goroutine has - // finished. Otherwise, data race will be possible. [Race Example] in - // dns_resolver_test we replace the real lookup functions with mocked ones to - // facilitate testing. 
If Close() doesn't wait for watcher() goroutine - // finishes, race detector sometimes will warns lookup (READ the lookup - // function pointers) inside watcher() goroutine has data race with - // replaceNetFunc (WRITE the lookup function pointers). + // wg is used to enforce Close() to return after the watcher() goroutine has finished. + // Otherwise, data race will be possible. [Race Example] in dns_resolver_test we + // replace the real lookup functions with mocked ones to facilitate testing. + // If Close() doesn't wait for watcher() goroutine finishes, race detector sometimes + // will warns lookup (READ the lookup function pointers) inside watcher() goroutine + // has data race with replaceNetFunc (WRITE the lookup function pointers). wg sync.WaitGroup disableServiceConfig bool } -// ResolveNow invoke an immediate resolution of the target that this -// dnsResolver watches. +// ResolveNow invoke an immediate resolution of the target that this dnsResolver watches. func (d *dnsResolver) ResolveNow(resolver.ResolveNowOptions) { select { case d.rn <- struct{}{}: @@ -196,27 +218,28 @@ func (d *dnsResolver) watcher() { err = d.cc.UpdateState(*state) } - var waitTime time.Duration + var timer *time.Timer if err == nil { - // Success resolving, wait for the next ResolveNow. However, also wait 30 - // seconds at the very least to prevent constantly re-resolving. + // Success resolving, wait for the next ResolveNow. However, also wait 30 seconds at the very least + // to prevent constantly re-resolving. backoffIndex = 1 - waitTime = internal.MinResolutionRate + timer = newTimerDNSResRate(minDNSResRate) select { case <-d.ctx.Done(): + timer.Stop() return case <-d.rn: } } else { - // Poll on an error found in DNS Resolver or an error received from - // ClientConn. - waitTime = backoff.DefaultExponential.Backoff(backoffIndex) + // Poll on an error found in DNS Resolver or an error received from ClientConn. + timer = newTimer(backoff.DefaultExponential.Backoff(backoffIndex)) backoffIndex++ } select { case <-d.ctx.Done(): + timer.Stop() return - case <-internal.TimeAfterFunc(waitTime): + case <-timer.C: } } } @@ -255,8 +278,7 @@ func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) { } func handleDNSError(err error, lookupType string) error { - dnsErr, ok := err.(*net.DNSError) - if ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary { + if dnsErr, ok := err.(*net.DNSError); ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary { // Timeouts and temporary errors should be communicated to gRPC to // attempt another DNS query (with backoff). Other errors should be // suppressed (they may represent the absence of a TXT record). @@ -285,12 +307,10 @@ func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult { res += s } - // TXT record must have "grpc_config=" attribute in order to be used as - // service config. + // TXT record must have "grpc_config=" attribute in order to be used as service config. if !strings.HasPrefix(res, txtAttribute) { logger.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute) - // This is not an error; it is the equivalent of not having a service - // config. + // This is not an error; it is the equivalent of not having a service config. return nil } sc := canaryingSC(strings.TrimPrefix(res, txtAttribute)) @@ -332,10 +352,9 @@ func (d *dnsResolver) lookup() (*resolver.State, error) { return &state, nil } -// formatIP returns ok = false if addr is not a valid textual representation of -// an IP address. If addr is an IPv4 address, return the addr and ok = true. 
-// If addr is an IPv6 address, return the addr enclosed in square brackets and -// ok = true. +// formatIP returns ok = false if addr is not a valid textual representation of an IP address. +// If addr is an IPv4 address, return the addr and ok = true. +// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true. func formatIP(addr string) (addrIP string, ok bool) { ip := net.ParseIP(addr) if ip == nil { @@ -347,10 +366,10 @@ func formatIP(addr string) (addrIP string, ok bool) { return "[" + addr + "]", true } -// parseTarget takes the user input target string and default port, returns -// formatted host and port info. If target doesn't specify a port, set the port -// to be the defaultPort. If target is in IPv6 format and host-name is enclosed -// in square brackets, brackets are stripped when setting the host. +// parseTarget takes the user input target string and default port, returns formatted host and port info. +// If target doesn't specify a port, set the port to be the defaultPort. +// If target is in IPv6 format and host-name is enclosed in square brackets, brackets +// are stripped when setting the host. // examples: // target: "www.google.com" defaultPort: "443" returns host: "www.google.com", port: "443" // target: "ipv4-host:80" defaultPort: "443" returns host: "ipv4-host", port: "80" @@ -358,7 +377,7 @@ func formatIP(addr string) (addrIP string, ok bool) { // target: ":80" defaultPort: "443" returns host: "localhost", port: "80" func parseTarget(target, defaultPort string) (host, port string, err error) { if target == "" { - return "", "", internal.ErrMissingAddr + return "", "", errMissingAddr } if ip := net.ParseIP(target); ip != nil { // target is an IPv4 or IPv6(without brackets) address @@ -366,14 +385,12 @@ func parseTarget(target, defaultPort string) (host, port string, err error) { } if host, port, err = net.SplitHostPort(target); err == nil { if port == "" { - // If the port field is empty (target ends with colon), e.g. "[::1]:", - // this is an error. - return "", "", internal.ErrEndsWithColon + // If the port field is empty (target ends with colon), e.g. "[::1]:", this is an error. + return "", "", errEndsWithColon } // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port if host == "" { - // Keep consistent with net.Dial(): If the host is empty, as in ":80", - // the local system is assumed. + // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed. host = "localhost" } return host, port, nil diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go b/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go deleted file mode 100644 index c7fc557d..00000000 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go +++ /dev/null @@ -1,70 +0,0 @@ -/* - * - * Copyright 2023 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -// Package internal contains functionality internal to the dns resolver package. -package internal - -import ( - "context" - "errors" - "net" - "time" -) - -// NetResolver groups the methods on net.Resolver that are used by the DNS -// resolver implementation. This allows the default net.Resolver instance to be -// overidden from tests. -type NetResolver interface { - LookupHost(ctx context.Context, host string) (addrs []string, err error) - LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error) - LookupTXT(ctx context.Context, name string) (txts []string, err error) -} - -var ( - // ErrMissingAddr is the error returned when building a DNS resolver when - // the provided target name is empty. - ErrMissingAddr = errors.New("dns resolver: missing address") - - // ErrEndsWithColon is the error returned when building a DNS resolver when - // the provided target name ends with a colon that is supposed to be the - // separator between host and port. E.g. "::" is a valid address as it is - // an IPv6 address (host only) and "[::]:" is invalid as it ends with a - // colon as the host and port separator - ErrEndsWithColon = errors.New("dns resolver: missing port after port-separator colon") -) - -// The following vars are overridden from tests. -var ( - // MinResolutionRate is the minimum rate at which re-resolutions are - // allowed. This helps to prevent excessive re-resolution. - MinResolutionRate = 30 * time.Second - - // TimeAfterFunc is used by the DNS resolver to wait for the given duration - // to elapse. In non-test code, this is implemented by time.After. In test - // code, this can be used to control the amount of time the resolver is - // blocked waiting for the duration to elapse. - TimeAfterFunc func(time.Duration) <-chan time.Time - - // NewNetResolver returns the net.Resolver instance for the given target. - NewNetResolver func(string) (NetResolver, error) - - // AddressDialer is the dialer used to dial the DNS server. It accepts the - // Host portion of the URL corresponding to the user's dial target and - // returns a dial function. - AddressDialer func(address string) func(context.Context, string, string) (net.Conn, error) -) diff --git a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go index afac5657..520d9229 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go +++ b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go @@ -20,20 +20,13 @@ // name without scheme back to gRPC as resolved address. 
package passthrough -import ( - "errors" - - "google.golang.org/grpc/resolver" -) +import "google.golang.org/grpc/resolver" const scheme = "passthrough" type passthroughBuilder struct{} func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { - if target.Endpoint() == "" && opts.Dialer == nil { - return nil, errors.New("passthrough: received empty target in Build()") - } r := &passthroughResolver{ target: target, cc: cc, @@ -52,7 +45,7 @@ type passthroughResolver struct { } func (r *passthroughResolver) start() { - r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint()}}}) + r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint}}}) } func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOptions) {} diff --git a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go index 27cd81af..7f1a702c 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go +++ b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go @@ -34,8 +34,8 @@ type builder struct { } func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) { - if target.URL.Host != "" { - return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.URL.Host) + if target.Authority != "" { + return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.Authority) } // gRPC was parsing the dial target manually before PR #4817, and we @@ -61,10 +61,6 @@ func (b *builder) Scheme() string { return b.scheme } -func (b *builder) OverrideAuthority(resolver.Target) string { - return "localhost" -} - type nopResolver struct { } diff --git a/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go b/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go deleted file mode 100644 index 11d82afc..00000000 --- a/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go +++ /dev/null @@ -1,130 +0,0 @@ -/* - * - * Copyright 2023 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package serviceconfig - -import ( - "encoding/json" - "fmt" - "math" - "strconv" - "strings" - "time" -) - -// Duration defines JSON marshal and unmarshal methods to conform to the -// protobuf JSON spec defined [here]. -// -// [here]: https://protobuf.dev/reference/protobuf/google.protobuf/#duration -type Duration time.Duration - -func (d Duration) String() string { - return fmt.Sprint(time.Duration(d)) -} - -// MarshalJSON converts from d to a JSON string output. -func (d Duration) MarshalJSON() ([]byte, error) { - ns := time.Duration(d).Nanoseconds() - sec := ns / int64(time.Second) - ns = ns % int64(time.Second) - - var sign string - if sec < 0 || ns < 0 { - sign, sec, ns = "-", -1*sec, -1*ns - } - - // Generated output always contains 0, 3, 6, or 9 fractional digits, - // depending on required precision. 
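The passthrough and unix hunks above both implement the resolver plugin surface from google.golang.org/grpc/resolver. A rough sketch of a third, static resolver against the same interface; the scheme name and addresses are made up for illustration, and version details (for example whether Target exposes Endpoint as a field or a method) vary across the gRPC versions touched by this patch, so the sketch avoids them:

package staticresolver

import "google.golang.org/grpc/resolver"

// Scheme chosen for illustration; dialing "static:///ignored" would hand the
// fixed address list below to the ClientConn, much like passthrough hands the
// endpoint back unmodified.
const scheme = "static"

type builder struct{ addrs []string }

func (b *builder) Build(_ resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) {
    var state resolver.State
    for _, a := range b.addrs {
        state.Addresses = append(state.Addresses, resolver.Address{Addr: a})
    }
    cc.UpdateState(state) // report the full address list once, up front
    return nopResolver{}, nil
}

func (b *builder) Scheme() string { return scheme }

type nopResolver struct{}

func (nopResolver) ResolveNow(resolver.ResolveNowOptions) {}
func (nopResolver) Close()                                {}

func init() {
    resolver.Register(&builder{addrs: []string{"10.0.0.1:50051", "10.0.0.2:50051"}})
}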
- str := fmt.Sprintf("%s%d.%09d", sign, sec, ns) - str = strings.TrimSuffix(str, "000") - str = strings.TrimSuffix(str, "000") - str = strings.TrimSuffix(str, ".000") - return []byte(fmt.Sprintf("\"%ss\"", str)), nil -} - -// UnmarshalJSON unmarshals b as a duration JSON string into d. -func (d *Duration) UnmarshalJSON(b []byte) error { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return err - } - if !strings.HasSuffix(s, "s") { - return fmt.Errorf("malformed duration %q: missing seconds unit", s) - } - neg := false - if s[0] == '-' { - neg = true - s = s[1:] - } - ss := strings.SplitN(s[:len(s)-1], ".", 3) - if len(ss) > 2 { - return fmt.Errorf("malformed duration %q: too many decimals", s) - } - // hasDigits is set if either the whole or fractional part of the number is - // present, since both are optional but one is required. - hasDigits := false - var sec, ns int64 - if len(ss[0]) > 0 { - var err error - if sec, err = strconv.ParseInt(ss[0], 10, 64); err != nil { - return fmt.Errorf("malformed duration %q: %v", s, err) - } - // Maximum seconds value per the durationpb spec. - const maxProtoSeconds = 315_576_000_000 - if sec > maxProtoSeconds { - return fmt.Errorf("out of range: %q", s) - } - hasDigits = true - } - if len(ss) == 2 && len(ss[1]) > 0 { - if len(ss[1]) > 9 { - return fmt.Errorf("malformed duration %q: too many digits after decimal", s) - } - var err error - if ns, err = strconv.ParseInt(ss[1], 10, 64); err != nil { - return fmt.Errorf("malformed duration %q: %v", s, err) - } - for i := 9; i > len(ss[1]); i-- { - ns *= 10 - } - hasDigits = true - } - if !hasDigits { - return fmt.Errorf("malformed duration %q: contains no numbers", s) - } - - if neg { - sec *= -1 - ns *= -1 - } - - // Maximum/minimum seconds/nanoseconds representable by Go's time.Duration. - const maxSeconds = math.MaxInt64 / int64(time.Second) - const maxNanosAtMaxSeconds = math.MaxInt64 % int64(time.Second) - const minSeconds = math.MinInt64 / int64(time.Second) - const minNanosAtMinSeconds = math.MinInt64 % int64(time.Second) - - if sec > maxSeconds || (sec == maxSeconds && ns >= maxNanosAtMaxSeconds) { - *d = Duration(math.MaxInt64) - } else if sec < minSeconds || (sec == minSeconds && ns <= minNanosAtMinSeconds) { - *d = Duration(math.MinInt64) - } else { - *d = Duration(sec*int64(time.Second) + ns) - } - return nil -} diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go index 03ef2fed..b0ead4f5 100644 --- a/vendor/google.golang.org/grpc/internal/status/status.go +++ b/vendor/google.golang.org/grpc/internal/status/status.go @@ -43,41 +43,13 @@ type Status struct { s *spb.Status } -// NewWithProto returns a new status including details from statusProto. This -// is meant to be used by the gRPC library only. -func NewWithProto(code codes.Code, message string, statusProto []string) *Status { - if len(statusProto) != 1 { - // No grpc-status-details bin header, or multiple; just ignore. - return &Status{s: &spb.Status{Code: int32(code), Message: message}} - } - st := &spb.Status{} - if err := proto.Unmarshal([]byte(statusProto[0]), st); err != nil { - // Probably not a google.rpc.Status proto; do not provide details. - return &Status{s: &spb.Status{Code: int32(code), Message: message}} - } - if st.Code == int32(code) { - // The codes match between the grpc-status header and the - // grpc-status-details-bin header; use the full details proto. 
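The deleted Duration marshaller follows the protobuf JSON mapping for google.protobuf.Duration: a decimal seconds value with 0, 3, 6 or 9 fractional digits and an "s" suffix. Its trimming rule in isolation; protoDurationString is an illustrative name:

package main

import (
    "fmt"
    "strings"
    "time"
)

// protoDurationString renders d the way the protobuf JSON mapping expects:
// seconds plus 0, 3, 6, or 9 fractional digits and an "s" suffix,
// e.g. "3s", "1.500s", "-0.000000001s".
func protoDurationString(d time.Duration) string {
    ns := d.Nanoseconds()
    sec, frac := ns/int64(time.Second), ns%int64(time.Second)
    sign := ""
    if sec < 0 || frac < 0 {
        sign, sec, frac = "-", -sec, -frac
    }
    s := fmt.Sprintf("%s%d.%09d", sign, sec, frac)
    // Drop trailing groups of three zeros so only 9, 6, 3, or 0 digits remain.
    s = strings.TrimSuffix(s, "000")
    s = strings.TrimSuffix(s, "000")
    s = strings.TrimSuffix(s, ".000")
    return s + "s"
}

func main() {
    fmt.Println(protoDurationString(3 * time.Second))         // 3s
    fmt.Println(protoDurationString(1500 * time.Millisecond)) // 1.500s
    fmt.Println(protoDurationString(-1 * time.Nanosecond))    // -0.000000001s
}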
- return &Status{s: st} - } - return &Status{ - s: &spb.Status{ - Code: int32(codes.Internal), - Message: fmt.Sprintf( - "grpc-status-details-bin mismatch: grpc-status=%v, grpc-message=%q, grpc-status-details-bin=%+v", - code, message, st, - ), - }, - } -} - // New returns a Status representing c and msg. func New(c codes.Code, msg string) *Status { return &Status{s: &spb.Status{Code: int32(c), Message: msg}} } // Newf returns New(c, fmt.Sprintf(format, a...)). -func Newf(c codes.Code, format string, a ...any) *Status { +func Newf(c codes.Code, format string, a ...interface{}) *Status { return New(c, fmt.Sprintf(format, a...)) } @@ -92,7 +64,7 @@ func Err(c codes.Code, msg string) error { } // Errorf returns Error(c, fmt.Sprintf(format, a...)). -func Errorf(c codes.Code, format string, a ...any) error { +func Errorf(c codes.Code, format string, a ...interface{}) error { return Err(c, fmt.Sprintf(format, a...)) } @@ -148,11 +120,11 @@ func (s *Status) WithDetails(details ...proto.Message) (*Status, error) { // Details returns a slice of details messages attached to the status. // If a detail cannot be decoded, the error is returned in place of the detail. -func (s *Status) Details() []any { +func (s *Status) Details() []interface{} { if s == nil || s.s == nil { return nil } - details := make([]any, 0, len(s.s.Details)) + details := make([]interface{}, 0, len(s.s.Details)) for _, any := range s.s.Details { detail := &ptypes.DynamicAny{} if err := ptypes.UnmarshalAny(any, detail); err != nil { diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go deleted file mode 100644 index 4f347edd..00000000 --- a/vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go +++ /dev/null @@ -1,29 +0,0 @@ -//go:build !unix && !windows - -/* - * Copyright 2023 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package internal - -import ( - "net" -) - -// NetDialerWithTCPKeepalive returns a vanilla net.Dialer on non-unix platforms. -func NetDialerWithTCPKeepalive() *net.Dialer { - return &net.Dialer{} -} diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go deleted file mode 100644 index 078137b7..00000000 --- a/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go +++ /dev/null @@ -1,54 +0,0 @@ -//go:build unix - -/* - * Copyright 2023 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
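Applications see the internal status changes above through the public google.golang.org/grpc/status package. Typical use, sketched; the lookup function and its messages are illustrative:

package main

import (
    "fmt"

    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"
)

func lookup(id string) error {
    if id == "" {
        // status.Errorf builds a *Status and returns it as an error,
        // mirroring the internal Newf/Errorf helpers in the hunk above.
        return status.Errorf(codes.InvalidArgument, "empty id")
    }
    return status.Error(codes.NotFound, "no record for "+id)
}

func main() {
    err := lookup("abc-123")
    if st, ok := status.FromError(err); ok {
        fmt.Println(st.Code(), st.Message()) // NotFound no record for abc-123
    }
}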
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package internal - -import ( - "net" - "syscall" - "time" - - "golang.org/x/sys/unix" -) - -// NetDialerWithTCPKeepalive returns a net.Dialer that enables TCP keepalives on -// the underlying connection with OS default values for keepalive parameters. -// -// TODO: Once https://github.com/golang/go/issues/62254 lands, and the -// appropriate Go version becomes less than our least supported Go version, we -// should look into using the new API to make things more straightforward. -func NetDialerWithTCPKeepalive() *net.Dialer { - return &net.Dialer{ - // Setting a negative value here prevents the Go stdlib from overriding - // the values of TCP keepalive time and interval. It also prevents the - // Go stdlib from enabling TCP keepalives by default. - KeepAlive: time.Duration(-1), - // This method is called after the underlying network socket is created, - // but before dialing the socket (or calling its connect() method). The - // combination of unconditionally enabling TCP keepalives here, and - // disabling the overriding of TCP keepalive parameters by setting the - // KeepAlive field to a negative value above, results in OS defaults for - // the TCP keealive interval and time parameters. - Control: func(_, _ string, c syscall.RawConn) error { - return c.Control(func(fd uintptr) { - unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1) - }) - }, - } -} diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go deleted file mode 100644 index fd7d43a8..00000000 --- a/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go +++ /dev/null @@ -1,54 +0,0 @@ -//go:build windows - -/* - * Copyright 2023 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package internal - -import ( - "net" - "syscall" - "time" - - "golang.org/x/sys/windows" -) - -// NetDialerWithTCPKeepalive returns a net.Dialer that enables TCP keepalives on -// the underlying connection with OS default values for keepalive parameters. -// -// TODO: Once https://github.com/golang/go/issues/62254 lands, and the -// appropriate Go version becomes less than our least supported Go version, we -// should look into using the new API to make things more straightforward. -func NetDialerWithTCPKeepalive() *net.Dialer { - return &net.Dialer{ - // Setting a negative value here prevents the Go stdlib from overriding - // the values of TCP keepalive time and interval. It also prevents the - // Go stdlib from enabling TCP keepalives by default. - KeepAlive: time.Duration(-1), - // This method is called after the underlying network socket is created, - // but before dialing the socket (or calling its connect() method). 
The - // combination of unconditionally enabling TCP keepalives here, and - // disabling the overriding of TCP keepalive parameters by setting the - // KeepAlive field to a negative value above, results in OS defaults for - // the TCP keealive interval and time parameters. - Control: func(_, _ string, c syscall.RawConn) error { - return c.Control(func(fd uintptr) { - windows.SetsockoptInt(windows.Handle(fd), windows.SOL_SOCKET, windows.SO_KEEPALIVE, 1) - }) - }, - } -} diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index b330cced..409769f4 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -22,7 +22,6 @@ import ( "bytes" "errors" "fmt" - "net" "runtime" "strconv" "sync" @@ -30,7 +29,6 @@ import ( "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" - "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/status" ) @@ -40,7 +38,7 @@ var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) { } type itemNode struct { - it any + it interface{} next *itemNode } @@ -49,7 +47,7 @@ type itemList struct { tail *itemNode } -func (il *itemList) enqueue(i any) { +func (il *itemList) enqueue(i interface{}) { n := &itemNode{it: i} if il.tail == nil { il.head, il.tail = n, n @@ -61,11 +59,11 @@ func (il *itemList) enqueue(i any) { // peek returns the first item in the list without removing it from the // list. -func (il *itemList) peek() any { +func (il *itemList) peek() interface{} { return il.head.it } -func (il *itemList) dequeue() any { +func (il *itemList) dequeue() interface{} { if il.head == nil { return nil } @@ -193,7 +191,7 @@ type goAway struct { code http2.ErrCode debugData []byte headsUp bool - closeConn error // if set, loopyWriter will exit, resulting in conn closure + closeConn bool } func (*goAway) isTransportResponseFrame() bool { return false } @@ -211,14 +209,6 @@ type outFlowControlSizeRequest struct { func (*outFlowControlSizeRequest) isTransportResponseFrame() bool { return false } -// closeConnection is an instruction to tell the loopy writer to flush the -// framer and exit, which will cause the transport's connection to be closed -// (by the client or server). The transport itself will close after the reader -// encounters the EOF caused by the connection closure. -type closeConnection struct{} - -func (closeConnection) isTransportResponseFrame() bool { return false } - type outStreamState int const ( @@ -336,7 +326,7 @@ func (c *controlBuffer) put(it cbItem) error { return err } -func (c *controlBuffer) executeAndPut(f func(it any) bool, it cbItem) (bool, error) { +func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (bool, error) { var wakeUp bool c.mu.Lock() if c.err != nil { @@ -373,7 +363,7 @@ func (c *controlBuffer) executeAndPut(f func(it any) bool, it cbItem) (bool, err } // Note argument f should never be nil. 
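The deleted NetDialerWithTCPKeepalive helpers enable TCP keepalives with OS-default parameters via a socket Control hook. When OS defaults are not required, the standard library alone can enable keepalives on a dialer; a sketch with an illustrative 30 second period and a placeholder address:

package main

import (
    "context"
    "net"
    "time"
)

func main() {
    // net.Dialer enables TCP keepalives by itself: a positive KeepAlive sets
    // the probe period, zero enables them with a default where supported,
    // and a negative value disables them (the deleted helpers above instead
    // keep OS defaults by setting -1 and flipping SO_KEEPALIVE in Control).
    d := &net.Dialer{KeepAlive: 30 * time.Second}
    conn, err := d.DialContext(context.Background(), "tcp", "example.com:80")
    if err != nil {
        return
    }
    defer conn.Close()
}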
-func (c *controlBuffer) execute(f func(it any) bool, it any) (bool, error) { +func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bool, error) { c.mu.Lock() if c.err != nil { c.mu.Unlock() @@ -387,7 +377,7 @@ func (c *controlBuffer) execute(f func(it any) bool, it any) (bool, error) { return true, nil } -func (c *controlBuffer) get(block bool) (any, error) { +func (c *controlBuffer) get(block bool) (interface{}, error) { for { c.mu.Lock() if c.err != nil { @@ -418,7 +408,7 @@ func (c *controlBuffer) get(block bool) (any, error) { select { case <-c.ch: case <-c.done: - return nil, errors.New("transport closed by client") + return nil, ErrConnClosing } } } @@ -488,14 +478,12 @@ type loopyWriter struct { hEnc *hpack.Encoder // HPACK encoder. bdpEst *bdpEstimator draining bool - conn net.Conn - logger *grpclog.PrefixLogger // Side-specific handlers ssGoAwayHandler func(*goAway) (bool, error) } -func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger) *loopyWriter { +func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator) *loopyWriter { var buf bytes.Buffer l := &loopyWriter{ side: s, @@ -508,8 +496,6 @@ func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimato hBuf: &buf, hEnc: hpack.NewEncoder(&buf), bdpEst: bdpEst, - conn: conn, - logger: logger, } return l } @@ -527,26 +513,23 @@ const minBatchSize = 1000 // 2. Stream level flow control quota available. // // In each iteration of run loop, other than processing the incoming control -// frame, loopy calls processData, which processes one node from the -// activeStreams linked-list. This results in writing of HTTP2 frames into an -// underlying write buffer. When there's no more control frames to read from -// controlBuf, loopy flushes the write buffer. As an optimization, to increase -// the batch size for each flush, loopy yields the processor, once if the batch -// size is too low to give stream goroutines a chance to fill it up. -// -// Upon exiting, if the error causing the exit is not an I/O error, run() -// flushes and closes the underlying connection. Otherwise, the connection is -// left open to allow the I/O error to be encountered by the reader instead. +// frame, loopy calls processData, which processes one node from the activeStreams linked-list. +// This results in writing of HTTP2 frames into an underlying write buffer. +// When there's no more control frames to read from controlBuf, loopy flushes the write buffer. +// As an optimization, to increase the batch size for each flush, loopy yields the processor, once +// if the batch size is too low to give stream goroutines a chance to fill it up. func (l *loopyWriter) run() (err error) { defer func() { - if l.logger.V(logLevel) { - l.logger.Infof("loopyWriter exiting with error: %v", err) - } - if !isIOError(err) { - l.framer.writer.Flush() - l.conn.Close() + if err == ErrConnClosing { + // Don't log ErrConnClosing as error since it happens + // 1. When the connection is closed by some other known issue. + // 2. User closed the connection. + // 3. A graceful close of connection. + if logger.V(logLevel) { + logger.Infof("transport: loopyWriter.run returning. 
%v", err) + } + err = nil } - l.cbuf.finish() }() for { it, err := l.cbuf.get(true) @@ -591,6 +574,7 @@ func (l *loopyWriter) run() (err error) { } l.framer.writer.Flush() break hasdata + } } } @@ -599,11 +583,11 @@ func (l *loopyWriter) outgoingWindowUpdateHandler(w *outgoingWindowUpdate) error return l.framer.fr.WriteWindowUpdate(w.streamID, w.increment) } -func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) { +func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error { // Otherwise update the quota. if w.streamID == 0 { l.sendQuota += w.increment - return + return nil } // Find the stream and update it. if str, ok := l.estdStreams[w.streamID]; ok { @@ -611,9 +595,10 @@ func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) { if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota > 0 && str.state == waitingOnStreamQuota { str.state = active l.activeStreams.enqueue(str) - return + return nil } } + return nil } func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error { @@ -621,11 +606,13 @@ func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error { } func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error { - l.applySettings(s.ss) + if err := l.applySettings(s.ss); err != nil { + return err + } return l.framer.fr.WriteSettingsAck() } -func (l *loopyWriter) registerStreamHandler(h *registerStream) { +func (l *loopyWriter) registerStreamHandler(h *registerStream) error { str := &outStream{ id: h.streamID, state: empty, @@ -633,14 +620,15 @@ func (l *loopyWriter) registerStreamHandler(h *registerStream) { wq: h.wq, } l.estdStreams[h.streamID] = str + return nil } func (l *loopyWriter) headerHandler(h *headerFrame) error { if l.side == serverSide { str, ok := l.estdStreams[h.streamID] if !ok { - if l.logger.V(logLevel) { - l.logger.Infof("Unrecognized streamID %d in loopyWriter", h.streamID) + if logger.V(logLevel) { + logger.Warningf("transport: loopy doesn't recognize the stream: %d", h.streamID) } return nil } @@ -667,19 +655,18 @@ func (l *loopyWriter) headerHandler(h *headerFrame) error { itl: &itemList{}, wq: h.wq, } - return l.originateStream(str, h) + str.itl.enqueue(h) + return l.originateStream(str) } -func (l *loopyWriter) originateStream(str *outStream, hdr *headerFrame) error { - // l.draining is set when handling GoAway. In which case, we want to avoid - // creating new streams. - if l.draining { - // TODO: provide a better error with the reason we are in draining. - hdr.onOrphaned(errStreamDrain) - return nil - } +func (l *loopyWriter) originateStream(str *outStream) error { + hdr := str.itl.dequeue().(*headerFrame) if err := hdr.initStream(str.id); err != nil { - return err + if err == ErrConnClosing { + return err + } + // Other errors(errStreamDrain) need not close transport. 
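The run() doc comment above describes the batching strategy: drain control items, write frames into a buffered writer, flush only when the queue runs dry, and yield the processor once if the batch is still small. A loose stdlib analogy of that loop, assuming string items in place of control frames; batchWriter and this minBatchSize are illustrative:

package main

import (
    "bufio"
    "fmt"
    "os"
    "runtime"
)

const minBatchSize = 1000 // bytes; same spirit as loopy's minBatchSize

// batchWriter drains items from ch into a buffered writer and flushes only
// when the queue runs dry, yielding the processor once per batch if the
// batch is still small.
func batchWriter(ch <-chan string, w *bufio.Writer) {
    defer w.Flush()
    for first := range ch {
        w.WriteString(first)
        yielded := false
    drain:
        for {
            select {
            case it, ok := <-ch:
                if !ok {
                    return // deferred Flush writes out whatever is buffered
                }
                w.WriteString(it)
            default:
                if w.Buffered() >= minBatchSize || yielded {
                    w.Flush()
                    break drain
                }
                yielded = true
                runtime.Gosched() // give producers a chance to top up the batch
            }
        }
    }
}

func main() {
    ch := make(chan string, 16)
    go func() {
        for i := 0; i < 5; i++ {
            ch <- fmt.Sprintf("frame %d\n", i)
        }
        close(ch)
    }()
    batchWriter(ch, bufio.NewWriter(os.Stdout))
}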
+ return nil } if err := l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil { return err @@ -695,8 +682,8 @@ func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.He l.hBuf.Reset() for _, f := range hf { if err := l.hEnc.WriteField(f); err != nil { - if l.logger.V(logLevel) { - l.logger.Warningf("Encountered error while encoding headers: %v", err) + if logger.V(logLevel) { + logger.Warningf("transport: loopyWriter.writeHeader encountered error while encoding headers: %v", err) } } } @@ -734,10 +721,10 @@ func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.He return nil } -func (l *loopyWriter) preprocessData(df *dataFrame) { +func (l *loopyWriter) preprocessData(df *dataFrame) error { str, ok := l.estdStreams[df.streamID] if !ok { - return + return nil } // If we got data for a stream it means that // stream was originated and the headers were sent out. @@ -746,6 +733,7 @@ func (l *loopyWriter) preprocessData(df *dataFrame) { str.state = active l.activeStreams.enqueue(str) } + return nil } func (l *loopyWriter) pingHandler(p *ping) error { @@ -756,8 +744,9 @@ func (l *loopyWriter) pingHandler(p *ping) error { } -func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) { +func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) error { o.resp <- l.sendQuota + return nil } func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { @@ -774,9 +763,8 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { return err } } - if l.draining && len(l.estdStreams) == 0 { - // Flush and close the connection; we are done with it. - return errors.New("finished processing active streams while in draining mode") + if l.side == clientSide && l.draining && len(l.estdStreams) == 0 { + return ErrConnClosing } return nil } @@ -811,8 +799,7 @@ func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error { if l.side == clientSide { l.draining = true if len(l.estdStreams) == 0 { - // Flush and close the connection; we are done with it. - return errors.New("received GOAWAY with no active streams") + return ErrConnClosing } } return nil @@ -830,10 +817,10 @@ func (l *loopyWriter) goAwayHandler(g *goAway) error { return nil } -func (l *loopyWriter) handle(i any) error { +func (l *loopyWriter) handle(i interface{}) error { switch i := i.(type) { case *incomingWindowUpdate: - l.incomingWindowUpdateHandler(i) + return l.incomingWindowUpdateHandler(i) case *outgoingWindowUpdate: return l.outgoingWindowUpdateHandler(i) case *incomingSettings: @@ -843,7 +830,7 @@ func (l *loopyWriter) handle(i any) error { case *headerFrame: return l.headerHandler(i) case *registerStream: - l.registerStreamHandler(i) + return l.registerStreamHandler(i) case *cleanupStream: return l.cleanupStreamHandler(i) case *earlyAbortStream: @@ -851,24 +838,19 @@ func (l *loopyWriter) handle(i any) error { case *incomingGoAway: return l.incomingGoAwayHandler(i) case *dataFrame: - l.preprocessData(i) + return l.preprocessData(i) case *ping: return l.pingHandler(i) case *goAway: return l.goAwayHandler(i) case *outFlowControlSizeRequest: - l.outFlowControlSizeRequestHandler(i) - case closeConnection: - // Just return a non-I/O error and run() will flush and close the - // connection. 
- return ErrConnClosing + return l.outFlowControlSizeRequestHandler(i) default: return fmt.Errorf("transport: unknown control message type %T", i) } - return nil } -func (l *loopyWriter) applySettings(ss []http2.Setting) { +func (l *loopyWriter) applySettings(ss []http2.Setting) error { for _, s := range ss { switch s.ID { case http2.SettingInitialWindowSize: @@ -887,6 +869,7 @@ func (l *loopyWriter) applySettings(ss []http2.Setting) { updateHeaderTblSize(l.hEnc, s.Val) } } + return nil } // processData removes the first stream from active streams, writes out at most 16KB @@ -920,7 +903,7 @@ func (l *loopyWriter) processData() (bool, error) { return false, err } if err := l.cleanupStreamHandler(trailer.cleanup); err != nil { - return false, err + return false, nil } } else { l.activeStreams.enqueue(str) diff --git a/vendor/google.golang.org/grpc/internal/transport/defaults.go b/vendor/google.golang.org/grpc/internal/transport/defaults.go index bc8ee074..9fa306b2 100644 --- a/vendor/google.golang.org/grpc/internal/transport/defaults.go +++ b/vendor/google.golang.org/grpc/internal/transport/defaults.go @@ -47,9 +47,3 @@ const ( defaultClientMaxHeaderListSize = uint32(16 << 20) defaultServerMaxHeaderListSize = uint32(16 << 20) ) - -// MaxStreamID is the upper bound for the stream ID before the current -// transport gracefully closes and new transport is created for subsequent RPCs. -// This is set to 75% of 2^31-1. Streams are identified with an unsigned 31-bit -// integer. It's exported so that tests can override it. -var MaxStreamID = uint32(math.MaxInt32 * 3 / 4) diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index a9d70e2a..fb272235 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -39,7 +39,6 @@ import ( "golang.org/x/net/http2" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" - "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" @@ -47,65 +46,40 @@ import ( "google.golang.org/grpc/status" ) -// NewServerHandlerTransport returns a ServerTransport handling gRPC from -// inside an http.Handler, or writes an HTTP error to w and returns an error. -// It requires that the http Server supports HTTP/2. +// NewServerHandlerTransport returns a ServerTransport handling gRPC +// from inside an http.Handler. It requires that the http Server +// supports HTTP/2. func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) { if r.ProtoMajor != 2 { - msg := "gRPC requires HTTP/2" - http.Error(w, msg, http.StatusBadRequest) - return nil, errors.New(msg) + return nil, errors.New("gRPC requires HTTP/2") } if r.Method != "POST" { - msg := fmt.Sprintf("invalid gRPC request method %q", r.Method) - http.Error(w, msg, http.StatusBadRequest) - return nil, errors.New(msg) + return nil, errors.New("invalid gRPC request method") } contentType := r.Header.Get("Content-Type") // TODO: do we assume contentType is lowercase? 
we did before contentSubtype, validContentType := grpcutil.ContentSubtype(contentType) if !validContentType { - msg := fmt.Sprintf("invalid gRPC request content-type %q", contentType) - http.Error(w, msg, http.StatusUnsupportedMediaType) - return nil, errors.New(msg) + return nil, errors.New("invalid gRPC request content-type") } if _, ok := w.(http.Flusher); !ok { - msg := "gRPC requires a ResponseWriter supporting http.Flusher" - http.Error(w, msg, http.StatusInternalServerError) - return nil, errors.New(msg) + return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher") } - var localAddr net.Addr - if la := r.Context().Value(http.LocalAddrContextKey); la != nil { - localAddr, _ = la.(net.Addr) - } - var authInfo credentials.AuthInfo - if r.TLS != nil { - authInfo = credentials.TLSInfo{State: *r.TLS, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}} - } - p := peer.Peer{ - Addr: strAddr(r.RemoteAddr), - LocalAddr: localAddr, - AuthInfo: authInfo, - } st := &serverHandlerTransport{ rw: w, req: r, closedCh: make(chan struct{}), writes: make(chan func()), - peer: p, contentType: contentType, contentSubtype: contentSubtype, stats: stats, } - st.logger = prefixLoggerForServerHandlerTransport(st) if v := r.Header.Get("grpc-timeout"); v != "" { to, err := decodeTimeout(v) if err != nil { - msg := fmt.Sprintf("malformed grpc-timeout: %v", err) - http.Error(w, msg, http.StatusBadRequest) - return nil, status.Error(codes.Internal, msg) + return nil, status.Errorf(codes.Internal, "malformed time-out: %v", err) } st.timeoutSet = true st.timeout = to @@ -123,9 +97,7 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s for _, v := range vv { v, err := decodeMetadataHeader(k, v) if err != nil { - msg := fmt.Sprintf("malformed binary metadata %q in header %q: %v", v, k, err) - http.Error(w, msg, http.StatusBadRequest) - return nil, status.Error(codes.Internal, msg) + return nil, status.Errorf(codes.Internal, "malformed binary metadata: %v", err) } metakv = append(metakv, k, v) } @@ -148,8 +120,6 @@ type serverHandlerTransport struct { headerMD metadata.MD - peer peer.Peer - closeOnce sync.Once closedCh chan struct{} // closed on Close @@ -168,26 +138,16 @@ type serverHandlerTransport struct { // TODO make sure this is consistent across handler_server and http2_server contentSubtype string - stats []stats.Handler - logger *grpclog.PrefixLogger + stats []stats.Handler } -func (ht *serverHandlerTransport) Close(err error) { - ht.closeOnce.Do(func() { - if ht.logger.V(logLevel) { - ht.logger.Infof("Closing: %v", err) - } - close(ht.closedCh) - }) +func (ht *serverHandlerTransport) Close() { + ht.closeOnce.Do(ht.closeCloseChanOnce) } -func (ht *serverHandlerTransport) Peer() *peer.Peer { - return &peer.Peer{ - Addr: ht.peer.Addr, - LocalAddr: ht.peer.LocalAddr, - AuthInfo: ht.peer.AuthInfo, - } -} +func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) } + +func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) } // strAddr is a net.Addr backed by either a TCP "ip:port" string, or // the empty string if unknown. 
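NewServerHandlerTransport above is what backs grpc.Server's http.Handler mode: it requires HTTP/2, a POST method, a gRPC content-type and a flushable ResponseWriter. A sketch of wiring that up; the route, port and certificate paths are placeholders:

package main

import (
    "log"
    "net/http"
    "strings"

    "google.golang.org/grpc"
)

// mixedHandler routes gRPC traffic to the grpc.Server's ServeHTTP (which
// builds the serverHandlerTransport shown above) and everything else to a
// plain mux. The server must be reachable over HTTP/2, e.g. behind TLS.
func mixedHandler(grpcServer *grpc.Server, mux http.Handler) http.Handler {
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        if r.ProtoMajor == 2 && strings.HasPrefix(r.Header.Get("Content-Type"), "application/grpc") {
            grpcServer.ServeHTTP(w, r)
            return
        }
        mux.ServeHTTP(w, r)
    })
}

func main() {
    gs := grpc.NewServer() // register services here before serving
    mux := http.NewServeMux()
    mux.HandleFunc("/healthz", func(w http.ResponseWriter, _ *http.Request) { w.Write([]byte("ok")) })
    // Certificate paths are placeholders for illustration.
    log.Fatal(http.ListenAndServeTLS(":8443", "server.crt", "server.key", mixedHandler(gs, mux)))
}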
@@ -242,20 +202,18 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro h.Set("Grpc-Message", encodeGrpcMessage(m)) } - s.hdrMu.Lock() if p := st.Proto(); p != nil && len(p.Details) > 0 { - delete(s.trailer, grpcStatusDetailsBinHeader) stBytes, err := proto.Marshal(p) if err != nil { // TODO: return error instead, when callers are able to handle it. panic(err) } - h.Set(grpcStatusDetailsBinHeader, encodeBinHeader(stBytes)) + h.Set("Grpc-Status-Details-Bin", encodeBinHeader(stBytes)) } - if len(s.trailer) > 0 { - for k, vv := range s.trailer { + if md := s.Trailer(); len(md) > 0 { + for k, vv := range md { // Clients don't tolerate reading restricted headers after some non restricted ones were sent. if isReservedHeader(k) { continue @@ -267,7 +225,6 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro } } } - s.hdrMu.Unlock() }) if err == nil { // transport has not been closed @@ -279,7 +236,7 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro }) } } - ht.Close(errors.New("finished writing status")) + ht.Close() return err } @@ -312,7 +269,7 @@ func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { } // writeCustomHeaders sets custom headers set on the stream via SetHeader -// on the first write call (Write, WriteHeader, or WriteStatus) +// on the first write call (Write, WriteHeader, or WriteStatus). func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) { h := ht.rw.Header() @@ -369,8 +326,10 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { return err } -func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*Stream)) { +func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) { // With this transport type there will be exactly 1 stream: this HTTP request. + + ctx := ht.req.Context() var cancel context.CancelFunc if ht.timeoutSet { ctx, cancel = context.WithTimeout(ctx, ht.timeout) @@ -387,22 +346,37 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream case <-ht.req.Context().Done(): } cancel() - ht.Close(errors.New("request is done processing")) + ht.Close() }() - ctx = metadata.NewIncomingContext(ctx, ht.headerMD) req := ht.req + s := &Stream{ - id: 0, // irrelevant - ctx: ctx, - requestRead: func(int) {}, - cancel: cancel, - buf: newRecvBuffer(), - st: ht, - method: req.URL.Path, - recvCompress: req.Header.Get("grpc-encoding"), - contentSubtype: ht.contentSubtype, - headerWireLength: 0, // won't have access to header wire length until golang/go#18997. 
+ id: 0, // irrelevant + requestRead: func(int) {}, + cancel: cancel, + buf: newRecvBuffer(), + st: ht, + method: req.URL.Path, + recvCompress: req.Header.Get("grpc-encoding"), + contentSubtype: ht.contentSubtype, + } + pr := &peer.Peer{ + Addr: ht.RemoteAddr(), + } + if req.TLS != nil { + pr.AuthInfo = credentials.TLSInfo{State: *req.TLS, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}} + } + ctx = metadata.NewIncomingContext(ctx, ht.headerMD) + s.ctx = peer.NewContext(ctx, pr) + for _, sh := range ht.stats { + s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) + inHeader := &stats.InHeader{ + FullMethod: s.method, + RemoteAddr: ht.RemoteAddr(), + Compression: s.recvCompress, + } + sh.HandleRPC(s.ctx, inHeader) } s.trReader = &transportReader{ reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}}, @@ -461,7 +435,7 @@ func (ht *serverHandlerTransport) IncrMsgSent() {} func (ht *serverHandlerTransport) IncrMsgRecv() {} -func (ht *serverHandlerTransport) Drain(debugData string) { +func (ht *serverHandlerTransport) Drain() { panic("Drain() is not implemented") } diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index c33ac596..d518b07e 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -36,15 +36,13 @@ import ( "golang.org/x/net/http2/hpack" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" - "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/channelz" icredentials "google.golang.org/grpc/internal/credentials" - "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/grpcutil" imetadata "google.golang.org/grpc/internal/metadata" istatus "google.golang.org/grpc/internal/status" - isyscall "google.golang.org/grpc/internal/syscall" + "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/internal/transport/networktype" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" @@ -59,19 +57,13 @@ import ( // atomically. var clientConnectionCounter uint64 -var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool)) - // http2Client implements the ClientTransport interface with HTTP2. type http2Client struct { - lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. - ctx context.Context - cancel context.CancelFunc - ctxDone <-chan struct{} // Cache the ctx.Done() chan. - userAgent string - // address contains the resolver returned address for this transport. - // If the `ServerName` field is set, it takes precedence over `CallHdr.Host` - // passed to `NewStream`, when determining the :authority header. - address resolver.Address + lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. + ctx context.Context + cancel context.CancelFunc + ctxDone <-chan struct{} // Cache the ctx.Done() chan. 
+ userAgent string md metadata.MD conn net.Conn // underlying communication channel loopy *loopyWriter @@ -144,12 +136,12 @@ type http2Client struct { channelzID *channelz.Identifier czData *channelzData - onClose func(GoAwayReason) + onGoAway func(GoAwayReason) + onClose func() bufferPool *bufferPool connectionID uint64 - logger *grpclog.PrefixLogger } func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr resolver.Address, useProxy bool, grpcUA string) (net.Conn, error) { @@ -179,7 +171,7 @@ func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error if networkType == "tcp" && useProxy { return proxyDial(ctx, address, grpcUA) } - return internal.NetDialerWithTCPKeepalive().DialContext(ctx, networkType, address) + return (&net.Dialer{}).DialContext(ctx, networkType, address) } func isTemporary(err error) bool { @@ -201,7 +193,7 @@ func isTemporary(err error) bool { // newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 // and starts to receive messages on it. Non-nil error returns if construction // fails. -func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (_ *http2Client, err error) { +func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) { scheme := "http" ctx, cancel := context.WithCancel(ctx) defer func() { @@ -221,7 +213,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts if opts.FailOnNonTempDialError { return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err) } - return nil, connectionErrorf(true, err, "transport: Error while dialing: %v", err) + return nil, connectionErrorf(true, err, "transport: Error while dialing %v", err) } // Any further errors will close the underlying connection @@ -246,11 +238,8 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts go func(conn net.Conn) { defer ctxMonitorDone.Fire() // Signal this goroutine has exited. <-newClientCtx.Done() // Block until connectCtx expires or the defer above executes. - if err := connectCtx.Err(); err != nil { + if connectCtx.Err() != nil { // connectCtx expired before exiting the function. Hard close the connection. 
- if logger.V(logLevel) { - logger.Infof("Aborting due to connect deadline expiring: %v", err) - } conn.Close() } }(conn) @@ -265,7 +254,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } keepaliveEnabled := false if kp.Time != infinity { - if err = isyscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { + if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) } keepaliveEnabled = true @@ -325,7 +314,6 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts cancel: cancel, userAgent: opts.UserAgent, registeredCompressors: grpcutil.RegisteredCompressors(), - address: addr, conn: conn, remoteAddr: conn.RemoteAddr(), localAddr: conn.LocalAddr(), @@ -333,7 +321,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts readerDone: make(chan struct{}), writerDone: make(chan struct{}), goAway: make(chan struct{}), - framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize), + framer: newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize), fc: &trInFlow{limit: uint32(icwz)}, scheme: scheme, activeStreams: make(map[uint32]*Stream), @@ -347,11 +335,11 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts streamQuota: defaultMaxStreamsClient, streamsQuotaAvailable: make(chan struct{}, 1), czData: new(channelzData), + onGoAway: onGoAway, keepaliveEnabled: keepaliveEnabled, bufferPool: newBufferPool(), onClose: onClose, } - t.logger = prefixLoggerForClientTransport(t) // Add peer information to the http2client context. t.ctx = peer.NewContext(t.ctx, t.getPeer()) @@ -450,8 +438,17 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts return nil, err } go func() { - t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger) - t.loopy.run() + t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst) + err := t.loopy.run() + if err != nil { + if logger.V(logLevel) { + logger.Errorf("transport: loopyWriter.run returning. Err: %v", err) + } + } + // Do not close the transport. Let reader goroutine handle it since + // there might be data in the buffers. + t.conn.Close() + t.controlBuf.finish() close(t.writerDone) }() return t, nil @@ -496,9 +493,8 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { func (t *http2Client) getPeer() *peer.Peer { return &peer.Peer{ - Addr: t.remoteAddr, - AuthInfo: t.authInfo, // Can be nil - LocalAddr: t.localAddr, + Addr: t.remoteAddr, + AuthInfo: t.authInfo, // Can be nil } } @@ -570,7 +566,7 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)}) } - if md, added, ok := metadataFromOutgoingContextRaw(ctx); ok { + if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok { var k string for k, vv := range md { // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. @@ -706,18 +702,6 @@ func (e NewStreamError) Error() string { // streams. All non-nil errors returned will be *NewStreamError. 
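createHeaderFields above turns metadata from the outgoing context into HTTP/2 header fields. On the application side that metadata is attached like this; the keys and values are illustrative:

package main

import (
    "context"
    "fmt"

    "google.golang.org/grpc/metadata"
)

func main() {
    // Metadata attached to the outgoing context is what createHeaderFields
    // serializes into header fields on the wire.
    ctx := metadata.AppendToOutgoingContext(context.Background(),
        "x-request-id", "demo-123", // illustrative key/value
        "authorization", "Bearer <token>")

    if md, ok := metadata.FromOutgoingContext(ctx); ok {
        fmt.Println(md.Get("x-request-id")) // [demo-123]
    }
    // ctx would then be passed to a generated client stub call,
    // e.g. client.SomeMethod(ctx, req).
}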
func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) { ctx = peer.NewContext(ctx, t.getPeer()) - - // ServerName field of the resolver returned address takes precedence over - // Host field of CallHdr to determine the :authority header. This is because, - // the ServerName field takes precedence for server authentication during - // TLS handshake, and the :authority header should match the value used - // for server authentication. - if t.address.ServerName != "" { - newCallHdr := *callHdr - newCallHdr.Host = t.address.ServerName - callHdr = &newCallHdr - } - headerFields, err := t.createHeaderFields(ctx, callHdr) if err != nil { return nil, &NewStreamError{Err: err, AllowTransparentRetry: false} @@ -742,12 +726,15 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, endStream: false, initStream: func(id uint32) error { t.mu.Lock() - // TODO: handle transport closure in loopy instead and remove this - // initStream is never called when transport is draining. - if t.state == closing { + if state := t.state; state != reachable { t.mu.Unlock() - cleanup(ErrConnClosing) - return ErrConnClosing + // Do a quick cleanup. + err := error(errStreamDrain) + if state == closing { + err = ErrConnClosing + } + cleanup(err) + return err } if channelz.IsOn() { atomic.AddInt64(&t.czData.streamsStarted, 1) @@ -765,8 +752,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, } firstTry := true var ch chan struct{} - transportDrainRequired := false - checkForStreamQuota := func(it any) bool { + checkForStreamQuota := func(it interface{}) bool { if t.streamQuota <= 0 { // Can go negative if server decreases it. if firstTry { t.waitingStreams++ @@ -781,15 +767,10 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, h := it.(*headerFrame) h.streamID = t.nextID t.nextID += 2 - - // Drain client transport if nextID > MaxStreamID which signals gRPC that - // the connection is closed and a new one must be created for subsequent RPCs. - transportDrainRequired = t.nextID > MaxStreamID - s.id = h.streamID s.fc = &inFlow{limit: uint32(t.initialWindowSize)} t.mu.Lock() - if t.state == draining || t.activeStreams == nil { // Can be niled from Close(). + if t.activeStreams == nil { // Can be niled from Close(). t.mu.Unlock() return false // Don't create a stream if the transport is already closed. } @@ -804,7 +785,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, return true } var hdrListSizeErr error - checkForHeaderListSize := func(it any) bool { + checkForHeaderListSize := func(it interface{}) bool { if t.maxSendHeaderListSize == nil { return true } @@ -819,7 +800,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, return true } for { - success, err := t.controlBuf.executeAndPut(func(it any) bool { + success, err := t.controlBuf.executeAndPut(func(it interface{}) bool { return checkForHeaderListSize(it) && checkForStreamQuota(it) }, hdr) if err != nil { @@ -865,12 +846,6 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, sh.HandleRPC(s.ctx, outHeader) } } - if transportDrainRequired { - if t.logger.V(logLevel) { - t.logger.Infof("Draining transport: t.nextID > MaxStreamID") - } - t.GracefulClose() - } return s, nil } @@ -931,7 +906,7 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. 
rst: rst, rstCode: rstCode, } - addBackStreamQuota := func(any) bool { + addBackStreamQuota := func(interface{}) bool { t.streamQuota++ if t.streamQuota > 0 && t.waitingStreams > 0 { select { @@ -959,14 +934,9 @@ func (t *http2Client) Close(err error) { t.mu.Unlock() return } - if t.logger.V(logLevel) { - t.logger.Infof("Closing: %v", err) - } // Call t.onClose ASAP to prevent the client from attempting to create new // streams. - if t.state != draining { - t.onClose(GoAwayInvalid) - } + t.onClose() t.state = closing streams := t.activeStreams t.activeStreams = nil @@ -1016,15 +986,11 @@ func (t *http2Client) GracefulClose() { t.mu.Unlock() return } - if t.logger.V(logLevel) { - t.logger.Infof("GracefulClose called") - } - t.onClose(GoAwayInvalid) t.state = draining active := len(t.activeStreams) t.mu.Unlock() if active == 0 { - t.Close(connectionErrorf(true, nil, "no active streams left to process while draining")) + t.Close(ErrConnClosing) return } t.controlBuf.put(&incomingGoAway{}) @@ -1084,7 +1050,7 @@ func (t *http2Client) updateWindow(s *Stream, n uint32) { // for the transport and the stream based on the current bdp // estimation. func (t *http2Client) updateFlowControl(n uint32) { - updateIWS := func(any) bool { + updateIWS := func(interface{}) bool { t.initialWindowSize = int32(n) t.mu.Lock() for _, s := range t.activeStreams { @@ -1181,8 +1147,8 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { } statusCode, ok := http2ErrConvTab[f.ErrCode] if !ok { - if t.logger.V(logLevel) { - t.logger.Infof("Received a RST_STREAM frame with code %q, but found no mapped gRPC status", f.ErrCode) + if logger.V(logLevel) { + logger.Warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode) } statusCode = codes.Unknown } @@ -1237,7 +1203,7 @@ func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) { } updateFuncs = append(updateFuncs, updateStreamQuota) } - t.controlBuf.executeAndPut(func(any) bool { + t.controlBuf.executeAndPut(func(interface{}) bool { for _, f := range updateFuncs { f() } @@ -1264,12 +1230,10 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { t.mu.Unlock() return } - if f.ErrCode == http2.ErrCodeEnhanceYourCalm && string(f.DebugData()) == "too_many_pings" { - // When a client receives a GOAWAY with error code ENHANCE_YOUR_CALM and debug - // data equal to ASCII "too_many_pings", it should log the occurrence at a log level that is - // enabled by default and double the configure KEEPALIVE_TIME used for new connections - // on that channel. - logger.Errorf("Client received GoAway with error code ENHANCE_YOUR_CALM and debug data equal to ASCII \"too_many_pings\".") + if f.ErrCode == http2.ErrCodeEnhanceYourCalm { + if logger.V(logLevel) { + logger.Infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.") + } } id := f.LastStreamID if id > 0 && id%2 == 0 { @@ -1302,10 +1266,8 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { // Notify the clientconn about the GOAWAY before we set the state to // draining, to allow the client to stop attempting to create streams // before disallowing new streams on this connection. - if t.state != draining { - t.onClose(t.goAwayReason) - t.state = draining - } + t.onGoAway(t.goAwayReason) + t.state = draining } // All streams with IDs greater than the GoAwayId // and smaller than the previous GoAway ID should be killed. 
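checkForStreamQuota above admits a new stream only while the server-advertised MAX_CONCURRENT_STREAMS budget has room, parking callers on streamsQuotaAvailable otherwise. A condition-variable sketch of the same admission behaviour; streamQuota here is an illustrative type, not the transport's:

package main

import (
    "fmt"
    "sync"
)

// streamQuota is a tiny MAX_CONCURRENT_STREAMS-style gate: acquire blocks
// while the budget is exhausted and release wakes a waiter. The real client
// threads this bookkeeping through controlBuf; a condition variable gives
// the same admission behaviour in miniature.
type streamQuota struct {
    mu        sync.Mutex
    cond      *sync.Cond
    available int
}

func newStreamQuota(n int) *streamQuota {
    q := &streamQuota{available: n}
    q.cond = sync.NewCond(&q.mu)
    return q
}

func (q *streamQuota) acquire() {
    q.mu.Lock()
    for q.available <= 0 {
        q.cond.Wait() // blocks like waiting on streamsQuotaAvailable
    }
    q.available--
    q.mu.Unlock()
}

func (q *streamQuota) release() {
    q.mu.Lock()
    q.available++
    q.mu.Unlock()
    q.cond.Signal()
}

func main() {
    q := newStreamQuota(2) // pretend the server advertised MAX_CONCURRENT_STREAMS = 2
    var wg sync.WaitGroup
    for i := 0; i < 5; i++ {
        wg.Add(1)
        go func(id int) {
            defer wg.Done()
            q.acquire()
            fmt.Println("stream", id, "admitted")
            q.release() // closeStream's counterpart: hand the slot back
        }(i)
    }
    wg.Wait()
}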
@@ -1325,8 +1287,10 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { for streamID, stream := range t.activeStreams { if streamID > id && streamID <= upperLimit { // The stream was unprocessed by the server. - atomic.StoreUint32(&stream.unprocessed, 1) - streamsToClose = append(streamsToClose, stream) + if streamID > id && streamID <= upperLimit { + atomic.StoreUint32(&stream.unprocessed, 1) + streamsToClose = append(streamsToClose, stream) + } } } t.mu.Unlock() @@ -1339,7 +1303,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { // setGoAwayReason sets the value of t.goAwayReason based // on the GoAway frame received. -// It expects a lock on transport's mutex to be held by +// It expects a lock on transport's mutext to be held by // the caller. func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) { t.goAwayReason = GoAwayNoReason @@ -1401,6 +1365,7 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { mdata = make(map[string][]string) contentTypeErr = "malformed header: missing HTTP content-type" grpcMessage string + statusGen *status.Status recvCompress string httpStatusCode *int httpStatusErr string @@ -1435,6 +1400,12 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { rawStatusCode = codes.Code(uint32(code)) case "grpc-message": grpcMessage = decodeGrpcMessage(hf.Value) + case "grpc-status-details-bin": + var err error + statusGen, err = decodeGRPCStatusDetails(hf.Value) + if err != nil { + headerError = fmt.Sprintf("transport: malformed grpc-status-details-bin: %v", err) + } case ":status": if hf.Value == "200" { httpStatusErr = "" @@ -1500,15 +1471,14 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { return } - // For headers, set them in s.header and close headerChan. For trailers or - // trailers-only, closeStream will set the trailers and close headerChan as - // needed. - if !endStream { - // If headerChan hasn't been closed yet (expected, given we checked it - // above, but something else could have potentially closed the whole - // stream). - if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { - s.headerValid = true + isHeader := false + + // If headerChan hasn't been closed yet + if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { + s.headerValid = true + if !endStream { + // HEADERS frame block carries a Response-Headers. + isHeader = true // These values can be set without any synchronization because // stream goroutine will read it only after seeing a closed // headerChan which we'll close after setting this. @@ -1516,12 +1486,15 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { if len(mdata) > 0 { s.header = mdata } - close(s.headerChan) + } else { + // HEADERS frame block carries a Trailers-Only. + s.noHeaders = true } + close(s.headerChan) } for _, sh := range t.statsHandlers { - if !endStream { + if isHeader { inHeader := &stats.InHeader{ Client: true, WireLength: int(frame.Header().Length), @@ -1543,12 +1516,13 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { return } - status := istatus.NewWithProto(rawStatusCode, grpcMessage, mdata[grpcStatusDetailsBinHeader]) + if statusGen == nil { + statusGen = status.New(rawStatusCode, grpcMessage) + } - // If client received END_STREAM from server while stream was still active, - // send RST_STREAM. 
- rstStream := s.getState() == streamActive - t.closeStream(s, io.EOF, rstStream, http2.ErrCodeNo, status, mdata, true) + // if client received END_STREAM from server while stream was still active, send RST_STREAM + rst := s.getState() == streamActive + t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, statusGen, mdata, true) } // readServerPreface reads and handles the initial settings frame from the @@ -1782,9 +1756,3 @@ func (t *http2Client) getOutFlowWindow() int64 { return -2 } } - -func (t *http2Client) stateForTesting() transportState { - t.mu.Lock() - defer t.mu.Unlock() - return t.state -} diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index f6bac0e8..3dd15647 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -21,7 +21,6 @@ package transport import ( "bytes" "context" - "errors" "fmt" "io" "math" @@ -35,16 +34,13 @@ import ( "github.com/golang/protobuf/proto" "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" - "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" - "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcrand" - "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" @@ -68,15 +64,18 @@ var serverConnectionCounter uint64 // http2Server implements the ServerTransport interface with HTTP2. type http2Server struct { - lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. - done chan struct{} - conn net.Conn - loopy *loopyWriter - readerDone chan struct{} // sync point to enable testing. - loopyWriterDone chan struct{} - peer peer.Peer - inTapHandle tap.ServerInHandle - framer *framer + lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. + ctx context.Context + done chan struct{} + conn net.Conn + loopy *loopyWriter + readerDone chan struct{} // sync point to enable testing. + writerDone chan struct{} // sync point to enable testing. + remoteAddr net.Addr + localAddr net.Addr + authInfo credentials.AuthInfo // auth info about the connection + inTapHandle tap.ServerInHandle + framer *framer // The max number of concurrent streams. maxStreams uint32 // controlBuf delivers all the control related tasks (e.g., window @@ -102,13 +101,13 @@ type http2Server struct { mu sync.Mutex // guard the following - // drainEvent is initialized when Drain() is called the first time. After - // which the server writes out the first GoAway(with ID 2^31-1) frame. Then - // an independent goroutine will be launched to later send the second - // GoAway. During this time we don't want to write another first GoAway(with - // ID 2^31 -1) frame. Thus call to Drain() will be a no-op if drainEvent is - // already initialized since draining is already underway. - drainEvent *grpcsync.Event + // drainChan is initialized when Drain() is called the first time. + // After which the server writes out the first GoAway(with ID 2^31-1) frame. + // Then an independent goroutine will be launched to later send the second GoAway. + // During this time we don't want to write another first GoAway(with ID 2^31 -1) frame. 
+ // Thus call to Drain() will be a no-op if drainChan is already initialized since draining is + // already underway. + drainChan chan struct{} state transportState activeStreams map[uint32]*Stream // idle is the time instant when the connection went idle. @@ -128,8 +127,6 @@ type http2Server struct { // This lock may not be taken if mu is already held. maxStreamMu sync.Mutex maxStreamID uint32 // max stream ID ever seen - - logger *grpclog.PrefixLogger } // NewServerTransport creates a http2 transport with conn and configuration @@ -162,16 +159,21 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, if config.MaxHeaderListSize != nil { maxHeaderListSize = *config.MaxHeaderListSize } - framer := newFramer(conn, writeBufSize, readBufSize, config.SharedWriteBuffer, maxHeaderListSize) + framer := newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize) // Send initial settings as connection preface to client. isettings := []http2.Setting{{ ID: http2.SettingMaxFrameSize, Val: http2MaxFrameLen, }} - if config.MaxStreams != math.MaxUint32 { + // TODO(zhaoq): Have a better way to signal "no limit" because 0 is + // permitted in the HTTP2 spec. + maxStreams := config.MaxStreams + if maxStreams == 0 { + maxStreams = math.MaxUint32 + } else { isettings = append(isettings, http2.Setting{ ID: http2.SettingMaxConcurrentStreams, - Val: config.MaxStreams, + Val: maxStreams, }) } dynamicWindow := true @@ -230,7 +232,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, kp.Timeout = defaultServerKeepaliveTimeout } if kp.Time != infinity { - if err = syscall.SetTCPUserTimeout(rawConn, kp.Timeout); err != nil { + if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) } } @@ -240,19 +242,17 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, } done := make(chan struct{}) - peer := peer.Peer{ - Addr: conn.RemoteAddr(), - LocalAddr: conn.LocalAddr(), - AuthInfo: authInfo, - } t := &http2Server{ + ctx: setConnection(context.Background(), rawConn), done: done, conn: conn, - peer: peer, + remoteAddr: conn.RemoteAddr(), + localAddr: conn.LocalAddr(), + authInfo: authInfo, framer: framer, readerDone: make(chan struct{}), - loopyWriterDone: make(chan struct{}), - maxStreams: config.MaxStreams, + writerDone: make(chan struct{}), + maxStreams: maxStreams, inTapHandle: config.InTapHandle, fc: &trInFlow{limit: uint32(icwz)}, state: reachable, @@ -265,7 +265,8 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, czData: new(channelzData), bufferPool: newBufferPool(), } - t.logger = prefixLoggerForServerTransport(t) + // Add peer information to the http2server context. 
+ t.ctx = peer.NewContext(t.ctx, t.getPeer()) t.controlBuf = newControlBuffer(t.done) if dynamicWindow { @@ -274,7 +275,15 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, updateFlowControl: t.updateFlowControl, } } - t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.peer.Addr, t.peer.LocalAddr)) + for _, sh := range t.stats { + t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{ + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + }) + connBegin := &stats.ConnBegin{} + sh.HandleConn(t.ctx, connBegin) + } + t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr)) if err != nil { return nil, err } @@ -284,7 +293,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, defer func() { if err != nil { - t.Close(err) + t.Close() } }() @@ -320,18 +329,23 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, t.handleSettings(sf) go func() { - t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger) + t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst) t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler - t.loopy.run() - close(t.loopyWriterDone) + if err := t.loopy.run(); err != nil { + if logger.V(logLevel) { + logger.Errorf("transport: loopyWriter.run returning. Err: %v", err) + } + } + t.conn.Close() + t.controlBuf.finish() + close(t.writerDone) }() go t.keepalive() return t, nil } -// operateHeaders takes action on the decoded headers. Returns an error if fatal -// error encountered and transport needs to close, otherwise returns nil. -func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeadersFrame, handle func(*Stream)) error { +// operateHeader takes action on the decoded headers. +func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) { // Acquire max stream ID lock for entire duration t.maxStreamMu.Lock() defer t.maxStreamMu.Unlock() @@ -347,32 +361,33 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade rstCode: http2.ErrCodeFrameSize, onWrite: func() {}, }) - return nil + return false } if streamID%2 != 1 || streamID <= t.maxStreamID { // illegal gRPC stream id. - return fmt.Errorf("received an illegal stream id: %v. headers frame: %+v", streamID, frame) + if logger.V(logLevel) { + logger.Errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID) + } + return true } t.maxStreamID = streamID buf := newRecvBuffer() s := &Stream{ - id: streamID, - st: t, - buf: buf, - fc: &inFlow{limit: uint32(t.initialWindowSize)}, - headerWireLength: int(frame.Header().Length), + id: streamID, + st: t, + buf: buf, + fc: &inFlow{limit: uint32(t.initialWindowSize)}, } var ( - // if false, content-type was missing or invalid - isGRPC = false - contentType = "" - mdata = make(metadata.MD, len(frame.Fields)) - httpMethod string - // these are set if an error is encountered while parsing the headers - protocolError bool - headerError *status.Status + // If a gRPC Response-Headers has already been received, then it means + // that the peer is speaking gRPC and we are in gRPC mode. 
+ isGRPC = false + mdata = make(map[string][]string) + httpMethod string + // headerError is set if an error is encountered while parsing the headers + headerError bool timeoutSet bool timeout time.Duration @@ -383,23 +398,11 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade case "content-type": contentSubtype, validContentType := grpcutil.ContentSubtype(hf.Value) if !validContentType { - contentType = hf.Value break } mdata[hf.Name] = append(mdata[hf.Name], hf.Value) s.contentSubtype = contentSubtype isGRPC = true - - case "grpc-accept-encoding": - mdata[hf.Name] = append(mdata[hf.Name], hf.Value) - if hf.Value == "" { - continue - } - compressors := hf.Value - if s.clientAdvertisedCompressors != "" { - compressors = s.clientAdvertisedCompressors + "," + compressors - } - s.clientAdvertisedCompressors = compressors case "grpc-encoding": s.recvCompress = hf.Value case ":method": @@ -410,23 +413,23 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade timeoutSet = true var err error if timeout, err = decodeTimeout(hf.Value); err != nil { - headerError = status.Newf(codes.Internal, "malformed grpc-timeout: %v", err) + headerError = true } // "Transports must consider requests containing the Connection header // as malformed." - A41 case "connection": - if t.logger.V(logLevel) { - t.logger.Infof("Received a HEADERS frame with a :connection header which makes the request malformed, as per the HTTP/2 spec") + if logger.V(logLevel) { + logger.Errorf("transport: http2Server.operateHeaders parsed a :connection header which makes a request malformed as per the HTTP/2 spec") } - protocolError = true + headerError = true default: if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) { break } v, err := decodeMetadataHeader(hf.Name, hf.Value) if err != nil { - headerError = status.Newf(codes.Internal, "malformed binary metadata %q in header %q: %v", hf.Value, hf.Name, err) - t.logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err) + headerError = true + logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err) break } mdata[hf.Name] = append(mdata[hf.Name], v) @@ -440,47 +443,27 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade // error, this takes precedence over a client not speaking gRPC. 
if len(mdata[":authority"]) > 1 || len(mdata["host"]) > 1 { errMsg := fmt.Sprintf("num values of :authority: %v, num values of host: %v, both must only have 1 value as per HTTP/2 spec", len(mdata[":authority"]), len(mdata["host"])) - if t.logger.V(logLevel) { - t.logger.Infof("Aborting the stream early: %v", errMsg) + if logger.V(logLevel) { + logger.Errorf("transport: %v", errMsg) } t.controlBuf.put(&earlyAbortStream{ - httpStatus: http.StatusBadRequest, + httpStatus: 400, streamID: streamID, contentSubtype: s.contentSubtype, status: status.New(codes.Internal, errMsg), rst: !frame.StreamEnded(), }) - return nil + return false } - if protocolError { + if !isGRPC || headerError { t.controlBuf.put(&cleanupStream{ streamID: streamID, rst: true, rstCode: http2.ErrCodeProtocol, onWrite: func() {}, }) - return nil - } - if !isGRPC { - t.controlBuf.put(&earlyAbortStream{ - httpStatus: http.StatusUnsupportedMediaType, - streamID: streamID, - contentSubtype: s.contentSubtype, - status: status.Newf(codes.InvalidArgument, "invalid gRPC request content-type %q", contentType), - rst: !frame.StreamEnded(), - }) - return nil - } - if headerError != nil { - t.controlBuf.put(&earlyAbortStream{ - httpStatus: http.StatusBadRequest, - streamID: streamID, - contentSubtype: s.contentSubtype, - status: headerError, - rst: !frame.StreamEnded(), - }) - return nil + return false } // "If :authority is missing, Host must be renamed to :authority." - A41 @@ -501,9 +484,9 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade s.state = streamReadDone } if timeoutSet { - s.ctx, s.cancel = context.WithTimeout(ctx, timeout) + s.ctx, s.cancel = context.WithTimeout(t.ctx, timeout) } else { - s.ctx, s.cancel = context.WithCancel(ctx) + s.ctx, s.cancel = context.WithCancel(t.ctx) } // Attach the received metadata to the context. 
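The operateHeaders hunks above rebuild the parsed header fields in mdata and attach them to the per-stream context (now derived from t.ctx rather than a caller-supplied context). A minimal sketch, outside the patch, of how a handler reads that metadata back; the package, helper, and header key are hypothetical:

package example

import (
	"context"

	"google.golang.org/grpc/metadata"
)

// inspectMD shows how metadata attached via metadata.NewIncomingContext in
// operateHeaders becomes visible to application code.
func inspectMD(ctx context.Context) []string {
	md, ok := metadata.FromIncomingContext(ctx)
	if !ok {
		return nil
	}
	// Header keys arrive lower-cased; "x-request-id" is an arbitrary
	// application-defined key used only for illustration.
	return md.Get("x-request-id")
}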
@@ -520,7 +503,7 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade if t.state != reachable { t.mu.Unlock() s.cancel() - return nil + return false } if uint32(len(t.activeStreams)) >= t.maxStreams { t.mu.Unlock() @@ -531,13 +514,13 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade onWrite: func() {}, }) s.cancel() - return nil + return false } if httpMethod != http.MethodPost { t.mu.Unlock() - errMsg := fmt.Sprintf("Received a HEADERS frame with :method %q which should be POST", httpMethod) - if t.logger.V(logLevel) { - t.logger.Infof("Aborting the stream early: %v", errMsg) + errMsg := fmt.Sprintf("http2Server.operateHeaders parsed a :method field: %v which should be POST", httpMethod) + if logger.V(logLevel) { + logger.Infof("transport: %v", errMsg) } t.controlBuf.put(&earlyAbortStream{ httpStatus: 405, @@ -547,14 +530,14 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade rst: !frame.StreamEnded(), }) s.cancel() - return nil + return false } if t.inTapHandle != nil { var err error - if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method, Header: mdata}); err != nil { + if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method}); err != nil { t.mu.Unlock() - if t.logger.V(logLevel) { - t.logger.Infof("Aborting the stream early due to InTapHandle failure: %v", err) + if logger.V(logLevel) { + logger.Infof("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err) } stat, ok := status.FromError(err) if !ok { @@ -567,7 +550,7 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade status: stat, rst: !frame.StreamEnded(), }) - return nil + return false } } t.activeStreams[streamID] = s @@ -582,6 +565,19 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade s.requestRead = func(n int) { t.adjustWindow(s, uint32(n)) } + s.ctx = traceCtx(s.ctx, s.method) + for _, sh := range t.stats { + s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) + inHeader := &stats.InHeader{ + FullMethod: s.method, + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + Compression: s.recvCompress, + WireLength: int(frame.Header().Length), + Header: metadata.MD(mdata).Copy(), + } + sh.HandleRPC(s.ctx, inHeader) + } s.ctxDone = s.ctx.Done() s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone) s.trReader = &transportReader{ @@ -601,25 +597,22 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade wq: s.wq, }) handle(s) - return nil + return false } // HandleStreams receives incoming streams using the given handler. This is // typically run in a separate goroutine. // traceCtx attaches trace to ctx and returns the new context. 
-func (t *http2Server) HandleStreams(ctx context.Context, handle func(*Stream)) { - defer func() { - <-t.loopyWriterDone - close(t.readerDone) - }() +func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) { + defer close(t.readerDone) for { t.controlBuf.throttle() frame, err := t.framer.fr.ReadFrame() atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) if err != nil { if se, ok := err.(http2.StreamError); ok { - if t.logger.V(logLevel) { - t.logger.Warningf("Encountered http2.StreamError: %v", se) + if logger.V(logLevel) { + logger.Warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se) } t.mu.Lock() s := t.activeStreams[se.StreamID] @@ -637,16 +630,19 @@ func (t *http2Server) HandleStreams(ctx context.Context, handle func(*Stream)) { continue } if err == io.EOF || err == io.ErrUnexpectedEOF { - t.Close(err) + t.Close() return } - t.Close(err) + if logger.V(logLevel) { + logger.Warningf("transport: http2Server.HandleStreams failed to read frame: %v", err) + } + t.Close() return } switch frame := frame.(type) { case *http2.MetaHeadersFrame: - if err := t.operateHeaders(ctx, frame, handle); err != nil { - t.Close(err) + if t.operateHeaders(frame, handle, traceCtx) { + t.Close() break } case *http2.DataFrame: @@ -662,8 +658,8 @@ func (t *http2Server) HandleStreams(ctx context.Context, handle func(*Stream)) { case *http2.GoAwayFrame: // TODO: Handle GoAway from the client appropriately. default: - if t.logger.V(logLevel) { - t.logger.Infof("Received unsupported frame type %T", frame) + if logger.V(logLevel) { + logger.Errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame) } } } @@ -830,7 +826,7 @@ func (t *http2Server) handleSettings(f *http2.SettingsFrame) { } return nil }) - t.controlBuf.executeAndPut(func(any) bool { + t.controlBuf.executeAndPut(func(interface{}) bool { for _, f := range updateFuncs { f() } @@ -847,8 +843,8 @@ const ( func (t *http2Server) handlePing(f *http2.PingFrame) { if f.IsAck() { - if f.Data == goAwayPing.data && t.drainEvent != nil { - t.drainEvent.Fire() + if f.Data == goAwayPing.data && t.drainChan != nil { + close(t.drainChan) return } // Maybe it's a BDP ping. @@ -890,7 +886,10 @@ func (t *http2Server) handlePing(f *http2.PingFrame) { if t.pingStrikes > maxPingStrikes { // Send goaway and close the connection. 
- t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: errors.New("got too many pings from the client")}) + if logger.V(logLevel) { + logger.Errorf("transport: Got too many pings from the client, closing the connection.") + } + t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true}) } } @@ -914,7 +913,7 @@ func appendHeaderFieldsFromMD(headerFields []hpack.HeaderField, md metadata.MD) return headerFields } -func (t *http2Server) checkForHeaderListSize(it any) bool { +func (t *http2Server) checkForHeaderListSize(it interface{}) bool { if t.maxSendHeaderListSize == nil { return true } @@ -922,8 +921,8 @@ func (t *http2Server) checkForHeaderListSize(it any) bool { var sz int64 for _, f := range hdrFrame.hf { if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) { - if t.logger.V(logLevel) { - t.logger.Infof("Header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize) + if logger.V(logLevel) { + logger.Errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize) } return false } @@ -960,12 +959,7 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { } } if err := t.writeHeaderLocked(s); err != nil { - switch e := err.(type) { - case ConnectionError: - return status.Error(codes.Unavailable, e.Desc) - default: - return status.Convert(err).Err() - } + return status.Convert(err).Err() } return nil } @@ -1038,15 +1032,12 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())}) if p := st.Proto(); p != nil && len(p.Details) > 0 { - // Do not use the user's grpc-status-details-bin (if present) if we are - // even attempting to set our own. - delete(s.trailer, grpcStatusDetailsBinHeader) stBytes, err := proto.Marshal(p) if err != nil { // TODO: return error instead, when callers are able to handle it. - t.logger.Errorf("Failed to marshal rpc status: %s, error: %v", pretty.ToJSON(p), err) + logger.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err) } else { - headerFields = append(headerFields, hpack.HeaderField{Name: grpcStatusDetailsBinHeader, Value: encodeBinHeader(stBytes)}) + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)}) } } @@ -1149,20 +1140,20 @@ func (t *http2Server) keepalive() { if val <= 0 { // The connection has been idle for a duration of keepalive.MaxConnectionIdle or more. // Gracefully close the connection. - t.Drain("max_idle") + t.Drain() return } idleTimer.Reset(val) case <-ageTimer.C: - t.Drain("max_age") + t.Drain() ageTimer.Reset(t.kp.MaxConnectionAgeGrace) select { case <-ageTimer.C: // Close the connection after grace period. 
- if t.logger.V(logLevel) { - t.logger.Infof("Closing server transport due to maximum connection age") + if logger.V(logLevel) { + logger.Infof("transport: closing server transport due to maximum connection age.") } - t.controlBuf.put(closeConnection{}) + t.Close() case <-t.done: } return @@ -1178,7 +1169,10 @@ func (t *http2Server) keepalive() { continue } if outstandingPing && kpTimeoutLeft <= 0 { - t.Close(fmt.Errorf("keepalive ping not acked within timeout %s", t.kp.Time)) + if logger.V(logLevel) { + logger.Infof("transport: closing server transport due to idleness.") + } + t.Close() return } if !outstandingPing { @@ -1205,29 +1199,30 @@ func (t *http2Server) keepalive() { // Close starts shutting down the http2Server transport. // TODO(zhaoq): Now the destruction is not blocked on any pending streams. This // could cause some resource issue. Revisit this later. -func (t *http2Server) Close(err error) { +func (t *http2Server) Close() { t.mu.Lock() if t.state == closing { t.mu.Unlock() return } - if t.logger.V(logLevel) { - t.logger.Infof("Closing: %v", err) - } t.state = closing streams := t.activeStreams t.activeStreams = nil t.mu.Unlock() t.controlBuf.finish() close(t.done) - if err := t.conn.Close(); err != nil && t.logger.V(logLevel) { - t.logger.Infof("Error closing underlying net.Conn during Close: %v", err) + if err := t.conn.Close(); err != nil && logger.V(logLevel) { + logger.Infof("transport: error closing conn during Close: %v", err) } channelz.RemoveEntry(t.channelzID) // Cancel all active streams. for _, s := range streams { s.cancel() } + for _, sh := range t.stats { + connEnd := &stats.ConnEnd{} + sh.HandleConn(t.ctx, connEnd) + } } // deleteStream deletes the stream s from transport's active streams. @@ -1293,14 +1288,18 @@ func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eo }) } -func (t *http2Server) Drain(debugData string) { +func (t *http2Server) RemoteAddr() net.Addr { + return t.remoteAddr +} + +func (t *http2Server) Drain() { t.mu.Lock() defer t.mu.Unlock() - if t.drainEvent != nil { + if t.drainChan != nil { return } - t.drainEvent = grpcsync.NewEvent() - t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte(debugData), headsUp: true}) + t.drainChan = make(chan struct{}) + t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte{}, headsUp: true}) } var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}} @@ -1320,17 +1319,19 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { // Stop accepting more streams now. t.state = draining sid := t.maxStreamID - retErr := g.closeConn if len(t.activeStreams) == 0 { - retErr = errors.New("second GOAWAY written and no active streams left to process") + g.closeConn = true } t.mu.Unlock() t.maxStreamMu.Unlock() if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil { return false, err } - if retErr != nil { - return false, retErr + if g.closeConn { + // Abruptly close the connection following the GoAway (via + // loopywriter). But flush out what's inside the buffer first. + t.framer.writer.Flush() + return false, fmt.Errorf("transport: Connection closing") } return true, nil } @@ -1342,7 +1343,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { // originated before the GoAway reaches the client. // After getting the ack or timer expiration send out another GoAway this // time with an ID of the max stream server intends to process. 
- if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, g.debugData); err != nil { + if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, []byte{}); err != nil { return false, err } if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil { @@ -1352,7 +1353,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { timer := time.NewTimer(time.Minute) defer timer.Stop() select { - case <-t.drainEvent.Done(): + case <-t.drainChan: case <-timer.C: case <-t.done: return @@ -1375,11 +1376,11 @@ func (t *http2Server) ChannelzMetric() *channelz.SocketInternalMetric { LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)), LocalFlowControlWindow: int64(t.fc.getSize()), SocketOptions: channelz.GetSocketOption(t.conn), - LocalAddr: t.peer.LocalAddr, - RemoteAddr: t.peer.Addr, + LocalAddr: t.localAddr, + RemoteAddr: t.remoteAddr, // RemoteName : } - if au, ok := t.peer.AuthInfo.(credentials.ChannelzSecurityInfo); ok { + if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok { s.Security = au.GetSecurityValue() } s.RemoteFlowControlWindow = t.getOutFlowWindow() @@ -1411,12 +1412,10 @@ func (t *http2Server) getOutFlowWindow() int64 { } } -// Peer returns the peer of the transport. -func (t *http2Server) Peer() *peer.Peer { +func (t *http2Server) getPeer() *peer.Peer { return &peer.Peer{ - Addr: t.peer.Addr, - LocalAddr: t.peer.LocalAddr, - AuthInfo: t.peer.AuthInfo, // Can be nil + Addr: t.remoteAddr, + AuthInfo: t.authInfo, // Can be nil } } @@ -1441,6 +1440,6 @@ func GetConnection(ctx context.Context) net.Conn { // SetConnection adds the connection to the context to be able to get // information about the destination ip and port for an incoming RPC. This also // allows any unary or streaming interceptors to see the connection. -func SetConnection(ctx context.Context, conn net.Conn) context.Context { +func setConnection(ctx context.Context, conn net.Conn) context.Context { return context.WithValue(ctx, connectionKey{}, conn) } diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index dc29d590..2c601a86 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -21,7 +21,6 @@ package transport import ( "bufio" "encoding/base64" - "errors" "fmt" "io" "math" @@ -30,13 +29,16 @@ import ( "net/url" "strconv" "strings" - "sync" "time" "unicode/utf8" + "github.com/golang/protobuf/proto" "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" + spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" ) const ( @@ -83,10 +85,9 @@ var ( // 504 Gateway timeout - UNAVAILABLE. http.StatusGatewayTimeout: codes.Unavailable, } + logger = grpclog.Component("transport") ) -var grpcStatusDetailsBinHeader = "grpc-status-details-bin" - // isReservedHeader checks whether hdr belongs to HTTP2 headers // reserved by gRPC protocol. Any other headers are classified as the // user-specified metadata. @@ -102,6 +103,7 @@ func isReservedHeader(hdr string) bool { "grpc-message", "grpc-status", "grpc-timeout", + "grpc-status-details-bin", // Intentionally exclude grpc-previous-rpc-attempts and // grpc-retry-pushback-ms, which are "reserved", but their API // intentionally works via metadata. 
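The following hunk reintroduces decodeGRPCStatusDetails, which the client-side operateHeaders path above uses to turn a grpc-status-details-bin trailer into a *status.Status. A minimal standalone sketch of that decoding, assuming the same proto, genproto status, and grpc status packages the hunk imports; the package name and padding handling are illustrative, since -bin metadata may arrive with or without base64 padding:

package example

import (
	"encoding/base64"

	"github.com/golang/protobuf/proto"
	spb "google.golang.org/genproto/googleapis/rpc/status"
	"google.golang.org/grpc/status"
)

// decodeStatusDetails base64-decodes the header value and unmarshals the
// bytes as a google.rpc.Status message, mirroring decodeGRPCStatusDetails.
func decodeStatusDetails(raw string) (*status.Status, error) {
	enc := base64.StdEncoding
	if len(raw)%4 != 0 {
		enc = base64.RawStdEncoding // tolerate the unpadded form
	}
	b, err := enc.DecodeString(raw)
	if err != nil {
		return nil, err
	}
	st := &spb.Status{}
	if err := proto.Unmarshal(b, st); err != nil {
		return nil, err
	}
	return status.FromProto(st), nil
}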
@@ -152,6 +154,18 @@ func decodeMetadataHeader(k, v string) (string, error) { return v, nil } +func decodeGRPCStatusDetails(rawDetails string) (*status.Status, error) { + v, err := decodeBinHeader(rawDetails) + if err != nil { + return nil, err + } + st := &spb.Status{} + if err = proto.Unmarshal(v, st); err != nil { + return nil, err + } + return status.FromProto(st), nil +} + type timeoutUnit uint8 const ( @@ -296,7 +310,6 @@ func decodeGrpcMessageUnchecked(msg string) string { } type bufWriter struct { - pool *sync.Pool buf []byte offset int batchSize int @@ -304,17 +317,12 @@ type bufWriter struct { err error } -func newBufWriter(conn net.Conn, batchSize int, pool *sync.Pool) *bufWriter { - w := &bufWriter{ +func newBufWriter(conn net.Conn, batchSize int) *bufWriter { + return &bufWriter{ + buf: make([]byte, batchSize*2), batchSize: batchSize, conn: conn, - pool: pool, } - // this indicates that we should use non shared buf - if pool == nil { - w.buf = make([]byte, batchSize) - } - return w } func (w *bufWriter) Write(b []byte) (n int, err error) { @@ -322,12 +330,7 @@ func (w *bufWriter) Write(b []byte) (n int, err error) { return 0, w.err } if w.batchSize == 0 { // Buffer has been disabled. - n, err = w.conn.Write(b) - return n, toIOError(err) - } - if w.buf == nil { - b := w.pool.Get().(*[]byte) - w.buf = *b + return w.conn.Write(b) } for len(b) > 0 { nn := copy(w.buf[w.offset:], b) @@ -335,24 +338,13 @@ func (w *bufWriter) Write(b []byte) (n int, err error) { w.offset += nn n += nn if w.offset >= w.batchSize { - err = w.flushKeepBuffer() + err = w.Flush() } } return n, err } func (w *bufWriter) Flush() error { - err := w.flushKeepBuffer() - // Only release the buffer if we are in a "shared" mode - if w.buf != nil && w.pool != nil { - b := w.buf - w.pool.Put(&b) - w.buf = nil - } - return err -} - -func (w *bufWriter) flushKeepBuffer() error { if w.err != nil { return w.err } @@ -360,39 +352,16 @@ func (w *bufWriter) flushKeepBuffer() error { return nil } _, w.err = w.conn.Write(w.buf[:w.offset]) - w.err = toIOError(w.err) w.offset = 0 return w.err } -type ioError struct { - error -} - -func (i ioError) Unwrap() error { - return i.error -} - -func isIOError(err error) bool { - return errors.As(err, &ioError{}) -} - -func toIOError(err error) error { - if err == nil { - return nil - } - return ioError{error: err} -} - type framer struct { writer *bufWriter fr *http2.Framer } -var writeBufferPoolMap map[int]*sync.Pool = make(map[int]*sync.Pool) -var writeBufferMutex sync.Mutex - -func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32) *framer { +func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderListSize uint32) *framer { if writeBufferSize < 0 { writeBufferSize = 0 } @@ -400,11 +369,7 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBu if readBufferSize > 0 { r = bufio.NewReaderSize(r, readBufferSize) } - var pool *sync.Pool - if sharedWriteBuffer { - pool = getWriteBufferPool(writeBufferSize) - } - w := newBufWriter(conn, writeBufferSize, pool) + w := newBufWriter(conn, writeBufferSize) f := &framer{ writer: w, fr: http2.NewFramer(w, r), @@ -418,24 +383,6 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBu return f } -func getWriteBufferPool(writeBufferSize int) *sync.Pool { - writeBufferMutex.Lock() - defer writeBufferMutex.Unlock() - size := writeBufferSize * 2 - pool, ok := writeBufferPoolMap[size] - if ok { - return pool - } - pool 
= &sync.Pool{ - New: func() any { - b := make([]byte, size) - return &b - }, - } - writeBufferPoolMap[size] = pool - return pool -} - // parseDialTarget returns the network and address to pass to dialer. func parseDialTarget(target string) (string, string) { net := "tcp" diff --git a/vendor/google.golang.org/grpc/internal/transport/logging.go b/vendor/google.golang.org/grpc/internal/transport/logging.go deleted file mode 100644 index 42ed2b07..00000000 --- a/vendor/google.golang.org/grpc/internal/transport/logging.go +++ /dev/null @@ -1,40 +0,0 @@ -/* - * - * Copyright 2023 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package transport - -import ( - "fmt" - - "google.golang.org/grpc/grpclog" - internalgrpclog "google.golang.org/grpc/internal/grpclog" -) - -var logger = grpclog.Component("transport") - -func prefixLoggerForServerTransport(p *http2Server) *internalgrpclog.PrefixLogger { - return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[server-transport %p] ", p)) -} - -func prefixLoggerForServerHandlerTransport(p *serverHandlerTransport) *internalgrpclog.PrefixLogger { - return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[server-handler-transport %p] ", p)) -} - -func prefixLoggerForClientTransport(p *http2Client) *internalgrpclog.PrefixLogger { - return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[client-transport %p] ", p)) -} diff --git a/vendor/google.golang.org/grpc/internal/transport/proxy.go b/vendor/google.golang.org/grpc/internal/transport/proxy.go index 24fa1032..41596198 100644 --- a/vendor/google.golang.org/grpc/internal/transport/proxy.go +++ b/vendor/google.golang.org/grpc/internal/transport/proxy.go @@ -28,8 +28,6 @@ import ( "net/http" "net/http/httputil" "net/url" - - "google.golang.org/grpc/internal" ) const proxyAuthHeaderKey = "Proxy-Authorization" @@ -114,7 +112,7 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr stri // proxyDial dials, connecting to a proxy first if necessary. Checks if a proxy // is necessary, dials, does the HTTP CONNECT handshake, and returns the // connection. -func proxyDial(ctx context.Context, addr string, grpcUA string) (net.Conn, error) { +func proxyDial(ctx context.Context, addr string, grpcUA string) (conn net.Conn, err error) { newAddr := addr proxyURL, err := mapAddress(addr) if err != nil { @@ -124,15 +122,15 @@ func proxyDial(ctx context.Context, addr string, grpcUA string) (net.Conn, error newAddr = proxyURL.Host } - conn, err := internal.NetDialerWithTCPKeepalive().DialContext(ctx, "tcp", newAddr) + conn, err = (&net.Dialer{}).DialContext(ctx, "tcp", newAddr) if err != nil { - return nil, err + return } - if proxyURL == nil { + if proxyURL != nil { // proxy is disabled if proxyURL is nil. 
- return conn, err + conn, err = doHTTPConnectHandshake(ctx, conn, addr, proxyURL, grpcUA) } - return doHTTPConnectHandshake(ctx, conn, addr, proxyURL, grpcUA) + return } func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error { diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index b7b8fec1..2e615ee2 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -37,13 +37,16 @@ import ( "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" - "google.golang.org/grpc/peer" "google.golang.org/grpc/resolver" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" "google.golang.org/grpc/tap" ) +// ErrNoHeaders is used as a signal that a trailers only response was received, +// and is not a real error. +var ErrNoHeaders = errors.New("stream has no headers") + const logLevel = 2 type bufferPool struct { @@ -53,7 +56,7 @@ type bufferPool struct { func newBufferPool() *bufferPool { return &bufferPool{ pool: sync.Pool{ - New: func() any { + New: func() interface{} { return new(bytes.Buffer) }, }, @@ -254,9 +257,6 @@ type Stream struct { fc *inFlow wq *writeQuota - // Holds compressor names passed in grpc-accept-encoding metadata from the - // client. This is empty for the client side stream. - clientAdvertisedCompressors string // Callback to state application's intentions to read data. This // is used to adjust flow control, if needed. requestRead func(int) @@ -266,8 +266,7 @@ type Stream struct { // headerValid indicates whether a valid header was received. Only // meaningful after headerChan is closed (always call waitOnHeader() before // reading its value). Not valid on server side. - headerValid bool - headerWireLength int // Only set on server side. + headerValid bool // hdrMu protects header and trailer metadata on the server-side. hdrMu sync.Mutex @@ -346,24 +345,8 @@ func (s *Stream) RecvCompress() string { } // SetSendCompress sets the compression algorithm to the stream. -func (s *Stream) SetSendCompress(name string) error { - if s.isHeaderSent() || s.getState() == streamDone { - return errors.New("transport: set send compressor called after headers sent or stream done") - } - - s.sendCompress = name - return nil -} - -// SendCompress returns the send compressor name. -func (s *Stream) SendCompress() string { - return s.sendCompress -} - -// ClientAdvertisedCompressors returns the compressor names advertised by the -// client via grpc-accept-encoding header. -func (s *Stream) ClientAdvertisedCompressors() string { - return s.clientAdvertisedCompressors +func (s *Stream) SetSendCompress(str string) { + s.sendCompress = str } // Done returns a channel which is closed when it receives the final status @@ -388,10 +371,14 @@ func (s *Stream) Header() (metadata.MD, error) { } s.waitOnHeader() - if !s.headerValid || s.noHeaders { + if !s.headerValid { return nil, s.status.Err() } + if s.noHeaders { + return nil, ErrNoHeaders + } + return s.header.Copy(), nil } @@ -427,12 +414,6 @@ func (s *Stream) Context() context.Context { return s.ctx } -// SetContext sets the context of the stream. This will be deleted once the -// stats handler callouts all move to gRPC layer. -func (s *Stream) SetContext(ctx context.Context) { - s.ctx = ctx -} - // Method returns the method for the stream. 
func (s *Stream) Method() string { return s.method @@ -445,12 +426,6 @@ func (s *Stream) Status() *status.Status { return s.status } -// HeaderWireLength returns the size of the headers of the stream as received -// from the wire. Valid only on the server. -func (s *Stream) HeaderWireLength() int { - return s.headerWireLength -} - // SetHeader sets the header metadata. This can be called multiple times. // Server side only. // This should not be called in parallel to other data writes. @@ -565,7 +540,6 @@ type ServerConfig struct { InitialConnWindowSize int32 WriteBufferSize int ReadBufferSize int - SharedWriteBuffer bool ChannelzParentID *channelz.Identifier MaxHeaderListSize *uint32 HeaderTableSize *uint32 @@ -599,8 +573,6 @@ type ConnectOptions struct { WriteBufferSize int // ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall. ReadBufferSize int - // SharedWriteBuffer indicates whether connections should reuse write buffer - SharedWriteBuffer bool // ChannelzParentID sets the addrConn id which initiate the creation of this client transport. ChannelzParentID *channelz.Identifier // MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received. @@ -611,8 +583,8 @@ type ConnectOptions struct { // NewClientTransport establishes the transport with the required ConnectOptions // and returns it to the caller. -func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (ClientTransport, error) { - return newHTTP2Client(connectCtx, ctx, addr, opts, onClose) +func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) { + return newHTTP2Client(connectCtx, ctx, addr, opts, onGoAway, onClose) } // Options provides additional hints and information for message @@ -712,7 +684,7 @@ type ClientTransport interface { // Write methods for a given Stream will be called serially. type ServerTransport interface { // HandleStreams receives incoming streams using the given handler. - HandleStreams(context.Context, func(*Stream)) + HandleStreams(func(*Stream), func(context.Context, string) context.Context) // WriteHeader sends the header metadata for the given stream. // WriteHeader may not be called on all streams. @@ -729,13 +701,13 @@ type ServerTransport interface { // Close tears down the transport. Once it is called, the transport // should not be accessed any more. All the pending streams and their // handlers will be terminated asynchronously. - Close(err error) + Close() - // Peer returns the peer of the server transport. - Peer() *peer.Peer + // RemoteAddr returns the remote network address. + RemoteAddr() net.Addr // Drain notifies the client this ServerTransport stops accepting new RPCs. - Drain(debugData string) + Drain() // IncrMsgSent increments the number of message sent through this transport. IncrMsgSent() @@ -745,7 +717,7 @@ type ServerTransport interface { } // connectionErrorf creates an ConnectionError with the specified error description. 
-func connectionErrorf(temp bool, e error, format string, a ...any) ConnectionError { +func connectionErrorf(temp bool, e error, format string, a ...interface{}) ConnectionError { return ConnectionError{ Desc: fmt.Sprintf(format, a...), temp: temp, diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go index 1e9485fd..fb4a88f5 100644 --- a/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -25,14 +25,8 @@ import ( "context" "fmt" "strings" - - "google.golang.org/grpc/internal" ) -func init() { - internal.FromOutgoingContextRaw = fromOutgoingContextRaw -} - // DecodeKeyValue returns k, v, nil. // // Deprecated: use k and v directly instead. @@ -97,11 +91,7 @@ func (md MD) Len() int { // Copy returns a copy of md. func (md MD) Copy() MD { - out := make(MD, len(md)) - for k, v := range md { - out[k] = copyOf(v) - } - return out + return Join(md) } // Get obtains the values for a given key. @@ -159,16 +149,14 @@ func Join(mds ...MD) MD { type mdIncomingKey struct{} type mdOutgoingKey struct{} -// NewIncomingContext creates a new context with incoming md attached. md must -// not be modified after calling this function. +// NewIncomingContext creates a new context with incoming md attached. func NewIncomingContext(ctx context.Context, md MD) context.Context { return context.WithValue(ctx, mdIncomingKey{}, md) } // NewOutgoingContext creates a new context with outgoing md attached. If used // in conjunction with AppendToOutgoingContext, NewOutgoingContext will -// overwrite any previously-appended metadata. md must not be modified after -// calling this function. +// overwrite any previously-appended metadata. func NewOutgoingContext(ctx context.Context, md MD) context.Context { return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md}) } @@ -183,11 +171,8 @@ func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context md, _ := ctx.Value(mdOutgoingKey{}).(rawMD) added := make([][]string, len(md.added)+1) copy(added, md.added) - kvCopy := make([]string, 0, len(kv)) - for i := 0; i < len(kv); i += 2 { - kvCopy = append(kvCopy, strings.ToLower(kv[i]), kv[i+1]) - } - added[len(added)-1] = kvCopy + added[len(added)-1] = make([]string, len(kv)) + copy(added[len(added)-1], kv) return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md.md, added: added}) } @@ -211,8 +196,7 @@ func FromIncomingContext(ctx context.Context) (MD, bool) { } // ValueFromIncomingContext returns the metadata value corresponding to the metadata -// key from the incoming metadata if it exists. Keys are matched in a case insensitive -// manner. +// key from the incoming metadata if it exists. Key must be lower-case. // // # Experimental // @@ -228,29 +212,33 @@ func ValueFromIncomingContext(ctx context.Context, key string) []string { return copyOf(v) } for k, v := range md { - // Case insenitive comparison: MD is a map, and there's no guarantee - // that the MD attached to the context is created using our helper - // functions. - if strings.EqualFold(k, key) { + // We need to manually convert all keys to lower case, because MD is a + // map, and there's no guarantee that the MD attached to the context is + // created using our helper functions. 
+ if strings.ToLower(k) == key { return copyOf(v) } } return nil } +// the returned slice must not be modified in place func copyOf(v []string) []string { vals := make([]string, len(v)) copy(vals, v) return vals } -// fromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD. +// FromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD. // // Remember to perform strings.ToLower on the keys, for both the returned MD (MD // is a map, there's no guarantee it's created using our helper functions) and // the extra kv pairs (AppendToOutgoingContext doesn't turn them into // lowercase). -func fromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) { +// +// This is intended for gRPC-internal use ONLY. Users should use +// FromOutgoingContext instead. +func FromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) { raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) if !ok { return nil, nil, false diff --git a/vendor/google.golang.org/grpc/peer/peer.go b/vendor/google.golang.org/grpc/peer/peer.go index a821ff9b..e01d219f 100644 --- a/vendor/google.golang.org/grpc/peer/peer.go +++ b/vendor/google.golang.org/grpc/peer/peer.go @@ -32,8 +32,6 @@ import ( type Peer struct { // Addr is the peer address. Addr net.Addr - // LocalAddr is the local address. - LocalAddr net.Addr // AuthInfo is the authentication information of the transport. // It is nil if there is no transport security being used. AuthInfo credentials.AuthInfo diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index bf56faa7..a5d5516e 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -28,25 +28,20 @@ import ( "google.golang.org/grpc/internal/channelz" istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/transport" - "google.golang.org/grpc/stats" "google.golang.org/grpc/status" ) // pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick // actions and unblock when there's a picker update. type pickerWrapper struct { - mu sync.Mutex - done bool - blockingCh chan struct{} - picker balancer.Picker - statsHandlers []stats.Handler // to record blocking picker calls + mu sync.Mutex + done bool + blockingCh chan struct{} + picker balancer.Picker } -func newPickerWrapper(statsHandlers []stats.Handler) *pickerWrapper { - return &pickerWrapper{ - blockingCh: make(chan struct{}), - statsHandlers: statsHandlers, - } +func newPickerWrapper() *pickerWrapper { + return &pickerWrapper{blockingCh: make(chan struct{})} } // updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. @@ -63,16 +58,12 @@ func (pw *pickerWrapper) updatePicker(p balancer.Picker) { pw.mu.Unlock() } -// doneChannelzWrapper performs the following: -// - increments the calls started channelz counter -// - wraps the done function in the passed in result to increment the calls -// failed or calls succeeded channelz counter before invoking the actual -// done function. 
-func doneChannelzWrapper(acbw *acBalancerWrapper, result *balancer.PickResult) { - ac := acbw.ac +func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) func(balancer.DoneInfo) { + acw.mu.Lock() + ac := acw.ac + acw.mu.Unlock() ac.incrCallsStarted() - done := result.Done - result.Done = func(b balancer.DoneInfo) { + return func(b balancer.DoneInfo) { if b.Err != nil && b.Err != io.EOF { ac.incrCallsFailed() } else { @@ -91,16 +82,15 @@ func doneChannelzWrapper(acbw *acBalancerWrapper, result *balancer.PickResult) { // - the current picker returns other errors and failfast is false. // - the subConn returned by the current picker is not READY // When one of these situations happens, pick blocks until the picker gets updated. -func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, balancer.PickResult, error) { +func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, func(balancer.DoneInfo), error) { var ch chan struct{} var lastPickErr error - for { pw.mu.Lock() if pw.done { pw.mu.Unlock() - return nil, balancer.PickResult{}, ErrClientConnClosing + return nil, nil, ErrClientConnClosing } if pw.picker == nil { @@ -121,34 +111,21 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. } switch ctx.Err() { case context.DeadlineExceeded: - return nil, balancer.PickResult{}, status.Error(codes.DeadlineExceeded, errStr) + return nil, nil, status.Error(codes.DeadlineExceeded, errStr) case context.Canceled: - return nil, balancer.PickResult{}, status.Error(codes.Canceled, errStr) + return nil, nil, status.Error(codes.Canceled, errStr) } case <-ch: } continue } - // If the channel is set, it means that the pick call had to wait for a - // new picker at some point. Either it's the first iteration and this - // function received the first picker, or a picker errored with - // ErrNoSubConnAvailable or errored with failfast set to false, which - // will trigger a continue to the next iteration. In the first case this - // conditional will hit if this call had to block (the channel is set). - // In the second case, the only way it will get to this conditional is - // if there is a new picker. - if ch != nil { - for _, sh := range pw.statsHandlers { - sh.HandleRPC(ctx, &stats.PickerUpdated{}) - } - } - ch = pw.blockingCh p := pw.picker pw.mu.Unlock() pickResult, err := p.Pick(info) + if err != nil { if err == balancer.ErrNoSubConnAvailable { continue @@ -159,7 +136,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. if istatus.IsRestrictedControlPlaneCode(st) { err = status.Errorf(codes.Internal, "received picker error with illegal status: %v", err) } - return nil, balancer.PickResult{}, dropError{error: err} + return nil, nil, dropError{error: err} } // For all other errors, wait for ready RPCs should block and other // RPCs should fail with unavailable. @@ -167,20 +144,19 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. 
lastPickErr = err continue } - return nil, balancer.PickResult{}, status.Error(codes.Unavailable, err.Error()) + return nil, nil, status.Error(codes.Unavailable, err.Error()) } - acbw, ok := pickResult.SubConn.(*acBalancerWrapper) + acw, ok := pickResult.SubConn.(*acBalancerWrapper) if !ok { logger.Errorf("subconn returned from pick is type %T, not *acBalancerWrapper", pickResult.SubConn) continue } - if t := acbw.ac.getReadyTransport(); t != nil { + if t := acw.getAddrConn().getReadyTransport(); t != nil { if channelz.IsOn() { - doneChannelzWrapper(acbw, &pickResult) - return t, pickResult, nil + return t, doneChannelzWrapper(acw, pickResult.Done), nil } - return t, pickResult, nil + return t, pickResult.Done, nil } if pickResult.Done != nil { // Calling done with nil error, no bytes sent and no bytes received. @@ -205,17 +181,6 @@ func (pw *pickerWrapper) close() { close(pw.blockingCh) } -// reset clears the pickerWrapper and prepares it for being used again when idle -// mode is exited. -func (pw *pickerWrapper) reset() { - pw.mu.Lock() - defer pw.mu.Unlock() - if pw.done { - return - } - pw.blockingCh = make(chan struct{}) -} - // dropError is a wrapper error that indicates the LB policy wishes to drop the // RPC and not retry it. type dropError struct { diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go index 5128f936..fb7a99e0 100644 --- a/vendor/google.golang.org/grpc/pickfirst.go +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -19,24 +19,15 @@ package grpc import ( - "encoding/json" "errors" "fmt" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" - internalgrpclog "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/internal/grpcrand" - "google.golang.org/grpc/internal/pretty" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/serviceconfig" ) -const ( - // PickFirstBalancerName is the name of the pick_first balancer. - PickFirstBalancerName = "pick_first" - logPrefix = "[pick-first-lb %p] " -) +// PickFirstBalancerName is the name of the pick_first balancer. +const PickFirstBalancerName = "pick_first" func newPickfirstBuilder() balancer.Builder { return &pickfirstBuilder{} @@ -45,42 +36,22 @@ func newPickfirstBuilder() balancer.Builder { type pickfirstBuilder struct{} func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { - b := &pickfirstBalancer{cc: cc} - b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) - return b + return &pickfirstBalancer{cc: cc} } func (*pickfirstBuilder) Name() string { return PickFirstBalancerName } -type pfConfig struct { - serviceconfig.LoadBalancingConfig `json:"-"` - - // If set to true, instructs the LB policy to shuffle the order of the list - // of addresses received from the name resolver before attempting to - // connect to them. 
- ShuffleAddressList bool `json:"shuffleAddressList"` -} - -func (*pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { - var cfg pfConfig - if err := json.Unmarshal(js, &cfg); err != nil { - return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) - } - return cfg, nil -} - type pickfirstBalancer struct { - logger *internalgrpclog.PrefixLogger state connectivity.State cc balancer.ClientConn subConn balancer.SubConn } func (b *pickfirstBalancer) ResolverError(err error) { - if b.logger.V(2) { - b.logger.Infof("Received error from the name resolver: %v", err) + if logger.V(2) { + logger.Infof("pickfirstBalancer: ResolverError called with error %v", err) } if b.subConn == nil { b.state = connectivity.TransientFailure @@ -98,49 +69,28 @@ func (b *pickfirstBalancer) ResolverError(err error) { } func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { - addrs := state.ResolverState.Addresses - if len(addrs) == 0 { + if len(state.ResolverState.Addresses) == 0 { // The resolver reported an empty address list. Treat it like an error by // calling b.ResolverError. if b.subConn != nil { - // Shut down the old subConn. All addresses were removed, so it is - // no longer valid. - b.subConn.Shutdown() + // Remove the old subConn. All addresses were removed, so it is no longer + // valid. + b.cc.RemoveSubConn(b.subConn) b.subConn = nil } b.ResolverError(errors.New("produced zero addresses")) return balancer.ErrBadResolverState } - // We don't have to guard this block with the env var because ParseConfig - // already does so. - cfg, ok := state.BalancerConfig.(pfConfig) - if state.BalancerConfig != nil && !ok { - return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig) - } - if cfg.ShuffleAddressList { - addrs = append([]resolver.Address{}, addrs...) - grpcrand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] }) - } - - if b.logger.V(2) { - b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState)) - } - if b.subConn != nil { - b.cc.UpdateAddresses(b.subConn, addrs) + b.cc.UpdateAddresses(b.subConn, state.ResolverState.Addresses) return nil } - var subConn balancer.SubConn - subConn, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{ - StateListener: func(state balancer.SubConnState) { - b.updateSubConnState(subConn, state) - }, - }) + subConn, err := b.cc.NewSubConn(state.ResolverState.Addresses, balancer.NewSubConnOptions{}) if err != nil { - if b.logger.V(2) { - b.logger.Infof("Failed to create new SubConn: %v", err) + if logger.V(2) { + logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) } b.state = connectivity.TransientFailure b.cc.UpdateState(balancer.State{ @@ -152,29 +102,24 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState b.subConn = subConn b.state = connectivity.Idle b.cc.UpdateState(balancer.State{ - ConnectivityState: connectivity.Connecting, - Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + ConnectivityState: connectivity.Idle, + Picker: &picker{result: balancer.PickResult{SubConn: b.subConn}}, }) b.subConn.Connect() return nil } -// UpdateSubConnState is unused as a StateListener is always registered when -// creating SubConns. 
func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { - b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state) -} - -func (b *pickfirstBalancer) updateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { - if b.logger.V(2) { - b.logger.Infof("Received SubConn state update: %p, %+v", subConn, state) + if logger.V(2) { + logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", subConn, state) } if b.subConn != subConn { - if b.logger.V(2) { - b.logger.Infof("Ignored state change because subConn is not recognized") + if logger.V(2) { + logger.Infof("pickfirstBalancer: ignored state change because subConn is not recognized") } return } + b.state = state.ConnectivityState if state.ConnectivityState == connectivity.Shutdown { b.subConn = nil return @@ -187,21 +132,11 @@ func (b *pickfirstBalancer) updateSubConnState(subConn balancer.SubConn, state b Picker: &picker{result: balancer.PickResult{SubConn: subConn}}, }) case connectivity.Connecting: - if b.state == connectivity.TransientFailure { - // We stay in TransientFailure until we are Ready. See A62. - return - } b.cc.UpdateState(balancer.State{ ConnectivityState: state.ConnectivityState, Picker: &picker{err: balancer.ErrNoSubConnAvailable}, }) case connectivity.Idle: - if b.state == connectivity.TransientFailure { - // We stay in TransientFailure until we are Ready. Also kick the - // subConn out of Idle into Connecting. See A62. - b.subConn.Connect() - return - } b.cc.UpdateState(balancer.State{ ConnectivityState: state.ConnectivityState, Picker: &idlePicker{subConn: subConn}, @@ -212,7 +147,6 @@ func (b *pickfirstBalancer) updateSubConnState(subConn balancer.SubConn, state b Picker: &picker{err: state.ConnectionError}, }) } - b.state = state.ConnectivityState } func (b *pickfirstBalancer) Close() { diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go index 73bd6336..cd455478 100644 --- a/vendor/google.golang.org/grpc/preloader.go +++ b/vendor/google.golang.org/grpc/preloader.go @@ -37,7 +37,7 @@ type PreparedMsg struct { } // Encode marshalls and compresses the message using the codec and compressor for the stream. -func (p *PreparedMsg) Encode(s Stream, msg any) error { +func (p *PreparedMsg) Encode(s Stream, msg interface{}) error { ctx := s.Context() rpcInfo, ok := rpcInfoFromContext(ctx) if !ok { diff --git a/vendor/google.golang.org/grpc/reflection/README.md b/vendor/google.golang.org/grpc/reflection/README.md index 9ace83cc..04b6371a 100644 --- a/vendor/google.golang.org/grpc/reflection/README.md +++ b/vendor/google.golang.org/grpc/reflection/README.md @@ -2,7 +2,7 @@ Package reflection implements server reflection service. -The service implemented is defined in: https://github.com/grpc/grpc/blob/master/src/proto/grpc/reflection/v1/reflection.proto. +The service implemented is defined in: https://github.com/grpc/grpc/blob/master/src/proto/grpc/reflection/v1alpha/reflection.proto. To register server reflection on a gRPC server: ```go diff --git a/vendor/google.golang.org/grpc/reflection/adapt.go b/vendor/google.golang.org/grpc/reflection/adapt.go deleted file mode 100644 index 33b907a3..00000000 --- a/vendor/google.golang.org/grpc/reflection/adapt.go +++ /dev/null @@ -1,187 +0,0 @@ -/* - * - * Copyright 2023 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package reflection - -import ( - v1reflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1" - v1reflectionpb "google.golang.org/grpc/reflection/grpc_reflection_v1" - v1alphareflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" - v1alphareflectionpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" -) - -// asV1Alpha returns an implementation of the v1alpha version of the reflection -// interface that delegates all calls to the given v1 version. -func asV1Alpha(svr v1reflectiongrpc.ServerReflectionServer) v1alphareflectiongrpc.ServerReflectionServer { - return v1AlphaServerImpl{svr: svr} -} - -type v1AlphaServerImpl struct { - svr v1reflectiongrpc.ServerReflectionServer -} - -func (s v1AlphaServerImpl) ServerReflectionInfo(stream v1alphareflectiongrpc.ServerReflection_ServerReflectionInfoServer) error { - return s.svr.ServerReflectionInfo(v1AlphaServerStreamAdapter{stream}) -} - -type v1AlphaServerStreamAdapter struct { - v1alphareflectiongrpc.ServerReflection_ServerReflectionInfoServer -} - -func (s v1AlphaServerStreamAdapter) Send(response *v1reflectionpb.ServerReflectionResponse) error { - return s.ServerReflection_ServerReflectionInfoServer.Send(v1ToV1AlphaResponse(response)) -} - -func (s v1AlphaServerStreamAdapter) Recv() (*v1reflectionpb.ServerReflectionRequest, error) { - resp, err := s.ServerReflection_ServerReflectionInfoServer.Recv() - if err != nil { - return nil, err - } - return v1AlphaToV1Request(resp), nil -} - -func v1ToV1AlphaResponse(v1 *v1reflectionpb.ServerReflectionResponse) *v1alphareflectionpb.ServerReflectionResponse { - var v1alpha v1alphareflectionpb.ServerReflectionResponse - v1alpha.ValidHost = v1.ValidHost - if v1.OriginalRequest != nil { - v1alpha.OriginalRequest = v1ToV1AlphaRequest(v1.OriginalRequest) - } - switch mr := v1.MessageResponse.(type) { - case *v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse: - if mr != nil { - v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_FileDescriptorResponse{ - FileDescriptorResponse: &v1alphareflectionpb.FileDescriptorResponse{ - FileDescriptorProto: mr.FileDescriptorResponse.GetFileDescriptorProto(), - }, - } - } - case *v1reflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse: - if mr != nil { - v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse{ - AllExtensionNumbersResponse: &v1alphareflectionpb.ExtensionNumberResponse{ - BaseTypeName: mr.AllExtensionNumbersResponse.GetBaseTypeName(), - ExtensionNumber: mr.AllExtensionNumbersResponse.GetExtensionNumber(), - }, - } - } - case *v1reflectionpb.ServerReflectionResponse_ListServicesResponse: - if mr != nil { - svcs := make([]*v1alphareflectionpb.ServiceResponse, len(mr.ListServicesResponse.GetService())) - for i, svc := range mr.ListServicesResponse.GetService() { - svcs[i] = &v1alphareflectionpb.ServiceResponse{ - Name: svc.GetName(), - } - } - v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_ListServicesResponse{ - ListServicesResponse: &v1alphareflectionpb.ListServiceResponse{ - 
Service: svcs, - }, - } - } - case *v1reflectionpb.ServerReflectionResponse_ErrorResponse: - if mr != nil { - v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &v1alphareflectionpb.ErrorResponse{ - ErrorCode: mr.ErrorResponse.GetErrorCode(), - ErrorMessage: mr.ErrorResponse.GetErrorMessage(), - }, - } - } - default: - // no value set - } - return &v1alpha -} - -func v1AlphaToV1Request(v1alpha *v1alphareflectionpb.ServerReflectionRequest) *v1reflectionpb.ServerReflectionRequest { - var v1 v1reflectionpb.ServerReflectionRequest - v1.Host = v1alpha.Host - switch mr := v1alpha.MessageRequest.(type) { - case *v1alphareflectionpb.ServerReflectionRequest_FileByFilename: - v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_FileByFilename{ - FileByFilename: mr.FileByFilename, - } - case *v1alphareflectionpb.ServerReflectionRequest_FileContainingSymbol: - v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_FileContainingSymbol{ - FileContainingSymbol: mr.FileContainingSymbol, - } - case *v1alphareflectionpb.ServerReflectionRequest_FileContainingExtension: - if mr.FileContainingExtension != nil { - v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_FileContainingExtension{ - FileContainingExtension: &v1reflectionpb.ExtensionRequest{ - ContainingType: mr.FileContainingExtension.GetContainingType(), - ExtensionNumber: mr.FileContainingExtension.GetExtensionNumber(), - }, - } - } - case *v1alphareflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType: - v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType{ - AllExtensionNumbersOfType: mr.AllExtensionNumbersOfType, - } - case *v1alphareflectionpb.ServerReflectionRequest_ListServices: - v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_ListServices{ - ListServices: mr.ListServices, - } - default: - // no value set - } - return &v1 -} - -func v1ToV1AlphaRequest(v1 *v1reflectionpb.ServerReflectionRequest) *v1alphareflectionpb.ServerReflectionRequest { - var v1alpha v1alphareflectionpb.ServerReflectionRequest - v1alpha.Host = v1.Host - switch mr := v1.MessageRequest.(type) { - case *v1reflectionpb.ServerReflectionRequest_FileByFilename: - if mr != nil { - v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_FileByFilename{ - FileByFilename: mr.FileByFilename, - } - } - case *v1reflectionpb.ServerReflectionRequest_FileContainingSymbol: - if mr != nil { - v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_FileContainingSymbol{ - FileContainingSymbol: mr.FileContainingSymbol, - } - } - case *v1reflectionpb.ServerReflectionRequest_FileContainingExtension: - if mr != nil { - v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_FileContainingExtension{ - FileContainingExtension: &v1alphareflectionpb.ExtensionRequest{ - ContainingType: mr.FileContainingExtension.GetContainingType(), - ExtensionNumber: mr.FileContainingExtension.GetExtensionNumber(), - }, - } - } - case *v1reflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType: - if mr != nil { - v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType{ - AllExtensionNumbersOfType: mr.AllExtensionNumbersOfType, - } - } - case *v1reflectionpb.ServerReflectionRequest_ListServices: - if mr != nil { - v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_ListServices{ - ListServices: mr.ListServices, - } - } - default: - // no value set - } - return &v1alpha -} diff --git 
a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go index 69fbfb62..c22f9a52 100644 --- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go @@ -1,4 +1,4 @@ -// Copyright 2016 The gRPC Authors +// Copyright 2016 gRPC authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,20 +11,19 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// Service exported by server reflection -// Warning: this entire file is deprecated. Use this instead: -// https://github.com/grpc/grpc-proto/blob/master/grpc/reflection/v1/reflection.proto +// Service exported by server reflection // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.31.0 -// protoc v4.22.0 -// grpc/reflection/v1alpha/reflection.proto is a deprecated file. +// protoc-gen-go v1.25.0 +// protoc v3.14.0 +// source: reflection/grpc_reflection_v1alpha/reflection.proto package grpc_reflection_v1alpha import ( + proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -38,15 +37,16 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 + // The message sent by the client when calling ServerReflectionInfo method. -// -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. type ServerReflectionRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` // To use reflection service, the client should set one of the following // fields in message_request. The server distinguishes requests by their @@ -65,7 +65,7 @@ type ServerReflectionRequest struct { func (x *ServerReflectionRequest) Reset() { *x = ServerReflectionRequest{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0] + mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -78,7 +78,7 @@ func (x *ServerReflectionRequest) String() string { func (*ServerReflectionRequest) ProtoMessage() {} func (x *ServerReflectionRequest) ProtoReflect() protoreflect.Message { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0] + mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[0] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -91,10 +91,9 @@ func (x *ServerReflectionRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ServerReflectionRequest.ProtoReflect.Descriptor instead. 
func (*ServerReflectionRequest) Descriptor() ([]byte, []int) { - return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{0} + return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{0} } -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ServerReflectionRequest) GetHost() string { if x != nil { return x.Host @@ -109,7 +108,6 @@ func (m *ServerReflectionRequest) GetMessageRequest() isServerReflectionRequest_ return nil } -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ServerReflectionRequest) GetFileByFilename() string { if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileByFilename); ok { return x.FileByFilename @@ -117,7 +115,6 @@ func (x *ServerReflectionRequest) GetFileByFilename() string { return "" } -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ServerReflectionRequest) GetFileContainingSymbol() string { if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileContainingSymbol); ok { return x.FileContainingSymbol @@ -125,7 +122,6 @@ func (x *ServerReflectionRequest) GetFileContainingSymbol() string { return "" } -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ServerReflectionRequest) GetFileContainingExtension() *ExtensionRequest { if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileContainingExtension); ok { return x.FileContainingExtension @@ -133,7 +129,6 @@ func (x *ServerReflectionRequest) GetFileContainingExtension() *ExtensionRequest return nil } -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ServerReflectionRequest) GetAllExtensionNumbersOfType() string { if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_AllExtensionNumbersOfType); ok { return x.AllExtensionNumbersOfType @@ -141,7 +136,6 @@ func (x *ServerReflectionRequest) GetAllExtensionNumbersOfType() string { return "" } -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ServerReflectionRequest) GetListServices() string { if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_ListServices); ok { return x.ListServices @@ -155,8 +149,6 @@ type isServerReflectionRequest_MessageRequest interface { type ServerReflectionRequest_FileByFilename struct { // Find a proto file by the file name. - // - // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. FileByFilename string `protobuf:"bytes,3,opt,name=file_by_filename,json=fileByFilename,proto3,oneof"` } @@ -164,16 +156,12 @@ type ServerReflectionRequest_FileContainingSymbol struct { // Find the proto file that declares the given fully-qualified symbol name. // This field should be a fully-qualified symbol name // (e.g. .[.] or .). - // - // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. FileContainingSymbol string `protobuf:"bytes,4,opt,name=file_containing_symbol,json=fileContainingSymbol,proto3,oneof"` } type ServerReflectionRequest_FileContainingExtension struct { // Find the proto file which defines an extension extending the given // message type with the given field number. - // - // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
FileContainingExtension *ExtensionRequest `protobuf:"bytes,5,opt,name=file_containing_extension,json=fileContainingExtension,proto3,oneof"` } @@ -186,16 +174,12 @@ type ServerReflectionRequest_AllExtensionNumbersOfType struct { // StatusCode::UNIMPLEMENTED if it's not implemented. // This field should be a fully-qualified type name. The format is // . - // - // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. AllExtensionNumbersOfType string `protobuf:"bytes,6,opt,name=all_extension_numbers_of_type,json=allExtensionNumbersOfType,proto3,oneof"` } type ServerReflectionRequest_ListServices struct { // List the full names of registered services. The content will not be // checked. - // - // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. ListServices string `protobuf:"bytes,7,opt,name=list_services,json=listServices,proto3,oneof"` } @@ -212,25 +196,20 @@ func (*ServerReflectionRequest_ListServices) isServerReflectionRequest_MessageRe // The type name and extension number sent by the client when requesting // file_containing_extension. -// -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. type ExtensionRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // Fully-qualified type name. The format should be . - // - // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. - ContainingType string `protobuf:"bytes,1,opt,name=containing_type,json=containingType,proto3" json:"containing_type,omitempty"` - // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. - ExtensionNumber int32 `protobuf:"varint,2,opt,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"` + ContainingType string `protobuf:"bytes,1,opt,name=containing_type,json=containingType,proto3" json:"containing_type,omitempty"` + ExtensionNumber int32 `protobuf:"varint,2,opt,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"` } func (x *ExtensionRequest) Reset() { *x = ExtensionRequest{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[1] + mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -243,7 +222,7 @@ func (x *ExtensionRequest) String() string { func (*ExtensionRequest) ProtoMessage() {} func (x *ExtensionRequest) ProtoReflect() protoreflect.Message { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[1] + mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[1] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -256,10 +235,9 @@ func (x *ExtensionRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ExtensionRequest.ProtoReflect.Descriptor instead. func (*ExtensionRequest) Descriptor() ([]byte, []int) { - return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{1} + return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{1} } -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
func (x *ExtensionRequest) GetContainingType() string { if x != nil { return x.ContainingType @@ -267,7 +245,6 @@ func (x *ExtensionRequest) GetContainingType() string { return "" } -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ExtensionRequest) GetExtensionNumber() int32 { if x != nil { return x.ExtensionNumber @@ -276,19 +253,15 @@ func (x *ExtensionRequest) GetExtensionNumber() int32 { } // The message sent by the server to answer ServerReflectionInfo method. -// -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. type ServerReflectionResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. - ValidHost string `protobuf:"bytes,1,opt,name=valid_host,json=validHost,proto3" json:"valid_host,omitempty"` - // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + ValidHost string `protobuf:"bytes,1,opt,name=valid_host,json=validHost,proto3" json:"valid_host,omitempty"` OriginalRequest *ServerReflectionRequest `protobuf:"bytes,2,opt,name=original_request,json=originalRequest,proto3" json:"original_request,omitempty"` - // The server set one of the following fields according to the message_request - // in the request. + // The server sets one of the following fields according to the + // message_request in the request. // // Types that are assignable to MessageResponse: // @@ -302,7 +275,7 @@ type ServerReflectionResponse struct { func (x *ServerReflectionResponse) Reset() { *x = ServerReflectionResponse{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2] + mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -315,7 +288,7 @@ func (x *ServerReflectionResponse) String() string { func (*ServerReflectionResponse) ProtoMessage() {} func (x *ServerReflectionResponse) ProtoReflect() protoreflect.Message { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2] + mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -328,10 +301,9 @@ func (x *ServerReflectionResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ServerReflectionResponse.ProtoReflect.Descriptor instead. func (*ServerReflectionResponse) Descriptor() ([]byte, []int) { - return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{2} + return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{2} } -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ServerReflectionResponse) GetValidHost() string { if x != nil { return x.ValidHost @@ -339,7 +311,6 @@ func (x *ServerReflectionResponse) GetValidHost() string { return "" } -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
func (x *ServerReflectionResponse) GetOriginalRequest() *ServerReflectionRequest { if x != nil { return x.OriginalRequest @@ -354,7 +325,6 @@ func (m *ServerReflectionResponse) GetMessageResponse() isServerReflectionRespon return nil } -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ServerReflectionResponse) GetFileDescriptorResponse() *FileDescriptorResponse { if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_FileDescriptorResponse); ok { return x.FileDescriptorResponse @@ -362,7 +332,6 @@ func (x *ServerReflectionResponse) GetFileDescriptorResponse() *FileDescriptorRe return nil } -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ServerReflectionResponse) GetAllExtensionNumbersResponse() *ExtensionNumberResponse { if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_AllExtensionNumbersResponse); ok { return x.AllExtensionNumbersResponse @@ -370,7 +339,6 @@ func (x *ServerReflectionResponse) GetAllExtensionNumbersResponse() *ExtensionNu return nil } -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ServerReflectionResponse) GetListServicesResponse() *ListServiceResponse { if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_ListServicesResponse); ok { return x.ListServicesResponse @@ -378,7 +346,6 @@ func (x *ServerReflectionResponse) GetListServicesResponse() *ListServiceRespons return nil } -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ServerReflectionResponse) GetErrorResponse() *ErrorResponse { if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_ErrorResponse); ok { return x.ErrorResponse @@ -392,34 +359,26 @@ type isServerReflectionResponse_MessageResponse interface { type ServerReflectionResponse_FileDescriptorResponse struct { // This message is used to answer file_by_filename, file_containing_symbol, - // file_containing_extension requests with transitive dependencies. As - // the repeated label is not allowed in oneof fields, we use a + // file_containing_extension requests with transitive dependencies. + // As the repeated label is not allowed in oneof fields, we use a // FileDescriptorResponse message to encapsulate the repeated fields. // The reflection service is allowed to avoid sending FileDescriptorProtos // that were previously sent in response to earlier requests in the stream. - // - // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. FileDescriptorResponse *FileDescriptorResponse `protobuf:"bytes,4,opt,name=file_descriptor_response,json=fileDescriptorResponse,proto3,oneof"` } type ServerReflectionResponse_AllExtensionNumbersResponse struct { - // This message is used to answer all_extension_numbers_of_type requst. - // - // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + // This message is used to answer all_extension_numbers_of_type requests. AllExtensionNumbersResponse *ExtensionNumberResponse `protobuf:"bytes,5,opt,name=all_extension_numbers_response,json=allExtensionNumbersResponse,proto3,oneof"` } type ServerReflectionResponse_ListServicesResponse struct { - // This message is used to answer list_services request. - // - // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
+ // This message is used to answer list_services requests. ListServicesResponse *ListServiceResponse `protobuf:"bytes,6,opt,name=list_services_response,json=listServicesResponse,proto3,oneof"` } type ServerReflectionResponse_ErrorResponse struct { // This message is used when an error occurs. - // - // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. ErrorResponse *ErrorResponse `protobuf:"bytes,7,opt,name=error_response,json=errorResponse,proto3,oneof"` } @@ -436,8 +395,6 @@ func (*ServerReflectionResponse_ErrorResponse) isServerReflectionResponse_Messag // Serialized FileDescriptorProto messages sent by the server answering // a file_by_filename, file_containing_symbol, or file_containing_extension // request. -// -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. type FileDescriptorResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -446,15 +403,13 @@ type FileDescriptorResponse struct { // Serialized FileDescriptorProto messages. We avoid taking a dependency on // descriptor.proto, which uses proto2 only features, by making them opaque // bytes instead. - // - // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. FileDescriptorProto [][]byte `protobuf:"bytes,1,rep,name=file_descriptor_proto,json=fileDescriptorProto,proto3" json:"file_descriptor_proto,omitempty"` } func (x *FileDescriptorResponse) Reset() { *x = FileDescriptorResponse{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[3] + mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -467,7 +422,7 @@ func (x *FileDescriptorResponse) String() string { func (*FileDescriptorResponse) ProtoMessage() {} func (x *FileDescriptorResponse) ProtoReflect() protoreflect.Message { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[3] + mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -480,10 +435,9 @@ func (x *FileDescriptorResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use FileDescriptorResponse.ProtoReflect.Descriptor instead. func (*FileDescriptorResponse) Descriptor() ([]byte, []int) { - return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{3} + return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{3} } -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *FileDescriptorResponse) GetFileDescriptorProto() [][]byte { if x != nil { return x.FileDescriptorProto @@ -493,8 +447,6 @@ func (x *FileDescriptorResponse) GetFileDescriptorProto() [][]byte { // A list of extension numbers sent by the server answering // all_extension_numbers_of_type request. -// -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. type ExtensionNumberResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -502,17 +454,14 @@ type ExtensionNumberResponse struct { // Full name of the base type, including the package name. The format // is . - // - // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
- BaseTypeName string `protobuf:"bytes,1,opt,name=base_type_name,json=baseTypeName,proto3" json:"base_type_name,omitempty"` - // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + BaseTypeName string `protobuf:"bytes,1,opt,name=base_type_name,json=baseTypeName,proto3" json:"base_type_name,omitempty"` ExtensionNumber []int32 `protobuf:"varint,2,rep,packed,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"` } func (x *ExtensionNumberResponse) Reset() { *x = ExtensionNumberResponse{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[4] + mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -525,7 +474,7 @@ func (x *ExtensionNumberResponse) String() string { func (*ExtensionNumberResponse) ProtoMessage() {} func (x *ExtensionNumberResponse) ProtoReflect() protoreflect.Message { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[4] + mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[4] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -538,10 +487,9 @@ func (x *ExtensionNumberResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ExtensionNumberResponse.ProtoReflect.Descriptor instead. func (*ExtensionNumberResponse) Descriptor() ([]byte, []int) { - return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{4} + return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{4} } -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ExtensionNumberResponse) GetBaseTypeName() string { if x != nil { return x.BaseTypeName @@ -549,7 +497,6 @@ func (x *ExtensionNumberResponse) GetBaseTypeName() string { return "" } -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ExtensionNumberResponse) GetExtensionNumber() []int32 { if x != nil { return x.ExtensionNumber @@ -558,8 +505,6 @@ func (x *ExtensionNumberResponse) GetExtensionNumber() []int32 { } // A list of ServiceResponse sent by the server answering list_services request. -// -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. type ListServiceResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -567,15 +512,13 @@ type ListServiceResponse struct { // The information of each service may be expanded in the future, so we use // ServiceResponse message to encapsulate it. - // - // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
Service []*ServiceResponse `protobuf:"bytes,1,rep,name=service,proto3" json:"service,omitempty"` } func (x *ListServiceResponse) Reset() { *x = ListServiceResponse{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[5] + mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -588,7 +531,7 @@ func (x *ListServiceResponse) String() string { func (*ListServiceResponse) ProtoMessage() {} func (x *ListServiceResponse) ProtoReflect() protoreflect.Message { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[5] + mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -601,10 +544,9 @@ func (x *ListServiceResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ListServiceResponse.ProtoReflect.Descriptor instead. func (*ListServiceResponse) Descriptor() ([]byte, []int) { - return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{5} + return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{5} } -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ListServiceResponse) GetService() []*ServiceResponse { if x != nil { return x.Service @@ -614,8 +556,6 @@ func (x *ListServiceResponse) GetService() []*ServiceResponse { // The information of a single service used by ListServiceResponse to answer // list_services request. -// -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. type ServiceResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -623,15 +563,13 @@ type ServiceResponse struct { // Full name of a registered service, including its package name. The format // is . - // - // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` } func (x *ServiceResponse) Reset() { *x = ServiceResponse{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[6] + mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -644,7 +582,7 @@ func (x *ServiceResponse) String() string { func (*ServiceResponse) ProtoMessage() {} func (x *ServiceResponse) ProtoReflect() protoreflect.Message { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[6] + mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -657,10 +595,9 @@ func (x *ServiceResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ServiceResponse.ProtoReflect.Descriptor instead. func (*ServiceResponse) Descriptor() ([]byte, []int) { - return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{6} + return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{6} } -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
func (x *ServiceResponse) GetName() string { if x != nil { return x.Name @@ -669,25 +606,20 @@ func (x *ServiceResponse) GetName() string { } // The error code and error message sent by the server when an error occurs. -// -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. type ErrorResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // This field uses the error codes defined in grpc::StatusCode. - // - // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. - ErrorCode int32 `protobuf:"varint,1,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"` - // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + ErrorCode int32 `protobuf:"varint,1,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"` ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` } func (x *ErrorResponse) Reset() { *x = ErrorResponse{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[7] + mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -700,7 +632,7 @@ func (x *ErrorResponse) String() string { func (*ErrorResponse) ProtoMessage() {} func (x *ErrorResponse) ProtoReflect() protoreflect.Message { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[7] + mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -713,10 +645,9 @@ func (x *ErrorResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ErrorResponse.ProtoReflect.Descriptor instead. func (*ErrorResponse) Descriptor() ([]byte, []int) { - return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{7} + return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{7} } -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ErrorResponse) GetErrorCode() int32 { if x != nil { return x.ErrorCode @@ -724,7 +655,6 @@ func (x *ErrorResponse) GetErrorCode() int32 { return 0 } -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
func (x *ErrorResponse) GetErrorMessage() string { if x != nil { return x.ErrorMessage @@ -732,139 +662,136 @@ func (x *ErrorResponse) GetErrorMessage() string { return "" } -var File_grpc_reflection_v1alpha_reflection_proto protoreflect.FileDescriptor - -var file_grpc_reflection_v1alpha_reflection_proto_rawDesc = []byte{ - 0x0a, 0x28, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x17, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, - 0x70, 0x68, 0x61, 0x22, 0xf8, 0x02, 0x0a, 0x17, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, - 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, - 0x6f, 0x73, 0x74, 0x12, 0x2a, 0x0a, 0x10, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x62, 0x79, 0x5f, 0x66, - 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, - 0x0e, 0x66, 0x69, 0x6c, 0x65, 0x42, 0x79, 0x46, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x36, 0x0a, 0x16, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, - 0x6e, 0x67, 0x5f, 0x73, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, - 0x00, 0x52, 0x14, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, - 0x67, 0x53, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x12, 0x67, 0x0a, 0x19, 0x66, 0x69, 0x6c, 0x65, 0x5f, - 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x72, 0x70, - 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, - 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x17, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x12, 0x42, 0x0a, 0x1d, 0x61, 0x6c, 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x19, 0x61, 0x6c, 0x6c, 0x45, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x4f, 0x66, - 0x54, 0x79, 0x70, 0x65, 0x12, 0x25, 0x0a, 0x0d, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, 0x6c, - 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x42, 0x11, 0x0a, 0x0f, 0x6d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x66, - 0x0a, 0x10, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, - 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x65, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 
0x18, - 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0xc7, 0x04, 0x0a, 0x18, 0x53, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x6f, 0x73, - 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, 0x6f, - 0x73, 0x74, 0x12, 0x5b, 0x0a, 0x10, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, - 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, - 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, - 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0f, - 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x6b, 0x0a, 0x18, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x6f, 0x72, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x46, 0x69, 0x6c, 0x65, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x48, 0x00, 0x52, 0x16, 0x66, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x77, 0x0a, 0x1e, - 0x61, 0x6c, 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, - 0x6d, 0x62, 0x65, 0x72, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x45, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x1b, 0x61, 0x6c, 0x6c, 0x45, 0x78, 0x74, - 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x64, 0x0a, 0x16, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, +var File_reflection_grpc_reflection_v1alpha_reflection_proto protoreflect.FileDescriptor + +var file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDesc = []byte{ + 0x0a, 0x33, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x67, 0x72, 0x70, + 0x63, 0x5f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x31, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x17, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x22, 0xf8, + 0x02, 0x0a, 0x17, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 
0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, + 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x2a, + 0x0a, 0x10, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x62, 0x79, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0e, 0x66, 0x69, 0x6c, 0x65, + 0x42, 0x79, 0x46, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x36, 0x0a, 0x16, 0x66, 0x69, + 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x79, + 0x6d, 0x62, 0x6f, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x14, 0x66, 0x69, + 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x53, 0x79, 0x6d, 0x62, + 0x6f, 0x6c, 0x12, 0x67, 0x0a, 0x19, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, - 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x14, 0x6c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4f, 0x0a, 0x0e, 0x65, - 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x07, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x45, 0x72, - 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x0d, 0x65, - 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x12, 0x0a, 0x10, - 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x4c, 0x0a, 0x16, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x66, 0x69, - 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x13, 0x66, 0x69, 0x6c, 0x65, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6a, - 0x0a, 0x17, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, - 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x62, 0x61, 0x73, - 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x54, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, - 0x29, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, - 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0x59, 0x0a, 0x13, 0x4c, 0x69, - 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x42, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 
0x61, 0x2e, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x07, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0x25, 0x0a, 0x0f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x53, 0x0a, 0x0d, - 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, - 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x05, 0x52, 0x09, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x23, 0x0a, 0x0d, - 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x32, 0x93, 0x01, 0x0a, 0x10, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7f, 0x0a, 0x14, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x30, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x48, 0x00, 0x52, 0x17, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, + 0x6e, 0x67, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x42, 0x0a, 0x1d, 0x61, + 0x6c, 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, + 0x62, 0x65, 0x72, 0x73, 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x09, 0x48, 0x00, 0x52, 0x19, 0x61, 0x6c, 0x6c, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x4f, 0x66, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x25, 0x0a, 0x0d, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, 0x6c, 0x69, 0x73, 0x74, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x42, 0x11, 0x0a, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x66, 0x0a, 0x10, 0x45, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x27, 0x0a, + 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, + 0x6e, 0x67, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, + 0x72, 0x22, 0xc7, 0x04, 0x0a, 0x18, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, + 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, 0x6f, 0x73, 0x74, 0x12, 0x5b, 0x0a, + 0x10, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, + 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 
0x68, + 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0f, 0x6f, 0x72, 0x69, 0x67, 0x69, + 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x6b, 0x0a, 0x18, 0x66, 0x69, + 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x72, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, + 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, + 0x16, 0x66, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x77, 0x0a, 0x1e, 0x61, 0x6c, 0x6c, 0x5f, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, + 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x30, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x48, 0x00, 0x52, 0x1b, 0x61, 0x6c, 0x6c, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x64, 0x0a, 0x16, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, + 0x52, 0x14, 0x6c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4f, 0x0a, 0x0e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, + 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, - 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, 0x42, 0x73, 0x0a, 0x1a, 0x69, 0x6f, 0x2e, 0x67, 0x72, + 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x12, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x4c, 0x0a, 0x16, 0x46, + 0x69, 0x6c, 0x65, 
0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0c, 0x52, 0x13, 0x66, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6a, 0x0a, 0x17, 0x45, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x62, 0x61, + 0x73, 0x65, 0x54, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x05, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, + 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0x59, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x42, 0x0a, 0x07, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x22, 0x25, 0x0a, 0x0f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x53, 0x0a, 0x0d, 0x45, 0x72, 0x72, 0x6f, 0x72, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, + 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x32, 0x93, 0x01, 0x0a, + 0x10, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x7f, 0x0a, 0x14, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x30, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, + 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, - 0x61, 0x6c, 0x70, 0x68, 0x61, 0x42, 0x15, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, - 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x39, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, - 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 
0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0xb8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, + 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, + 0x30, 0x01, 0x42, 0x3b, 0x5a, 0x39, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x72, 0x65, 0x66, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( - file_grpc_reflection_v1alpha_reflection_proto_rawDescOnce sync.Once - file_grpc_reflection_v1alpha_reflection_proto_rawDescData = file_grpc_reflection_v1alpha_reflection_proto_rawDesc + file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescOnce sync.Once + file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescData = file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDesc ) -func file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP() []byte { - file_grpc_reflection_v1alpha_reflection_proto_rawDescOnce.Do(func() { - file_grpc_reflection_v1alpha_reflection_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_reflection_v1alpha_reflection_proto_rawDescData) +func file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP() []byte { + file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescOnce.Do(func() { + file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescData = protoimpl.X.CompressGZIP(file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescData) }) - return file_grpc_reflection_v1alpha_reflection_proto_rawDescData + return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescData } -var file_grpc_reflection_v1alpha_reflection_proto_msgTypes = make([]protoimpl.MessageInfo, 8) -var file_grpc_reflection_v1alpha_reflection_proto_goTypes = []interface{}{ +var file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_reflection_grpc_reflection_v1alpha_reflection_proto_goTypes = []interface{}{ (*ServerReflectionRequest)(nil), // 0: grpc.reflection.v1alpha.ServerReflectionRequest (*ExtensionRequest)(nil), // 1: grpc.reflection.v1alpha.ExtensionRequest (*ServerReflectionResponse)(nil), // 2: grpc.reflection.v1alpha.ServerReflectionResponse @@ -874,7 +801,7 @@ var file_grpc_reflection_v1alpha_reflection_proto_goTypes = []interface{}{ (*ServiceResponse)(nil), // 6: grpc.reflection.v1alpha.ServiceResponse (*ErrorResponse)(nil), // 7: grpc.reflection.v1alpha.ErrorResponse } -var file_grpc_reflection_v1alpha_reflection_proto_depIdxs = []int32{ +var file_reflection_grpc_reflection_v1alpha_reflection_proto_depIdxs = []int32{ 1, // 0: grpc.reflection.v1alpha.ServerReflectionRequest.file_containing_extension:type_name -> grpc.reflection.v1alpha.ExtensionRequest 0, // 1: grpc.reflection.v1alpha.ServerReflectionResponse.original_request:type_name -> grpc.reflection.v1alpha.ServerReflectionRequest 3, // 2: grpc.reflection.v1alpha.ServerReflectionResponse.file_descriptor_response:type_name -> grpc.reflection.v1alpha.FileDescriptorResponse @@ -891,13 +818,13 @@ var file_grpc_reflection_v1alpha_reflection_proto_depIdxs = []int32{ 0, // [0:7] is the sub-list for field 
type_name } -func init() { file_grpc_reflection_v1alpha_reflection_proto_init() } -func file_grpc_reflection_v1alpha_reflection_proto_init() { - if File_grpc_reflection_v1alpha_reflection_proto != nil { +func init() { file_reflection_grpc_reflection_v1alpha_reflection_proto_init() } +func file_reflection_grpc_reflection_v1alpha_reflection_proto_init() { + if File_reflection_grpc_reflection_v1alpha_reflection_proto != nil { return } if !protoimpl.UnsafeEnabled { - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ServerReflectionRequest); i { case 0: return &v.state @@ -909,7 +836,7 @@ func file_grpc_reflection_v1alpha_reflection_proto_init() { return nil } } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ExtensionRequest); i { case 0: return &v.state @@ -921,7 +848,7 @@ func file_grpc_reflection_v1alpha_reflection_proto_init() { return nil } } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ServerReflectionResponse); i { case 0: return &v.state @@ -933,7 +860,7 @@ func file_grpc_reflection_v1alpha_reflection_proto_init() { return nil } } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*FileDescriptorResponse); i { case 0: return &v.state @@ -945,7 +872,7 @@ func file_grpc_reflection_v1alpha_reflection_proto_init() { return nil } } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ExtensionNumberResponse); i { case 0: return &v.state @@ -957,7 +884,7 @@ func file_grpc_reflection_v1alpha_reflection_proto_init() { return nil } } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ListServiceResponse); i { case 0: return &v.state @@ -969,7 +896,7 @@ func file_grpc_reflection_v1alpha_reflection_proto_init() { return nil } } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ServiceResponse); i { case 0: return &v.state @@ -981,7 +908,7 @@ func file_grpc_reflection_v1alpha_reflection_proto_init() { return nil } } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ErrorResponse); i { case 0: return &v.state @@ -994,14 
+921,14 @@ func file_grpc_reflection_v1alpha_reflection_proto_init() { } } } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0].OneofWrappers = []interface{}{ + file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[0].OneofWrappers = []interface{}{ (*ServerReflectionRequest_FileByFilename)(nil), (*ServerReflectionRequest_FileContainingSymbol)(nil), (*ServerReflectionRequest_FileContainingExtension)(nil), (*ServerReflectionRequest_AllExtensionNumbersOfType)(nil), (*ServerReflectionRequest_ListServices)(nil), } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2].OneofWrappers = []interface{}{ + file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[2].OneofWrappers = []interface{}{ (*ServerReflectionResponse_FileDescriptorResponse)(nil), (*ServerReflectionResponse_AllExtensionNumbersResponse)(nil), (*ServerReflectionResponse_ListServicesResponse)(nil), @@ -1011,18 +938,18 @@ func file_grpc_reflection_v1alpha_reflection_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_grpc_reflection_v1alpha_reflection_proto_rawDesc, + RawDescriptor: file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDesc, NumEnums: 0, NumMessages: 8, NumExtensions: 0, NumServices: 1, }, - GoTypes: file_grpc_reflection_v1alpha_reflection_proto_goTypes, - DependencyIndexes: file_grpc_reflection_v1alpha_reflection_proto_depIdxs, - MessageInfos: file_grpc_reflection_v1alpha_reflection_proto_msgTypes, + GoTypes: file_reflection_grpc_reflection_v1alpha_reflection_proto_goTypes, + DependencyIndexes: file_reflection_grpc_reflection_v1alpha_reflection_proto_depIdxs, + MessageInfos: file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes, }.Build() - File_grpc_reflection_v1alpha_reflection_proto = out.File - file_grpc_reflection_v1alpha_reflection_proto_rawDesc = nil - file_grpc_reflection_v1alpha_reflection_proto_goTypes = nil - file_grpc_reflection_v1alpha_reflection_proto_depIdxs = nil + File_reflection_grpc_reflection_v1alpha_reflection_proto = out.File + file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDesc = nil + file_reflection_grpc_reflection_v1alpha_reflection_proto_goTypes = nil + file_reflection_grpc_reflection_v1alpha_reflection_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto new file mode 100644 index 00000000..ee2b82c0 --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto @@ -0,0 +1,138 @@ +// Copyright 2016 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Service exported by server reflection + +syntax = "proto3"; + +option go_package = "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"; + +package grpc.reflection.v1alpha; + +service ServerReflection { + // The reflection service is structured as a bidirectional stream, ensuring + // all related requests go to a single server. + rpc ServerReflectionInfo(stream ServerReflectionRequest) + returns (stream ServerReflectionResponse); +} + +// The message sent by the client when calling ServerReflectionInfo method. +message ServerReflectionRequest { + string host = 1; + // To use reflection service, the client should set one of the following + // fields in message_request. The server distinguishes requests by their + // defined field and then handles them using corresponding methods. + oneof message_request { + // Find a proto file by the file name. + string file_by_filename = 3; + + // Find the proto file that declares the given fully-qualified symbol name. + // This field should be a fully-qualified symbol name + // (e.g. .[.] or .). + string file_containing_symbol = 4; + + // Find the proto file which defines an extension extending the given + // message type with the given field number. + ExtensionRequest file_containing_extension = 5; + + // Finds the tag numbers used by all known extensions of extendee_type, and + // appends them to ExtensionNumberResponse in an undefined order. + // Its corresponding method is best-effort: it's not guaranteed that the + // reflection service will implement this method, and it's not guaranteed + // that this method will provide all extensions. Returns + // StatusCode::UNIMPLEMENTED if it's not implemented. + // This field should be a fully-qualified type name. The format is + // . + string all_extension_numbers_of_type = 6; + + // List the full names of registered services. The content will not be + // checked. + string list_services = 7; + } +} + +// The type name and extension number sent by the client when requesting +// file_containing_extension. +message ExtensionRequest { + // Fully-qualified type name. The format should be . + string containing_type = 1; + int32 extension_number = 2; +} + +// The message sent by the server to answer ServerReflectionInfo method. +message ServerReflectionResponse { + string valid_host = 1; + ServerReflectionRequest original_request = 2; + // The server sets one of the following fields according to the + // message_request in the request. + oneof message_response { + // This message is used to answer file_by_filename, file_containing_symbol, + // file_containing_extension requests with transitive dependencies. + // As the repeated label is not allowed in oneof fields, we use a + // FileDescriptorResponse message to encapsulate the repeated fields. + // The reflection service is allowed to avoid sending FileDescriptorProtos + // that were previously sent in response to earlier requests in the stream. + FileDescriptorResponse file_descriptor_response = 4; + + // This message is used to answer all_extension_numbers_of_type requests. + ExtensionNumberResponse all_extension_numbers_response = 5; + + // This message is used to answer list_services requests. + ListServiceResponse list_services_response = 6; + + // This message is used when an error occurs. + ErrorResponse error_response = 7; + } +} + +// Serialized FileDescriptorProto messages sent by the server answering +// a file_by_filename, file_containing_symbol, or file_containing_extension +// request. 
+message FileDescriptorResponse { + // Serialized FileDescriptorProto messages. We avoid taking a dependency on + // descriptor.proto, which uses proto2 only features, by making them opaque + // bytes instead. + repeated bytes file_descriptor_proto = 1; +} + +// A list of extension numbers sent by the server answering +// all_extension_numbers_of_type request. +message ExtensionNumberResponse { + // Full name of the base type, including the package name. The format + // is . + string base_type_name = 1; + repeated int32 extension_number = 2; +} + +// A list of ServiceResponse sent by the server answering list_services request. +message ListServiceResponse { + // The information of each service may be expanded in the future, so we use + // ServiceResponse message to encapsulate it. + repeated ServiceResponse service = 1; +} + +// The information of a single service used by ListServiceResponse to answer +// list_services request. +message ServiceResponse { + // Full name of a registered service, including its package name. The format + // is . + string name = 1; +} + +// The error code and error message sent by the server when an error occurs. +message ErrorResponse { + // This field uses the error codes defined in grpc::StatusCode. + int32 error_code = 1; + string error_message = 2; +} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go index 367a029b..b8e76a87 100644 --- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go @@ -1,4 +1,4 @@ -// Copyright 2016 The gRPC Authors +// Copyright 2016 gRPC authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,16 +11,14 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// Service exported by server reflection -// Warning: this entire file is deprecated. Use this instead: -// https://github.com/grpc/grpc-proto/blob/master/grpc/reflection/v1/reflection.proto +// Service exported by server reflection // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.3.0 -// - protoc v4.22.0 -// grpc/reflection/v1alpha/reflection.proto is a deprecated file. +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.14.0 +// source: reflection/grpc_reflection_v1alpha/reflection.proto package grpc_reflection_v1alpha @@ -36,10 +34,6 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 -const ( - ServerReflection_ServerReflectionInfo_FullMethodName = "/grpc.reflection.v1alpha.ServerReflection/ServerReflectionInfo" -) - // ServerReflectionClient is the client API for ServerReflection service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
@@ -58,7 +52,7 @@ func NewServerReflectionClient(cc grpc.ClientConnInterface) ServerReflectionClie } func (c *serverReflectionClient) ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) { - stream, err := c.cc.NewStream(ctx, &ServerReflection_ServiceDesc.Streams[0], ServerReflection_ServerReflectionInfo_FullMethodName, opts...) + stream, err := c.cc.NewStream(ctx, &ServerReflection_ServiceDesc.Streams[0], "/grpc.reflection.v1alpha.ServerReflection/ServerReflectionInfo", opts...) if err != nil { return nil, err } @@ -157,5 +151,5 @@ var ServerReflection_ServiceDesc = grpc.ServiceDesc{ ClientStreams: true, }, }, - Metadata: "grpc/reflection/v1alpha/reflection.proto", + Metadata: "reflection/grpc_reflection_v1alpha/reflection.proto", } diff --git a/vendor/google.golang.org/grpc/reflection/serverreflection.go b/vendor/google.golang.org/grpc/reflection/serverreflection.go index c3b40839..0b41783a 100644 --- a/vendor/google.golang.org/grpc/reflection/serverreflection.go +++ b/vendor/google.golang.org/grpc/reflection/serverreflection.go @@ -42,15 +42,12 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" + rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" "google.golang.org/grpc/status" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protodesc" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" - - v1reflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1" - v1reflectionpb "google.golang.org/grpc/reflection/grpc_reflection_v1" - v1alphareflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" ) // GRPCServer is the interface provided by a gRPC server. It is implemented by @@ -64,19 +61,9 @@ type GRPCServer interface { var _ GRPCServer = (*grpc.Server)(nil) // Register registers the server reflection service on the given gRPC server. -// Both the v1 and v1alpha versions are registered. func Register(s GRPCServer) { - svr := NewServerV1(ServerOptions{Services: s}) - v1alphareflectiongrpc.RegisterServerReflectionServer(s, asV1Alpha(svr)) - v1reflectiongrpc.RegisterServerReflectionServer(s, svr) -} - -// RegisterV1 registers only the v1 version of the server reflection service -// on the given gRPC server. Many clients may only support v1alpha so most -// users should use Register instead, at least until clients have upgraded. -func RegisterV1(s GRPCServer) { - svr := NewServerV1(ServerOptions{Services: s}) - v1reflectiongrpc.RegisterServerReflectionServer(s, svr) + svr := NewServer(ServerOptions{Services: s}) + rpb.RegisterServerReflectionServer(s, svr) } // ServiceInfoProvider is an interface used to retrieve metadata about the @@ -131,27 +118,13 @@ type ServerOptions struct { // NewServer returns a reflection server implementation using the given options. // This can be used to customize behavior of the reflection service. Most usages -// should prefer to use Register instead. For backwards compatibility reasons, -// this returns the v1alpha version of the reflection server. For a v1 version -// of the reflection server, see NewServerV1. -// -// # Experimental -// -// Notice: This function is EXPERIMENTAL and may be changed or removed in a -// later release. -func NewServer(opts ServerOptions) v1alphareflectiongrpc.ServerReflectionServer { - return asV1Alpha(NewServerV1(opts)) -} - -// NewServerV1 returns a reflection server implementation using the given options. 
-// This can be used to customize behavior of the reflection service. Most usages // should prefer to use Register instead. // // # Experimental // // Notice: This function is EXPERIMENTAL and may be changed or removed in a // later release. -func NewServerV1(opts ServerOptions) v1reflectiongrpc.ServerReflectionServer { +func NewServer(opts ServerOptions) rpb.ServerReflectionServer { if opts.DescriptorResolver == nil { opts.DescriptorResolver = protoregistry.GlobalFiles } @@ -166,7 +139,7 @@ func NewServerV1(opts ServerOptions) v1reflectiongrpc.ServerReflectionServer { } type serverReflectionServer struct { - v1alphareflectiongrpc.UnimplementedServerReflectionServer + rpb.UnimplementedServerReflectionServer s ServiceInfoProvider descResolver protodesc.Resolver extResolver ExtensionResolver @@ -176,20 +149,11 @@ type serverReflectionServer struct { // wire format ([]byte). The fileDescriptors will include fd and all the // transitive dependencies of fd with names not in sentFileDescriptors. func (s *serverReflectionServer) fileDescWithDependencies(fd protoreflect.FileDescriptor, sentFileDescriptors map[string]bool) ([][]byte, error) { - if fd.IsPlaceholder() { - // If the given root file is a placeholder, treat it - // as missing instead of serializing it. - return nil, protoregistry.NotFound - } var r [][]byte queue := []protoreflect.FileDescriptor{fd} for len(queue) > 0 { currentfd := queue[0] queue = queue[1:] - if currentfd.IsPlaceholder() { - // Skip any missing files in the dependency graph. - continue - } if sent := sentFileDescriptors[currentfd.Path()]; len(r) == 0 || !sent { sentFileDescriptors[currentfd.Path()] = true fdProto := protodesc.ToFileDescriptorProto(currentfd) @@ -249,11 +213,11 @@ func (s *serverReflectionServer) allExtensionNumbersForTypeName(name string) ([] } // listServices returns the names of services this server exposes. -func (s *serverReflectionServer) listServices() []*v1reflectionpb.ServiceResponse { +func (s *serverReflectionServer) listServices() []*rpb.ServiceResponse { serviceInfo := s.s.GetServiceInfo() - resp := make([]*v1reflectionpb.ServiceResponse, 0, len(serviceInfo)) + resp := make([]*rpb.ServiceResponse, 0, len(serviceInfo)) for svc := range serviceInfo { - resp = append(resp, &v1reflectionpb.ServiceResponse{Name: svc}) + resp = append(resp, &rpb.ServiceResponse{Name: svc}) } sort.Slice(resp, func(i, j int) bool { return resp[i].Name < resp[j].Name @@ -262,7 +226,7 @@ func (s *serverReflectionServer) listServices() []*v1reflectionpb.ServiceRespons } // ServerReflectionInfo is the reflection service handler. 
-func (s *serverReflectionServer) ServerReflectionInfo(stream v1reflectiongrpc.ServerReflection_ServerReflectionInfoServer) error { +func (s *serverReflectionServer) ServerReflectionInfo(stream rpb.ServerReflection_ServerReflectionInfoServer) error { sentFileDescriptors := make(map[string]bool) for { in, err := stream.Recv() @@ -273,79 +237,79 @@ func (s *serverReflectionServer) ServerReflectionInfo(stream v1reflectiongrpc.Se return err } - out := &v1reflectionpb.ServerReflectionResponse{ + out := &rpb.ServerReflectionResponse{ ValidHost: in.Host, OriginalRequest: in, } switch req := in.MessageRequest.(type) { - case *v1reflectionpb.ServerReflectionRequest_FileByFilename: + case *rpb.ServerReflectionRequest_FileByFilename: var b [][]byte fd, err := s.descResolver.FindFileByPath(req.FileByFilename) if err == nil { b, err = s.fileDescWithDependencies(fd, sentFileDescriptors) } if err != nil { - out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &v1reflectionpb.ErrorResponse{ + out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &rpb.ErrorResponse{ ErrorCode: int32(codes.NotFound), ErrorMessage: err.Error(), }, } } else { - out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse{ - FileDescriptorResponse: &v1reflectionpb.FileDescriptorResponse{FileDescriptorProto: b}, + out.MessageResponse = &rpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &rpb.FileDescriptorResponse{FileDescriptorProto: b}, } } - case *v1reflectionpb.ServerReflectionRequest_FileContainingSymbol: + case *rpb.ServerReflectionRequest_FileContainingSymbol: b, err := s.fileDescEncodingContainingSymbol(req.FileContainingSymbol, sentFileDescriptors) if err != nil { - out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &v1reflectionpb.ErrorResponse{ + out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &rpb.ErrorResponse{ ErrorCode: int32(codes.NotFound), ErrorMessage: err.Error(), }, } } else { - out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse{ - FileDescriptorResponse: &v1reflectionpb.FileDescriptorResponse{FileDescriptorProto: b}, + out.MessageResponse = &rpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &rpb.FileDescriptorResponse{FileDescriptorProto: b}, } } - case *v1reflectionpb.ServerReflectionRequest_FileContainingExtension: + case *rpb.ServerReflectionRequest_FileContainingExtension: typeName := req.FileContainingExtension.ContainingType extNum := req.FileContainingExtension.ExtensionNumber b, err := s.fileDescEncodingContainingExtension(typeName, extNum, sentFileDescriptors) if err != nil { - out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &v1reflectionpb.ErrorResponse{ + out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &rpb.ErrorResponse{ ErrorCode: int32(codes.NotFound), ErrorMessage: err.Error(), }, } } else { - out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse{ - FileDescriptorResponse: &v1reflectionpb.FileDescriptorResponse{FileDescriptorProto: b}, + out.MessageResponse = &rpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &rpb.FileDescriptorResponse{FileDescriptorProto: b}, } } - case *v1reflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType: + case 
*rpb.ServerReflectionRequest_AllExtensionNumbersOfType: extNums, err := s.allExtensionNumbersForTypeName(req.AllExtensionNumbersOfType) if err != nil { - out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &v1reflectionpb.ErrorResponse{ + out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &rpb.ErrorResponse{ ErrorCode: int32(codes.NotFound), ErrorMessage: err.Error(), }, } } else { - out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse{ - AllExtensionNumbersResponse: &v1reflectionpb.ExtensionNumberResponse{ + out.MessageResponse = &rpb.ServerReflectionResponse_AllExtensionNumbersResponse{ + AllExtensionNumbersResponse: &rpb.ExtensionNumberResponse{ BaseTypeName: req.AllExtensionNumbersOfType, ExtensionNumber: extNums, }, } } - case *v1reflectionpb.ServerReflectionRequest_ListServices: - out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ListServicesResponse{ - ListServicesResponse: &v1reflectionpb.ListServiceResponse{ + case *rpb.ServerReflectionRequest_ListServices: + out.MessageResponse = &rpb.ServerReflectionResponse_ListServicesResponse{ + ListServicesResponse: &rpb.ListServiceResponse{ Service: s.listServices(), }, } diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh index a6f26c8a..99db79fa 100644 --- a/vendor/google.golang.org/grpc/regenerate.sh +++ b/vendor/google.golang.org/grpc/regenerate.sh @@ -57,8 +57,7 @@ LEGACY_SOURCES=( ${WORKDIR}/grpc-proto/grpc/health/v1/health.proto ${WORKDIR}/grpc-proto/grpc/lb/v1/load_balancer.proto profiling/proto/service.proto - ${WORKDIR}/grpc-proto/grpc/reflection/v1alpha/reflection.proto - ${WORKDIR}/grpc-proto/grpc/reflection/v1/reflection.proto + reflection/grpc_reflection_v1alpha/reflection.proto ) # Generates only the new gRPC Service symbols @@ -120,4 +119,8 @@ mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/ # see grpc_testing_not_regenerate/README.md for details. rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testing_not_regenerate/*.pb.go +# grpc/testing does not have a go_package option. +mv ${WORKDIR}/out/grpc/testing/*.pb.go interop/grpc_testing/ +mv ${WORKDIR}/out/grpc/core/*.pb.go interop/grpc_testing/core/ + cp -R ${WORKDIR}/out/google.golang.org/grpc/* . diff --git a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go deleted file mode 100644 index 14aa6f20..00000000 --- a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go +++ /dev/null @@ -1,36 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package dns implements a dns resolver to be installed as the default resolver -// in grpc. -// -// Deprecated: this package is imported by grpc and should not need to be -// imported directly by users. 
-package dns - -import ( - "google.golang.org/grpc/internal/resolver/dns" - "google.golang.org/grpc/resolver" -) - -// NewBuilder creates a dnsBuilder which is used to factory DNS resolvers. -// -// Deprecated: import grpc and use resolver.Get("dns") instead. -func NewBuilder() resolver.Builder { - return dns.NewBuilder() -} diff --git a/vendor/google.golang.org/grpc/resolver/map.go b/vendor/google.golang.org/grpc/resolver/map.go index ada5b9bb..efcb7f3e 100644 --- a/vendor/google.golang.org/grpc/resolver/map.go +++ b/vendor/google.golang.org/grpc/resolver/map.go @@ -20,7 +20,7 @@ package resolver type addressMapEntry struct { addr Address - value any + value interface{} } // AddressMap is a map of addresses to arbitrary values taking into account @@ -69,7 +69,7 @@ func (l addressMapEntryList) find(addr Address) int { } // Get returns the value for the address in the map, if present. -func (a *AddressMap) Get(addr Address) (value any, ok bool) { +func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) { addrKey := toMapKey(&addr) entryList := a.m[addrKey] if entry := entryList.find(addr); entry != -1 { @@ -79,7 +79,7 @@ func (a *AddressMap) Get(addr Address) (value any, ok bool) { } // Set updates or adds the value to the address in the map. -func (a *AddressMap) Set(addr Address, value any) { +func (a *AddressMap) Set(addr Address, value interface{}) { addrKey := toMapKey(&addr) entryList := a.m[addrKey] if entry := entryList.find(addr); entry != -1 { @@ -127,8 +127,8 @@ func (a *AddressMap) Keys() []Address { } // Values returns a slice of all current map values. -func (a *AddressMap) Values() []any { - ret := make([]any, 0, a.Len()) +func (a *AddressMap) Values() []interface{} { + ret := make([]interface{}, 0, a.Len()) for _, entryList := range a.m { for _, entry := range entryList { ret = append(ret, entry.value) @@ -136,116 +136,3 @@ func (a *AddressMap) Values() []any { } return ret } - -type endpointNode struct { - addrs map[string]struct{} -} - -// Equal returns whether the unordered set of addrs are the same between the -// endpoint nodes. -func (en *endpointNode) Equal(en2 *endpointNode) bool { - if len(en.addrs) != len(en2.addrs) { - return false - } - for addr := range en.addrs { - if _, ok := en2.addrs[addr]; !ok { - return false - } - } - return true -} - -func toEndpointNode(endpoint Endpoint) endpointNode { - en := make(map[string]struct{}) - for _, addr := range endpoint.Addresses { - en[addr.Addr] = struct{}{} - } - return endpointNode{ - addrs: en, - } -} - -// EndpointMap is a map of endpoints to arbitrary values keyed on only the -// unordered set of address strings within an endpoint. This map is not thread -// safe, thus it is unsafe to access concurrently. Must be created via -// NewEndpointMap; do not construct directly. -type EndpointMap struct { - endpoints map[*endpointNode]any -} - -// NewEndpointMap creates a new EndpointMap. -func NewEndpointMap() *EndpointMap { - return &EndpointMap{ - endpoints: make(map[*endpointNode]any), - } -} - -// Get returns the value for the address in the map, if present. -func (em *EndpointMap) Get(e Endpoint) (value any, ok bool) { - en := toEndpointNode(e) - if endpoint := em.find(en); endpoint != nil { - return em.endpoints[endpoint], true - } - return nil, false -} - -// Set updates or adds the value to the address in the map. 
-func (em *EndpointMap) Set(e Endpoint, value any) { - en := toEndpointNode(e) - if endpoint := em.find(en); endpoint != nil { - em.endpoints[endpoint] = value - return - } - em.endpoints[&en] = value -} - -// Len returns the number of entries in the map. -func (em *EndpointMap) Len() int { - return len(em.endpoints) -} - -// Keys returns a slice of all current map keys, as endpoints specifying the -// addresses present in the endpoint keys, in which uniqueness is determined by -// the unordered set of addresses. Thus, endpoint information returned is not -// the full endpoint data (drops duplicated addresses and attributes) but can be -// used for EndpointMap accesses. -func (em *EndpointMap) Keys() []Endpoint { - ret := make([]Endpoint, 0, len(em.endpoints)) - for en := range em.endpoints { - var endpoint Endpoint - for addr := range en.addrs { - endpoint.Addresses = append(endpoint.Addresses, Address{Addr: addr}) - } - ret = append(ret, endpoint) - } - return ret -} - -// Values returns a slice of all current map values. -func (em *EndpointMap) Values() []any { - ret := make([]any, 0, len(em.endpoints)) - for _, val := range em.endpoints { - ret = append(ret, val) - } - return ret -} - -// find returns a pointer to the endpoint node in em if the endpoint node is -// already present. If not found, nil is returned. The comparisons are done on -// the unordered set of addresses within an endpoint. -func (em EndpointMap) find(e endpointNode) *endpointNode { - for endpoint := range em.endpoints { - if e.Equal(endpoint) { - return endpoint - } - } - return nil -} - -// Delete removes the specified endpoint from the map. -func (em *EndpointMap) Delete(e Endpoint) { - en := toEndpointNode(e) - if entry := em.find(en); entry != nil { - delete(em.endpoints, entry) - } -} diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index adf89dd9..967cbc73 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -22,13 +22,12 @@ package resolver import ( "context" - "fmt" "net" "net/url" - "strings" "google.golang.org/grpc/attributes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/serviceconfig" ) @@ -41,9 +40,8 @@ var ( // TODO(bar) install dns resolver in init(){}. -// Register registers the resolver builder to the resolver map. b.Scheme will -// be used as the scheme registered with this builder. The registry is case -// sensitive, and schemes should not contain any uppercase characters. +// Register registers the resolver builder to the resolver map. b.Scheme will be +// used as the scheme registered with this builder. // // NOTE: this function must only be called during initialization time (i.e. in // an init() function), and is not thread-safe. If multiple Resolvers are @@ -77,6 +75,25 @@ func GetDefaultScheme() string { return defaultScheme } +// AddressType indicates the address type returned by name resolution. +// +// Deprecated: use Attributes in Address instead. +type AddressType uint8 + +const ( + // Backend indicates the address is for a backend server. + // + // Deprecated: use Attributes in Address instead. + Backend AddressType = iota + // GRPCLB indicates the address is for a grpclb load balancer. + // + // Deprecated: to select the GRPCLB load balancing policy, use a service + // config with a corresponding loadBalancingConfig. 
To supply balancer + // addresses to the GRPCLB load balancing policy, set State.Attributes + // using balancer/grpclb/state.Set. + GRPCLB +) + // Address represents a server the client connects to. // // # Experimental @@ -92,6 +109,9 @@ type Address struct { // the address, instead of the hostname from the Dial target string. In most cases, // this should not be set. // + // If Type is GRPCLB, ServerName should be the name of the remote load + // balancer, not the name of the backend. + // // WARNING: ServerName must only be populated with trusted values. It // is insecure to populate it with data from untrusted inputs since untrusted // values could be used to bypass the authority checks performed by TLS. @@ -102,46 +122,34 @@ type Address struct { Attributes *attributes.Attributes // BalancerAttributes contains arbitrary data about this address intended - // for consumption by the LB policy. These attributes do not affect SubConn + // for consumption by the LB policy. These attribes do not affect SubConn // creation, connection establishment, handshaking, etc. - // - // Deprecated: when an Address is inside an Endpoint, this field should not - // be used, and it will eventually be removed entirely. BalancerAttributes *attributes.Attributes + // Type is the type of this address. + // + // Deprecated: use Attributes instead. + Type AddressType + // Metadata is the information associated with Addr, which may be used // to make load balancing decision. // // Deprecated: use Attributes instead. - Metadata any + Metadata interface{} } // Equal returns whether a and o are identical. Metadata is compared directly, // not with any recursive introspection. -// -// This method compares all fields of the address. When used to tell apart -// addresses during subchannel creation or connection establishment, it might be -// more appropriate for the caller to implement custom equality logic. func (a Address) Equal(o Address) bool { return a.Addr == o.Addr && a.ServerName == o.ServerName && a.Attributes.Equal(o.Attributes) && a.BalancerAttributes.Equal(o.BalancerAttributes) && - a.Metadata == o.Metadata + a.Type == o.Type && a.Metadata == o.Metadata } // String returns JSON formatted string representation of the address. func (a Address) String() string { - var sb strings.Builder - sb.WriteString(fmt.Sprintf("{Addr: %q, ", a.Addr)) - sb.WriteString(fmt.Sprintf("ServerName: %q, ", a.ServerName)) - if a.Attributes != nil { - sb.WriteString(fmt.Sprintf("Attributes: %v, ", a.Attributes.String())) - } - if a.BalancerAttributes != nil { - sb.WriteString(fmt.Sprintf("BalancerAttributes: %v", a.BalancerAttributes.String())) - } - sb.WriteString("}") - return sb.String() + return pretty.ToJSON(a) } // BuildOptions includes additional information for the builder to create @@ -170,37 +178,11 @@ type BuildOptions struct { Dialer func(context.Context, string) (net.Conn, error) } -// An Endpoint is one network endpoint, or server, which may have multiple -// addresses with which it can be accessed. -type Endpoint struct { - // Addresses contains a list of addresses used to access this endpoint. - Addresses []Address - - // Attributes contains arbitrary data about this endpoint intended for - // consumption by the LB policy. - Attributes *attributes.Attributes -} - // State contains the current Resolver state relevant to the ClientConn. type State struct { // Addresses is the latest set of resolved addresses for the target. 
- // - // If a resolver sets Addresses but does not set Endpoints, one Endpoint - // will be created for each Address before the State is passed to the LB - // policy. The BalancerAttributes of each entry in Addresses will be set - // in Endpoints.Attributes, and be cleared in the Endpoint's Address's - // BalancerAttributes. - // - // Soon, Addresses will be deprecated and replaced fully by Endpoints. Addresses []Address - // Endpoints is the latest set of resolved endpoints for the target. - // - // If a resolver produces a State containing Endpoints but not Addresses, - // it must take care to ensure the LB policies it selects will support - // Endpoints. - Endpoints []Endpoint - // ServiceConfig contains the result from parsing the latest service // config. If it is nil, it indicates no service config is present or the // resolver does not provide service configs. @@ -220,15 +202,6 @@ type State struct { // gRPC to add new methods to this interface. type ClientConn interface { // UpdateState updates the state of the ClientConn appropriately. - // - // If an error is returned, the resolver should try to resolve the - // target again. The resolver should use a backoff timer to prevent - // overloading the server with requests. If a resolver is certain that - // reresolving will not change the result, e.g. because it is - // a watch-based resolver, returned errors can be ignored. - // - // If the resolved State is the same as the last reported one, calling - // UpdateState can be omitted. UpdateState(State) error // ReportError notifies the ClientConn that the Resolver encountered an // error. The ClientConn will notify the load balancer and begin calling @@ -240,6 +213,11 @@ type ClientConn interface { // // Deprecated: Use UpdateState instead. NewAddress(addresses []Address) + // NewServiceConfig is called by resolver to notify ClientConn a new + // service config. The service config should be provided as a json string. + // + // Deprecated: Use UpdateState instead. + NewServiceConfig(serviceConfig string) // ParseServiceConfig parses the provided service config and returns an // object that provides the parsed config. ParseServiceConfig(serviceConfigJSON string) *serviceconfig.ParseResult @@ -255,7 +233,23 @@ type ClientConn interface { // target does not contain a scheme or if the parsed scheme is not registered // (i.e. no corresponding resolver available to resolve the endpoint), we will // apply the default scheme, and will attempt to reparse it. +// +// Examples: +// +// - "dns://some_authority/foo.bar" +// Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"} +// - "foo.bar" +// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"} +// - "unknown_scheme://authority/endpoint" +// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"} type Target struct { + // Deprecated: use URL.Scheme instead. + Scheme string + // Deprecated: use URL.Host instead. + Authority string + // Deprecated: use URL.Path or URL.Opaque instead. The latter is set when + // the former is empty. + Endpoint string // URL contains the parsed dial target with an optional default scheme added // to it if the original dial target contained no scheme or contained an // unregistered scheme. Any query params specified in the original dial @@ -263,29 +257,6 @@ type Target struct { URL url.URL } -// Endpoint retrieves endpoint without leading "/" from either `URL.Path` -// or `URL.Opaque`. The latter is used when the former is empty. 
-func (t Target) Endpoint() string { - endpoint := t.URL.Path - if endpoint == "" { - endpoint = t.URL.Opaque - } - // For targets of the form "[scheme]://[authority]/endpoint, the endpoint - // value returned from url.Parse() contains a leading "/". Although this is - // in accordance with RFC 3986, we do not want to break existing resolver - // implementations which expect the endpoint without the leading "/". So, we - // end up stripping the leading "/" here. But this will result in an - // incorrect parsing for something like "unix:///path/to/socket". Since we - // own the "unix" resolver, we can workaround in the unix resolver by using - // the `URL` field. - return strings.TrimPrefix(endpoint, "/") -} - -// String returns a string representation of Target. -func (t Target) String() string { - return t.URL.String() -} - // Builder creates a resolver that will be used to watch name resolution updates. type Builder interface { // Build creates a new resolver for the given target. @@ -293,10 +264,8 @@ type Builder interface { // gRPC dial calls Build synchronously, and fails if the returned error is // not nil. Build(target Target, cc ClientConn, opts BuildOptions) (Resolver, error) - // Scheme returns the scheme supported by this resolver. Scheme is defined - // at https://github.com/grpc/grpc/blob/master/doc/naming.md. The returned - // string should not contain uppercase characters, as they will not match - // the parsed target's scheme as defined in RFC 3986. + // Scheme returns the scheme supported by this resolver. + // Scheme is defined at https://github.com/grpc/grpc/blob/master/doc/naming.md. Scheme() string } @@ -315,12 +284,9 @@ type Resolver interface { Close() } -// AuthorityOverrider is implemented by Builders that wish to override the -// default authority for the ClientConn. -// By default, the authority used is target.Endpoint(). -type AuthorityOverrider interface { - // OverrideAuthority returns the authority to use for a ClientConn with the - // given target. The implementation must generate it without blocking, - // typically in line, and must keep it unchanged. - OverrideAuthority(Target) string +// UnregisterForTesting removes the resolver builder with the given scheme from the +// resolver map. +// This function is for testing only. +func UnregisterForTesting(scheme string) { + delete(m, scheme) } diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go new file mode 100644 index 00000000..05a9d4e0 --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go @@ -0,0 +1,176 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpc + +import ( + "strings" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +// ccResolverWrapper is a wrapper on top of cc for resolvers. +// It implements resolver.ClientConn interface. +type ccResolverWrapper struct { + cc *ClientConn + resolverMu sync.Mutex + resolver resolver.Resolver + done *grpcsync.Event + curState resolver.State + + incomingMu sync.Mutex // Synchronizes all the incoming calls. +} + +// newCCResolverWrapper uses the resolver.Builder to build a Resolver and +// returns a ccResolverWrapper object which wraps the newly built resolver. +func newCCResolverWrapper(cc *ClientConn, rb resolver.Builder) (*ccResolverWrapper, error) { + ccr := &ccResolverWrapper{ + cc: cc, + done: grpcsync.NewEvent(), + } + + var credsClone credentials.TransportCredentials + if creds := cc.dopts.copts.TransportCredentials; creds != nil { + credsClone = creds.Clone() + } + rbo := resolver.BuildOptions{ + DisableServiceConfig: cc.dopts.disableServiceConfig, + DialCreds: credsClone, + CredsBundle: cc.dopts.copts.CredsBundle, + Dialer: cc.dopts.copts.Dialer, + } + + var err error + // We need to hold the lock here while we assign to the ccr.resolver field + // to guard against a data race caused by the following code path, + // rb.Build-->ccr.ReportError-->ccr.poll-->ccr.resolveNow, would end up + // accessing ccr.resolver which is being assigned here. + ccr.resolverMu.Lock() + defer ccr.resolverMu.Unlock() + ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, rbo) + if err != nil { + return nil, err + } + return ccr, nil +} + +func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { + ccr.resolverMu.Lock() + if !ccr.done.HasFired() { + ccr.resolver.ResolveNow(o) + } + ccr.resolverMu.Unlock() +} + +func (ccr *ccResolverWrapper) close() { + ccr.resolverMu.Lock() + ccr.resolver.Close() + ccr.done.Fire() + ccr.resolverMu.Unlock() +} + +func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { + ccr.incomingMu.Lock() + defer ccr.incomingMu.Unlock() + if ccr.done.HasFired() { + return nil + } + ccr.addChannelzTraceEvent(s) + ccr.curState = s + if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState { + return balancer.ErrBadResolverState + } + return nil +} + +func (ccr *ccResolverWrapper) ReportError(err error) { + ccr.incomingMu.Lock() + defer ccr.incomingMu.Unlock() + if ccr.done.HasFired() { + return + } + channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) + ccr.cc.updateResolverState(resolver.State{}, err) +} + +// NewAddress is called by the resolver implementation to send addresses to gRPC. +func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { + ccr.incomingMu.Lock() + defer ccr.incomingMu.Unlock() + if ccr.done.HasFired() { + return + } + ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) + ccr.curState.Addresses = addrs + ccr.cc.updateResolverState(ccr.curState, nil) +} + +// NewServiceConfig is called by the resolver implementation to send service +// configs to gRPC. 
+func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { + ccr.incomingMu.Lock() + defer ccr.incomingMu.Unlock() + if ccr.done.HasFired() { + return + } + channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: got new service config: %s", sc) + if ccr.cc.dopts.disableServiceConfig { + channelz.Info(logger, ccr.cc.channelzID, "Service config lookups disabled; ignoring config") + return + } + scpr := parseServiceConfig(sc) + if scpr.Err != nil { + channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) + return + } + ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) + ccr.curState.ServiceConfig = scpr + ccr.cc.updateResolverState(ccr.curState, nil) +} + +func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { + return parseServiceConfig(scJSON) +} + +func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { + var updates []string + var oldSC, newSC *ServiceConfig + var oldOK, newOK bool + if ccr.curState.ServiceConfig != nil { + oldSC, oldOK = ccr.curState.ServiceConfig.Config.(*ServiceConfig) + } + if s.ServiceConfig != nil { + newSC, newOK = s.ServiceConfig.Config.(*ServiceConfig) + } + if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) { + updates = append(updates, "service config updated") + } + if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 { + updates = append(updates, "resolver returned an empty address list") + } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { + updates = append(updates, "resolver returned new addresses") + } + channelz.Infof(logger, ccr.cc.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) +} diff --git a/vendor/google.golang.org/grpc/resolver_wrapper.go b/vendor/google.golang.org/grpc/resolver_wrapper.go deleted file mode 100644 index c79bab12..00000000 --- a/vendor/google.golang.org/grpc/resolver_wrapper.go +++ /dev/null @@ -1,197 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "context" - "strings" - "sync" - - "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/grpcsync" - "google.golang.org/grpc/internal/pretty" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/serviceconfig" -) - -// ccResolverWrapper is a wrapper on top of cc for resolvers. -// It implements resolver.ClientConn interface. -type ccResolverWrapper struct { - // The following fields are initialized when the wrapper is created and are - // read-only afterwards, and therefore can be accessed without a mutex. - cc *ClientConn - ignoreServiceConfig bool - serializer *grpcsync.CallbackSerializer - serializerCancel context.CancelFunc - - resolver resolver.Resolver // only accessed within the serializer - - // The following fields are protected by mu. Caller must take cc.mu before - // taking mu. 
- mu sync.Mutex - curState resolver.State - closed bool -} - -// newCCResolverWrapper initializes the ccResolverWrapper. It can only be used -// after calling start, which builds the resolver. -func newCCResolverWrapper(cc *ClientConn) *ccResolverWrapper { - ctx, cancel := context.WithCancel(cc.ctx) - return &ccResolverWrapper{ - cc: cc, - ignoreServiceConfig: cc.dopts.disableServiceConfig, - serializer: grpcsync.NewCallbackSerializer(ctx), - serializerCancel: cancel, - } -} - -// start builds the name resolver using the resolver.Builder in cc and returns -// any error encountered. It must always be the first operation performed on -// any newly created ccResolverWrapper, except that close may be called instead. -func (ccr *ccResolverWrapper) start() error { - errCh := make(chan error) - ccr.serializer.Schedule(func(ctx context.Context) { - if ctx.Err() != nil { - return - } - opts := resolver.BuildOptions{ - DisableServiceConfig: ccr.cc.dopts.disableServiceConfig, - DialCreds: ccr.cc.dopts.copts.TransportCredentials, - CredsBundle: ccr.cc.dopts.copts.CredsBundle, - Dialer: ccr.cc.dopts.copts.Dialer, - } - var err error - ccr.resolver, err = ccr.cc.resolverBuilder.Build(ccr.cc.parsedTarget, ccr, opts) - errCh <- err - }) - return <-errCh -} - -func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { - ccr.serializer.Schedule(func(ctx context.Context) { - if ctx.Err() != nil || ccr.resolver == nil { - return - } - ccr.resolver.ResolveNow(o) - }) -} - -// close initiates async shutdown of the wrapper. To determine the wrapper has -// finished shutting down, the channel should block on ccr.serializer.Done() -// without cc.mu held. -func (ccr *ccResolverWrapper) close() { - channelz.Info(logger, ccr.cc.channelzID, "Closing the name resolver") - ccr.mu.Lock() - ccr.closed = true - ccr.mu.Unlock() - - ccr.serializer.Schedule(func(context.Context) { - if ccr.resolver == nil { - return - } - ccr.resolver.Close() - ccr.resolver = nil - }) - ccr.serializerCancel() -} - -// UpdateState is called by resolver implementations to report new state to gRPC -// which includes addresses and service config. -func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { - ccr.cc.mu.Lock() - ccr.mu.Lock() - if ccr.closed { - ccr.mu.Unlock() - ccr.cc.mu.Unlock() - return nil - } - if s.Endpoints == nil { - s.Endpoints = make([]resolver.Endpoint, 0, len(s.Addresses)) - for _, a := range s.Addresses { - ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes} - ep.Addresses[0].BalancerAttributes = nil - s.Endpoints = append(s.Endpoints, ep) - } - } - ccr.addChannelzTraceEvent(s) - ccr.curState = s - ccr.mu.Unlock() - return ccr.cc.updateResolverStateAndUnlock(s, nil) -} - -// ReportError is called by resolver implementations to report errors -// encountered during name resolution to gRPC. -func (ccr *ccResolverWrapper) ReportError(err error) { - ccr.cc.mu.Lock() - ccr.mu.Lock() - if ccr.closed { - ccr.mu.Unlock() - ccr.cc.mu.Unlock() - return - } - ccr.mu.Unlock() - channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) - ccr.cc.updateResolverStateAndUnlock(resolver.State{}, err) -} - -// NewAddress is called by the resolver implementation to send addresses to -// gRPC. 
-func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { - ccr.cc.mu.Lock() - ccr.mu.Lock() - if ccr.closed { - ccr.mu.Unlock() - ccr.cc.mu.Unlock() - return - } - s := resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig} - ccr.addChannelzTraceEvent(s) - ccr.curState = s - ccr.mu.Unlock() - ccr.cc.updateResolverStateAndUnlock(s, nil) -} - -// ParseServiceConfig is called by resolver implementations to parse a JSON -// representation of the service config. -func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { - return parseServiceConfig(scJSON) -} - -// addChannelzTraceEvent adds a channelz trace event containing the new -// state received from resolver implementations. -func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { - var updates []string - var oldSC, newSC *ServiceConfig - var oldOK, newOK bool - if ccr.curState.ServiceConfig != nil { - oldSC, oldOK = ccr.curState.ServiceConfig.Config.(*ServiceConfig) - } - if s.ServiceConfig != nil { - newSC, newOK = s.ServiceConfig.Config.(*ServiceConfig) - } - if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) { - updates = append(updates, "service config updated") - } - if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 { - updates = append(updates, "resolver returned an empty address list") - } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { - updates = append(updates, "resolver returned new addresses") - } - channelz.Infof(logger, ccr.cc.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) -} diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index a4b6bc68..934fc1aa 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -25,6 +25,7 @@ import ( "encoding/binary" "fmt" "io" + "io/ioutil" "math" "strings" "sync" @@ -75,8 +76,8 @@ func NewGZIPCompressorWithLevel(level int) (Compressor, error) { } return &gzipCompressor{ pool: sync.Pool{ - New: func() any { - w, err := gzip.NewWriterLevel(io.Discard, level) + New: func() interface{} { + w, err := gzip.NewWriterLevel(ioutil.Discard, level) if err != nil { panic(err) } @@ -142,7 +143,7 @@ func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) { z.Close() d.pool.Put(z) }() - return io.ReadAll(z) + return ioutil.ReadAll(z) } func (d *gzipDecompressor) Type() string { @@ -159,7 +160,6 @@ type callInfo struct { contentSubtype string codec baseCodec maxRetryRPCBufferSize int - onFinish []func(err error) } func defaultCallInfo() *callInfo { @@ -296,44 +296,8 @@ func (o FailFastCallOption) before(c *callInfo) error { } func (o FailFastCallOption) after(c *callInfo, attempt *csAttempt) {} -// OnFinish returns a CallOption that configures a callback to be called when -// the call completes. The error passed to the callback is the status of the -// RPC, and may be nil. The onFinish callback provided will only be called once -// by gRPC. This is mainly used to be used by streaming interceptors, to be -// notified when the RPC completes along with information about the status of -// the RPC. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. 
-func OnFinish(onFinish func(err error)) CallOption { - return OnFinishCallOption{ - OnFinish: onFinish, - } -} - -// OnFinishCallOption is CallOption that indicates a callback to be called when -// the call completes. -// -// # Experimental -// -// Notice: This type is EXPERIMENTAL and may be changed or removed in a -// later release. -type OnFinishCallOption struct { - OnFinish func(error) -} - -func (o OnFinishCallOption) before(c *callInfo) error { - c.onFinish = append(c.onFinish, o.OnFinish) - return nil -} - -func (o OnFinishCallOption) after(c *callInfo, attempt *csAttempt) {} - // MaxCallRecvMsgSize returns a CallOption which sets the maximum message size -// in bytes the client can receive. If this is not set, gRPC uses the default -// 4MB. +// in bytes the client can receive. func MaxCallRecvMsgSize(bytes int) CallOption { return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: bytes} } @@ -356,8 +320,7 @@ func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error { func (o MaxRecvMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {} // MaxCallSendMsgSize returns a CallOption which sets the maximum message size -// in bytes the client can send. If this is not set, gRPC uses the default -// `math.MaxInt32`. +// in bytes the client can send. func MaxCallSendMsgSize(bytes int) CallOption { return MaxSendMsgSizeCallOption{MaxSendMsgSize: bytes} } @@ -577,9 +540,6 @@ type parser struct { // The header of a gRPC message. Find more detail at // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md header [5]byte - - // recvBufferPool is the pool of shared receive buffers. - recvBufferPool SharedBufferPool } // recvMsg reads a complete gRPC message from the stream. @@ -613,7 +573,9 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt if int(length) > maxReceiveMessageSize { return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize) } - msg = p.recvBufferPool.Get(int(length)) + // TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead + // of making it for each message: + msg = make([]byte, int(length)) if _, err := p.r.Read(msg); err != nil { if err == io.EOF { err = io.ErrUnexpectedEOF @@ -626,7 +588,7 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt // encode serializes msg and returns a buffer containing the message, or an // error if it is too large to be transmitted by grpc. If msg is nil, it // generates an empty message. -func encode(c baseCodec, msg any) ([]byte, error) { +func encode(c baseCodec, msg interface{}) ([]byte, error) { if msg == nil { // NOTE: typed nils will not be caught by this check return nil, nil } @@ -640,18 +602,14 @@ func encode(c baseCodec, msg any) ([]byte, error) { return b, nil } -// compress returns the input bytes compressed by compressor or cp. -// If both compressors are nil, or if the message has zero length, returns nil, -// indicating no compression was done. +// compress returns the input bytes compressed by compressor or cp. If both +// compressors are nil, returns nil. // // TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor. 
func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) { if compressor == nil && cp == nil { return nil, nil } - if len(in) == 0 { - return nil, nil - } wrapErr := func(err error) error { return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error()) } @@ -697,15 +655,14 @@ func msgHeader(data, compData []byte) (hdr []byte, payload []byte) { return hdr, data } -func outPayload(client bool, msg any, data, payload []byte, t time.Time) *stats.OutPayload { +func outPayload(client bool, msg interface{}, data, payload []byte, t time.Time) *stats.OutPayload { return &stats.OutPayload{ - Client: client, - Payload: msg, - Data: data, - Length: len(data), - WireLength: len(payload) + headerLen, - CompressedLength: len(payload), - SentTime: t, + Client: client, + Payload: msg, + Data: data, + Length: len(data), + WireLength: len(payload) + headerLen, + SentTime: t, } } @@ -726,17 +683,17 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool } type payloadInfo struct { - compressedLength int // The compressed length got from wire. + wireLength int // The compressed length got from wire. uncompressedBytes []byte } func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) ([]byte, error) { - pf, buf, err := p.recvMsg(maxReceiveMessageSize) + pf, d, err := p.recvMsg(maxReceiveMessageSize) if err != nil { return nil, err } if payInfo != nil { - payInfo.compressedLength = len(buf) + payInfo.wireLength = len(d) } if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil { @@ -748,13 +705,13 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei // To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor, // use this decompressor as the default. if dc != nil { - buf, err = dc.Do(bytes.NewReader(buf)) - size = len(buf) + d, err = dc.Do(bytes.NewReader(d)) + size = len(d) } else { - buf, size, err = decompress(compressor, buf, maxReceiveMessageSize) + d, size, err = decompress(compressor, d, maxReceiveMessageSize) } if err != nil { - return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) + return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) } if size > maxReceiveMessageSize { // TODO: Revisit the error code. Currently keep it consistent with java @@ -762,7 +719,7 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize) } } - return buf, nil + return d, nil } // Using compressor, decompress d, returning data and size. @@ -789,25 +746,23 @@ func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize } // Read from LimitReader with limit max+1. So if the underlying // reader is over limit, the result will be bigger than max. - d, err = io.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + d, err = ioutil.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) return d, len(d), err } // For the two compressor parameters, both should not be set, but if they are, // dc takes precedence over compressor. // TODO(dfawley): wrap the old compressor/decompressor using the new API? 
-func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error { - buf, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) +func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error { + d, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) if err != nil { return err } - if err := c.Unmarshal(buf, m); err != nil { - return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err) + if err := c.Unmarshal(d, m); err != nil { + return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err) } if payInfo != nil { - payInfo.uncompressedBytes = buf - } else { - p.recvBufferPool.Put(&buf) + payInfo.uncompressedBytes = d } return nil } @@ -867,22 +822,19 @@ func ErrorDesc(err error) string { // Errorf returns nil if c is OK. // // Deprecated: use status.Errorf instead. -func Errorf(c codes.Code, format string, a ...any) error { +func Errorf(c codes.Code, format string, a ...interface{}) error { return status.Errorf(c, format, a...) } -var errContextCanceled = status.Error(codes.Canceled, context.Canceled.Error()) -var errContextDeadline = status.Error(codes.DeadlineExceeded, context.DeadlineExceeded.Error()) - // toRPCErr converts an error into an error from the status package. func toRPCErr(err error) error { switch err { case nil, io.EOF: return err case context.DeadlineExceeded: - return errContextDeadline + return status.Error(codes.DeadlineExceeded, err.Error()) case context.Canceled: - return errContextCanceled + return status.Error(codes.Canceled, err.Error()) case io.ErrUnexpectedEOF: return status.Error(codes.Internal, err.Error()) } diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index e89c5ac6..f4dde72b 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -43,8 +43,8 @@ import ( "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcsync" - "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" @@ -70,25 +70,23 @@ func init() { internal.GetServerCredentials = func(srv *Server) credentials.TransportCredentials { return srv.opts.creds } - internal.IsRegisteredMethod = func(srv *Server, method string) bool { - return srv.isRegisteredMethod(method) + internal.DrainServerTransports = func(srv *Server, addr string) { + srv.drainServerTransports(addr) } - internal.ServerFromContext = serverFromContext internal.AddGlobalServerOptions = func(opt ...ServerOption) { - globalServerOptions = append(globalServerOptions, opt...) + extraServerOptions = append(extraServerOptions, opt...) 
} internal.ClearGlobalServerOptions = func() { - globalServerOptions = nil + extraServerOptions = nil } internal.BinaryLogger = binaryLogger internal.JoinServerOptions = newJoinServerOption - internal.RecvBufferPool = recvBufferPool } var statusOK = status.New(codes.OK, "") var logger = grpclog.Component("core") -type methodHandler func(srv any, ctx context.Context, dec func(any) error, interceptor UnaryServerInterceptor) (any, error) +type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error) // MethodDesc represents an RPC service's method specification. type MethodDesc struct { @@ -101,20 +99,26 @@ type ServiceDesc struct { ServiceName string // The pointer to the service interface. Used to check whether the user // provided implementation satisfies the interface requirements. - HandlerType any + HandlerType interface{} Methods []MethodDesc Streams []StreamDesc - Metadata any + Metadata interface{} } // serviceInfo wraps information about a service. It is very similar to // ServiceDesc and is constructed from it for internal purposes. type serviceInfo struct { // Contains the implementation for the methods in this service. - serviceImpl any + serviceImpl interface{} methods map[string]*MethodDesc streams map[string]*StreamDesc - mdata any + mdata interface{} +} + +type serverWorkerData struct { + st transport.ServerTransport + wg *sync.WaitGroup + stream *transport.Stream } // Server is a gRPC server to serve RPC requests. @@ -136,14 +140,12 @@ type Server struct { quit *grpcsync.Event done *grpcsync.Event channelzRemoveOnce sync.Once - serveWG sync.WaitGroup // counts active Serve goroutines for Stop/GracefulStop - handlersWG sync.WaitGroup // counts active method handler goroutines + serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop channelzID *channelz.Identifier czData *channelzData - serverWorkerChannel chan func() - serverWorkerChannelClose func() + serverWorkerChannels []chan *serverWorkerData } type serverOptions struct { @@ -168,25 +170,20 @@ type serverOptions struct { initialConnWindowSize int32 writeBufferSize int readBufferSize int - sharedWriteBuffer bool connectionTimeout time.Duration maxHeaderListSize *uint32 headerTableSize *uint32 numServerWorkers uint32 - recvBufferPool SharedBufferPool - waitForHandlers bool } var defaultServerOptions = serverOptions{ - maxConcurrentStreams: math.MaxUint32, maxReceiveMessageSize: defaultServerMaxReceiveMessageSize, maxSendMessageSize: defaultServerMaxSendMessageSize, connectionTimeout: 120 * time.Second, writeBufferSize: defaultWriteBufSize, readBufferSize: defaultReadBufSize, - recvBufferPool: nopBufferPool{}, } -var globalServerOptions []ServerOption +var extraServerOptions []ServerOption // A ServerOption sets options such as credentials, codec and keepalive parameters, etc. type ServerOption interface { @@ -236,25 +233,10 @@ func newJoinServerOption(opts ...ServerOption) ServerOption { return &joinServerOption{opts: opts} } -// SharedWriteBuffer allows reusing per-connection transport write buffer. -// If this option is set to true every connection will release the buffer after -// flushing the data on the wire. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. 
-func SharedWriteBuffer(val bool) ServerOption { - return newFuncServerOption(func(o *serverOptions) { - o.sharedWriteBuffer = val - }) -} - -// WriteBufferSize determines how much data can be batched before doing a write -// on the wire. The corresponding memory allocation for this buffer will be -// twice the size to keep syscalls low. The default value for this buffer is -// 32KB. Zero or negative values will disable the write buffer such that each -// write will be on underlying connection. +// WriteBufferSize determines how much data can be batched before doing a write on the wire. +// The corresponding memory allocation for this buffer will be twice the size to keep syscalls low. +// The default value for this buffer is 32KB. +// Zero will disable the write buffer such that each write will be on underlying connection. // Note: A Send call may not directly translate to a write. func WriteBufferSize(s int) ServerOption { return newFuncServerOption(func(o *serverOptions) { @@ -262,10 +244,11 @@ func WriteBufferSize(s int) ServerOption { }) } -// ReadBufferSize lets you set the size of read buffer, this determines how much -// data can be read at most for one read syscall. The default value for this -// buffer is 32KB. Zero or negative values will disable read buffer for a -// connection so data framer can access the underlying conn directly. +// ReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most +// for one read syscall. +// The default value for this buffer is 32KB. +// Zero will disable read buffer for a connection so data framer can access the underlying +// conn directly. func ReadBufferSize(s int) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.readBufferSize = s @@ -290,9 +273,9 @@ func InitialConnWindowSize(s int32) ServerOption { // KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server. func KeepaliveParams(kp keepalive.ServerParameters) ServerOption { - if kp.Time > 0 && kp.Time < internal.KeepaliveMinServerPingTime { + if kp.Time > 0 && kp.Time < time.Second { logger.Warning("Adjusting keepalive ping interval to minimum period of 1s") - kp.Time = internal.KeepaliveMinServerPingTime + kp.Time = time.Second } return newFuncServerOption(func(o *serverOptions) { @@ -404,9 +387,6 @@ func MaxSendMsgSize(m int) ServerOption { // MaxConcurrentStreams returns a ServerOption that will apply a limit on the number // of concurrent streams to each ServerTransport. func MaxConcurrentStreams(n uint32) ServerOption { - if n == 0 { - n = math.MaxUint32 - } return newFuncServerOption(func(o *serverOptions) { o.maxConcurrentStreams = n }) @@ -572,44 +552,6 @@ func NumStreamWorkers(numServerWorkers uint32) ServerOption { }) } -// WaitForHandlers cause Stop to wait until all outstanding method handlers have -// exited before returning. If false, Stop will return as soon as all -// connections have closed, but method handlers may still be running. By -// default, Stop does not wait for method handlers to return. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. -func WaitForHandlers(w bool) ServerOption { - return newFuncServerOption(func(o *serverOptions) { - o.waitForHandlers = w - }) -} - -// RecvBufferPool returns a ServerOption that configures the server -// to use the provided shared buffer pool for parsing incoming messages. 
Depending -// on the application's workload, this could result in reduced memory allocation. -// -// If you are unsure about how to implement a memory pool but want to utilize one, -// begin with grpc.NewSharedBufferPool. -// -// Note: The shared buffer pool feature will not be active if any of the following -// options are used: StatsHandler, EnableTracing, or binary logging. In such -// cases, the shared buffer pool will be ignored. -// -// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in -// v1.60.0 or later. -func RecvBufferPool(bufferPool SharedBufferPool) ServerOption { - return recvBufferPool(bufferPool) -} - -func recvBufferPool(bufferPool SharedBufferPool) ServerOption { - return newFuncServerOption(func(o *serverOptions) { - o.recvBufferPool = bufferPool - }) -} - // serverWorkerResetThreshold defines how often the stack must be reset. Every // N requests, by spawning a new goroutine in its place, a worker can reset its // stack so that large stacks don't live in memory forever. 2^16 should allow @@ -618,31 +560,39 @@ func recvBufferPool(bufferPool SharedBufferPool) ServerOption { const serverWorkerResetThreshold = 1 << 16 // serverWorkers blocks on a *transport.Stream channel forever and waits for -// data to be fed by serveStreams. This allows multiple requests to be +// data to be fed by serveStreams. This allows different requests to be // processed by the same goroutine, removing the need for expensive stack // re-allocations (see the runtime.morestack problem [1]). // // [1] https://github.com/golang/go/issues/18138 -func (s *Server) serverWorker() { - for completed := 0; completed < serverWorkerResetThreshold; completed++ { - f, ok := <-s.serverWorkerChannel +func (s *Server) serverWorker(ch chan *serverWorkerData) { + // To make sure all server workers don't reset at the same time, choose a + // random number of iterations before resetting. + threshold := serverWorkerResetThreshold + grpcrand.Intn(serverWorkerResetThreshold) + for completed := 0; completed < threshold; completed++ { + data, ok := <-ch if !ok { return } - f() + s.handleStream(data.st, data.stream, s.traceInfo(data.st, data.stream)) + data.wg.Done() } - go s.serverWorker() + go s.serverWorker(ch) } -// initServerWorkers creates worker goroutines and a channel to process incoming +// initServerWorkers creates worker goroutines and channels to process incoming // connections to reduce the time spent overall on runtime.morestack. func (s *Server) initServerWorkers() { - s.serverWorkerChannel = make(chan func()) - s.serverWorkerChannelClose = grpcsync.OnceFunc(func() { - close(s.serverWorkerChannel) - }) + s.serverWorkerChannels = make([]chan *serverWorkerData, s.opts.numServerWorkers) for i := uint32(0); i < s.opts.numServerWorkers; i++ { - go s.serverWorker() + s.serverWorkerChannels[i] = make(chan *serverWorkerData) + go s.serverWorker(s.serverWorkerChannels[i]) + } +} + +func (s *Server) stopServerWorkers() { + for i := uint32(0); i < s.opts.numServerWorkers; i++ { + close(s.serverWorkerChannels[i]) } } @@ -650,7 +600,7 @@ func (s *Server) initServerWorkers() { // started to accept requests yet. func NewServer(opt ...ServerOption) *Server { opts := defaultServerOptions - for _, o := range globalServerOptions { + for _, o := range extraServerOptions { o.apply(&opts) } for _, o := range opt { @@ -684,7 +634,7 @@ func NewServer(opt ...ServerOption) *Server { // printf records an event in s's event log, unless s has been stopped. // REQUIRES s.mu is held. 
-func (s *Server) printf(format string, a ...any) { +func (s *Server) printf(format string, a ...interface{}) { if s.events != nil { s.events.Printf(format, a...) } @@ -692,7 +642,7 @@ func (s *Server) printf(format string, a ...any) { // errorf records an error in s's event log, unless s has been stopped. // REQUIRES s.mu is held. -func (s *Server) errorf(format string, a ...any) { +func (s *Server) errorf(format string, a ...interface{}) { if s.events != nil { s.events.Errorf(format, a...) } @@ -707,14 +657,14 @@ type ServiceRegistrar interface { // once the server has started serving. // desc describes the service and its methods and handlers. impl is the // service implementation which is passed to the method handlers. - RegisterService(desc *ServiceDesc, impl any) + RegisterService(desc *ServiceDesc, impl interface{}) } // RegisterService registers a service and its implementation to the gRPC // server. It is called from the IDL generated code. This must be called before // invoking Serve. If ss is non-nil (for legacy code), its type is checked to // ensure it implements sd.HandlerType. -func (s *Server) RegisterService(sd *ServiceDesc, ss any) { +func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) { if ss != nil { ht := reflect.TypeOf(sd.HandlerType).Elem() st := reflect.TypeOf(ss) @@ -725,7 +675,7 @@ func (s *Server) RegisterService(sd *ServiceDesc, ss any) { s.register(sd, ss) } -func (s *Server) register(sd *ServiceDesc, ss any) { +func (s *Server) register(sd *ServiceDesc, ss interface{}) { s.mu.Lock() defer s.mu.Unlock() s.printf("RegisterService(%q)", sd.ServiceName) @@ -766,7 +716,7 @@ type MethodInfo struct { type ServiceInfo struct { Methods []MethodInfo // Metadata is the metadata specified in ServiceDesc when registering service. - Metadata any + Metadata interface{} } // GetServiceInfo returns a map from service names to ServiceInfo. @@ -827,18 +777,6 @@ func (l *listenSocket) Close() error { // Serve returns when lis.Accept fails with fatal errors. lis will be closed when // this method returns. // Serve will return a non-nil error unless Stop or GracefulStop is called. -// -// Note: All supported releases of Go (as of December 2023) override the OS -// defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive -// with OS defaults for keepalive time and interval, callers need to do the -// following two things: -// - pass a net.Listener created by calling the Listen method on a -// net.ListenConfig with the `KeepAlive` field set to a negative value. This -// will result in the Go standard library not overriding OS defaults for TCP -// keepalive interval and time. But this will also result in the Go standard -// library not enabling TCP keepalives by default. -// - override the Accept method on the passed in net.Listener and set the -// SO_KEEPALIVE socket option to enable TCP keepalives, with OS defaults. 
func (s *Server) Serve(lis net.Listener) error { s.mu.Lock() s.printf("serving") @@ -946,21 +884,24 @@ func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) { return } - if cc, ok := rawConn.(interface { - PassServerTransport(transport.ServerTransport) - }); ok { - cc.PassServerTransport(st) - } - if !s.addConn(lisAddr, st) { return } go func() { - s.serveStreams(context.Background(), st, rawConn) + s.serveStreams(st) s.removeConn(lisAddr, st) }() } +func (s *Server) drainServerTransports(addr string) { + s.mu.Lock() + conns := s.conns[addr] + for st := range conns { + st.Drain() + } + s.mu.Unlock() +} + // newHTTP2Transport sets up a http/2 transport (using the // gRPC http2 server transport in transport/http2_server.go). func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { @@ -976,7 +917,6 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { InitialConnWindowSize: s.opts.initialConnWindowSize, WriteBufferSize: s.opts.writeBufferSize, ReadBufferSize: s.opts.readBufferSize, - SharedWriteBuffer: s.opts.sharedWriteBuffer, ChannelzParentID: s.channelzID, MaxHeaderListSize: s.opts.maxHeaderListSize, HeaderTableSize: s.opts.headerTableSize, @@ -1001,44 +941,38 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { return st } -func (s *Server) serveStreams(ctx context.Context, st transport.ServerTransport, rawConn net.Conn) { - ctx = transport.SetConnection(ctx, rawConn) - ctx = peer.NewContext(ctx, st.Peer()) - for _, sh := range s.opts.statsHandlers { - ctx = sh.TagConn(ctx, &stats.ConnTagInfo{ - RemoteAddr: st.Peer().Addr, - LocalAddr: st.Peer().LocalAddr, - }) - sh.HandleConn(ctx, &stats.ConnBegin{}) - } - - defer func() { - st.Close(errors.New("finished serving streams for the server transport")) - for _, sh := range s.opts.statsHandlers { - sh.HandleConn(ctx, &stats.ConnEnd{}) - } - }() - - streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams) - st.HandleStreams(ctx, func(stream *transport.Stream) { - s.handlersWG.Add(1) - streamQuota.acquire() - f := func() { - defer streamQuota.release() - defer s.handlersWG.Done() - s.handleStream(st, stream) - } +func (s *Server) serveStreams(st transport.ServerTransport) { + defer st.Close() + var wg sync.WaitGroup + var roundRobinCounter uint32 + st.HandleStreams(func(stream *transport.Stream) { + wg.Add(1) if s.opts.numServerWorkers > 0 { + data := &serverWorkerData{st: st, wg: &wg, stream: stream} select { - case s.serverWorkerChannel <- f: - return + case s.serverWorkerChannels[atomic.AddUint32(&roundRobinCounter, 1)%s.opts.numServerWorkers] <- data: default: // If all stream workers are busy, fallback to the default code path. + go func() { + s.handleStream(st, stream, s.traceInfo(st, stream)) + wg.Done() + }() } + } else { + go func() { + defer wg.Done() + s.handleStream(st, stream, s.traceInfo(st, stream)) + }() + } + }, func(ctx context.Context, method string) context.Context { + if !EnableTracing { + return ctx } - go f() + tr := trace.New("grpc.Recv."+methodFamily(method), method) + return trace.NewContext(ctx, tr) }) + wg.Wait() } var _ http.Handler = (*Server)(nil) @@ -1074,28 +1008,51 @@ var _ http.Handler = (*Server)(nil) func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers) if err != nil { - // Errors returned from transport.NewServerHandlerTransport have - // already been written to w. 
+ http.Error(w, err.Error(), http.StatusInternalServerError) return } if !s.addConn(listenerAddressForServeHTTP, st) { return } defer s.removeConn(listenerAddressForServeHTTP, st) - s.serveStreams(r.Context(), st, nil) + s.serveStreams(st) +} + +// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled. +// If tracing is not enabled, it returns nil. +func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) { + if !EnableTracing { + return nil + } + tr, ok := trace.FromContext(stream.Context()) + if !ok { + return nil + } + + trInfo = &traceInfo{ + tr: tr, + firstLine: firstLine{ + client: false, + remoteAddr: st.RemoteAddr(), + }, + } + if dl, ok := stream.Context().Deadline(); ok { + trInfo.firstLine.deadline = time.Until(dl) + } + return trInfo } func (s *Server) addConn(addr string, st transport.ServerTransport) bool { s.mu.Lock() defer s.mu.Unlock() if s.conns == nil { - st.Close(errors.New("Server.addConn called when server has already been stopped")) + st.Close() return false } if s.drain { // Transport added after we drained our existing conns: drain it // immediately. - st.Drain("") + st.Drain() } if s.conns[addr] == nil { @@ -1145,7 +1102,7 @@ func (s *Server) incrCallsFailed() { atomic.AddInt64(&s.czData.callsFailed, 1) } -func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { +func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { data, err := encode(s.getCodec(stream.ContentSubtype()), msg) if err != nil { channelz.Error(logger, s.channelzID, "grpc: server failed to encode response: ", err) @@ -1164,7 +1121,7 @@ func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, err = t.Write(stream, hdr, payload, opts) if err == nil { for _, sh := range s.opts.statsHandlers { - sh.HandleRPC(ctx, outPayload(false, msg, data, payload, time.Now())) + sh.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now())) } } return err @@ -1192,21 +1149,26 @@ func chainUnaryServerInterceptors(s *Server) { } func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor { - return func(ctx context.Context, req any, info *UnaryServerInfo, handler UnaryHandler) (any, error) { - return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler)) - } -} - -func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info *UnaryServerInfo, finalHandler UnaryHandler) UnaryHandler { - if curr == len(interceptors)-1 { - return finalHandler - } - return func(ctx context.Context, req any) (any, error) { - return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler)) + return func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) { + // the struct ensures the variables are allocated together, rather than separately, since we + // know they should be garbage collected together. This saves 1 allocation and decreases + // time/call by about 10% on the microbenchmark. 
+ var state struct { + i int + next UnaryHandler + } + state.next = func(ctx context.Context, req interface{}) (interface{}, error) { + if state.i == len(interceptors)-1 { + return interceptors[state.i](ctx, req, info, handler) + } + state.i++ + return interceptors[state.i-1](ctx, req, info, state.next) + } + return state.next(ctx, req) } } -func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { +func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { shs := s.opts.statsHandlers if len(shs) != 0 || trInfo != nil || channelz.IsOn() { if channelz.IsOn() { @@ -1220,7 +1182,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor IsClientStream: false, IsServerStream: false, } - sh.HandleRPC(ctx, statsBegin) + sh.HandleRPC(stream.Context(), statsBegin) } if trInfo != nil { trInfo.tr.LazyLog(&trInfo.firstLine, false) @@ -1238,7 +1200,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor defer func() { if trInfo != nil { if err != nil && err != io.EOF { - trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) trInfo.tr.SetError() } trInfo.tr.Finish() @@ -1252,7 +1214,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor if err != nil && err != io.EOF { end.Error = toRPCErr(err) } - sh.HandleRPC(ctx, end) + sh.HandleRPC(stream.Context(), end) } if channelz.IsOn() { @@ -1274,6 +1236,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor } } if len(binlogs) != 0 { + ctx := stream.Context() md, _ := metadata.FromIncomingContext(ctx) logEntry := &binarylog.ClientHeader{ Header: md, @@ -1293,7 +1256,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor logEntry.PeerAddr = peer.Addr } for _, binlog := range binlogs { - binlog.Log(ctx, logEntry) + binlog.Log(logEntry) } } @@ -1304,7 +1267,6 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor var comp, decomp encoding.Compressor var cp Compressor var dc Decompressor - var sendCompressorName string // If dc is set and matches the stream's compression, use it. Otherwise, try // to find a matching registered compressor for decomp. @@ -1325,18 +1287,12 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. if s.opts.cp != nil { cp = s.opts.cp - sendCompressorName = cp.Type() + stream.SetSendCompress(cp.Type()) } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { // Legacy compressor not specified; attempt to respond with same encoding. 
comp = encoding.GetCompressor(rc) if comp != nil { - sendCompressorName = comp.Name() - } - } - - if sendCompressorName != "" { - if err := stream.SetSendCompress(sendCompressorName); err != nil { - return status.Errorf(codes.Internal, "grpc: failed to set send compressor: %v", err) + stream.SetSendCompress(rc) } } @@ -1344,28 +1300,27 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor if len(shs) != 0 || len(binlogs) != 0 { payInfo = &payloadInfo{} } - d, err := recvAndDecompress(&parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) + d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) if err != nil { if e := t.WriteStatus(stream, status.Convert(err)); e != nil { - channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) + channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status %v", e) } return err } if channelz.IsOn() { t.IncrMsgRecv() } - df := func(v any) error { + df := func(v interface{}) error { if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) } for _, sh := range shs { - sh.HandleRPC(ctx, &stats.InPayload{ - RecvTime: time.Now(), - Payload: v, - Length: len(d), - WireLength: payInfo.compressedLength + headerLen, - CompressedLength: payInfo.compressedLength, - Data: d, + sh.HandleRPC(stream.Context(), &stats.InPayload{ + RecvTime: time.Now(), + Payload: v, + WireLength: payInfo.wireLength + headerLen, + Data: d, + Length: len(d), }) } if len(binlogs) != 0 { @@ -1373,7 +1328,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor Message: d, } for _, binlog := range binlogs { - binlog.Log(ctx, cm) + binlog.Log(cm) } } if trInfo != nil { @@ -1381,7 +1336,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor } return nil } - ctx = NewContextWithServerTransportStream(ctx, stream) + ctx := NewContextWithServerTransportStream(stream.Context(), stream) reply, appErr := md.Handler(info.serviceImpl, ctx, df, s.opts.unaryInt) if appErr != nil { appStatus, ok := status.FromError(appErr) @@ -1406,7 +1361,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor Header: h, } for _, binlog := range binlogs { - binlog.Log(ctx, sh) + binlog.Log(sh) } } st := &binarylog.ServerTrailer{ @@ -1414,7 +1369,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor Err: appErr, } for _, binlog := range binlogs { - binlog.Log(ctx, st) + binlog.Log(st) } } return appErr @@ -1424,12 +1379,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor } opts := &transport.Options{Last: true} - // Server handler could have set new compressor by calling SetSendCompressor. - // In case it is set, we need to use it for compressing outbound message. - if stream.SendCompress() != sendCompressorName { - comp = encoding.GetCompressor(stream.SendCompress()) - } - if err := s.sendResponse(ctx, t, stream, reply, cp, opts, comp); err != nil { + if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil { if err == io.EOF { // The entire stream is done (for unary RPC only). 
return err @@ -1456,8 +1406,8 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor Err: appErr, } for _, binlog := range binlogs { - binlog.Log(ctx, sh) - binlog.Log(ctx, st) + binlog.Log(sh) + binlog.Log(st) } } return err @@ -1471,8 +1421,8 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor Message: reply, } for _, binlog := range binlogs { - binlog.Log(ctx, sh) - binlog.Log(ctx, sm) + binlog.Log(sh) + binlog.Log(sm) } } if channelz.IsOn() { @@ -1484,16 +1434,17 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor // TODO: Should we be logging if writing status failed here, like above? // Should the logging be in WriteStatus? Should we ignore the WriteStatus // error or allow the stats handler to see it? + err = t.WriteStatus(stream, statusOK) if len(binlogs) != 0 { st := &binarylog.ServerTrailer{ Trailer: stream.Trailer(), Err: appErr, } for _, binlog := range binlogs { - binlog.Log(ctx, st) + binlog.Log(st) } } - return t.WriteStatus(stream, statusOK) + return err } // chainStreamServerInterceptors chains all stream server interceptors into one. @@ -1518,21 +1469,26 @@ func chainStreamServerInterceptors(s *Server) { } func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServerInterceptor { - return func(srv any, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { - return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler)) - } -} - -func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, info *StreamServerInfo, finalHandler StreamHandler) StreamHandler { - if curr == len(interceptors)-1 { - return finalHandler - } - return func(srv any, stream ServerStream) error { - return interceptors[curr+1](srv, stream, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler)) + return func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { + // the struct ensures the variables are allocated together, rather than separately, since we + // know they should be garbage collected together. This saves 1 allocation and decreases + // time/call by about 10% on the microbenchmark. 
+ var state struct { + i int + next StreamHandler + } + state.next = func(srv interface{}, ss ServerStream) error { + if state.i == len(interceptors)-1 { + return interceptors[state.i](srv, ss, info, handler) + } + state.i++ + return interceptors[state.i-1](srv, ss, info, state.next) + } + return state.next(srv, ss) } } -func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { +func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { if channelz.IsOn() { s.incrCallsStarted() } @@ -1546,15 +1502,15 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran IsServerStream: sd.ServerStreams, } for _, sh := range shs { - sh.HandleRPC(ctx, statsBegin) + sh.HandleRPC(stream.Context(), statsBegin) } } - ctx = NewContextWithServerTransportStream(ctx, stream) + ctx := NewContextWithServerTransportStream(stream.Context(), stream) ss := &serverStream{ ctx: ctx, t: t, s: stream, - p: &parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, + p: &parser{r: stream}, codec: s.getCodec(stream.ContentSubtype()), maxReceiveMessageSize: s.opts.maxReceiveMessageSize, maxSendMessageSize: s.opts.maxSendMessageSize, @@ -1568,7 +1524,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran if trInfo != nil { ss.mu.Lock() if err != nil && err != io.EOF { - ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) ss.trInfo.tr.SetError() } ss.trInfo.tr.Finish() @@ -1585,7 +1541,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran end.Error = toRPCErr(err) } for _, sh := range shs { - sh.HandleRPC(ctx, end) + sh.HandleRPC(stream.Context(), end) } } @@ -1627,7 +1583,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran logEntry.PeerAddr = peer.Addr } for _, binlog := range ss.binlogs { - binlog.Log(ctx, logEntry) + binlog.Log(logEntry) } } @@ -1650,18 +1606,12 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. if s.opts.cp != nil { ss.cp = s.opts.cp - ss.sendCompressorName = s.opts.cp.Type() + stream.SetSendCompress(s.opts.cp.Type()) } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { // Legacy compressor not specified; attempt to respond with same encoding. 
ss.comp = encoding.GetCompressor(rc) if ss.comp != nil { - ss.sendCompressorName = rc - } - } - - if ss.sendCompressorName != "" { - if err := stream.SetSendCompress(ss.sendCompressorName); err != nil { - return status.Errorf(codes.Internal, "grpc: failed to set send compressor: %v", err) + stream.SetSendCompress(rc) } } @@ -1671,7 +1621,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran trInfo.tr.LazyLog(&trInfo.firstLine, false) } var appErr error - var server any + var server interface{} if info != nil { server = info.serviceImpl } @@ -1699,16 +1649,16 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran ss.trInfo.tr.SetError() ss.mu.Unlock() } + t.WriteStatus(ss.s, appStatus) if len(ss.binlogs) != 0 { st := &binarylog.ServerTrailer{ Trailer: ss.s.Trailer(), Err: appErr, } for _, binlog := range ss.binlogs { - binlog.Log(ctx, st) + binlog.Log(st) } } - t.WriteStatus(ss.s, appStatus) // TODO: Should we log an error from WriteStatus here and below? return appErr } @@ -1717,93 +1667,60 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran ss.trInfo.tr.LazyLog(stringer("OK"), false) ss.mu.Unlock() } + err = t.WriteStatus(ss.s, statusOK) if len(ss.binlogs) != 0 { st := &binarylog.ServerTrailer{ Trailer: ss.s.Trailer(), Err: appErr, } for _, binlog := range ss.binlogs { - binlog.Log(ctx, st) + binlog.Log(st) } } - return t.WriteStatus(ss.s, statusOK) + return err } -func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) { - ctx := stream.Context() - ctx = contextWithServer(ctx, s) - var ti *traceInfo - if EnableTracing { - tr := trace.New("grpc.Recv."+methodFamily(stream.Method()), stream.Method()) - ctx = trace.NewContext(ctx, tr) - ti = &traceInfo{ - tr: tr, - firstLine: firstLine{ - client: false, - remoteAddr: t.Peer().Addr, - }, - } - if dl, ok := ctx.Deadline(); ok { - ti.firstLine.deadline = time.Until(dl) - } - } - +func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) { sm := stream.Method() if sm != "" && sm[0] == '/' { sm = sm[1:] } pos := strings.LastIndex(sm, "/") if pos == -1 { - if ti != nil { - ti.tr.LazyLog(&fmtStringer{"Malformed method name %q", []any{sm}}, true) - ti.tr.SetError() + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []interface{}{sm}}, true) + trInfo.tr.SetError() } errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { - if ti != nil { - ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) - ti.tr.SetError() + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.SetError() } channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) } - if ti != nil { - ti.tr.Finish() + if trInfo != nil { + trInfo.tr.Finish() } return } service := sm[:pos] method := sm[pos+1:] - md, _ := metadata.FromIncomingContext(ctx) - for _, sh := range s.opts.statsHandlers { - ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()}) - sh.HandleRPC(ctx, &stats.InHeader{ - FullMethod: stream.Method(), - RemoteAddr: t.Peer().Addr, - LocalAddr: t.Peer().LocalAddr, - Compression: stream.RecvCompress(), - WireLength: stream.HeaderWireLength(), - Header: md, - }) - } - // To have calls in stream callouts work. Will delete once all stats handler - // calls come from the gRPC layer. 
- stream.SetContext(ctx) - srv, knownService := s.services[service] if knownService { if md, ok := srv.methods[method]; ok { - s.processUnaryRPC(ctx, t, stream, srv, md, ti) + s.processUnaryRPC(t, stream, srv, md, trInfo) return } if sd, ok := srv.streams[method]; ok { - s.processStreamingRPC(ctx, t, stream, srv, sd, ti) + s.processStreamingRPC(t, stream, srv, sd, trInfo) return } } // Unknown service, or known server unknown method. if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { - s.processStreamingRPC(ctx, t, stream, nil, unknownDesc, ti) + s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo) return } var errDesc string @@ -1812,19 +1729,19 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str } else { errDesc = fmt.Sprintf("unknown method %v for service %v", method, service) } - if ti != nil { - ti.tr.LazyPrintf("%s", errDesc) - ti.tr.SetError() + if trInfo != nil { + trInfo.tr.LazyPrintf("%s", errDesc) + trInfo.tr.SetError() } if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { - if ti != nil { - ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) - ti.tr.SetError() + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.SetError() } channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) } - if ti != nil { - ti.tr.Finish() + if trInfo != nil { + trInfo.tr.Finish() } } @@ -1879,88 +1796,86 @@ func ServerTransportStreamFromContext(ctx context.Context) ServerTransportStream // pending RPCs on the client side will get notified by connection // errors. func (s *Server) Stop() { - s.stop(false) -} - -// GracefulStop stops the gRPC server gracefully. It stops the server from -// accepting new connections and RPCs and blocks until all the pending RPCs are -// finished. -func (s *Server) GracefulStop() { - s.stop(true) -} - -func (s *Server) stop(graceful bool) { s.quit.Fire() - defer s.done.Fire() + + defer func() { + s.serveWG.Wait() + s.done.Fire() + }() s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) }) s.mu.Lock() - s.closeListenersLocked() - // Wait for serving threads to be ready to exit. Only then can we be sure no - // new conns will be created. + listeners := s.lis + s.lis = nil + conns := s.conns + s.conns = nil + // interrupt GracefulStop if Stop and GracefulStop are called concurrently. + s.cv.Broadcast() s.mu.Unlock() - s.serveWG.Wait() - s.mu.Lock() - defer s.mu.Unlock() - - if graceful { - s.drainAllServerTransportsLocked() - } else { - s.closeServerTransportsLocked() + for lis := range listeners { + lis.Close() } - - for len(s.conns) != 0 { - s.cv.Wait() + for _, cs := range conns { + for st := range cs { + st.Close() + } } - s.conns = nil - if s.opts.numServerWorkers > 0 { - // Closing the channel (only once, via grpcsync.OnceFunc) after all the - // connections have been closed above ensures that there are no - // goroutines executing the callback passed to st.HandleStreams (where - // the channel is written to). - s.serverWorkerChannelClose() - } - - if graceful || s.opts.waitForHandlers { - s.handlersWG.Wait() + s.stopServerWorkers() } + s.mu.Lock() if s.events != nil { s.events.Finish() s.events = nil } + s.mu.Unlock() } -// s.mu must be held by the caller. -func (s *Server) closeServerTransportsLocked() { - for _, conns := range s.conns { - for st := range conns { - st.Close(errors.New("Server.Stop called")) - } +// GracefulStop stops the gRPC server gracefully. 
It stops the server from +// accepting new connections and RPCs and blocks until all the pending RPCs are +// finished. +func (s *Server) GracefulStop() { + s.quit.Fire() + defer s.done.Fire() + + s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) }) + s.mu.Lock() + if s.conns == nil { + s.mu.Unlock() + return } -} -// s.mu must be held by the caller. -func (s *Server) drainAllServerTransportsLocked() { + for lis := range s.lis { + lis.Close() + } + s.lis = nil if !s.drain { for _, conns := range s.conns { for st := range conns { - st.Drain("graceful_stop") + st.Drain() } } s.drain = true } -} -// s.mu must be held by the caller. -func (s *Server) closeListenersLocked() { - for lis := range s.lis { - lis.Close() + // Wait for serving threads to be ready to exit. Only then can we be sure no + // new conns will be created. + s.mu.Unlock() + s.serveWG.Wait() + s.mu.Lock() + + for len(s.conns) != 0 { + s.cv.Wait() } - s.lis = nil + s.conns = nil + if s.events != nil { + s.events.Finish() + s.events = nil + } + s.mu.Unlock() } // contentSubtype must be lowercase @@ -1974,50 +1889,11 @@ func (s *Server) getCodec(contentSubtype string) baseCodec { } codec := encoding.GetCodec(contentSubtype) if codec == nil { - logger.Warningf("Unsupported codec %q. Defaulting to %q for now. This will start to fail in future releases.", contentSubtype, proto.Name) return encoding.GetCodec(proto.Name) } return codec } -type serverKey struct{} - -// serverFromContext gets the Server from the context. -func serverFromContext(ctx context.Context) *Server { - s, _ := ctx.Value(serverKey{}).(*Server) - return s -} - -// contextWithServer sets the Server in the context. -func contextWithServer(ctx context.Context, server *Server) context.Context { - return context.WithValue(ctx, serverKey{}, server) -} - -// isRegisteredMethod returns whether the passed in method is registered as a -// method on the server. /service/method and service/method will match if the -// service and method are registered on the server. -func (s *Server) isRegisteredMethod(serviceMethod string) bool { - if serviceMethod != "" && serviceMethod[0] == '/' { - serviceMethod = serviceMethod[1:] - } - pos := strings.LastIndex(serviceMethod, "/") - if pos == -1 { // Invalid method name syntax. - return false - } - service := serviceMethod[:pos] - method := serviceMethod[pos+1:] - srv, knownService := s.services[service] - if knownService { - if _, ok := srv.methods[method]; ok { - return true - } - if _, ok := srv.streams[method]; ok { - return true - } - } - return false -} - // SetHeader sets the header metadata to be sent from the server to the client. // The context provided must be the context passed to the server's handler. // @@ -2068,60 +1944,6 @@ func SendHeader(ctx context.Context, md metadata.MD) error { return nil } -// SetSendCompressor sets a compressor for outbound messages from the server. -// It must not be called after any event that causes headers to be sent -// (see ServerStream.SetHeader for the complete list). Provided compressor is -// used when below conditions are met: -// -// - compressor is registered via encoding.RegisterCompressor -// - compressor name must exist in the client advertised compressor names -// sent in grpc-accept-encoding header. Use ClientSupportedCompressors to -// get client supported compressor names. -// -// The context provided must be the context passed to the server's handler. -// It must be noted that compressor name encoding.Identity disables the -// outbound compression. 
-// By default, server messages will be sent using the same compressor with -// which request messages were sent. -// -// It is not safe to call SetSendCompressor concurrently with SendHeader and -// SendMsg. -// -// # Experimental -// -// Notice: This function is EXPERIMENTAL and may be changed or removed in a -// later release. -func SetSendCompressor(ctx context.Context, name string) error { - stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream) - if !ok || stream == nil { - return fmt.Errorf("failed to fetch the stream from the given context") - } - - if err := validateSendCompressor(name, stream.ClientAdvertisedCompressors()); err != nil { - return fmt.Errorf("unable to set send compressor: %w", err) - } - - return stream.SetSendCompress(name) -} - -// ClientSupportedCompressors returns compressor names advertised by the client -// via grpc-accept-encoding header. -// -// The context provided must be the context passed to the server's handler. -// -// # Experimental -// -// Notice: This function is EXPERIMENTAL and may be changed or removed in a -// later release. -func ClientSupportedCompressors(ctx context.Context) ([]string, error) { - stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream) - if !ok || stream == nil { - return nil, fmt.Errorf("failed to fetch the stream from the given context %v", ctx) - } - - return strings.Split(stream.ClientAdvertisedCompressors(), ","), nil -} - // SetTrailer sets the trailer metadata that will be sent when an RPC returns. // When called more than once, all the provided metadata will be merged. // @@ -2156,53 +1978,3 @@ type channelzServer struct { func (c *channelzServer) ChannelzMetric() *channelz.ServerInternalMetric { return c.s.channelzMetric() } - -// validateSendCompressor returns an error when given compressor name cannot be -// handled by the server or the client based on the advertised compressors. -func validateSendCompressor(name, clientCompressors string) error { - if name == encoding.Identity { - return nil - } - - if !grpcutil.IsCompressorNameRegistered(name) { - return fmt.Errorf("compressor not registered %q", name) - } - - for _, c := range strings.Split(clientCompressors, ",") { - if c == name { - return nil // found match - } - } - return fmt.Errorf("client does not support compressor %q", name) -} - -// atomicSemaphore implements a blocking, counting semaphore. acquire should be -// called synchronously; release may be called asynchronously. -type atomicSemaphore struct { - n atomic.Int64 - wait chan struct{} -} - -func (q *atomicSemaphore) acquire() { - if q.n.Add(-1) < 0 { - // We ran out of quota. Block until a release happens. - <-q.wait - } -} - -func (q *atomicSemaphore) release() { - // N.B. the "<= 0" check below should allow for this to work with multiple - // concurrent calls to acquire, but also note that with synchronous calls to - // acquire, as our system does, n will never be less than -1. There are - // fairness issues (queuing) to consider if this was to be generalized. - if q.n.Add(1) <= 0 { - // An acquire was waiting on us. Unblock it. 
- q.wait <- struct{}{} - } -} - -func newHandlerQuota(n uint32) *atomicSemaphore { - a := &atomicSemaphore{wait: make(chan struct{}, 1)} - a.n.Store(int64(n)) - return a -} diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go index 0df11fc0..01bbb202 100644 --- a/vendor/google.golang.org/grpc/service_config.go +++ b/vendor/google.golang.org/grpc/service_config.go @@ -23,6 +23,8 @@ import ( "errors" "fmt" "reflect" + "strconv" + "strings" "time" "google.golang.org/grpc/codes" @@ -104,8 +106,8 @@ type healthCheckConfig struct { type jsonRetryPolicy struct { MaxAttempts int - InitialBackoff internalserviceconfig.Duration - MaxBackoff internalserviceconfig.Duration + InitialBackoff string + MaxBackoff string BackoffMultiplier float64 RetryableStatusCodes []codes.Code } @@ -127,6 +129,50 @@ type retryThrottlingPolicy struct { TokenRatio float64 } +func parseDuration(s *string) (*time.Duration, error) { + if s == nil { + return nil, nil + } + if !strings.HasSuffix(*s, "s") { + return nil, fmt.Errorf("malformed duration %q", *s) + } + ss := strings.SplitN((*s)[:len(*s)-1], ".", 3) + if len(ss) > 2 { + return nil, fmt.Errorf("malformed duration %q", *s) + } + // hasDigits is set if either the whole or fractional part of the number is + // present, since both are optional but one is required. + hasDigits := false + var d time.Duration + if len(ss[0]) > 0 { + i, err := strconv.ParseInt(ss[0], 10, 32) + if err != nil { + return nil, fmt.Errorf("malformed duration %q: %v", *s, err) + } + d = time.Duration(i) * time.Second + hasDigits = true + } + if len(ss) == 2 && len(ss[1]) > 0 { + if len(ss[1]) > 9 { + return nil, fmt.Errorf("malformed duration %q", *s) + } + f, err := strconv.ParseInt(ss[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("malformed duration %q: %v", *s, err) + } + for i := 9; i > len(ss[1]); i-- { + f *= 10 + } + d += time.Duration(f) + hasDigits = true + } + if !hasDigits { + return nil, fmt.Errorf("malformed duration %q", *s) + } + + return &d, nil +} + type jsonName struct { Service string Method string @@ -155,7 +201,7 @@ func (j jsonName) generatePath() (string, error) { type jsonMC struct { Name *[]jsonName WaitForReady *bool - Timeout *internalserviceconfig.Duration + Timeout *string MaxRequestMessageBytes *int64 MaxResponseMessageBytes *int64 RetryPolicy *jsonRetryPolicy @@ -180,7 +226,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { var rsc jsonSC err := json.Unmarshal([]byte(js), &rsc) if err != nil { - logger.Warningf("grpc: unmarshaling service config %s: %v", js, err) + logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) return &serviceconfig.ParseResult{Err: err} } sc := ServiceConfig{ @@ -206,13 +252,18 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { if m.Name == nil { continue } + d, err := parseDuration(m.Timeout) + if err != nil { + logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + return &serviceconfig.ParseResult{Err: err} + } mc := MethodConfig{ WaitForReady: m.WaitForReady, - Timeout: (*time.Duration)(m.Timeout), + Timeout: d, } if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil { - logger.Warningf("grpc: unmarshaling service config %s: %v", js, err) + logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) return &serviceconfig.ParseResult{Err: err} } if m.MaxRequestMessageBytes != nil { @@ -232,13 +283,13 @@ func parseServiceConfig(js 
string) *serviceconfig.ParseResult { for i, n := range *m.Name { path, err := n.generatePath() if err != nil { - logger.Warningf("grpc: error unmarshaling service config %s due to methodConfig[%d]: %v", js, i, err) + logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to methodConfig[%d]: %v", js, i, err) return &serviceconfig.ParseResult{Err: err} } if _, ok := paths[path]; ok { err = errDuplicatedName - logger.Warningf("grpc: error unmarshaling service config %s due to methodConfig[%d]: %v", js, i, err) + logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to methodConfig[%d]: %v", js, i, err) return &serviceconfig.ParseResult{Err: err} } paths[path] = struct{}{} @@ -261,10 +312,18 @@ func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPol if jrp == nil { return nil, nil } + ib, err := parseDuration(&jrp.InitialBackoff) + if err != nil { + return nil, err + } + mb, err := parseDuration(&jrp.MaxBackoff) + if err != nil { + return nil, err + } if jrp.MaxAttempts <= 1 || - jrp.InitialBackoff <= 0 || - jrp.MaxBackoff <= 0 || + *ib <= 0 || + *mb <= 0 || jrp.BackoffMultiplier <= 0 || len(jrp.RetryableStatusCodes) == 0 { logger.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp) @@ -273,8 +332,8 @@ func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPol rp := &internalserviceconfig.RetryPolicy{ MaxAttempts: jrp.MaxAttempts, - InitialBackoff: time.Duration(jrp.InitialBackoff), - MaxBackoff: time.Duration(jrp.MaxBackoff), + InitialBackoff: *ib, + MaxBackoff: *mb, BackoffMultiplier: jrp.BackoffMultiplier, RetryableStatusCodes: make(map[codes.Code]bool), } diff --git a/vendor/google.golang.org/grpc/shared_buffer_pool.go b/vendor/google.golang.org/grpc/shared_buffer_pool.go deleted file mode 100644 index 48a64cfe..00000000 --- a/vendor/google.golang.org/grpc/shared_buffer_pool.go +++ /dev/null @@ -1,154 +0,0 @@ -/* - * - * Copyright 2023 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import "sync" - -// SharedBufferPool is a pool of buffers that can be shared, resulting in -// decreased memory allocation. Currently, in gRPC-go, it is only utilized -// for parsing incoming messages. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. -type SharedBufferPool interface { - // Get returns a buffer with specified length from the pool. - // - // The returned byte slice may be not zero initialized. - Get(length int) []byte - - // Put returns a buffer to the pool. - Put(*[]byte) -} - -// NewSharedBufferPool creates a simple SharedBufferPool with buckets -// of different sizes to optimize memory usage. This prevents the pool from -// wasting large amounts of memory, even when handling messages of varying sizes. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. 
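The service_config.go hunk above moves Timeout, InitialBackoff and MaxBackoff back to plain JSON strings, parsed by the added parseDuration: decimal seconds with a trailing "s" and at most nine fractional digits. A sketch of a config in that shape supplied at dial time; the service name and target address are placeholders:

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

// Durations must look like "0.5s" or "2s"; forms such as "500ms" are rejected
// by parseDuration.
const serviceConfig = `{
  "methodConfig": [{
    "name": [{"service": "example.Echo"}],
    "waitForReady": true,
    "timeout": "0.5s",
    "retryPolicy": {
      "maxAttempts": 3,
      "initialBackoff": "0.1s",
      "maxBackoff": "1s",
      "backoffMultiplier": 2.0,
      "retryableStatusCodes": ["UNAVAILABLE"]
    }
  }]
}`

func main() {
	cc, err := grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(serviceConfig),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer cc.Close()
}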
-func NewSharedBufferPool() SharedBufferPool { - return &simpleSharedBufferPool{ - pools: [poolArraySize]simpleSharedBufferChildPool{ - newBytesPool(level0PoolMaxSize), - newBytesPool(level1PoolMaxSize), - newBytesPool(level2PoolMaxSize), - newBytesPool(level3PoolMaxSize), - newBytesPool(level4PoolMaxSize), - newBytesPool(0), - }, - } -} - -// simpleSharedBufferPool is a simple implementation of SharedBufferPool. -type simpleSharedBufferPool struct { - pools [poolArraySize]simpleSharedBufferChildPool -} - -func (p *simpleSharedBufferPool) Get(size int) []byte { - return p.pools[p.poolIdx(size)].Get(size) -} - -func (p *simpleSharedBufferPool) Put(bs *[]byte) { - p.pools[p.poolIdx(cap(*bs))].Put(bs) -} - -func (p *simpleSharedBufferPool) poolIdx(size int) int { - switch { - case size <= level0PoolMaxSize: - return level0PoolIdx - case size <= level1PoolMaxSize: - return level1PoolIdx - case size <= level2PoolMaxSize: - return level2PoolIdx - case size <= level3PoolMaxSize: - return level3PoolIdx - case size <= level4PoolMaxSize: - return level4PoolIdx - default: - return levelMaxPoolIdx - } -} - -const ( - level0PoolMaxSize = 16 // 16 B - level1PoolMaxSize = level0PoolMaxSize * 16 // 256 B - level2PoolMaxSize = level1PoolMaxSize * 16 // 4 KB - level3PoolMaxSize = level2PoolMaxSize * 16 // 64 KB - level4PoolMaxSize = level3PoolMaxSize * 16 // 1 MB -) - -const ( - level0PoolIdx = iota - level1PoolIdx - level2PoolIdx - level3PoolIdx - level4PoolIdx - levelMaxPoolIdx - poolArraySize -) - -type simpleSharedBufferChildPool interface { - Get(size int) []byte - Put(any) -} - -type bufferPool struct { - sync.Pool - - defaultSize int -} - -func (p *bufferPool) Get(size int) []byte { - bs := p.Pool.Get().(*[]byte) - - if cap(*bs) < size { - p.Pool.Put(bs) - - return make([]byte, size) - } - - return (*bs)[:size] -} - -func newBytesPool(size int) simpleSharedBufferChildPool { - return &bufferPool{ - Pool: sync.Pool{ - New: func() any { - bs := make([]byte, size) - return &bs - }, - }, - defaultSize: size, - } -} - -// nopBufferPool is a buffer pool just makes new buffer without pooling. -type nopBufferPool struct { -} - -func (nopBufferPool) Get(length int) []byte { - return make([]byte, length) -} - -func (nopBufferPool) Put(*[]byte) { -} diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go index 4ab70e2d..0285dcc6 100644 --- a/vendor/google.golang.org/grpc/stats/stats.go +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -59,36 +59,18 @@ func (s *Begin) IsClient() bool { return s.Client } func (s *Begin) isRPCStats() {} -// PickerUpdated indicates that the LB policy provided a new picker while the -// RPC was waiting for one. -type PickerUpdated struct{} - -// IsClient indicates if the stats information is from client side. Only Client -// Side interfaces with a Picker, thus always returns true. -func (*PickerUpdated) IsClient() bool { return true } - -func (*PickerUpdated) isRPCStats() {} - // InPayload contains the information for an incoming payload. type InPayload struct { // Client is true if this InPayload is from client side. Client bool // Payload is the payload with original type. - Payload any + Payload interface{} // Data is the serialized message payload. Data []byte - - // Length is the size of the uncompressed payload data. Does not include any - // framing (gRPC or HTTP/2). + // Length is the length of uncompressed data. Length int - // CompressedLength is the size of the compressed payload data. 
Does not - // include any framing (gRPC or HTTP/2). Same as Length if compression not - // enabled. - CompressedLength int - // WireLength is the size of the compressed payload data plus gRPC framing. - // Does not include HTTP/2 framing. + // WireLength is the length of data on wire (compressed, signed, encrypted). WireLength int - // RecvTime is the time when the payload is received. RecvTime time.Time } @@ -144,18 +126,12 @@ type OutPayload struct { // Client is true if this OutPayload is from client side. Client bool // Payload is the payload with original type. - Payload any + Payload interface{} // Data is the serialized message payload. Data []byte - // Length is the size of the uncompressed payload data. Does not include any - // framing (gRPC or HTTP/2). + // Length is the length of uncompressed data. Length int - // CompressedLength is the size of the compressed payload data. Does not - // include any framing (gRPC or HTTP/2). Same as Length if compression not - // enabled. - CompressedLength int - // WireLength is the size of the compressed payload data plus gRPC framing. - // Does not include HTTP/2 framing. + // WireLength is the length of data on wire (compressed, signed, encrypted). WireLength int // SentTime is the time when the payload is sent. SentTime time.Time diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go index a93360ef..623be39f 100644 --- a/vendor/google.golang.org/grpc/status/status.go +++ b/vendor/google.golang.org/grpc/status/status.go @@ -50,7 +50,7 @@ func New(c codes.Code, msg string) *Status { } // Newf returns New(c, fmt.Sprintf(format, a...)). -func Newf(c codes.Code, format string, a ...any) *Status { +func Newf(c codes.Code, format string, a ...interface{}) *Status { return New(c, fmt.Sprintf(format, a...)) } @@ -60,7 +60,7 @@ func Error(c codes.Code, msg string) error { } // Errorf returns Error(c, fmt.Sprintf(format, a...)). -func Errorf(c codes.Code, format string, a ...any) error { +func Errorf(c codes.Code, format string, a ...interface{}) error { return Error(c, fmt.Sprintf(format, a...)) } @@ -77,18 +77,9 @@ func FromProto(s *spb.Status) *Status { // FromError returns a Status representation of err. // // - If err was produced by this package or implements the method `GRPCStatus() -// *Status` and `GRPCStatus()` does not return nil, or if err wraps a type -// satisfying this, the Status from `GRPCStatus()` is returned. For wrapped -// errors, the message returned contains the entire err.Error() text and not -// just the wrapped status. In that case, ok is true. +// *Status`, the appropriate Status is returned. // -// - If err is nil, a Status is returned with codes.OK and no message, and ok -// is true. -// -// - If err implements the method `GRPCStatus() *Status` and `GRPCStatus()` -// returns nil (which maps to Codes.OK), or if err wraps a type -// satisfying this, a Status is returned with codes.Unknown and err's -// Error() message, and ok is false. +// - If err is nil, a Status is returned with codes.OK and no message. // // - Otherwise, err is an error not compatible with this package. In this // case, a Status is returned with codes.Unknown and err's Error() message, @@ -97,31 +88,10 @@ func FromError(err error) (s *Status, ok bool) { if err == nil { return nil, true } - type grpcstatus interface{ GRPCStatus() *Status } - if gs, ok := err.(grpcstatus); ok { - grpcStatus := gs.GRPCStatus() - if grpcStatus == nil { - // Error has status nil, which maps to codes.OK. 
There - // is no sensible behavior for this, so we turn it into - // an error with codes.Unknown and discard the existing - // status. - return New(codes.Unknown, err.Error()), false - } - return grpcStatus, true - } - var gs grpcstatus - if errors.As(err, &gs) { - grpcStatus := gs.GRPCStatus() - if grpcStatus == nil { - // Error wraps an error that has status nil, which maps - // to codes.OK. There is no sensible behavior for this, - // so we turn it into an error with codes.Unknown and - // discard the existing status. - return New(codes.Unknown, err.Error()), false - } - p := grpcStatus.Proto() - p.Message = err.Error() - return status.FromProto(p), true + if se, ok := err.(interface { + GRPCStatus() *Status + }); ok { + return se.GRPCStatus(), true } return New(codes.Unknown, err.Error()), false } @@ -133,16 +103,19 @@ func Convert(err error) *Status { return s } -// Code returns the Code of the error if it is a Status error or if it wraps a -// Status error. If that is not the case, it returns codes.OK if err is nil, or -// codes.Unknown otherwise. +// Code returns the Code of the error if it is a Status error, codes.OK if err +// is nil, or codes.Unknown otherwise. func Code(err error) codes.Code { // Don't use FromError to avoid allocation of OK status. if err == nil { return codes.OK } - - return Convert(err).Code() + if se, ok := err.(interface { + GRPCStatus() *Status + }); ok { + return se.GRPCStatus().Code() + } + return codes.Unknown } // FromContextError converts a context error or wrapped context error into a diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index d621f52b..960c3e33 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -31,7 +31,6 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" "google.golang.org/grpc/encoding" - "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/balancerload" "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/channelz" @@ -48,8 +47,6 @@ import ( "google.golang.org/grpc/status" ) -var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool)) - // StreamHandler defines the handler called by gRPC server to complete the // execution of a streaming RPC. // @@ -57,7 +54,7 @@ var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(conte // status package, or be one of the context errors. Otherwise, gRPC will use // codes.Unknown as the status code and err.Error() as the status message of the // RPC. -type StreamHandler func(srv any, stream ServerStream) error +type StreamHandler func(srv interface{}, stream ServerStream) error // StreamDesc represents a streaming RPC service's method specification. Used // on the server when registering services and on the client when initiating @@ -82,9 +79,9 @@ type Stream interface { // Deprecated: See ClientStream and ServerStream documentation instead. Context() context.Context // Deprecated: See ClientStream and ServerStream documentation instead. - SendMsg(m any) error + SendMsg(m interface{}) error // Deprecated: See ClientStream and ServerStream documentation instead. - RecvMsg(m any) error + RecvMsg(m interface{}) error } // ClientStream defines the client-side behavior of a streaming RPC. @@ -93,9 +90,7 @@ type Stream interface { // status package. type ClientStream interface { // Header returns the header metadata received from the server if there - // is any. 
It blocks if the metadata is not ready to read. If the metadata - // is nil and the error is also nil, then the stream was terminated without - // headers, and the status can be discovered by calling RecvMsg. + // is any. It blocks if the metadata is not ready to read. Header() (metadata.MD, error) // Trailer returns the trailer metadata from the server, if there is any. // It must only be called after stream.CloseAndRecv has returned, or @@ -128,10 +123,7 @@ type ClientStream interface { // calling RecvMsg on the same stream at the same time, but it is not safe // to call SendMsg on the same stream in different goroutines. It is also // not safe to call CloseSend concurrently with SendMsg. - // - // It is not safe to modify the message after calling SendMsg. Tracing - // libraries and stats handlers may use the message lazily. - SendMsg(m any) error + SendMsg(m interface{}) error // RecvMsg blocks until it receives a message into m or the stream is // done. It returns io.EOF when the stream completes successfully. On // any other error, the stream is aborted and the error contains the RPC @@ -140,7 +132,7 @@ type ClientStream interface { // It is safe to have a goroutine calling SendMsg and another goroutine // calling RecvMsg on the same stream at the same time, but it is not // safe to call RecvMsg on the same stream in different goroutines. - RecvMsg(m any) error + RecvMsg(m interface{}) error } // NewStream creates a new Stream for the client side. This is typically @@ -176,29 +168,10 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth } func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { - // Start tracking the RPC for idleness purposes. This is where a stream is - // created for both streaming and unary RPCs, and hence is a good place to - // track active RPC count. - if err := cc.idlenessMgr.OnCallBegin(); err != nil { - return nil, err - } - // Add a calloption, to decrement the active call count, that gets executed - // when the RPC completes. - opts = append([]CallOption{OnFinish(func(error) { cc.idlenessMgr.OnCallEnd() })}, opts...) - - if md, added, ok := metadataFromOutgoingContextRaw(ctx); ok { - // validate md + if md, _, ok := metadata.FromOutgoingContextRaw(ctx); ok { if err := imetadata.Validate(md); err != nil { return nil, status.Error(codes.Internal, err.Error()) } - // validate added - for _, kvs := range added { - for i := 0; i < len(kvs); i += 2 { - if err := imetadata.ValidatePair(kvs[i], kvs[i+1]); err != nil { - return nil, status.Error(codes.Internal, err.Error()) - } - } - } } if channelz.IsOn() { cc.incrCallsStarted() @@ -379,7 +352,7 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client } } for _, binlog := range cs.binlogs { - binlog.Log(cs.ctx, logEntry) + binlog.Log(logEntry) } } @@ -443,7 +416,7 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) ctx = trace.NewContext(ctx, trInfo.tr) } - if cs.cc.parsedTarget.URL.Scheme == internal.GRPCResolverSchemeExtraMetadata { + if cs.cc.parsedTarget.Scheme == "xds" { // Add extra metadata (metadata that will be added by transport) to context // so the balancer can see them. 
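newClientStream above reads and validates whatever outgoing metadata the caller attached to the context. A short sketch of how that metadata is typically attached on the client side; the key names are made up:

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
)

func main() {
	// AppendToOutgoingContext keeps any metadata already stored in the context;
	// these pairs are what newClientStream later validates and sends.
	ctx := metadata.AppendToOutgoingContext(context.Background(),
		"x-request-id", "abc123",
		"x-tenant", "demo",
	)
	md, _ := metadata.FromOutgoingContext(ctx)
	fmt.Println(md)
}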
ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs( @@ -465,7 +438,7 @@ func (a *csAttempt) getTransport() error { cs := a.cs var err error - a.t, a.pickResult, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method) + a.t, a.done, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method) if err != nil { if de, ok := err.(dropError); ok { err = de.error @@ -482,25 +455,6 @@ func (a *csAttempt) getTransport() error { func (a *csAttempt) newStream() error { cs := a.cs cs.callHdr.PreviousAttempts = cs.numRetries - - // Merge metadata stored in PickResult, if any, with existing call metadata. - // It is safe to overwrite the csAttempt's context here, since all state - // maintained in it are local to the attempt. When the attempt has to be - // retried, a new instance of csAttempt will be created. - if a.pickResult.Metadata != nil { - // We currently do not have a function it the metadata package which - // merges given metadata with existing metadata in a context. Existing - // function `AppendToOutgoingContext()` takes a variadic argument of key - // value pairs. - // - // TODO: Make it possible to retrieve key value pairs from metadata.MD - // in a form passable to AppendToOutgoingContext(), or create a version - // of AppendToOutgoingContext() that accepts a metadata.MD. - md, _ := metadata.FromOutgoingContext(a.ctx) - md = metadata.Join(md, a.pickResult.Metadata) - a.ctx = metadata.NewOutgoingContext(a.ctx, md) - } - s, err := a.t.NewStream(a.ctx, cs.callHdr) if err != nil { nse, ok := err.(*transport.NewStreamError) @@ -517,7 +471,7 @@ func (a *csAttempt) newStream() error { return toRPCErr(nse.Err) } a.s = s - a.p = &parser{r: s, recvBufferPool: a.cs.cc.dopts.recvBufferPool} + a.p = &parser{r: s} return nil } @@ -575,12 +529,12 @@ type clientStream struct { // csAttempt implements a single transport stream attempt within a // clientStream. type csAttempt struct { - ctx context.Context - cs *clientStream - t transport.ClientTransport - s *transport.Stream - p *parser - pickResult balancer.PickResult + ctx context.Context + cs *clientStream + t transport.ClientTransport + s *transport.Stream + p *parser + done func(balancer.DoneInfo) finished bool dc Decompressor @@ -798,24 +752,23 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) func (cs *clientStream) Header() (metadata.MD, error) { var m metadata.MD + noHeader := false err := cs.withRetry(func(a *csAttempt) error { var err error m, err = a.s.Header() + if err == transport.ErrNoHeaders { + noHeader = true + return nil + } return toRPCErr(err) }, cs.commitAttemptLocked) - if m == nil && err == nil { - // The stream ended with success. Finish the clientStream. - err = io.EOF - } - if err != nil { cs.finish(err) - // Do not return the error. The user should get it by calling Recv(). - return nil, nil + return nil, err } - if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged && m != nil { + if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged && !noHeader { // Only log if binary log is on and header has not been logged, and // there is actually headers to log. 
logEntry := &binarylog.ServerHeader{ @@ -828,10 +781,9 @@ func (cs *clientStream) Header() (metadata.MD, error) { } cs.serverHeaderBinlogged = true for _, binlog := range cs.binlogs { - binlog.Log(cs.ctx, logEntry) + binlog.Log(logEntry) } } - return m, nil } @@ -872,7 +824,7 @@ func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error cs.buffer = append(cs.buffer, op) } -func (cs *clientStream) SendMsg(m any) (err error) { +func (cs *clientStream) SendMsg(m interface{}) (err error) { defer func() { if err != nil && err != io.EOF { // Call finish on the client stream for errors generated by this SendMsg @@ -910,13 +862,13 @@ func (cs *clientStream) SendMsg(m any) (err error) { Message: data, } for _, binlog := range cs.binlogs { - binlog.Log(cs.ctx, cm) + binlog.Log(cm) } } return err } -func (cs *clientStream) RecvMsg(m any) error { +func (cs *clientStream) RecvMsg(m interface{}) error { if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged { // Call Header() to binary log header if it's not already logged. cs.Header() @@ -934,12 +886,30 @@ func (cs *clientStream) RecvMsg(m any) error { Message: recvInfo.uncompressedBytes, } for _, binlog := range cs.binlogs { - binlog.Log(cs.ctx, sm) + binlog.Log(sm) } } if err != nil || !cs.desc.ServerStreams { // err != nil or non-server-streaming indicates end of stream. cs.finish(err) + + if len(cs.binlogs) != 0 { + // finish will not log Trailer. Log Trailer here. + logEntry := &binarylog.ServerTrailer{ + OnClientSide: true, + Trailer: cs.Trailer(), + Err: err, + } + if logEntry.Err == io.EOF { + logEntry.Err = nil + } + if peer, ok := peer.FromContext(cs.Context()); ok { + logEntry.PeerAddr = peer.Addr + } + for _, binlog := range cs.binlogs { + binlog.Log(logEntry) + } + } } return err } @@ -964,7 +934,7 @@ func (cs *clientStream) CloseSend() error { OnClientSide: true, } for _, binlog := range cs.binlogs { - binlog.Log(cs.ctx, chc) + binlog.Log(chc) } } // We never returned an error here for reasons. @@ -982,9 +952,6 @@ func (cs *clientStream) finish(err error) { return } cs.finished = true - for _, onFinish := range cs.callInfo.onFinish { - onFinish(err) - } cs.commitAttemptLocked() if cs.attempt != nil { cs.attempt.finish(err) @@ -995,30 +962,18 @@ func (cs *clientStream) finish(err error) { } } } - cs.mu.Unlock() - // Only one of cancel or trailer needs to be logged. - if len(cs.binlogs) != 0 { - switch err { - case errContextCanceled, errContextDeadline, ErrClientConnClosing: - c := &binarylog.Cancel{ - OnClientSide: true, - } - for _, binlog := range cs.binlogs { - binlog.Log(cs.ctx, c) - } - default: - logEntry := &binarylog.ServerTrailer{ - OnClientSide: true, - Trailer: cs.Trailer(), - Err: err, - } - if peer, ok := peer.FromContext(cs.Context()); ok { - logEntry.PeerAddr = peer.Addr - } - for _, binlog := range cs.binlogs { - binlog.Log(cs.ctx, logEntry) - } + // For binary logging. only log cancel in finish (could be caused by RPC ctx + // canceled or ClientConn closed). Trailer will be logged in RecvMsg. + // + // Only one of cancel or trailer needs to be logged. In the cases where + // users don't call RecvMsg, users must have already canceled the RPC. 
+ if len(cs.binlogs) != 0 && status.Code(err) == codes.Canceled { + c := &binarylog.Cancel{ + OnClientSide: true, + } + for _, binlog := range cs.binlogs { + binlog.Log(c) } } if err == nil { @@ -1034,7 +989,7 @@ func (cs *clientStream) finish(err error) { cs.cancel() } -func (a *csAttempt) sendMsg(m any, hdr, payld, data []byte) error { +func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { cs := a.cs if a.trInfo != nil { a.mu.Lock() @@ -1061,7 +1016,7 @@ func (a *csAttempt) sendMsg(m any, hdr, payld, data []byte) error { return nil } -func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { +func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { cs := a.cs if len(a.statsHandlers) != 0 && payInfo == nil { payInfo = &payloadInfo{} @@ -1107,10 +1062,9 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { RecvTime: time.Now(), Payload: m, // TODO truncate large payload. - Data: payInfo.uncompressedBytes, - WireLength: payInfo.compressedLength + headerLen, - CompressedLength: payInfo.compressedLength, - Length: len(payInfo.uncompressedBytes), + Data: payInfo.uncompressedBytes, + WireLength: payInfo.wireLength + headerLen, + Length: len(payInfo.uncompressedBytes), }) } if channelz.IsOn() { @@ -1149,12 +1103,12 @@ func (a *csAttempt) finish(err error) { tr = a.s.Trailer() } - if a.pickResult.Done != nil { + if a.done != nil { br := false if a.s != nil { br = a.s.BytesReceived() } - a.pickResult.Done(balancer.DoneInfo{ + a.done(balancer.DoneInfo{ Err: err, Trailer: tr, BytesSent: a.s != nil, @@ -1276,22 +1230,17 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin return nil, err } as.s = s - as.p = &parser{r: s, recvBufferPool: ac.dopts.recvBufferPool} + as.p = &parser{r: s} ac.incrCallsStarted() if desc != unaryStreamDesc { - // Listen on stream context to cleanup when the stream context is - // canceled. Also listen for the addrConn's context in case the - // addrConn is closed or reconnects to a different address. In all - // other cases, an error should already be injected into the recv - // buffer by the transport, which the client will eventually receive, - // and then we will cancel the stream's context in - // addrConnStream.finish. + // Listen on cc and stream contexts to cleanup when the user closes the + // ClientConn or cancels the stream context. In all other cases, an error + // should already be injected into the recv buffer by the transport, which + // the client will eventually receive, and then we will cancel the stream's + // context in clientStream.finish. go func() { - ac.mu.Lock() - acCtx := ac.ctx - ac.mu.Unlock() select { - case <-acCtx.Done(): + case <-ac.ctx.Done(): as.finish(status.Error(codes.Canceled, "grpc: the SubConn is closing")) case <-ctx.Done(): as.finish(toRPCErr(ctx.Err())) @@ -1354,7 +1303,7 @@ func (as *addrConnStream) Context() context.Context { return as.s.Context() } -func (as *addrConnStream) SendMsg(m any) (err error) { +func (as *addrConnStream) SendMsg(m interface{}) (err error) { defer func() { if err != nil && err != io.EOF { // Call finish on the client stream for errors generated by this SendMsg @@ -1399,7 +1348,7 @@ func (as *addrConnStream) SendMsg(m any) (err error) { return nil } -func (as *addrConnStream) RecvMsg(m any) (err error) { +func (as *addrConnStream) RecvMsg(m interface{}) (err error) { defer func() { if err != nil || !as.desc.ServerStreams { // err != nil or non-server-streaming indicates end of stream. 
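The clientStream and addrConnStream hunks above rely on RecvMsg returning io.EOF once a stream finishes cleanly, with any failure carried in the returned error. A generic helper in that style; grpc.ClientStream is the real interface, the two callbacks are placeholders:

package streamutil

import (
	"io"

	"google.golang.org/grpc"
)

// Drain receives from cs until the server closes the stream. newMsg must
// return a fresh message for every RecvMsg call; handle consumes each one.
func Drain(cs grpc.ClientStream, newMsg func() interface{}, handle func(interface{}) error) error {
	for {
		m := newMsg()
		if err := cs.RecvMsg(m); err != nil {
			if err == io.EOF {
				return nil // stream completed successfully
			}
			return err // RPC status is carried in err
		}
		if err := handle(m); err != nil {
			return err
		}
	}
}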
@@ -1515,10 +1464,7 @@ type ServerStream interface { // It is safe to have a goroutine calling SendMsg and another goroutine // calling RecvMsg on the same stream at the same time, but it is not safe // to call SendMsg on the same stream in different goroutines. - // - // It is not safe to modify the message after calling SendMsg. Tracing - // libraries and stats handlers may use the message lazily. - SendMsg(m any) error + SendMsg(m interface{}) error // RecvMsg blocks until it receives a message into m or the stream is // done. It returns io.EOF when the client has performed a CloseSend. On // any non-EOF error, the stream is aborted and the error contains the @@ -1527,7 +1473,7 @@ type ServerStream interface { // It is safe to have a goroutine calling SendMsg and another goroutine // calling RecvMsg on the same stream at the same time, but it is not // safe to call RecvMsg on the same stream in different goroutines. - RecvMsg(m any) error + RecvMsg(m interface{}) error } // serverStream implements a server side Stream. @@ -1543,8 +1489,6 @@ type serverStream struct { comp encoding.Compressor decomp encoding.Compressor - sendCompressorName string - maxReceiveMessageSize int maxSendMessageSize int trInfo *traceInfo @@ -1592,7 +1536,7 @@ func (ss *serverStream) SendHeader(md metadata.MD) error { } ss.serverHeaderBinlogged = true for _, binlog := range ss.binlogs { - binlog.Log(ss.ctx, sh) + binlog.Log(sh) } } return err @@ -1608,7 +1552,7 @@ func (ss *serverStream) SetTrailer(md metadata.MD) { ss.s.SetTrailer(md) } -func (ss *serverStream) SendMsg(m any) (err error) { +func (ss *serverStream) SendMsg(m interface{}) (err error) { defer func() { if ss.trInfo != nil { ss.mu.Lock() @@ -1616,7 +1560,7 @@ func (ss *serverStream) SendMsg(m any) (err error) { if err == nil { ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) } else { - ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) ss.trInfo.tr.SetError() } } @@ -1637,13 +1581,6 @@ func (ss *serverStream) SendMsg(m any) (err error) { } }() - // Server handler could have set new compressor by calling SetSendCompressor. - // In case it is set, we need to use it for compressing outbound message. 
- if sendCompressorsName := ss.s.SendCompress(); sendCompressorsName != ss.sendCompressorName { - ss.comp = encoding.GetCompressor(sendCompressorsName) - ss.sendCompressorName = sendCompressorsName - } - // load hdr, payload, data hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp) if err != nil { @@ -1665,14 +1602,14 @@ func (ss *serverStream) SendMsg(m any) (err error) { } ss.serverHeaderBinlogged = true for _, binlog := range ss.binlogs { - binlog.Log(ss.ctx, sh) + binlog.Log(sh) } } sm := &binarylog.ServerMessage{ Message: data, } for _, binlog := range ss.binlogs { - binlog.Log(ss.ctx, sm) + binlog.Log(sm) } } if len(ss.statsHandler) != 0 { @@ -1683,7 +1620,7 @@ func (ss *serverStream) SendMsg(m any) (err error) { return nil } -func (ss *serverStream) RecvMsg(m any) (err error) { +func (ss *serverStream) RecvMsg(m interface{}) (err error) { defer func() { if ss.trInfo != nil { ss.mu.Lock() @@ -1691,7 +1628,7 @@ func (ss *serverStream) RecvMsg(m any) (err error) { if err == nil { ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) } else if err != io.EOF { - ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) ss.trInfo.tr.SetError() } } @@ -1720,7 +1657,7 @@ func (ss *serverStream) RecvMsg(m any) (err error) { if len(ss.binlogs) != 0 { chc := &binarylog.ClientHalfClose{} for _, binlog := range ss.binlogs { - binlog.Log(ss.ctx, chc) + binlog.Log(chc) } } return err @@ -1736,10 +1673,9 @@ func (ss *serverStream) RecvMsg(m any) (err error) { RecvTime: time.Now(), Payload: m, // TODO truncate large payload. - Data: payInfo.uncompressedBytes, - Length: len(payInfo.uncompressedBytes), - WireLength: payInfo.compressedLength + headerLen, - CompressedLength: payInfo.compressedLength, + Data: payInfo.uncompressedBytes, + WireLength: payInfo.wireLength + headerLen, + Length: len(payInfo.uncompressedBytes), }) } } @@ -1748,7 +1684,7 @@ func (ss *serverStream) RecvMsg(m any) (err error) { Message: payInfo.uncompressedBytes, } for _, binlog := range ss.binlogs { - binlog.Log(ss.ctx, cm) + binlog.Log(cm) } } return nil @@ -1763,7 +1699,7 @@ func MethodFromServerStream(stream ServerStream) (string, bool) { // prepareMsg returns the hdr, payload and data // using the compressors passed or using the // passed preparedmsg -func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) { +func prepareMsg(m interface{}, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) { if preparedMsg, ok := m.(*PreparedMsg); ok { return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil } diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go index 07f01257..bfa5dfa4 100644 --- a/vendor/google.golang.org/grpc/tap/tap.go +++ b/vendor/google.golang.org/grpc/tap/tap.go @@ -27,8 +27,6 @@ package tap import ( "context" - - "google.golang.org/grpc/metadata" ) // Info defines the relevant information needed by the handles. @@ -36,10 +34,6 @@ type Info struct { // FullMethodName is the string of grpc method (in the format of // /package.service/method). FullMethodName string - - // Header contains the header metadata received. - Header metadata.MD - // TODO: More to be added. 
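The serverStream hunks above are what ultimately deliver header and trailer metadata; inside a handler, the same metadata is attached through the package-level SetHeader/SendHeader/SetTrailer helpers documented earlier in this diff. A small sketch with made-up keys:

package handlers

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

// annotate shows the usual pattern inside a unary handler body: headers go out
// with the first response (or an explicit SendHeader), trailers when the RPC ends.
func annotate(ctx context.Context) error {
	if err := grpc.SetHeader(ctx, metadata.Pairs("x-server-version", "demo")); err != nil {
		return err
	}
	return grpc.SetTrailer(ctx, metadata.Pairs("x-processed", "true"))
}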
} diff --git a/vendor/google.golang.org/grpc/trace.go b/vendor/google.golang.org/grpc/trace.go index 9ded7932..07a2d26b 100644 --- a/vendor/google.golang.org/grpc/trace.go +++ b/vendor/google.golang.org/grpc/trace.go @@ -97,8 +97,8 @@ func truncate(x string, l int) string { // payload represents an RPC request or response payload. type payload struct { - sent bool // whether this is an outgoing payload - msg any // e.g. a proto.Message + sent bool // whether this is an outgoing payload + msg interface{} // e.g. a proto.Message // TODO(dsymonds): add stringifying info to codec, and limit how much we hold here? } @@ -111,7 +111,7 @@ func (p payload) String() string { type fmtStringer struct { format string - a []any + a []interface{} } func (f *fmtStringer) String() string { diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 1ad1ba2a..2198e709 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.61.0" +const Version = "1.51.0" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh index 5da38a40..bd8e0cdb 100644 --- a/vendor/google.golang.org/grpc/vet.sh +++ b/vendor/google.golang.org/grpc/vet.sh @@ -35,13 +35,22 @@ if [[ "$1" = "-install" ]]; then # Install the pinned versions as defined in module tools. pushd ./test/tools go install \ + golang.org/x/lint/golint \ golang.org/x/tools/cmd/goimports \ honnef.co/go/tools/cmd/staticcheck \ github.com/client9/misspell/cmd/misspell popd if [[ -z "${VET_SKIP_PROTO}" ]]; then - if [[ "${GITHUB_ACTIONS}" = "true" ]]; then - PROTOBUF_VERSION=22.0 # a.k.a v4.22.0 in pb.go files. + if [[ "${TRAVIS}" = "true" ]]; then + PROTOBUF_VERSION=3.14.0 + PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip + pushd /home/travis + wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME} + unzip ${PROTOC_FILENAME} + bin/protoc --version + popd + elif [[ "${GITHUB_ACTIONS}" = "true" ]]; then + PROTOBUF_VERSION=3.14.0 PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip pushd /home/runner/go wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME} @@ -57,16 +66,6 @@ elif [[ "$#" -ne 0 ]]; then die "Unknown argument(s): $*" fi -# - Check that generated proto files are up to date. -if [[ -z "${VET_SKIP_PROTO}" ]]; then - make proto && git status --porcelain 2>&1 | fail_on_output || \ - (git status; git --no-pager diff; exit 1) -fi - -if [[ -n "${VET_ONLY_PROTO}" ]]; then - exit 0 -fi - # - Ensure all source files contain a copyright message. # (Done in two parts because Darwin "git grep" has broken support for compound # exclusion matches.) @@ -76,19 +75,12 @@ fi not grep 'func Test[^(]' *_test.go not grep 'func Test[^(]' test/*.go -# - Check for typos in test function names -git grep 'func (s) ' -- "*_test.go" | not grep -v 'func (s) Test' -git grep 'func [A-Z]' -- "*_test.go" | not grep -v 'func Test\|Benchmark\|Example' - # - Do not import x/net/context. not git grep -l 'x/net/context' -- "*.go" # - Do not import math/rand for real library code. Use internal/grpcrand for # thread safety. -git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^interop/stress\|grpcrand\|^benchmark\|wrr_test' - -# - Do not use "interface{}"; use "any" instead. 
-git grep -l 'interface{}' -- "*.go" 2>&1 | not grep -v '\.pb\.go\|protoc-gen-go-grpc\|grpc_testing_not_regenerate' +git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test' # - Do not call grpclog directly. Use grpclog.Component instead. git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpclog.F' --or -e 'grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go' @@ -96,15 +88,20 @@ git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpc # - Ensure all ptypes proto packages are renamed when importing. not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go" -# - Ensure all usages of grpc_testing package are renamed when importing. -not git grep "\(import \|^\s*\)\"google.golang.org/grpc/interop/grpc_testing" -- "*.go" - # - Ensure all xds proto imports are renamed to *pb or *grpc. git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*.pb.go' | not grep -v 'pb "\|grpc "' misspell -error . -# - gofmt, goimports, go vet, go mod tidy. +# - Check that generated proto files are up to date. +if [[ -z "${VET_SKIP_PROTO}" ]]; then + PATH="/home/travis/bin:${PATH}" make proto && \ + git status --porcelain 2>&1 | fail_on_output || \ + (git status; git --no-pager diff; exit 1) +fi + +# - gofmt, goimports, golint (with exceptions for generated code), go vet, +# go mod tidy. # Perform these checks on each module inside gRPC. for MOD_FILE in $(find . -name 'go.mod'); do MOD_DIR=$(dirname ${MOD_FILE}) @@ -112,79 +109,104 @@ for MOD_FILE in $(find . -name 'go.mod'); do go vet -all ./... | fail_on_output gofmt -s -d -l . 2>&1 | fail_on_output goimports -l . 2>&1 | not grep -vE "\.pb\.go" + golint ./... 2>&1 | not grep -vE "/grpc_testing_not_regenerate/.*\.pb\.go:" - go mod tidy -compat=1.19 + go mod tidy git status --porcelain 2>&1 | fail_on_output || \ (git status; git --no-pager diff; exit 1) popd done # - Collection of static analysis checks +# +# TODO(dfawley): don't use deprecated functions in examples or first-party +# plugins. SC_OUT="$(mktemp)" -staticcheck -go 1.19 -checks 'all' ./... > "${SC_OUT}" || true - -# Error for anything other than checks that need exclusions. -grep -v "(ST1000)" "${SC_OUT}" | grep -v "(SA1019)" | grep -v "(ST1003)" | not grep -v "(ST1019)\|\(other import of\)" - -# Exclude underscore checks for generated code. -grep "(ST1003)" "${SC_OUT}" | not grep -v '\(.pb.go:\)\|\(code_string_test.go:\)\|\(grpc_testing_not_regenerate\)' - -# Error for duplicate imports not including grpc protos. -grep "(ST1019)\|\(other import of\)" "${SC_OUT}" | not grep -Fv 'XXXXX PleaseIgnoreUnused -channelz/grpc_channelz_v1" -go-control-plane/envoy -grpclb/grpc_lb_v1" -health/grpc_health_v1" -interop/grpc_testing" -orca/v3" -proto/grpc_gcp" -proto/grpc_lookup_v1" -reflection/grpc_reflection_v1" -reflection/grpc_reflection_v1alpha" -XXXXX PleaseIgnoreUnused' - -# Error for any package comments not in generated code. -grep "(ST1000)" "${SC_OUT}" | not grep -v "\.pb\.go:" - -# Only ignore the following deprecated types/fields/functions and exclude -# generated code. -grep "(SA1019)" "${SC_OUT}" | not grep -Fv 'XXXXX PleaseIgnoreUnused -XXXXX Protobuf related deprecation errors: -"github.com/golang/protobuf -.pb.go: -grpc_testing_not_regenerate -: ptypes. -proto.RegisterType -XXXXX gRPC internal usage deprecation errors: -"google.golang.org/grpc -: grpc. -: v1alpha. -: v1alphareflectionpb. 
-BalancerAttributes is deprecated: -CredsBundle is deprecated: -Metadata is deprecated: use Attributes instead. -NewSubConn is deprecated: -OverrideServerName is deprecated: -RemoveSubConn is deprecated: -SecurityVersion is deprecated: +staticcheck -go 1.9 -checks 'inherit,-ST1015' ./... > "${SC_OUT}" || true +# Error if anything other than deprecation warnings are printed. +not grep -v "is deprecated:.*SA1019" "${SC_OUT}" +# Only ignore the following deprecated types/fields/functions. +not grep -Fv '.CredsBundle +.HeaderMap +.Metadata is deprecated: use Attributes +.NewAddress +.NewServiceConfig +.Type is deprecated: use Attributes +BuildVersion is deprecated +balancer.ErrTransientFailure +balancer.Picker +extDesc.Filename is deprecated +github.com/golang/protobuf/jsonpb is deprecated +grpc.CallCustomCodec +grpc.Code +grpc.Compressor +grpc.CustomCodec +grpc.Decompressor +grpc.MaxMsgSize +grpc.MethodConfig +grpc.NewGZIPCompressor +grpc.NewGZIPDecompressor +grpc.RPCCompressor +grpc.RPCDecompressor +grpc.ServiceConfig +grpc.WithCompressor +grpc.WithDecompressor +grpc.WithDialer +grpc.WithMaxMsgSize +grpc.WithServiceConfig +grpc.WithTimeout +http.CloseNotifier +info.SecurityVersion +proto is deprecated +proto.InternalMessageInfo is deprecated +proto.EnumName is deprecated +proto.ErrInternalBadWireType is deprecated +proto.FileDescriptor is deprecated +proto.Marshaler is deprecated +proto.MessageType is deprecated +proto.RegisterEnum is deprecated +proto.RegisterFile is deprecated +proto.RegisterType is deprecated +proto.RegisterExtension is deprecated +proto.RegisteredExtension is deprecated +proto.RegisteredExtensions is deprecated +proto.RegisterMapType is deprecated +proto.Unmarshaler is deprecated +resolver.Backend +resolver.GRPCLB Target is deprecated: Use the Target field in the BuildOptions instead. -UpdateAddresses is deprecated: -UpdateSubConnState is deprecated: -balancer.ErrTransientFailure is deprecated: -grpc/reflection/v1alpha/reflection.proto -XXXXX xDS deprecated fields we support -.ExactMatch -.PrefixMatch -.SafeRegexMatch -.SuffixMatch -GetContainsMatch -GetExactMatch -GetMatchSubjectAltNames -GetPrefixMatch -GetSafeRegexMatch -GetSuffixMatch -GetTlsCertificateCertificateProviderInstance -GetValidationContextCertificateProviderInstance -XXXXX PleaseIgnoreUnused' +xxx_messageInfo_ +' "${SC_OUT}" + +# - special golint on package comments. +lint_package_comment_per_package() { + # Number of files in this go package. + fileCount=$(go list -f '{{len .GoFiles}}' $1) + if [ ${fileCount} -eq 0 ]; then + return 0 + fi + # Number of package errors generated by golint. + lintPackageCommentErrorsCount=$(golint --min_confidence 0 $1 | grep -c "should have a package comment") + # golint complains about every file that's missing the package comment. If the + # number of files for this package is greater than the number of errors, there's + # at least one file with package comment, good. Otherwise, fail. 
+ if [ ${fileCount} -le ${lintPackageCommentErrorsCount} ]; then + echo "Package $1 (with ${fileCount} files) is missing package comment" + return 1 + fi +} +lint_package_comment() { + set +ex + + count=0 + for i in $(go list ./...); do + lint_package_comment_per_package "$i" + ((count += $?)) + done + + set -ex + return $count +} +lint_package_comment echo SUCCESS diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go index bb2966e3..5f28148d 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go @@ -11,7 +11,6 @@ import ( "strconv" "strings" - "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/encoding/json" "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/errors" @@ -24,7 +23,7 @@ import ( "google.golang.org/protobuf/reflect/protoregistry" ) -// Unmarshal reads the given []byte into the given [proto.Message]. +// Unmarshal reads the given []byte into the given proto.Message. // The provided message must be mutable (e.g., a non-nil pointer to a message). func Unmarshal(b []byte, m proto.Message) error { return UnmarshalOptions{}.Unmarshal(b, m) @@ -38,7 +37,7 @@ type UnmarshalOptions struct { // required fields will not return an error. AllowPartial bool - // If DiscardUnknown is set, unknown fields and enum name values are ignored. + // If DiscardUnknown is set, unknown fields are ignored. DiscardUnknown bool // Resolver is used for looking up types when unmarshaling @@ -48,13 +47,9 @@ type UnmarshalOptions struct { protoregistry.MessageTypeResolver protoregistry.ExtensionTypeResolver } - - // RecursionLimit limits how deeply messages may be nested. - // If zero, a default limit is applied. - RecursionLimit int } -// Unmarshal reads the given []byte and populates the given [proto.Message] +// Unmarshal reads the given []byte and populates the given proto.Message // using options in the UnmarshalOptions object. // It will clear the message first before setting the fields. // If it returns an error, the given message may be partially set. @@ -72,9 +67,6 @@ func (o UnmarshalOptions) unmarshal(b []byte, m proto.Message) error { if o.Resolver == nil { o.Resolver = protoregistry.GlobalTypes } - if o.RecursionLimit == 0 { - o.RecursionLimit = protowire.DefaultRecursionLimit - } dec := decoder{json.NewDecoder(b), o} if err := dec.unmarshalMessage(m.ProtoReflect(), false); err != nil { @@ -102,7 +94,7 @@ type decoder struct { } // newError returns an error object with position info. -func (d decoder) newError(pos int, f string, x ...any) error { +func (d decoder) newError(pos int, f string, x ...interface{}) error { line, column := d.Position(pos) head := fmt.Sprintf("(line %d:%d): ", line, column) return errors.New(head+f, x...) @@ -114,7 +106,7 @@ func (d decoder) unexpectedTokenError(tok json.Token) error { } // syntaxError returns a syntax error for given position. -func (d decoder) syntaxError(pos int, f string, x ...any) error { +func (d decoder) syntaxError(pos int, f string, x ...interface{}) error { line, column := d.Position(pos) head := fmt.Sprintf("syntax error (line %d:%d): ", line, column) return errors.New(head+f, x...) @@ -122,10 +114,6 @@ func (d decoder) syntaxError(pos int, f string, x ...any) error { // unmarshalMessage unmarshals a message into the given protoreflect.Message. 
func (d decoder) unmarshalMessage(m protoreflect.Message, skipTypeURL bool) error { - d.opts.RecursionLimit-- - if d.opts.RecursionLimit < 0 { - return errors.New("exceeded max recursion depth") - } if unmarshal := wellKnownTypeUnmarshaler(m.Descriptor().FullName()); unmarshal != nil { return unmarshal(d, m) } @@ -278,9 +266,7 @@ func (d decoder) unmarshalSingular(m protoreflect.Message, fd protoreflect.Field if err != nil { return err } - if val.IsValid() { - m.Set(fd, val) - } + m.Set(fd, val) return nil } @@ -343,7 +329,7 @@ func (d decoder) unmarshalScalar(fd protoreflect.FieldDescriptor) (protoreflect. } case protoreflect.EnumKind: - if v, ok := unmarshalEnum(tok, fd, d.opts.DiscardUnknown); ok { + if v, ok := unmarshalEnum(tok, fd); ok { return v, nil } @@ -488,7 +474,7 @@ func unmarshalBytes(tok json.Token) (protoreflect.Value, bool) { return protoreflect.ValueOfBytes(b), true } -func unmarshalEnum(tok json.Token, fd protoreflect.FieldDescriptor, discardUnknown bool) (protoreflect.Value, bool) { +func unmarshalEnum(tok json.Token, fd protoreflect.FieldDescriptor) (protoreflect.Value, bool) { switch tok.Kind() { case json.String: // Lookup EnumNumber based on name. @@ -496,9 +482,6 @@ func unmarshalEnum(tok json.Token, fd protoreflect.FieldDescriptor, discardUnkno if enumVal := fd.Enum().Values().ByName(protoreflect.Name(s)); enumVal != nil { return protoreflect.ValueOfEnum(enumVal.Number()), true } - if discardUnknown { - return protoreflect.Value{}, true - } case json.Number: if n, ok := tok.Int(32); ok { @@ -559,9 +542,7 @@ func (d decoder) unmarshalList(list protoreflect.List, fd protoreflect.FieldDesc if err != nil { return err } - if val.IsValid() { - list.Append(val) - } + list.Append(val) } } @@ -628,9 +609,8 @@ Loop: if err != nil { return err } - if pval.IsValid() { - mmap.Set(pkey, pval) - } + + mmap.Set(pkey, pval) } return nil diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/doc.go b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go index ae71007c..21d5d2cb 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/doc.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go @@ -6,6 +6,6 @@ // format. It follows the guide at // https://protobuf.dev/programming-guides/proto3#json. // -// This package produces a different output than the standard [encoding/json] +// This package produces a different output than the standard "encoding/json" // package, which does not operate correctly on protocol buffer messages. package protojson diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go index 29846df2..66b95870 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go @@ -25,17 +25,15 @@ const defaultIndent = " " // Format formats the message as a multiline string. // This function is only intended for human consumption and ignores errors. -// Do not depend on the output being stable. Its output will change across -// different builds of your program, even when using the same version of the -// protobuf module. +// Do not depend on the output being stable. It may change over time across +// different versions of the program. func Format(m proto.Message) string { return MarshalOptions{Multiline: true}.Format(m) } -// Marshal writes the given [proto.Message] in JSON format using default options. -// Do not depend on the output being stable. 
Its output will change across -// different builds of your program, even when using the same version of the -// protobuf module. +// Marshal writes the given proto.Message in JSON format using default options. +// Do not depend on the output being stable. It may change over time across +// different versions of the program. func Marshal(m proto.Message) ([]byte, error) { return MarshalOptions{}.Marshal(m) } @@ -83,25 +81,6 @@ type MarshalOptions struct { // ╚═══════╧════════════════════════════╝ EmitUnpopulated bool - // EmitDefaultValues specifies whether to emit default-valued primitive fields, - // empty lists, and empty maps. The fields affected are as follows: - // ╔═══════╤════════════════════════════════════════╗ - // ║ JSON │ Protobuf field ║ - // ╠═══════╪════════════════════════════════════════╣ - // ║ false │ non-optional scalar boolean fields ║ - // ║ 0 │ non-optional scalar numeric fields ║ - // ║ "" │ non-optional scalar string/byte fields ║ - // ║ [] │ empty repeated fields ║ - // ║ {} │ empty map fields ║ - // ╚═══════╧════════════════════════════════════════╝ - // - // Behaves similarly to EmitUnpopulated, but does not emit "null"-value fields, - // i.e. presence-sensing fields that are omitted will remain omitted to preserve - // presence-sensing. - // EmitUnpopulated takes precedence over EmitDefaultValues since the former generates - // a strict superset of the latter. - EmitDefaultValues bool - // Resolver is used for looking up types when expanding google.protobuf.Any // messages. If nil, this defaults to using protoregistry.GlobalTypes. Resolver interface { @@ -112,9 +91,8 @@ type MarshalOptions struct { // Format formats the message as a string. // This method is only intended for human consumption and ignores errors. -// Do not depend on the output being stable. Its output will change across -// different builds of your program, even when using the same version of the -// protobuf module. +// Do not depend on the output being stable. It may change over time across +// different versions of the program. func (o MarshalOptions) Format(m proto.Message) string { if m == nil || !m.ProtoReflect().IsValid() { return "" // invalid syntax, but okay since this is for debugging @@ -124,10 +102,9 @@ func (o MarshalOptions) Format(m proto.Message) string { return string(b) } -// Marshal marshals the given [proto.Message] in the JSON format using options in -// Do not depend on the output being stable. Its output will change across -// different builds of your program, even when using the same version of the -// protobuf module. +// Marshal marshals the given proto.Message in the JSON format using options in +// MarshalOptions. Do not depend on the output being stable. It may change over +// time across different versions of the program. func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) { return o.marshal(nil, m) } @@ -201,11 +178,7 @@ func (m typeURLFieldRanger) Range(f func(protoreflect.FieldDescriptor, protorefl // unpopulatedFieldRanger wraps a protoreflect.Message and modifies its Range // method to additionally iterate over unpopulated fields. 
-type unpopulatedFieldRanger struct { - protoreflect.Message - - skipNull bool -} +type unpopulatedFieldRanger struct{ protoreflect.Message } func (m unpopulatedFieldRanger) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { fds := m.Descriptor().Fields() @@ -219,9 +192,6 @@ func (m unpopulatedFieldRanger) Range(f func(protoreflect.FieldDescriptor, proto isProto2Scalar := fd.Syntax() == protoreflect.Proto2 && fd.Default().IsValid() isSingularMessage := fd.Cardinality() != protoreflect.Repeated && fd.Message() != nil if isProto2Scalar || isSingularMessage { - if m.skipNull { - continue - } v = protoreflect.Value{} // use invalid value to emit null } if !f(fd, v) { @@ -247,11 +217,8 @@ func (e encoder) marshalMessage(m protoreflect.Message, typeURL string) error { defer e.EndObject() var fields order.FieldRanger = m - switch { - case e.opts.EmitUnpopulated: - fields = unpopulatedFieldRanger{Message: m, skipNull: false} - case e.opts.EmitDefaultValues: - fields = unpopulatedFieldRanger{Message: m, skipNull: true} + if e.opts.EmitUnpopulated { + fields = unpopulatedFieldRanger{m} } if typeURL != "" { fields = typeURLFieldRanger{fields, typeURL} diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go index 4b177c82..6c37d417 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go @@ -176,7 +176,7 @@ func (d decoder) unmarshalAny(m protoreflect.Message) error { // Use another decoder to parse the unread bytes for @type field. This // avoids advancing a read from current decoder because the current JSON // object may contain the fields of the embedded type. - dec := decoder{d.Clone(), UnmarshalOptions{RecursionLimit: d.opts.RecursionLimit}} + dec := decoder{d.Clone(), UnmarshalOptions{}} tok, err := findTypeURL(dec) switch err { case errEmptyObject: @@ -308,29 +308,48 @@ Loop: // array) in order to advance the read to the next JSON value. It relies on // the decoder returning an error if the types are not in valid sequence. func (d decoder) skipJSONValue() error { - var open int - for { - tok, err := d.Read() - if err != nil { - return err - } - switch tok.Kind() { - case json.ObjectClose, json.ArrayClose: - open-- - case json.ObjectOpen, json.ArrayOpen: - open++ - if open > d.opts.RecursionLimit { - return errors.New("exceeded max recursion depth") + tok, err := d.Read() + if err != nil { + return err + } + // Only need to continue reading for objects and arrays. + switch tok.Kind() { + case json.ObjectOpen: + for { + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case json.ObjectClose: + return nil + case json.Name: + // Skip object field value. + if err := d.skipJSONValue(); err != nil { + return err + } } - case json.EOF: - // This can only happen if there's a bug in Decoder.Read. - // Avoid an infinite loop if this does happen. - return errors.New("unexpected EOF") } - if open == 0 { - return nil + + case json.ArrayOpen: + for { + tok, err := d.Peek() + if err != nil { + return err + } + switch tok.Kind() { + case json.ArrayClose: + d.Read() + return nil + default: + // Skip array item. 
+ if err := d.skipJSONValue(); err != nil { + return err + } + } } } + return nil } // unmarshalAnyValue unmarshals the given custom-type message from the JSON diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go index 24bc98ac..4921b2d4 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go @@ -21,7 +21,7 @@ import ( "google.golang.org/protobuf/reflect/protoregistry" ) -// Unmarshal reads the given []byte into the given [proto.Message]. +// Unmarshal reads the given []byte into the given proto.Message. // The provided message must be mutable (e.g., a non-nil pointer to a message). func Unmarshal(b []byte, m proto.Message) error { return UnmarshalOptions{}.Unmarshal(b, m) @@ -51,7 +51,7 @@ type UnmarshalOptions struct { } } -// Unmarshal reads the given []byte and populates the given [proto.Message] +// Unmarshal reads the given []byte and populates the given proto.Message // using options in the UnmarshalOptions object. // The provided message must be mutable (e.g., a non-nil pointer to a message). func (o UnmarshalOptions) Unmarshal(b []byte, m proto.Message) error { @@ -84,7 +84,7 @@ type decoder struct { } // newError returns an error object with position info. -func (d decoder) newError(pos int, f string, x ...any) error { +func (d decoder) newError(pos int, f string, x ...interface{}) error { line, column := d.Position(pos) head := fmt.Sprintf("(line %d:%d): ", line, column) return errors.New(head+f, x...) @@ -96,7 +96,7 @@ func (d decoder) unexpectedTokenError(tok text.Token) error { } // syntaxError returns a syntax error for given position. -func (d decoder) syntaxError(pos int, f string, x ...any) error { +func (d decoder) syntaxError(pos int, f string, x ...interface{}) error { line, column := d.Position(pos) head := fmt.Sprintf("syntax error (line %d:%d): ", line, column) return errors.New(head+f, x...) @@ -739,9 +739,7 @@ func (d decoder) skipValue() error { case text.ListClose: return nil case text.MessageOpen: - if err := d.skipMessageValue(); err != nil { - return err - } + return d.skipMessageValue() default: // Skip items. This will not validate whether skipped values are // of the same type or not, same behavior as C++ diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go index 1f57e661..722a7b41 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go @@ -27,17 +27,15 @@ const defaultIndent = " " // Format formats the message as a multiline string. // This function is only intended for human consumption and ignores errors. -// Do not depend on the output being stable. Its output will change across -// different builds of your program, even when using the same version of the -// protobuf module. +// Do not depend on the output being stable. It may change over time across +// different versions of the program. func Format(m proto.Message) string { return MarshalOptions{Multiline: true}.Format(m) } -// Marshal writes the given [proto.Message] in textproto format using default -// options. Do not depend on the output being stable. Its output will change -// across different builds of your program, even when using the same version of -// the protobuf module. 
+// Marshal writes the given proto.Message in textproto format using default +// options. Do not depend on the output being stable. It may change over time +// across different versions of the program. func Marshal(m proto.Message) ([]byte, error) { return MarshalOptions{}.Marshal(m) } @@ -86,9 +84,8 @@ type MarshalOptions struct { // Format formats the message as a string. // This method is only intended for human consumption and ignores errors. -// Do not depend on the output being stable. Its output will change across -// different builds of your program, even when using the same version of the -// protobuf module. +// Do not depend on the output being stable. It may change over time across +// different versions of the program. func (o MarshalOptions) Format(m proto.Message) string { if m == nil || !m.ProtoReflect().IsValid() { return "" // invalid syntax, but okay since this is for debugging @@ -100,10 +97,9 @@ func (o MarshalOptions) Format(m proto.Message) string { return string(b) } -// Marshal writes the given [proto.Message] in textproto format using options in -// MarshalOptions object. Do not depend on the output being stable. Its output -// will change across different builds of your program, even when using the -// same version of the protobuf module. +// Marshal writes the given proto.Message in textproto format using options in +// MarshalOptions object. Do not depend on the output being stable. It may +// change over time across different versions of the program. func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) { return o.marshal(nil, m) } diff --git a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go index e942bc98..f4b4686c 100644 --- a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go +++ b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go @@ -6,7 +6,7 @@ // See https://protobuf.dev/programming-guides/encoding. // // For marshaling and unmarshaling entire protobuf messages, -// use the [google.golang.org/protobuf/proto] package instead. +// use the "google.golang.org/protobuf/proto" package instead. package protowire import ( @@ -87,7 +87,7 @@ func ParseError(n int) error { // ConsumeField parses an entire field record (both tag and value) and returns // the field number, the wire type, and the total length. -// This returns a negative length upon an error (see [ParseError]). +// This returns a negative length upon an error (see ParseError). // // The total length includes the tag header and the end group marker (if the // field is a group). @@ -104,8 +104,8 @@ func ConsumeField(b []byte) (Number, Type, int) { } // ConsumeFieldValue parses a field value and returns its length. -// This assumes that the field [Number] and wire [Type] have already been parsed. -// This returns a negative length upon an error (see [ParseError]). +// This assumes that the field Number and wire Type have already been parsed. +// This returns a negative length upon an error (see ParseError). // // When parsing a group, the length includes the end group marker and // the end group is verified to match the starting field number. @@ -164,7 +164,7 @@ func AppendTag(b []byte, num Number, typ Type) []byte { } // ConsumeTag parses b as a varint-encoded tag, reporting its length. -// This returns a negative length upon an error (see [ParseError]). +// This returns a negative length upon an error (see ParseError). 
func ConsumeTag(b []byte) (Number, Type, int) { v, n := ConsumeVarint(b) if n < 0 { @@ -263,7 +263,7 @@ func AppendVarint(b []byte, v uint64) []byte { } // ConsumeVarint parses b as a varint-encoded uint64, reporting its length. -// This returns a negative length upon an error (see [ParseError]). +// This returns a negative length upon an error (see ParseError). func ConsumeVarint(b []byte) (v uint64, n int) { var y uint64 if len(b) <= 0 { @@ -384,7 +384,7 @@ func AppendFixed32(b []byte, v uint32) []byte { } // ConsumeFixed32 parses b as a little-endian uint32, reporting its length. -// This returns a negative length upon an error (see [ParseError]). +// This returns a negative length upon an error (see ParseError). func ConsumeFixed32(b []byte) (v uint32, n int) { if len(b) < 4 { return 0, errCodeTruncated @@ -412,7 +412,7 @@ func AppendFixed64(b []byte, v uint64) []byte { } // ConsumeFixed64 parses b as a little-endian uint64, reporting its length. -// This returns a negative length upon an error (see [ParseError]). +// This returns a negative length upon an error (see ParseError). func ConsumeFixed64(b []byte) (v uint64, n int) { if len(b) < 8 { return 0, errCodeTruncated @@ -432,7 +432,7 @@ func AppendBytes(b []byte, v []byte) []byte { } // ConsumeBytes parses b as a length-prefixed bytes value, reporting its length. -// This returns a negative length upon an error (see [ParseError]). +// This returns a negative length upon an error (see ParseError). func ConsumeBytes(b []byte) (v []byte, n int) { m, n := ConsumeVarint(b) if n < 0 { @@ -456,7 +456,7 @@ func AppendString(b []byte, v string) []byte { } // ConsumeString parses b as a length-prefixed bytes value, reporting its length. -// This returns a negative length upon an error (see [ParseError]). +// This returns a negative length upon an error (see ParseError). func ConsumeString(b []byte) (v string, n int) { bb, n := ConsumeBytes(b) return string(bb), n @@ -471,7 +471,7 @@ func AppendGroup(b []byte, num Number, v []byte) []byte { // ConsumeGroup parses b as a group value until the trailing end group marker, // and verifies that the end marker matches the provided num. The value v // does not contain the end marker, while the length does contain the end marker. -// This returns a negative length upon an error (see [ParseError]). +// This returns a negative length upon an error (see ParseError). func ConsumeGroup(num Number, b []byte) (v []byte, n int) { n = ConsumeFieldValue(num, StartGroupType, b) if n < 0 { @@ -495,8 +495,8 @@ func SizeGroup(num Number, n int) int { return n + SizeTag(num) } -// DecodeTag decodes the field [Number] and wire [Type] from its unified form. -// The [Number] is -1 if the decoded field number overflows int32. +// DecodeTag decodes the field Number and wire Type from its unified form. +// The Number is -1 if the decoded field number overflows int32. // Other than overflow, this does not check for field number validity. func DecodeTag(x uint64) (Number, Type) { // NOTE: MessageSet allows for larger field numbers than normal. @@ -506,7 +506,7 @@ func DecodeTag(x uint64) (Number, Type) { return Number(x >> 3), Type(x & 7) } -// EncodeTag encodes the field [Number] and wire [Type] into its unified form. +// EncodeTag encodes the field Number and wire Type into its unified form. 
func EncodeTag(num Number, typ Type) uint64 { return uint64(num)<<3 | uint64(typ&7) } diff --git a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go index 87e46bd4..db5248e1 100644 --- a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go +++ b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go @@ -83,13 +83,7 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string { case protoreflect.FileImports: for i := 0; i < vs.Len(); i++ { var rs records - rv := reflect.ValueOf(vs.Get(i)) - rs.Append(rv, []methodAndName{ - {rv.MethodByName("Path"), "Path"}, - {rv.MethodByName("Package"), "Package"}, - {rv.MethodByName("IsPublic"), "IsPublic"}, - {rv.MethodByName("IsWeak"), "IsWeak"}, - }...) + rs.Append(reflect.ValueOf(vs.Get(i)), "Path", "Package", "IsPublic", "IsWeak") ss = append(ss, "{"+rs.Join()+"}") } return start + joinStrings(ss, allowMulti) + end @@ -98,26 +92,34 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string { for i := 0; i < vs.Len(); i++ { m := reflect.ValueOf(vs).MethodByName("Get") v := m.Call([]reflect.Value{reflect.ValueOf(i)})[0].Interface() - ss = append(ss, formatDescOpt(v.(protoreflect.Descriptor), false, allowMulti && !isEnumValue, nil)) + ss = append(ss, formatDescOpt(v.(protoreflect.Descriptor), false, allowMulti && !isEnumValue)) } return start + joinStrings(ss, allowMulti && isEnumValue) + end } } -type methodAndName struct { - method reflect.Value - name string +// descriptorAccessors is a list of accessors to print for each descriptor. +// +// Do not print all accessors since some contain redundant information, +// while others are pointers that we do not want to follow since the descriptor +// is actually a cyclic graph. +// +// Using a list allows us to print the accessors in a sensible order. 
+var descriptorAccessors = map[reflect.Type][]string{ + reflect.TypeOf((*protoreflect.FileDescriptor)(nil)).Elem(): {"Path", "Package", "Imports", "Messages", "Enums", "Extensions", "Services"}, + reflect.TypeOf((*protoreflect.MessageDescriptor)(nil)).Elem(): {"IsMapEntry", "Fields", "Oneofs", "ReservedNames", "ReservedRanges", "RequiredNumbers", "ExtensionRanges", "Messages", "Enums", "Extensions"}, + reflect.TypeOf((*protoreflect.FieldDescriptor)(nil)).Elem(): {"Number", "Cardinality", "Kind", "HasJSONName", "JSONName", "HasPresence", "IsExtension", "IsPacked", "IsWeak", "IsList", "IsMap", "MapKey", "MapValue", "HasDefault", "Default", "ContainingOneof", "ContainingMessage", "Message", "Enum"}, + reflect.TypeOf((*protoreflect.OneofDescriptor)(nil)).Elem(): {"Fields"}, // not directly used; must keep in sync with formatDescOpt + reflect.TypeOf((*protoreflect.EnumDescriptor)(nil)).Elem(): {"Values", "ReservedNames", "ReservedRanges"}, + reflect.TypeOf((*protoreflect.EnumValueDescriptor)(nil)).Elem(): {"Number"}, + reflect.TypeOf((*protoreflect.ServiceDescriptor)(nil)).Elem(): {"Methods"}, + reflect.TypeOf((*protoreflect.MethodDescriptor)(nil)).Elem(): {"Input", "Output", "IsStreamingClient", "IsStreamingServer"}, } func FormatDesc(s fmt.State, r rune, t protoreflect.Descriptor) { - io.WriteString(s, formatDescOpt(t, true, r == 'v' && (s.Flag('+') || s.Flag('#')), nil)) + io.WriteString(s, formatDescOpt(t, true, r == 'v' && (s.Flag('+') || s.Flag('#')))) } - -func InternalFormatDescOptForTesting(t protoreflect.Descriptor, isRoot, allowMulti bool, record func(string)) string { - return formatDescOpt(t, isRoot, allowMulti, record) -} - -func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool, record func(string)) string { +func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string { rv := reflect.ValueOf(t) rt := rv.MethodByName("ProtoType").Type().In(0) @@ -127,60 +129,26 @@ func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool, record fu } _, isFile := t.(protoreflect.FileDescriptor) - rs := records{ - allowMulti: allowMulti, - record: record, - } + rs := records{allowMulti: allowMulti} if t.IsPlaceholder() { if isFile { - rs.Append(rv, []methodAndName{ - {rv.MethodByName("Path"), "Path"}, - {rv.MethodByName("Package"), "Package"}, - {rv.MethodByName("IsPlaceholder"), "IsPlaceholder"}, - }...) + rs.Append(rv, "Path", "Package", "IsPlaceholder") } else { - rs.Append(rv, []methodAndName{ - {rv.MethodByName("FullName"), "FullName"}, - {rv.MethodByName("IsPlaceholder"), "IsPlaceholder"}, - }...) + rs.Append(rv, "FullName", "IsPlaceholder") } } else { switch { case isFile: - rs.Append(rv, methodAndName{rv.MethodByName("Syntax"), "Syntax"}) + rs.Append(rv, "Syntax") case isRoot: - rs.Append(rv, []methodAndName{ - {rv.MethodByName("Syntax"), "Syntax"}, - {rv.MethodByName("FullName"), "FullName"}, - }...) 
+ rs.Append(rv, "Syntax", "FullName") default: - rs.Append(rv, methodAndName{rv.MethodByName("Name"), "Name"}) + rs.Append(rv, "Name") } switch t := t.(type) { case protoreflect.FieldDescriptor: - accessors := []methodAndName{ - {rv.MethodByName("Number"), "Number"}, - {rv.MethodByName("Cardinality"), "Cardinality"}, - {rv.MethodByName("Kind"), "Kind"}, - {rv.MethodByName("HasJSONName"), "HasJSONName"}, - {rv.MethodByName("JSONName"), "JSONName"}, - {rv.MethodByName("HasPresence"), "HasPresence"}, - {rv.MethodByName("IsExtension"), "IsExtension"}, - {rv.MethodByName("IsPacked"), "IsPacked"}, - {rv.MethodByName("IsWeak"), "IsWeak"}, - {rv.MethodByName("IsList"), "IsList"}, - {rv.MethodByName("IsMap"), "IsMap"}, - {rv.MethodByName("MapKey"), "MapKey"}, - {rv.MethodByName("MapValue"), "MapValue"}, - {rv.MethodByName("HasDefault"), "HasDefault"}, - {rv.MethodByName("Default"), "Default"}, - {rv.MethodByName("ContainingOneof"), "ContainingOneof"}, - {rv.MethodByName("ContainingMessage"), "ContainingMessage"}, - {rv.MethodByName("Message"), "Message"}, - {rv.MethodByName("Enum"), "Enum"}, - } - for _, s := range accessors { - switch s.name { + for _, s := range descriptorAccessors[rt] { + switch s { case "MapKey": if k := t.MapKey(); k != nil { rs.recs = append(rs.recs, [2]string{"MapKey", k.Kind().String()}) @@ -189,20 +157,20 @@ func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool, record fu if v := t.MapValue(); v != nil { switch v.Kind() { case protoreflect.EnumKind: - rs.AppendRecs("MapValue", [2]string{"MapValue", string(v.Enum().FullName())}) + rs.recs = append(rs.recs, [2]string{"MapValue", string(v.Enum().FullName())}) case protoreflect.MessageKind, protoreflect.GroupKind: - rs.AppendRecs("MapValue", [2]string{"MapValue", string(v.Message().FullName())}) + rs.recs = append(rs.recs, [2]string{"MapValue", string(v.Message().FullName())}) default: - rs.AppendRecs("MapValue", [2]string{"MapValue", v.Kind().String()}) + rs.recs = append(rs.recs, [2]string{"MapValue", v.Kind().String()}) } } case "ContainingOneof": if od := t.ContainingOneof(); od != nil { - rs.AppendRecs("ContainingOneof", [2]string{"Oneof", string(od.Name())}) + rs.recs = append(rs.recs, [2]string{"Oneof", string(od.Name())}) } case "ContainingMessage": if t.IsExtension() { - rs.AppendRecs("ContainingMessage", [2]string{"Extendee", string(t.ContainingMessage().FullName())}) + rs.recs = append(rs.recs, [2]string{"Extendee", string(t.ContainingMessage().FullName())}) } case "Message": if !t.IsMap() { @@ -219,62 +187,13 @@ func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool, record fu ss = append(ss, string(fs.Get(i).Name())) } if len(ss) > 0 { - rs.AppendRecs("Fields", [2]string{"Fields", "[" + joinStrings(ss, false) + "]"}) + rs.recs = append(rs.recs, [2]string{"Fields", "[" + joinStrings(ss, false) + "]"}) } - - case protoreflect.FileDescriptor: - rs.Append(rv, []methodAndName{ - {rv.MethodByName("Path"), "Path"}, - {rv.MethodByName("Package"), "Package"}, - {rv.MethodByName("Imports"), "Imports"}, - {rv.MethodByName("Messages"), "Messages"}, - {rv.MethodByName("Enums"), "Enums"}, - {rv.MethodByName("Extensions"), "Extensions"}, - {rv.MethodByName("Services"), "Services"}, - }...) 
- - case protoreflect.MessageDescriptor: - rs.Append(rv, []methodAndName{ - {rv.MethodByName("IsMapEntry"), "IsMapEntry"}, - {rv.MethodByName("Fields"), "Fields"}, - {rv.MethodByName("Oneofs"), "Oneofs"}, - {rv.MethodByName("ReservedNames"), "ReservedNames"}, - {rv.MethodByName("ReservedRanges"), "ReservedRanges"}, - {rv.MethodByName("RequiredNumbers"), "RequiredNumbers"}, - {rv.MethodByName("ExtensionRanges"), "ExtensionRanges"}, - {rv.MethodByName("Messages"), "Messages"}, - {rv.MethodByName("Enums"), "Enums"}, - {rv.MethodByName("Extensions"), "Extensions"}, - }...) - - case protoreflect.EnumDescriptor: - rs.Append(rv, []methodAndName{ - {rv.MethodByName("Values"), "Values"}, - {rv.MethodByName("ReservedNames"), "ReservedNames"}, - {rv.MethodByName("ReservedRanges"), "ReservedRanges"}, - {rv.MethodByName("IsClosed"), "IsClosed"}, - }...) - - case protoreflect.EnumValueDescriptor: - rs.Append(rv, []methodAndName{ - {rv.MethodByName("Number"), "Number"}, - }...) - - case protoreflect.ServiceDescriptor: - rs.Append(rv, []methodAndName{ - {rv.MethodByName("Methods"), "Methods"}, - }...) - - case protoreflect.MethodDescriptor: - rs.Append(rv, []methodAndName{ - {rv.MethodByName("Input"), "Input"}, - {rv.MethodByName("Output"), "Output"}, - {rv.MethodByName("IsStreamingClient"), "IsStreamingClient"}, - {rv.MethodByName("IsStreamingServer"), "IsStreamingServer"}, - }...) + default: + rs.Append(rv, descriptorAccessors[rt]...) } - if m := rv.MethodByName("GoType"); m.IsValid() { - rs.Append(rv, methodAndName{m, "GoType"}) + if rv.MethodByName("GoType").IsValid() { + rs.Append(rv, "GoType") } } return start + rs.Join() + end @@ -283,34 +202,19 @@ func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool, record fu type records struct { recs [][2]string allowMulti bool - - // record is a function that will be called for every Append() or - // AppendRecs() call, to be used for testing with the - // InternalFormatDescOptForTesting function. - record func(string) } -func (rs *records) AppendRecs(fieldName string, newRecs [2]string) { - if rs.record != nil { - rs.record(fieldName) - } - rs.recs = append(rs.recs, newRecs) -} - -func (rs *records) Append(v reflect.Value, accessors ...methodAndName) { +func (rs *records) Append(v reflect.Value, accessors ...string) { for _, a := range accessors { - if rs.record != nil { - rs.record(a.name) - } var rv reflect.Value - if a.method.IsValid() { - rv = a.method.Call(nil)[0] + if m := v.MethodByName(a); m.IsValid() { + rv = m.Call(nil)[0] } if v.Kind() == reflect.Struct && !rv.IsValid() { - rv = v.FieldByName(a.name) + rv = v.FieldByName(a) } if !rv.IsValid() { - panic(fmt.Sprintf("unknown accessor: %v.%s", v.Type(), a.name)) + panic(fmt.Sprintf("unknown accessor: %v.%s", v.Type(), a)) } if _, ok := rv.Interface().(protoreflect.Value); ok { rv = rv.MethodByName("Interface").Call(nil)[0] @@ -357,7 +261,7 @@ func (rs *records) Append(v reflect.Value, accessors ...methodAndName) { default: s = fmt.Sprint(v) } - rs.recs = append(rs.recs, [2]string{a.name, s}) + rs.recs = append(rs.recs, [2]string{a, s}) } } diff --git a/vendor/google.golang.org/protobuf/internal/editiondefaults/defaults.go b/vendor/google.golang.org/protobuf/internal/editiondefaults/defaults.go deleted file mode 100644 index 14656b65..00000000 --- a/vendor/google.golang.org/protobuf/internal/editiondefaults/defaults.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package editiondefaults contains the binary representation of the editions -// defaults. -package editiondefaults - -import _ "embed" - -//go:embed editions_defaults.binpb -var Defaults []byte diff --git a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb deleted file mode 100644 index ff6a38360add36f53d48bb0863b701696e0d7b2d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 93 zcmd;*mUzal#C*w)K}(Q>QGiK;Nr72|(SYfa9TNv5m$bxlxFnMRqXeS@6Ht;7B*_4j Ve8H{+(u69m1u{(G8N0>{b^xZ!4_5#H diff --git a/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go deleted file mode 100644 index 029a6a12..00000000 --- a/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package editionssupport defines constants for editions that are supported. -package editionssupport - -import descriptorpb "google.golang.org/protobuf/types/descriptorpb" - -const ( - Minimum = descriptorpb.Edition_EDITION_PROTO2 - Maximum = descriptorpb.Edition_EDITION_2023 -) diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go index ea1d3e65..d043a6eb 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go @@ -121,7 +121,7 @@ func (d *Decoder) Read() (Token, error) { case ObjectClose: if len(d.openStack) == 0 || - d.lastToken.kind&(Name|comma) != 0 || + d.lastToken.kind == comma || d.openStack[len(d.openStack)-1] != ObjectOpen { return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) } @@ -214,7 +214,7 @@ func (d *Decoder) parseNext() (Token, error) { // newSyntaxError returns an error with line and column information useful for // syntax errors. -func (d *Decoder) newSyntaxError(pos int, f string, x ...any) error { +func (d *Decoder) newSyntaxError(pos int, f string, x ...interface{}) error { e := errors.New(f, x...) 
line, column := d.Position(pos) return errors.New("syntax error (line %d:%d): %v", line, column, e) diff --git a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go index 7e87c760..373d2083 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go @@ -32,7 +32,6 @@ var byteType = reflect.TypeOf(byte(0)) func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescriptors) protoreflect.FieldDescriptor { f := new(filedesc.Field) f.L0.ParentFile = filedesc.SurrogateProto2 - f.L1.EditionFeatures = f.L0.ParentFile.L1.EditionFeatures for len(tag) > 0 { i := strings.IndexByte(tag, ',') if i < 0 { @@ -108,7 +107,8 @@ func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescri f.L1.StringName.InitJSON(jsonName) } case s == "packed": - f.L1.EditionFeatures.IsPacked = true + f.L1.HasPacked = true + f.L1.IsPacked = true case strings.HasPrefix(s, "weak="): f.L1.IsWeak = true f.L1.Message = filedesc.PlaceholderMessage(protoreflect.FullName(s[len("weak="):])) diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go index 099b2bf4..87853e78 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go @@ -601,7 +601,7 @@ func (d *Decoder) consumeToken(kind Kind, size int, attrs uint8) Token { // newSyntaxError returns a syntax error with line and column information for // current position. -func (d *Decoder) newSyntaxError(f string, x ...any) error { +func (d *Decoder) newSyntaxError(f string, x ...interface{}) error { e := errors.New(f, x...) line, column := d.Position(len(d.orig) - len(d.in)) return errors.New("syntax error (line %d:%d): %v", line, column, e) diff --git a/vendor/google.golang.org/protobuf/internal/errors/errors.go b/vendor/google.golang.org/protobuf/internal/errors/errors.go index c2d6bd52..20c17b35 100644 --- a/vendor/google.golang.org/protobuf/internal/errors/errors.go +++ b/vendor/google.golang.org/protobuf/internal/errors/errors.go @@ -17,7 +17,7 @@ var Error = errors.New("protobuf error") // New formats a string according to the format specifier and arguments and // returns an error that has a "proto" prefix. -func New(f string, x ...any) error { +func New(f string, x ...interface{}) error { return &prefixError{s: format(f, x...)} } @@ -43,7 +43,7 @@ func (e *prefixError) Unwrap() error { // Wrap returns an error that has a "proto" prefix, the formatted string described // by the format specifier and arguments, and a suffix of err. The error wraps err. 
-func Wrap(err error, f string, x ...any) error { +func Wrap(err error, f string, x ...interface{}) error { return &wrapError{ s: format(f, x...), err: err, @@ -67,7 +67,7 @@ func (e *wrapError) Is(target error) bool { return target == Error } -func format(f string, x ...any) string { +func format(f string, x ...interface{}) string { // avoid "proto: " prefix when chaining for i := 0; i < len(x); i++ { switch e := x[i].(type) { @@ -87,18 +87,3 @@ func InvalidUTF8(name string) error { func RequiredNotSet(name string) error { return New("required field %v not set", name) } - -type SizeMismatchError struct { - Calculated, Measured int -} - -func (e *SizeMismatchError) Error() string { - return fmt.Sprintf("size mismatch (see https://github.com/golang/protobuf/issues/1609): calculated=%d, measured=%d", e.Calculated, e.Measured) -} - -func MismatchedSizeCalculation(calculated, measured int) error { - return &SizeMismatchError{ - Calculated: calculated, - Measured: measured, - } -} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index df53ff40..7c3689ba 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -7,7 +7,6 @@ package filedesc import ( "bytes" "fmt" - "strings" "sync" "sync/atomic" @@ -22,26 +21,11 @@ import ( "google.golang.org/protobuf/reflect/protoregistry" ) -// Edition is an Enum for proto2.Edition -type Edition int32 - -// These values align with the value of Enum in descriptor.proto which allows -// direct conversion between the proto enum and this enum. -const ( - EditionUnknown Edition = 0 - EditionProto2 Edition = 998 - EditionProto3 Edition = 999 - Edition2023 Edition = 1000 - EditionUnsupported Edition = 100000 -) - // The types in this file may have a suffix: // • L0: Contains fields common to all descriptors (except File) and // must be initialized up front. // • L1: Contains fields specific to a descriptor and -// must be initialized up front. If the associated proto uses Editions, the -// Editions features must always be resolved. If not explicitly set, the -// appropriate default must be resolved and set. +// must be initialized up front. // • L2: Contains fields that are lazily initialized when constructing // from the raw file descriptor. When constructing as a literal, the L2 // fields must be initialized up front. 
@@ -60,7 +44,6 @@ type ( } FileL1 struct { Syntax protoreflect.Syntax - Edition Edition // Only used if Syntax == Editions Path string Package protoreflect.FullName @@ -68,53 +51,21 @@ type ( Messages Messages Extensions Extensions Services Services - - EditionFeatures EditionFeatures } FileL2 struct { Options func() protoreflect.ProtoMessage Imports FileImports Locations SourceLocations } - - EditionFeatures struct { - // IsFieldPresence is true if field_presence is EXPLICIT - // https://protobuf.dev/editions/features/#field_presence - IsFieldPresence bool - // IsFieldPresence is true if field_presence is LEGACY_REQUIRED - // https://protobuf.dev/editions/features/#field_presence - IsLegacyRequired bool - // IsOpenEnum is true if enum_type is OPEN - // https://protobuf.dev/editions/features/#enum_type - IsOpenEnum bool - // IsPacked is true if repeated_field_encoding is PACKED - // https://protobuf.dev/editions/features/#repeated_field_encoding - IsPacked bool - // IsUTF8Validated is true if utf_validation is VERIFY - // https://protobuf.dev/editions/features/#utf8_validation - IsUTF8Validated bool - // IsDelimitedEncoded is true if message_encoding is DELIMITED - // https://protobuf.dev/editions/features/#message_encoding - IsDelimitedEncoded bool - // IsJSONCompliant is true if json_format is ALLOW - // https://protobuf.dev/editions/features/#json_format - IsJSONCompliant bool - // GenerateLegacyUnmarshalJSON determines if the plugin generates the - // UnmarshalJSON([]byte) error method for enums. - GenerateLegacyUnmarshalJSON bool - } ) func (fd *File) ParentFile() protoreflect.FileDescriptor { return fd } func (fd *File) Parent() protoreflect.Descriptor { return nil } func (fd *File) Index() int { return 0 } func (fd *File) Syntax() protoreflect.Syntax { return fd.L1.Syntax } - -// Not exported and just used to reconstruct the original FileDescriptor proto -func (fd *File) Edition() int32 { return int32(fd.L1.Edition) } -func (fd *File) Name() protoreflect.Name { return fd.L1.Package.Name() } -func (fd *File) FullName() protoreflect.FullName { return fd.L1.Package } -func (fd *File) IsPlaceholder() bool { return false } +func (fd *File) Name() protoreflect.Name { return fd.L1.Package.Name() } +func (fd *File) FullName() protoreflect.FullName { return fd.L1.Package } +func (fd *File) IsPlaceholder() bool { return false } func (fd *File) Options() protoreflect.ProtoMessage { if f := fd.lazyInit().Options; f != nil { return f() @@ -166,8 +117,6 @@ type ( } EnumL1 struct { eagerValues bool // controls whether EnumL2.Values is already populated - - EditionFeatures EditionFeatures } EnumL2 struct { Options func() protoreflect.ProtoMessage @@ -206,9 +155,6 @@ func (ed *Enum) lazyInit() *EnumL2 { ed.L0.ParentFile.lazyInit() // implicitly initializes L2 return ed.L2 } -func (ed *Enum) IsClosed() bool { - return !ed.L1.EditionFeatures.IsOpenEnum -} func (ed *EnumValue) Options() protoreflect.ProtoMessage { if f := ed.L1.Options; f != nil { @@ -232,8 +178,6 @@ type ( Extensions Extensions IsMapEntry bool // promoted from google.protobuf.MessageOptions IsMessageSet bool // promoted from google.protobuf.MessageOptions - - EditionFeatures EditionFeatures } MessageL2 struct { Options func() protoreflect.ProtoMessage @@ -258,12 +202,14 @@ type ( StringName stringName IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto IsWeak bool // promoted from google.protobuf.FieldOptions + HasPacked bool // promoted from google.protobuf.FieldOptions + IsPacked bool // promoted from 
google.protobuf.FieldOptions + HasEnforceUTF8 bool // promoted from google.protobuf.FieldOptions + EnforceUTF8 bool // promoted from google.protobuf.FieldOptions Default defaultValue ContainingOneof protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields Enum protoreflect.EnumDescriptor Message protoreflect.MessageDescriptor - - EditionFeatures EditionFeatures } Oneof struct { @@ -273,8 +219,6 @@ type ( OneofL1 struct { Options func() protoreflect.ProtoMessage Fields OneofFields // must be consistent with Message.Fields.ContainingOneof - - EditionFeatures EditionFeatures } ) @@ -324,30 +268,25 @@ func (fd *Field) Options() protoreflect.ProtoMessage { } func (fd *Field) Number() protoreflect.FieldNumber { return fd.L1.Number } func (fd *Field) Cardinality() protoreflect.Cardinality { return fd.L1.Cardinality } -func (fd *Field) Kind() protoreflect.Kind { - return fd.L1.Kind -} -func (fd *Field) HasJSONName() bool { return fd.L1.StringName.hasJSON } -func (fd *Field) JSONName() string { return fd.L1.StringName.getJSON(fd) } -func (fd *Field) TextName() string { return fd.L1.StringName.getText(fd) } +func (fd *Field) Kind() protoreflect.Kind { return fd.L1.Kind } +func (fd *Field) HasJSONName() bool { return fd.L1.StringName.hasJSON } +func (fd *Field) JSONName() string { return fd.L1.StringName.getJSON(fd) } +func (fd *Field) TextName() string { return fd.L1.StringName.getText(fd) } func (fd *Field) HasPresence() bool { - if fd.L1.Cardinality == protoreflect.Repeated { - return false - } - return fd.IsExtension() || fd.L1.EditionFeatures.IsFieldPresence || fd.L1.Message != nil || fd.L1.ContainingOneof != nil + return fd.L1.Cardinality != protoreflect.Repeated && (fd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 || fd.L1.Message != nil || fd.L1.ContainingOneof != nil) } func (fd *Field) HasOptionalKeyword() bool { return (fd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 && fd.L1.Cardinality == protoreflect.Optional && fd.L1.ContainingOneof == nil) || fd.L1.IsProto3Optional } func (fd *Field) IsPacked() bool { - if fd.L1.Cardinality != protoreflect.Repeated { - return false - } - switch fd.L1.Kind { - case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind: - return false + if !fd.L1.HasPacked && fd.L0.ParentFile.L1.Syntax != protoreflect.Proto2 && fd.L1.Cardinality == protoreflect.Repeated { + switch fd.L1.Kind { + case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind: + default: + return true + } } - return fd.L1.EditionFeatures.IsPacked + return fd.L1.IsPacked } func (fd *Field) IsExtension() bool { return false } func (fd *Field) IsWeak() bool { return fd.L1.IsWeak } @@ -383,10 +322,6 @@ func (fd *Field) Message() protoreflect.MessageDescriptor { } return fd.L1.Message } -func (fd *Field) IsMapEntry() bool { - parent, ok := fd.L0.Parent.(protoreflect.MessageDescriptor) - return ok && parent.IsMapEntry() -} func (fd *Field) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) } func (fd *Field) ProtoType(protoreflect.FieldDescriptor) {} @@ -398,7 +333,10 @@ func (fd *Field) ProtoType(protoreflect.FieldDescriptor) {} // WARNING: This method is exempt from the compatibility promise and may be // removed in the future without warning. 
func (fd *Field) EnforceUTF8() bool { - return fd.L1.EditionFeatures.IsUTF8Validated + if fd.L1.HasEnforceUTF8 { + return fd.L1.EnforceUTF8 + } + return fd.L0.ParentFile.L1.Syntax == protoreflect.Proto3 } func (od *Oneof) IsSynthetic() bool { @@ -421,16 +359,16 @@ type ( L2 *ExtensionL2 // protected by fileDesc.once } ExtensionL1 struct { - Number protoreflect.FieldNumber - Extendee protoreflect.MessageDescriptor - Cardinality protoreflect.Cardinality - Kind protoreflect.Kind - EditionFeatures EditionFeatures + Number protoreflect.FieldNumber + Extendee protoreflect.MessageDescriptor + Cardinality protoreflect.Cardinality + Kind protoreflect.Kind } ExtensionL2 struct { Options func() protoreflect.ProtoMessage StringName stringName IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto + IsPacked bool // promoted from google.protobuf.FieldOptions Default defaultValue Enum protoreflect.EnumDescriptor Message protoreflect.MessageDescriptor @@ -453,16 +391,7 @@ func (xd *Extension) HasPresence() bool { return xd.L1.Cardi func (xd *Extension) HasOptionalKeyword() bool { return (xd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 && xd.L1.Cardinality == protoreflect.Optional) || xd.lazyInit().IsProto3Optional } -func (xd *Extension) IsPacked() bool { - if xd.L1.Cardinality != protoreflect.Repeated { - return false - } - switch xd.L1.Kind { - case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind: - return false - } - return xd.L1.EditionFeatures.IsPacked -} +func (xd *Extension) IsPacked() bool { return xd.lazyInit().IsPacked } func (xd *Extension) IsExtension() bool { return true } func (xd *Extension) IsWeak() bool { return false } func (xd *Extension) IsList() bool { return xd.Cardinality() == protoreflect.Repeated } @@ -543,9 +472,8 @@ func (md *Method) ProtoInternal(pragma.DoNotImplement) {} // Surrogate files are can be used to create standalone descriptors // where the syntax is only information derived from the parent file. var ( - SurrogateProto2 = &File{L1: FileL1{Syntax: protoreflect.Proto2}, L2: &FileL2{}} - SurrogateProto3 = &File{L1: FileL1{Syntax: protoreflect.Proto3}, L2: &FileL2{}} - SurrogateEdition2023 = &File{L1: FileL1{Syntax: protoreflect.Editions, Edition: Edition2023}, L2: &FileL2{}} + SurrogateProto2 = &File{L1: FileL1{Syntax: protoreflect.Proto2}, L2: &FileL2{}} + SurrogateProto3 = &File{L1: FileL1{Syntax: protoreflect.Proto3}, L2: &FileL2{}} ) type ( @@ -587,34 +515,6 @@ func (s *stringName) InitJSON(name string) { s.nameJSON = name } -// Returns true if this field is structured like the synthetic field of a proto2 -// group. This allows us to expand our treatment of delimited fields without -// breaking proto2 files that have been upgraded to editions. -func isGroupLike(fd protoreflect.FieldDescriptor) bool { - // Groups are always group types. - if fd.Kind() != protoreflect.GroupKind { - return false - } - - // Group fields are always the lowercase type name. - if strings.ToLower(string(fd.Message().Name())) != string(fd.Name()) { - return false - } - - // Groups could only be defined in the same file they're used. - if fd.Message().ParentFile() != fd.ParentFile() { - return false - } - - // Group messages are always defined in the same scope as the field. File - // level extensions will compare NULL == NULL here, which is why the file - // comparison above is necessary to ensure both come from the same file. 
- if fd.IsExtension() { - return fd.Parent() == fd.Message().Parent() - } - return fd.ContainingMessage() == fd.Message().Parent() -} - func (s *stringName) lazyInit(fd protoreflect.FieldDescriptor) *stringName { s.once.Do(func() { if fd.IsExtension() { @@ -635,7 +535,7 @@ func (s *stringName) lazyInit(fd protoreflect.FieldDescriptor) *stringName { // Format the text name. s.nameText = string(fd.Name()) - if isGroupLike(fd) { + if fd.Kind() == protoreflect.GroupKind { s.nameText = string(fd.Message().Name()) } } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go index 8a57d60b..4a1584c9 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go @@ -5,7 +5,6 @@ package filedesc import ( - "fmt" "sync" "google.golang.org/protobuf/encoding/protowire" @@ -99,7 +98,6 @@ func (fd *File) unmarshalSeed(b []byte) { var prevField protoreflect.FieldNumber var numEnums, numMessages, numExtensions, numServices int var posEnums, posMessages, posExtensions, posServices int - var options []byte b0 := b for len(b) > 0 { num, typ, n := protowire.ConsumeTag(b) @@ -113,12 +111,8 @@ func (fd *File) unmarshalSeed(b []byte) { switch string(v) { case "proto2": fd.L1.Syntax = protoreflect.Proto2 - fd.L1.Edition = EditionProto2 case "proto3": fd.L1.Syntax = protoreflect.Proto3 - fd.L1.Edition = EditionProto3 - case "editions": - fd.L1.Syntax = protoreflect.Editions default: panic("invalid syntax") } @@ -126,8 +120,6 @@ func (fd *File) unmarshalSeed(b []byte) { fd.L1.Path = sb.MakeString(v) case genid.FileDescriptorProto_Package_field_number: fd.L1.Package = protoreflect.FullName(sb.MakeString(v)) - case genid.FileDescriptorProto_Options_field_number: - options = v case genid.FileDescriptorProto_EnumType_field_number: if prevField != genid.FileDescriptorProto_EnumType_field_number { if numEnums > 0 { @@ -162,13 +154,6 @@ func (fd *File) unmarshalSeed(b []byte) { numServices++ } prevField = num - case protowire.VarintType: - v, m := protowire.ConsumeVarint(b) - b = b[m:] - switch num { - case genid.FileDescriptorProto_Edition_field_number: - fd.L1.Edition = Edition(v) - } default: m := protowire.ConsumeFieldValue(num, typ, b) b = b[m:] @@ -179,14 +164,6 @@ func (fd *File) unmarshalSeed(b []byte) { // If syntax is missing, it is assumed to be proto2. 
if fd.L1.Syntax == 0 { fd.L1.Syntax = protoreflect.Proto2 - fd.L1.Edition = EditionProto2 - } - - fd.L1.EditionFeatures = getFeaturesFor(fd.L1.Edition) - - // Parse editions features from options if any - if options != nil { - fd.unmarshalSeedOptions(options) } // Must allocate all declarations before parsing each descriptor type @@ -242,33 +219,10 @@ func (fd *File) unmarshalSeed(b []byte) { } } -func (fd *File) unmarshalSeedOptions(b []byte) { - for b := b; len(b) > 0; { - num, typ, n := protowire.ConsumeTag(b) - b = b[n:] - switch typ { - case protowire.BytesType: - v, m := protowire.ConsumeBytes(b) - b = b[m:] - switch num { - case genid.FileOptions_Features_field_number: - if fd.Syntax() != protoreflect.Editions { - panic(fmt.Sprintf("invalid descriptor: using edition features in a proto with syntax %s", fd.Syntax())) - } - fd.L1.EditionFeatures = unmarshalFeatureSet(v, fd.L1.EditionFeatures) - } - default: - m := protowire.ConsumeFieldValue(num, typ, b) - b = b[m:] - } - } -} - func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { ed.L0.ParentFile = pf ed.L0.Parent = pd ed.L0.Index = i - ed.L1.EditionFeatures = featuresFromParentDesc(ed.Parent()) var numValues int for b := b; len(b) > 0; { @@ -321,7 +275,6 @@ func (md *Message) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protor md.L0.ParentFile = pf md.L0.Parent = pd md.L0.Index = i - md.L1.EditionFeatures = featuresFromParentDesc(md.Parent()) var prevField protoreflect.FieldNumber var numEnums, numMessages, numExtensions int @@ -427,13 +380,6 @@ func (md *Message) unmarshalSeedOptions(b []byte) { case genid.MessageOptions_MessageSetWireFormat_field_number: md.L1.IsMessageSet = protowire.DecodeBool(v) } - case protowire.BytesType: - v, m := protowire.ConsumeBytes(b) - b = b[m:] - switch num { - case genid.MessageOptions_Features_field_number: - md.L1.EditionFeatures = unmarshalFeatureSet(v, md.L1.EditionFeatures) - } default: m := protowire.ConsumeFieldValue(num, typ, b) b = b[m:] @@ -445,7 +391,6 @@ func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd prot xd.L0.ParentFile = pf xd.L0.Parent = pd xd.L0.Index = i - xd.L1.EditionFeatures = featuresFromParentDesc(pd) for len(b) > 0 { num, typ, n := protowire.ConsumeTag(b) @@ -470,38 +415,6 @@ func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd prot xd.L0.FullName = appendFullName(sb, pd.FullName(), v) case genid.FieldDescriptorProto_Extendee_field_number: xd.L1.Extendee = PlaceholderMessage(makeFullName(sb, v)) - case genid.FieldDescriptorProto_Options_field_number: - xd.unmarshalOptions(v) - } - default: - m := protowire.ConsumeFieldValue(num, typ, b) - b = b[m:] - } - } - - if xd.L1.Kind == protoreflect.MessageKind && xd.L1.EditionFeatures.IsDelimitedEncoded { - xd.L1.Kind = protoreflect.GroupKind - } -} - -func (xd *Extension) unmarshalOptions(b []byte) { - for len(b) > 0 { - num, typ, n := protowire.ConsumeTag(b) - b = b[n:] - switch typ { - case protowire.VarintType: - v, m := protowire.ConsumeVarint(b) - b = b[m:] - switch num { - case genid.FieldOptions_Packed_field_number: - xd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v) - } - case protowire.BytesType: - v, m := protowire.ConsumeBytes(b) - b = b[m:] - switch num { - case genid.FieldOptions_Features_field_number: - xd.L1.EditionFeatures = unmarshalFeatureSet(v, xd.L1.EditionFeatures) } default: m := protowire.ConsumeFieldValue(num, typ, b) @@ -534,7 +447,7 @@ func (sd *Service) unmarshalSeed(b []byte, sb 
*strs.Builder, pf *File, pd protor } var nameBuilderPool = sync.Pool{ - New: func() any { return new(strs.Builder) }, + New: func() interface{} { return new(strs.Builder) }, } func getBuilder() *strs.Builder { diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go index e56c91a8..736a19a7 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go @@ -45,11 +45,6 @@ func (file *File) resolveMessages() { case protoreflect.MessageKind, protoreflect.GroupKind: fd.L1.Message = file.resolveMessageDependency(fd.L1.Message, listFieldDeps, depIdx) depIdx++ - if fd.L1.Kind == protoreflect.GroupKind && (fd.IsMap() || fd.IsMapEntry()) { - // A map field might inherit delimited encoding from a file-wide default feature. - // But maps never actually use delimited encoding. (At least for now...) - fd.L1.Kind = protoreflect.MessageKind - } } // Default is resolved here since it depends on Enum being resolved. @@ -419,7 +414,6 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoref fd.L0.ParentFile = pf fd.L0.Parent = pd fd.L0.Index = i - fd.L1.EditionFeatures = featuresFromParentDesc(fd.Parent()) var rawTypeName []byte var rawOptions []byte @@ -471,12 +465,6 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoref b = b[m:] } } - if fd.L1.Kind == protoreflect.MessageKind && fd.L1.EditionFeatures.IsDelimitedEncoded { - fd.L1.Kind = protoreflect.GroupKind - } - if fd.L1.EditionFeatures.IsLegacyRequired { - fd.L1.Cardinality = protoreflect.Required - } if rawTypeName != nil { name := makeFullName(sb, rawTypeName) switch fd.L1.Kind { @@ -501,18 +489,13 @@ func (fd *Field) unmarshalOptions(b []byte) { b = b[m:] switch num { case genid.FieldOptions_Packed_field_number: - fd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v) + fd.L1.HasPacked = true + fd.L1.IsPacked = protowire.DecodeBool(v) case genid.FieldOptions_Weak_field_number: fd.L1.IsWeak = protowire.DecodeBool(v) case FieldOptions_EnforceUTF8: - fd.L1.EditionFeatures.IsUTF8Validated = protowire.DecodeBool(v) - } - case protowire.BytesType: - v, m := protowire.ConsumeBytes(b) - b = b[m:] - switch num { - case genid.FieldOptions_Features_field_number: - fd.L1.EditionFeatures = unmarshalFeatureSet(v, fd.L1.EditionFeatures) + fd.L1.HasEnforceUTF8 = true + fd.L1.EnforceUTF8 = protowire.DecodeBool(v) } default: m := protowire.ConsumeFieldValue(num, typ, b) @@ -574,6 +557,7 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { case genid.FieldDescriptorProto_TypeName_field_number: rawTypeName = v case genid.FieldDescriptorProto_Options_field_number: + xd.unmarshalOptions(v) rawOptions = appendOptions(rawOptions, v) } default: @@ -593,6 +577,25 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { xd.L2.Options = xd.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Field, rawOptions) } +func (xd *Extension) unmarshalOptions(b []byte) { + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FieldOptions_Packed_field_number: + xd.L2.IsPacked = protowire.DecodeBool(v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } +} + func (sd *Service) unmarshalFull(b []byte, sb *strs.Builder) { var rawMethods [][]byte var rawOptions []byte diff 
--git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go index f4107c05..30db19fd 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go @@ -8,7 +8,6 @@ package filedesc import ( "fmt" - "strings" "sync" "google.golang.org/protobuf/internal/descfmt" @@ -199,16 +198,6 @@ func (p *Fields) lazyInit() *Fields { if _, ok := p.byText[d.TextName()]; !ok { p.byText[d.TextName()] = d } - if isGroupLike(d) { - lowerJSONName := strings.ToLower(d.JSONName()) - if _, ok := p.byJSON[lowerJSONName]; !ok { - p.byJSON[lowerJSONName] = d - } - lowerTextName := strings.ToLower(d.TextName()) - if _, ok := p.byText[lowerTextName]; !ok { - p.byText[lowerTextName] = d - } - } if _, ok := p.byNum[d.Number()]; !ok { p.byNum[d.Number()] = d } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go deleted file mode 100644 index 11f5f356..00000000 --- a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package filedesc - -import ( - "fmt" - - "google.golang.org/protobuf/encoding/protowire" - "google.golang.org/protobuf/internal/editiondefaults" - "google.golang.org/protobuf/internal/genid" - "google.golang.org/protobuf/reflect/protoreflect" -) - -var defaultsCache = make(map[Edition]EditionFeatures) -var defaultsKeys = []Edition{} - -func init() { - unmarshalEditionDefaults(editiondefaults.Defaults) - SurrogateProto2.L1.EditionFeatures = getFeaturesFor(EditionProto2) - SurrogateProto3.L1.EditionFeatures = getFeaturesFor(EditionProto3) - SurrogateEdition2023.L1.EditionFeatures = getFeaturesFor(Edition2023) -} - -func unmarshalGoFeature(b []byte, parent EditionFeatures) EditionFeatures { - for len(b) > 0 { - num, _, n := protowire.ConsumeTag(b) - b = b[n:] - switch num { - case genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number: - v, m := protowire.ConsumeVarint(b) - b = b[m:] - parent.GenerateLegacyUnmarshalJSON = protowire.DecodeBool(v) - default: - panic(fmt.Sprintf("unkown field number %d while unmarshalling GoFeatures", num)) - } - } - return parent -} - -func unmarshalFeatureSet(b []byte, parent EditionFeatures) EditionFeatures { - for len(b) > 0 { - num, typ, n := protowire.ConsumeTag(b) - b = b[n:] - switch typ { - case protowire.VarintType: - v, m := protowire.ConsumeVarint(b) - b = b[m:] - switch num { - case genid.FeatureSet_FieldPresence_field_number: - parent.IsFieldPresence = v == genid.FeatureSet_EXPLICIT_enum_value || v == genid.FeatureSet_LEGACY_REQUIRED_enum_value - parent.IsLegacyRequired = v == genid.FeatureSet_LEGACY_REQUIRED_enum_value - case genid.FeatureSet_EnumType_field_number: - parent.IsOpenEnum = v == genid.FeatureSet_OPEN_enum_value - case genid.FeatureSet_RepeatedFieldEncoding_field_number: - parent.IsPacked = v == genid.FeatureSet_PACKED_enum_value - case genid.FeatureSet_Utf8Validation_field_number: - parent.IsUTF8Validated = v == genid.FeatureSet_VERIFY_enum_value - case genid.FeatureSet_MessageEncoding_field_number: - parent.IsDelimitedEncoded = v == genid.FeatureSet_DELIMITED_enum_value - case genid.FeatureSet_JsonFormat_field_number: - parent.IsJSONCompliant = v == 
genid.FeatureSet_ALLOW_enum_value - default: - panic(fmt.Sprintf("unkown field number %d while unmarshalling FeatureSet", num)) - } - case protowire.BytesType: - v, m := protowire.ConsumeBytes(b) - b = b[m:] - switch num { - case genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number: - parent = unmarshalGoFeature(v, parent) - } - } - } - - return parent -} - -func featuresFromParentDesc(parentDesc protoreflect.Descriptor) EditionFeatures { - var parentFS EditionFeatures - switch p := parentDesc.(type) { - case *File: - parentFS = p.L1.EditionFeatures - case *Message: - parentFS = p.L1.EditionFeatures - default: - panic(fmt.Sprintf("unknown parent type %T", parentDesc)) - } - return parentFS -} - -func unmarshalEditionDefault(b []byte) { - var ed Edition - var fs EditionFeatures - for len(b) > 0 { - num, typ, n := protowire.ConsumeTag(b) - b = b[n:] - switch typ { - case protowire.VarintType: - v, m := protowire.ConsumeVarint(b) - b = b[m:] - switch num { - case genid.FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_number: - ed = Edition(v) - } - case protowire.BytesType: - v, m := protowire.ConsumeBytes(b) - b = b[m:] - switch num { - case genid.FeatureSetDefaults_FeatureSetEditionDefault_FixedFeatures_field_number: - fs = unmarshalFeatureSet(v, fs) - case genid.FeatureSetDefaults_FeatureSetEditionDefault_OverridableFeatures_field_number: - fs = unmarshalFeatureSet(v, fs) - } - } - } - defaultsCache[ed] = fs - defaultsKeys = append(defaultsKeys, ed) -} - -func unmarshalEditionDefaults(b []byte) { - for len(b) > 0 { - num, _, n := protowire.ConsumeTag(b) - b = b[n:] - switch num { - case genid.FeatureSetDefaults_Defaults_field_number: - def, m := protowire.ConsumeBytes(b) - b = b[m:] - unmarshalEditionDefault(def) - case genid.FeatureSetDefaults_MinimumEdition_field_number, - genid.FeatureSetDefaults_MaximumEdition_field_number: - // We don't care about the minimum and maximum editions. If the - // edition we are looking for later on is not in the cache we know - // it is outside of the range between minimum and maximum edition. 
- _, m := protowire.ConsumeVarint(b) - b = b[m:] - default: - panic(fmt.Sprintf("unkown field number %d while unmarshalling EditionDefault", num)) - } - } -} - -func getFeaturesFor(ed Edition) EditionFeatures { - match := EditionUnknown - for _, key := range defaultsKeys { - if key > ed { - break - } - match = key - } - if match == EditionUnknown { - panic(fmt.Sprintf("unsupported edition: %v", ed)) - } - return defaultsCache[match] -} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go b/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go index bfb3b841..28240ebc 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go @@ -63,7 +63,6 @@ func (e PlaceholderEnum) Options() protoreflect.ProtoMessage { return des func (e PlaceholderEnum) Values() protoreflect.EnumValueDescriptors { return emptyEnumValues } func (e PlaceholderEnum) ReservedNames() protoreflect.Names { return emptyNames } func (e PlaceholderEnum) ReservedRanges() protoreflect.EnumRanges { return emptyEnumRanges } -func (e PlaceholderEnum) IsClosed() bool { return false } func (e PlaceholderEnum) ProtoType(protoreflect.EnumDescriptor) { return } func (e PlaceholderEnum) ProtoInternal(pragma.DoNotImplement) { return } diff --git a/vendor/google.golang.org/protobuf/internal/filetype/build.go b/vendor/google.golang.org/protobuf/internal/filetype/build.go index ba83fea4..f0e38c4e 100644 --- a/vendor/google.golang.org/protobuf/internal/filetype/build.go +++ b/vendor/google.golang.org/protobuf/internal/filetype/build.go @@ -68,7 +68,7 @@ type Builder struct { // and for input and output messages referenced by service methods. // Dependencies must come after declarations, but the ordering of // dependencies themselves is unspecified. - GoTypes []any + GoTypes []interface{} // DependencyIndexes is an ordered list of indexes into GoTypes for the // dependencies of messages, extensions, or services. @@ -268,7 +268,7 @@ func (x depIdxs) Get(i, j int32) int32 { type ( resolverByIndex struct { - goTypes []any + goTypes []interface{} depIdxs depIdxs fileRegistry } diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go index f30ab6b5..136f1b21 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go @@ -12,28 +12,6 @@ import ( const File_google_protobuf_descriptor_proto = "google/protobuf/descriptor.proto" -// Full and short names for google.protobuf.Edition. -const ( - Edition_enum_fullname = "google.protobuf.Edition" - Edition_enum_name = "Edition" -) - -// Enum values for google.protobuf.Edition. -const ( - Edition_EDITION_UNKNOWN_enum_value = 0 - Edition_EDITION_LEGACY_enum_value = 900 - Edition_EDITION_PROTO2_enum_value = 998 - Edition_EDITION_PROTO3_enum_value = 999 - Edition_EDITION_2023_enum_value = 1000 - Edition_EDITION_2024_enum_value = 1001 - Edition_EDITION_1_TEST_ONLY_enum_value = 1 - Edition_EDITION_2_TEST_ONLY_enum_value = 2 - Edition_EDITION_99997_TEST_ONLY_enum_value = 99997 - Edition_EDITION_99998_TEST_ONLY_enum_value = 99998 - Edition_EDITION_99999_TEST_ONLY_enum_value = 99999 - Edition_EDITION_MAX_enum_value = 2147483647 -) - // Names for google.protobuf.FileDescriptorSet. 
const ( FileDescriptorSet_message_name protoreflect.Name = "FileDescriptorSet" @@ -103,7 +81,7 @@ const ( FileDescriptorProto_Options_field_number protoreflect.FieldNumber = 8 FileDescriptorProto_SourceCodeInfo_field_number protoreflect.FieldNumber = 9 FileDescriptorProto_Syntax_field_number protoreflect.FieldNumber = 12 - FileDescriptorProto_Edition_field_number protoreflect.FieldNumber = 14 + FileDescriptorProto_Edition_field_number protoreflect.FieldNumber = 13 ) // Names for google.protobuf.DescriptorProto. @@ -206,12 +184,10 @@ const ( const ( ExtensionRangeOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" ExtensionRangeOptions_Declaration_field_name protoreflect.Name = "declaration" - ExtensionRangeOptions_Features_field_name protoreflect.Name = "features" ExtensionRangeOptions_Verification_field_name protoreflect.Name = "verification" ExtensionRangeOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.uninterpreted_option" ExtensionRangeOptions_Declaration_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.declaration" - ExtensionRangeOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.features" ExtensionRangeOptions_Verification_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.verification" ) @@ -219,7 +195,6 @@ const ( const ( ExtensionRangeOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ExtensionRangeOptions_Declaration_field_number protoreflect.FieldNumber = 2 - ExtensionRangeOptions_Features_field_number protoreflect.FieldNumber = 50 ExtensionRangeOptions_Verification_field_number protoreflect.FieldNumber = 3 ) @@ -229,12 +204,6 @@ const ( ExtensionRangeOptions_VerificationState_enum_name = "VerificationState" ) -// Enum values for google.protobuf.ExtensionRangeOptions.VerificationState. -const ( - ExtensionRangeOptions_DECLARATION_enum_value = 0 - ExtensionRangeOptions_UNVERIFIED_enum_value = 1 -) - // Names for google.protobuf.ExtensionRangeOptions.Declaration. const ( ExtensionRangeOptions_Declaration_message_name protoreflect.Name = "Declaration" @@ -243,26 +212,29 @@ const ( // Field names for google.protobuf.ExtensionRangeOptions.Declaration. 
const ( - ExtensionRangeOptions_Declaration_Number_field_name protoreflect.Name = "number" - ExtensionRangeOptions_Declaration_FullName_field_name protoreflect.Name = "full_name" - ExtensionRangeOptions_Declaration_Type_field_name protoreflect.Name = "type" - ExtensionRangeOptions_Declaration_Reserved_field_name protoreflect.Name = "reserved" - ExtensionRangeOptions_Declaration_Repeated_field_name protoreflect.Name = "repeated" + ExtensionRangeOptions_Declaration_Number_field_name protoreflect.Name = "number" + ExtensionRangeOptions_Declaration_FullName_field_name protoreflect.Name = "full_name" + ExtensionRangeOptions_Declaration_Type_field_name protoreflect.Name = "type" + ExtensionRangeOptions_Declaration_IsRepeated_field_name protoreflect.Name = "is_repeated" + ExtensionRangeOptions_Declaration_Reserved_field_name protoreflect.Name = "reserved" + ExtensionRangeOptions_Declaration_Repeated_field_name protoreflect.Name = "repeated" - ExtensionRangeOptions_Declaration_Number_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.number" - ExtensionRangeOptions_Declaration_FullName_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.full_name" - ExtensionRangeOptions_Declaration_Type_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.type" - ExtensionRangeOptions_Declaration_Reserved_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.reserved" - ExtensionRangeOptions_Declaration_Repeated_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.repeated" + ExtensionRangeOptions_Declaration_Number_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.number" + ExtensionRangeOptions_Declaration_FullName_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.full_name" + ExtensionRangeOptions_Declaration_Type_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.type" + ExtensionRangeOptions_Declaration_IsRepeated_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.is_repeated" + ExtensionRangeOptions_Declaration_Reserved_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.reserved" + ExtensionRangeOptions_Declaration_Repeated_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.repeated" ) // Field numbers for google.protobuf.ExtensionRangeOptions.Declaration. 
const ( - ExtensionRangeOptions_Declaration_Number_field_number protoreflect.FieldNumber = 1 - ExtensionRangeOptions_Declaration_FullName_field_number protoreflect.FieldNumber = 2 - ExtensionRangeOptions_Declaration_Type_field_number protoreflect.FieldNumber = 3 - ExtensionRangeOptions_Declaration_Reserved_field_number protoreflect.FieldNumber = 5 - ExtensionRangeOptions_Declaration_Repeated_field_number protoreflect.FieldNumber = 6 + ExtensionRangeOptions_Declaration_Number_field_number protoreflect.FieldNumber = 1 + ExtensionRangeOptions_Declaration_FullName_field_number protoreflect.FieldNumber = 2 + ExtensionRangeOptions_Declaration_Type_field_number protoreflect.FieldNumber = 3 + ExtensionRangeOptions_Declaration_IsRepeated_field_number protoreflect.FieldNumber = 4 + ExtensionRangeOptions_Declaration_Reserved_field_number protoreflect.FieldNumber = 5 + ExtensionRangeOptions_Declaration_Repeated_field_number protoreflect.FieldNumber = 6 ) // Names for google.protobuf.FieldDescriptorProto. @@ -319,41 +291,12 @@ const ( FieldDescriptorProto_Type_enum_name = "Type" ) -// Enum values for google.protobuf.FieldDescriptorProto.Type. -const ( - FieldDescriptorProto_TYPE_DOUBLE_enum_value = 1 - FieldDescriptorProto_TYPE_FLOAT_enum_value = 2 - FieldDescriptorProto_TYPE_INT64_enum_value = 3 - FieldDescriptorProto_TYPE_UINT64_enum_value = 4 - FieldDescriptorProto_TYPE_INT32_enum_value = 5 - FieldDescriptorProto_TYPE_FIXED64_enum_value = 6 - FieldDescriptorProto_TYPE_FIXED32_enum_value = 7 - FieldDescriptorProto_TYPE_BOOL_enum_value = 8 - FieldDescriptorProto_TYPE_STRING_enum_value = 9 - FieldDescriptorProto_TYPE_GROUP_enum_value = 10 - FieldDescriptorProto_TYPE_MESSAGE_enum_value = 11 - FieldDescriptorProto_TYPE_BYTES_enum_value = 12 - FieldDescriptorProto_TYPE_UINT32_enum_value = 13 - FieldDescriptorProto_TYPE_ENUM_enum_value = 14 - FieldDescriptorProto_TYPE_SFIXED32_enum_value = 15 - FieldDescriptorProto_TYPE_SFIXED64_enum_value = 16 - FieldDescriptorProto_TYPE_SINT32_enum_value = 17 - FieldDescriptorProto_TYPE_SINT64_enum_value = 18 -) - // Full and short names for google.protobuf.FieldDescriptorProto.Label. const ( FieldDescriptorProto_Label_enum_fullname = "google.protobuf.FieldDescriptorProto.Label" FieldDescriptorProto_Label_enum_name = "Label" ) -// Enum values for google.protobuf.FieldDescriptorProto.Label. -const ( - FieldDescriptorProto_LABEL_OPTIONAL_enum_value = 1 - FieldDescriptorProto_LABEL_REPEATED_enum_value = 3 - FieldDescriptorProto_LABEL_REQUIRED_enum_value = 2 -) - // Names for google.protobuf.OneofDescriptorProto. 
const ( OneofDescriptorProto_message_name protoreflect.Name = "OneofDescriptorProto" @@ -525,6 +468,7 @@ const ( FileOptions_CcGenericServices_field_name protoreflect.Name = "cc_generic_services" FileOptions_JavaGenericServices_field_name protoreflect.Name = "java_generic_services" FileOptions_PyGenericServices_field_name protoreflect.Name = "py_generic_services" + FileOptions_PhpGenericServices_field_name protoreflect.Name = "php_generic_services" FileOptions_Deprecated_field_name protoreflect.Name = "deprecated" FileOptions_CcEnableArenas_field_name protoreflect.Name = "cc_enable_arenas" FileOptions_ObjcClassPrefix_field_name protoreflect.Name = "objc_class_prefix" @@ -534,7 +478,6 @@ const ( FileOptions_PhpNamespace_field_name protoreflect.Name = "php_namespace" FileOptions_PhpMetadataNamespace_field_name protoreflect.Name = "php_metadata_namespace" FileOptions_RubyPackage_field_name protoreflect.Name = "ruby_package" - FileOptions_Features_field_name protoreflect.Name = "features" FileOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" FileOptions_JavaPackage_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_package" @@ -547,6 +490,7 @@ const ( FileOptions_CcGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.cc_generic_services" FileOptions_JavaGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_generic_services" FileOptions_PyGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.py_generic_services" + FileOptions_PhpGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_generic_services" FileOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.deprecated" FileOptions_CcEnableArenas_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.cc_enable_arenas" FileOptions_ObjcClassPrefix_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.objc_class_prefix" @@ -556,7 +500,6 @@ const ( FileOptions_PhpNamespace_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_namespace" FileOptions_PhpMetadataNamespace_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_metadata_namespace" FileOptions_RubyPackage_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.ruby_package" - FileOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.features" FileOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.uninterpreted_option" ) @@ -572,6 +515,7 @@ const ( FileOptions_CcGenericServices_field_number protoreflect.FieldNumber = 16 FileOptions_JavaGenericServices_field_number protoreflect.FieldNumber = 17 FileOptions_PyGenericServices_field_number protoreflect.FieldNumber = 18 + FileOptions_PhpGenericServices_field_number protoreflect.FieldNumber = 42 FileOptions_Deprecated_field_number protoreflect.FieldNumber = 23 FileOptions_CcEnableArenas_field_number protoreflect.FieldNumber = 31 FileOptions_ObjcClassPrefix_field_number protoreflect.FieldNumber = 36 @@ -581,7 +525,6 @@ const ( FileOptions_PhpNamespace_field_number protoreflect.FieldNumber = 41 FileOptions_PhpMetadataNamespace_field_number protoreflect.FieldNumber = 44 FileOptions_RubyPackage_field_number protoreflect.FieldNumber = 45 - FileOptions_Features_field_number protoreflect.FieldNumber = 50 FileOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -591,13 
+534,6 @@ const ( FileOptions_OptimizeMode_enum_name = "OptimizeMode" ) -// Enum values for google.protobuf.FileOptions.OptimizeMode. -const ( - FileOptions_SPEED_enum_value = 1 - FileOptions_CODE_SIZE_enum_value = 2 - FileOptions_LITE_RUNTIME_enum_value = 3 -) - // Names for google.protobuf.MessageOptions. const ( MessageOptions_message_name protoreflect.Name = "MessageOptions" @@ -611,7 +547,6 @@ const ( MessageOptions_Deprecated_field_name protoreflect.Name = "deprecated" MessageOptions_MapEntry_field_name protoreflect.Name = "map_entry" MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_name protoreflect.Name = "deprecated_legacy_json_field_conflicts" - MessageOptions_Features_field_name protoreflect.Name = "features" MessageOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" MessageOptions_MessageSetWireFormat_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.message_set_wire_format" @@ -619,7 +554,6 @@ const ( MessageOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated" MessageOptions_MapEntry_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.map_entry" MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated_legacy_json_field_conflicts" - MessageOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.features" MessageOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.uninterpreted_option" ) @@ -630,7 +564,6 @@ const ( MessageOptions_Deprecated_field_number protoreflect.FieldNumber = 3 MessageOptions_MapEntry_field_number protoreflect.FieldNumber = 7 MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_number protoreflect.FieldNumber = 11 - MessageOptions_Features_field_number protoreflect.FieldNumber = 12 MessageOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -651,10 +584,8 @@ const ( FieldOptions_Weak_field_name protoreflect.Name = "weak" FieldOptions_DebugRedact_field_name protoreflect.Name = "debug_redact" FieldOptions_Retention_field_name protoreflect.Name = "retention" + FieldOptions_Target_field_name protoreflect.Name = "target" FieldOptions_Targets_field_name protoreflect.Name = "targets" - FieldOptions_EditionDefaults_field_name protoreflect.Name = "edition_defaults" - FieldOptions_Features_field_name protoreflect.Name = "features" - FieldOptions_FeatureSupport_field_name protoreflect.Name = "feature_support" FieldOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" FieldOptions_Ctype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.ctype" @@ -666,10 +597,8 @@ const ( FieldOptions_Weak_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.weak" FieldOptions_DebugRedact_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.debug_redact" FieldOptions_Retention_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.retention" + FieldOptions_Target_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.target" FieldOptions_Targets_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.targets" - FieldOptions_EditionDefaults_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.edition_defaults" - FieldOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.features" - FieldOptions_FeatureSupport_field_fullname 
protoreflect.FullName = "google.protobuf.FieldOptions.feature_support" FieldOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.uninterpreted_option" ) @@ -684,10 +613,8 @@ const ( FieldOptions_Weak_field_number protoreflect.FieldNumber = 10 FieldOptions_DebugRedact_field_number protoreflect.FieldNumber = 16 FieldOptions_Retention_field_number protoreflect.FieldNumber = 17 + FieldOptions_Target_field_number protoreflect.FieldNumber = 18 FieldOptions_Targets_field_number protoreflect.FieldNumber = 19 - FieldOptions_EditionDefaults_field_number protoreflect.FieldNumber = 20 - FieldOptions_Features_field_number protoreflect.FieldNumber = 21 - FieldOptions_FeatureSupport_field_number protoreflect.FieldNumber = 22 FieldOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -697,107 +624,24 @@ const ( FieldOptions_CType_enum_name = "CType" ) -// Enum values for google.protobuf.FieldOptions.CType. -const ( - FieldOptions_STRING_enum_value = 0 - FieldOptions_CORD_enum_value = 1 - FieldOptions_STRING_PIECE_enum_value = 2 -) - // Full and short names for google.protobuf.FieldOptions.JSType. const ( FieldOptions_JSType_enum_fullname = "google.protobuf.FieldOptions.JSType" FieldOptions_JSType_enum_name = "JSType" ) -// Enum values for google.protobuf.FieldOptions.JSType. -const ( - FieldOptions_JS_NORMAL_enum_value = 0 - FieldOptions_JS_STRING_enum_value = 1 - FieldOptions_JS_NUMBER_enum_value = 2 -) - // Full and short names for google.protobuf.FieldOptions.OptionRetention. const ( FieldOptions_OptionRetention_enum_fullname = "google.protobuf.FieldOptions.OptionRetention" FieldOptions_OptionRetention_enum_name = "OptionRetention" ) -// Enum values for google.protobuf.FieldOptions.OptionRetention. -const ( - FieldOptions_RETENTION_UNKNOWN_enum_value = 0 - FieldOptions_RETENTION_RUNTIME_enum_value = 1 - FieldOptions_RETENTION_SOURCE_enum_value = 2 -) - // Full and short names for google.protobuf.FieldOptions.OptionTargetType. const ( FieldOptions_OptionTargetType_enum_fullname = "google.protobuf.FieldOptions.OptionTargetType" FieldOptions_OptionTargetType_enum_name = "OptionTargetType" ) -// Enum values for google.protobuf.FieldOptions.OptionTargetType. -const ( - FieldOptions_TARGET_TYPE_UNKNOWN_enum_value = 0 - FieldOptions_TARGET_TYPE_FILE_enum_value = 1 - FieldOptions_TARGET_TYPE_EXTENSION_RANGE_enum_value = 2 - FieldOptions_TARGET_TYPE_MESSAGE_enum_value = 3 - FieldOptions_TARGET_TYPE_FIELD_enum_value = 4 - FieldOptions_TARGET_TYPE_ONEOF_enum_value = 5 - FieldOptions_TARGET_TYPE_ENUM_enum_value = 6 - FieldOptions_TARGET_TYPE_ENUM_ENTRY_enum_value = 7 - FieldOptions_TARGET_TYPE_SERVICE_enum_value = 8 - FieldOptions_TARGET_TYPE_METHOD_enum_value = 9 -) - -// Names for google.protobuf.FieldOptions.EditionDefault. -const ( - FieldOptions_EditionDefault_message_name protoreflect.Name = "EditionDefault" - FieldOptions_EditionDefault_message_fullname protoreflect.FullName = "google.protobuf.FieldOptions.EditionDefault" -) - -// Field names for google.protobuf.FieldOptions.EditionDefault. 
-const ( - FieldOptions_EditionDefault_Edition_field_name protoreflect.Name = "edition" - FieldOptions_EditionDefault_Value_field_name protoreflect.Name = "value" - - FieldOptions_EditionDefault_Edition_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.EditionDefault.edition" - FieldOptions_EditionDefault_Value_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.EditionDefault.value" -) - -// Field numbers for google.protobuf.FieldOptions.EditionDefault. -const ( - FieldOptions_EditionDefault_Edition_field_number protoreflect.FieldNumber = 3 - FieldOptions_EditionDefault_Value_field_number protoreflect.FieldNumber = 2 -) - -// Names for google.protobuf.FieldOptions.FeatureSupport. -const ( - FieldOptions_FeatureSupport_message_name protoreflect.Name = "FeatureSupport" - FieldOptions_FeatureSupport_message_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport" -) - -// Field names for google.protobuf.FieldOptions.FeatureSupport. -const ( - FieldOptions_FeatureSupport_EditionIntroduced_field_name protoreflect.Name = "edition_introduced" - FieldOptions_FeatureSupport_EditionDeprecated_field_name protoreflect.Name = "edition_deprecated" - FieldOptions_FeatureSupport_DeprecationWarning_field_name protoreflect.Name = "deprecation_warning" - FieldOptions_FeatureSupport_EditionRemoved_field_name protoreflect.Name = "edition_removed" - - FieldOptions_FeatureSupport_EditionIntroduced_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport.edition_introduced" - FieldOptions_FeatureSupport_EditionDeprecated_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport.edition_deprecated" - FieldOptions_FeatureSupport_DeprecationWarning_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport.deprecation_warning" - FieldOptions_FeatureSupport_EditionRemoved_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport.edition_removed" -) - -// Field numbers for google.protobuf.FieldOptions.FeatureSupport. -const ( - FieldOptions_FeatureSupport_EditionIntroduced_field_number protoreflect.FieldNumber = 1 - FieldOptions_FeatureSupport_EditionDeprecated_field_number protoreflect.FieldNumber = 2 - FieldOptions_FeatureSupport_DeprecationWarning_field_number protoreflect.FieldNumber = 3 - FieldOptions_FeatureSupport_EditionRemoved_field_number protoreflect.FieldNumber = 4 -) - // Names for google.protobuf.OneofOptions. const ( OneofOptions_message_name protoreflect.Name = "OneofOptions" @@ -806,16 +650,13 @@ const ( // Field names for google.protobuf.OneofOptions. const ( - OneofOptions_Features_field_name protoreflect.Name = "features" OneofOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" - OneofOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.OneofOptions.features" OneofOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.OneofOptions.uninterpreted_option" ) // Field numbers for google.protobuf.OneofOptions. 
const ( - OneofOptions_Features_field_number protoreflect.FieldNumber = 1 OneofOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -830,13 +671,11 @@ const ( EnumOptions_AllowAlias_field_name protoreflect.Name = "allow_alias" EnumOptions_Deprecated_field_name protoreflect.Name = "deprecated" EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_name protoreflect.Name = "deprecated_legacy_json_field_conflicts" - EnumOptions_Features_field_name protoreflect.Name = "features" EnumOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" EnumOptions_AllowAlias_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.allow_alias" EnumOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated" EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated_legacy_json_field_conflicts" - EnumOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.features" EnumOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.uninterpreted_option" ) @@ -845,7 +684,6 @@ const ( EnumOptions_AllowAlias_field_number protoreflect.FieldNumber = 2 EnumOptions_Deprecated_field_number protoreflect.FieldNumber = 3 EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_number protoreflect.FieldNumber = 6 - EnumOptions_Features_field_number protoreflect.FieldNumber = 7 EnumOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -858,24 +696,15 @@ const ( // Field names for google.protobuf.EnumValueOptions. const ( EnumValueOptions_Deprecated_field_name protoreflect.Name = "deprecated" - EnumValueOptions_Features_field_name protoreflect.Name = "features" - EnumValueOptions_DebugRedact_field_name protoreflect.Name = "debug_redact" - EnumValueOptions_FeatureSupport_field_name protoreflect.Name = "feature_support" EnumValueOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" EnumValueOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.deprecated" - EnumValueOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.features" - EnumValueOptions_DebugRedact_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.debug_redact" - EnumValueOptions_FeatureSupport_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.feature_support" EnumValueOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.uninterpreted_option" ) // Field numbers for google.protobuf.EnumValueOptions. const ( EnumValueOptions_Deprecated_field_number protoreflect.FieldNumber = 1 - EnumValueOptions_Features_field_number protoreflect.FieldNumber = 2 - EnumValueOptions_DebugRedact_field_number protoreflect.FieldNumber = 3 - EnumValueOptions_FeatureSupport_field_number protoreflect.FieldNumber = 4 EnumValueOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -887,18 +716,15 @@ const ( // Field names for google.protobuf.ServiceOptions. 
const ( - ServiceOptions_Features_field_name protoreflect.Name = "features" ServiceOptions_Deprecated_field_name protoreflect.Name = "deprecated" ServiceOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" - ServiceOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.features" ServiceOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.deprecated" ServiceOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.uninterpreted_option" ) // Field numbers for google.protobuf.ServiceOptions. const ( - ServiceOptions_Features_field_number protoreflect.FieldNumber = 34 ServiceOptions_Deprecated_field_number protoreflect.FieldNumber = 33 ServiceOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -913,12 +739,10 @@ const ( const ( MethodOptions_Deprecated_field_name protoreflect.Name = "deprecated" MethodOptions_IdempotencyLevel_field_name protoreflect.Name = "idempotency_level" - MethodOptions_Features_field_name protoreflect.Name = "features" MethodOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" MethodOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.deprecated" MethodOptions_IdempotencyLevel_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.idempotency_level" - MethodOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.features" MethodOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.uninterpreted_option" ) @@ -926,7 +750,6 @@ const ( const ( MethodOptions_Deprecated_field_number protoreflect.FieldNumber = 33 MethodOptions_IdempotencyLevel_field_number protoreflect.FieldNumber = 34 - MethodOptions_Features_field_number protoreflect.FieldNumber = 35 MethodOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -936,13 +759,6 @@ const ( MethodOptions_IdempotencyLevel_enum_name = "IdempotencyLevel" ) -// Enum values for google.protobuf.MethodOptions.IdempotencyLevel. -const ( - MethodOptions_IDEMPOTENCY_UNKNOWN_enum_value = 0 - MethodOptions_NO_SIDE_EFFECTS_enum_value = 1 - MethodOptions_IDEMPOTENT_enum_value = 2 -) - // Names for google.protobuf.UninterpretedOption. const ( UninterpretedOption_message_name protoreflect.Name = "UninterpretedOption" @@ -1000,166 +816,6 @@ const ( UninterpretedOption_NamePart_IsExtension_field_number protoreflect.FieldNumber = 2 ) -// Names for google.protobuf.FeatureSet. -const ( - FeatureSet_message_name protoreflect.Name = "FeatureSet" - FeatureSet_message_fullname protoreflect.FullName = "google.protobuf.FeatureSet" -) - -// Field names for google.protobuf.FeatureSet. 
-const ( - FeatureSet_FieldPresence_field_name protoreflect.Name = "field_presence" - FeatureSet_EnumType_field_name protoreflect.Name = "enum_type" - FeatureSet_RepeatedFieldEncoding_field_name protoreflect.Name = "repeated_field_encoding" - FeatureSet_Utf8Validation_field_name protoreflect.Name = "utf8_validation" - FeatureSet_MessageEncoding_field_name protoreflect.Name = "message_encoding" - FeatureSet_JsonFormat_field_name protoreflect.Name = "json_format" - - FeatureSet_FieldPresence_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.field_presence" - FeatureSet_EnumType_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.enum_type" - FeatureSet_RepeatedFieldEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.repeated_field_encoding" - FeatureSet_Utf8Validation_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.utf8_validation" - FeatureSet_MessageEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.message_encoding" - FeatureSet_JsonFormat_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.json_format" -) - -// Field numbers for google.protobuf.FeatureSet. -const ( - FeatureSet_FieldPresence_field_number protoreflect.FieldNumber = 1 - FeatureSet_EnumType_field_number protoreflect.FieldNumber = 2 - FeatureSet_RepeatedFieldEncoding_field_number protoreflect.FieldNumber = 3 - FeatureSet_Utf8Validation_field_number protoreflect.FieldNumber = 4 - FeatureSet_MessageEncoding_field_number protoreflect.FieldNumber = 5 - FeatureSet_JsonFormat_field_number protoreflect.FieldNumber = 6 -) - -// Full and short names for google.protobuf.FeatureSet.FieldPresence. -const ( - FeatureSet_FieldPresence_enum_fullname = "google.protobuf.FeatureSet.FieldPresence" - FeatureSet_FieldPresence_enum_name = "FieldPresence" -) - -// Enum values for google.protobuf.FeatureSet.FieldPresence. -const ( - FeatureSet_FIELD_PRESENCE_UNKNOWN_enum_value = 0 - FeatureSet_EXPLICIT_enum_value = 1 - FeatureSet_IMPLICIT_enum_value = 2 - FeatureSet_LEGACY_REQUIRED_enum_value = 3 -) - -// Full and short names for google.protobuf.FeatureSet.EnumType. -const ( - FeatureSet_EnumType_enum_fullname = "google.protobuf.FeatureSet.EnumType" - FeatureSet_EnumType_enum_name = "EnumType" -) - -// Enum values for google.protobuf.FeatureSet.EnumType. -const ( - FeatureSet_ENUM_TYPE_UNKNOWN_enum_value = 0 - FeatureSet_OPEN_enum_value = 1 - FeatureSet_CLOSED_enum_value = 2 -) - -// Full and short names for google.protobuf.FeatureSet.RepeatedFieldEncoding. -const ( - FeatureSet_RepeatedFieldEncoding_enum_fullname = "google.protobuf.FeatureSet.RepeatedFieldEncoding" - FeatureSet_RepeatedFieldEncoding_enum_name = "RepeatedFieldEncoding" -) - -// Enum values for google.protobuf.FeatureSet.RepeatedFieldEncoding. -const ( - FeatureSet_REPEATED_FIELD_ENCODING_UNKNOWN_enum_value = 0 - FeatureSet_PACKED_enum_value = 1 - FeatureSet_EXPANDED_enum_value = 2 -) - -// Full and short names for google.protobuf.FeatureSet.Utf8Validation. -const ( - FeatureSet_Utf8Validation_enum_fullname = "google.protobuf.FeatureSet.Utf8Validation" - FeatureSet_Utf8Validation_enum_name = "Utf8Validation" -) - -// Enum values for google.protobuf.FeatureSet.Utf8Validation. -const ( - FeatureSet_UTF8_VALIDATION_UNKNOWN_enum_value = 0 - FeatureSet_VERIFY_enum_value = 2 - FeatureSet_NONE_enum_value = 3 -) - -// Full and short names for google.protobuf.FeatureSet.MessageEncoding. 
-const ( - FeatureSet_MessageEncoding_enum_fullname = "google.protobuf.FeatureSet.MessageEncoding" - FeatureSet_MessageEncoding_enum_name = "MessageEncoding" -) - -// Enum values for google.protobuf.FeatureSet.MessageEncoding. -const ( - FeatureSet_MESSAGE_ENCODING_UNKNOWN_enum_value = 0 - FeatureSet_LENGTH_PREFIXED_enum_value = 1 - FeatureSet_DELIMITED_enum_value = 2 -) - -// Full and short names for google.protobuf.FeatureSet.JsonFormat. -const ( - FeatureSet_JsonFormat_enum_fullname = "google.protobuf.FeatureSet.JsonFormat" - FeatureSet_JsonFormat_enum_name = "JsonFormat" -) - -// Enum values for google.protobuf.FeatureSet.JsonFormat. -const ( - FeatureSet_JSON_FORMAT_UNKNOWN_enum_value = 0 - FeatureSet_ALLOW_enum_value = 1 - FeatureSet_LEGACY_BEST_EFFORT_enum_value = 2 -) - -// Names for google.protobuf.FeatureSetDefaults. -const ( - FeatureSetDefaults_message_name protoreflect.Name = "FeatureSetDefaults" - FeatureSetDefaults_message_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults" -) - -// Field names for google.protobuf.FeatureSetDefaults. -const ( - FeatureSetDefaults_Defaults_field_name protoreflect.Name = "defaults" - FeatureSetDefaults_MinimumEdition_field_name protoreflect.Name = "minimum_edition" - FeatureSetDefaults_MaximumEdition_field_name protoreflect.Name = "maximum_edition" - - FeatureSetDefaults_Defaults_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.defaults" - FeatureSetDefaults_MinimumEdition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.minimum_edition" - FeatureSetDefaults_MaximumEdition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.maximum_edition" -) - -// Field numbers for google.protobuf.FeatureSetDefaults. -const ( - FeatureSetDefaults_Defaults_field_number protoreflect.FieldNumber = 1 - FeatureSetDefaults_MinimumEdition_field_number protoreflect.FieldNumber = 4 - FeatureSetDefaults_MaximumEdition_field_number protoreflect.FieldNumber = 5 -) - -// Names for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault. -const ( - FeatureSetDefaults_FeatureSetEditionDefault_message_name protoreflect.Name = "FeatureSetEditionDefault" - FeatureSetDefaults_FeatureSetEditionDefault_message_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault" -) - -// Field names for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault. -const ( - FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_name protoreflect.Name = "edition" - FeatureSetDefaults_FeatureSetEditionDefault_OverridableFeatures_field_name protoreflect.Name = "overridable_features" - FeatureSetDefaults_FeatureSetEditionDefault_FixedFeatures_field_name protoreflect.Name = "fixed_features" - - FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition" - FeatureSetDefaults_FeatureSetEditionDefault_OverridableFeatures_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features" - FeatureSetDefaults_FeatureSetEditionDefault_FixedFeatures_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features" -) - -// Field numbers for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault. 
-const ( - FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_number protoreflect.FieldNumber = 3 - FeatureSetDefaults_FeatureSetEditionDefault_OverridableFeatures_field_number protoreflect.FieldNumber = 4 - FeatureSetDefaults_FeatureSetEditionDefault_FixedFeatures_field_number protoreflect.FieldNumber = 5 -) - // Names for google.protobuf.SourceCodeInfo. const ( SourceCodeInfo_message_name protoreflect.Name = "SourceCodeInfo" @@ -1261,10 +917,3 @@ const ( GeneratedCodeInfo_Annotation_Semantic_enum_fullname = "google.protobuf.GeneratedCodeInfo.Annotation.Semantic" GeneratedCodeInfo_Annotation_Semantic_enum_name = "Semantic" ) - -// Enum values for google.protobuf.GeneratedCodeInfo.Annotation.Semantic. -const ( - GeneratedCodeInfo_Annotation_NONE_enum_value = 0 - GeneratedCodeInfo_Annotation_SET_enum_value = 1 - GeneratedCodeInfo_Annotation_ALIAS_enum_value = 2 -) diff --git a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go deleted file mode 100644 index 9a652a2b..00000000 --- a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by generate-protos. DO NOT EDIT. - -package genid - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" -) - -const File_google_protobuf_go_features_proto = "google/protobuf/go_features.proto" - -// Names for google.protobuf.GoFeatures. -const ( - GoFeatures_message_name protoreflect.Name = "GoFeatures" - GoFeatures_message_fullname protoreflect.FullName = "google.protobuf.GoFeatures" -) - -// Field names for google.protobuf.GoFeatures. -const ( - GoFeatures_LegacyUnmarshalJsonEnum_field_name protoreflect.Name = "legacy_unmarshal_json_enum" - - GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "google.protobuf.GoFeatures.legacy_unmarshal_json_enum" -) - -// Field numbers for google.protobuf.GoFeatures. -const ( - GoFeatures_LegacyUnmarshalJsonEnum_field_number protoreflect.FieldNumber = 1 -) diff --git a/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go b/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go index ad6f80c4..1a38944b 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go @@ -18,11 +18,6 @@ const ( NullValue_enum_name = "NullValue" ) -// Enum values for google.protobuf.NullValue. -const ( - NullValue_NULL_VALUE_enum_value = 0 -) - // Names for google.protobuf.Struct. const ( Struct_message_name protoreflect.Name = "Struct" diff --git a/vendor/google.golang.org/protobuf/internal/genid/type_gen.go b/vendor/google.golang.org/protobuf/internal/genid/type_gen.go index 49bc73e2..e0f75fea 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/type_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/type_gen.go @@ -18,13 +18,6 @@ const ( Syntax_enum_name = "Syntax" ) -// Enum values for google.protobuf.Syntax. -const ( - Syntax_SYNTAX_PROTO2_enum_value = 0 - Syntax_SYNTAX_PROTO3_enum_value = 1 - Syntax_SYNTAX_EDITIONS_enum_value = 2 -) - // Names for google.protobuf.Type. const ( Type_message_name protoreflect.Name = "Type" @@ -112,43 +105,12 @@ const ( Field_Kind_enum_name = "Kind" ) -// Enum values for google.protobuf.Field.Kind. 
-const ( - Field_TYPE_UNKNOWN_enum_value = 0 - Field_TYPE_DOUBLE_enum_value = 1 - Field_TYPE_FLOAT_enum_value = 2 - Field_TYPE_INT64_enum_value = 3 - Field_TYPE_UINT64_enum_value = 4 - Field_TYPE_INT32_enum_value = 5 - Field_TYPE_FIXED64_enum_value = 6 - Field_TYPE_FIXED32_enum_value = 7 - Field_TYPE_BOOL_enum_value = 8 - Field_TYPE_STRING_enum_value = 9 - Field_TYPE_GROUP_enum_value = 10 - Field_TYPE_MESSAGE_enum_value = 11 - Field_TYPE_BYTES_enum_value = 12 - Field_TYPE_UINT32_enum_value = 13 - Field_TYPE_ENUM_enum_value = 14 - Field_TYPE_SFIXED32_enum_value = 15 - Field_TYPE_SFIXED64_enum_value = 16 - Field_TYPE_SINT32_enum_value = 17 - Field_TYPE_SINT64_enum_value = 18 -) - // Full and short names for google.protobuf.Field.Cardinality. const ( Field_Cardinality_enum_fullname = "google.protobuf.Field.Cardinality" Field_Cardinality_enum_name = "Cardinality" ) -// Enum values for google.protobuf.Field.Cardinality. -const ( - Field_CARDINALITY_UNKNOWN_enum_value = 0 - Field_CARDINALITY_OPTIONAL_enum_value = 1 - Field_CARDINALITY_REQUIRED_enum_value = 2 - Field_CARDINALITY_REPEATED_enum_value = 3 -) - // Names for google.protobuf.Enum. const ( Enum_message_name protoreflect.Name = "Enum" diff --git a/vendor/google.golang.org/protobuf/internal/impl/api_export.go b/vendor/google.golang.org/protobuf/internal/impl/api_export.go index 5d5771c2..a371f98d 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/api_export.go +++ b/vendor/google.golang.org/protobuf/internal/impl/api_export.go @@ -22,13 +22,13 @@ type Export struct{} // NewError formats a string according to the format specifier and arguments and // returns an error that has a "proto" prefix. -func (Export) NewError(f string, x ...any) error { +func (Export) NewError(f string, x ...interface{}) error { return errors.New(f, x...) } // enum is any enum type generated by protoc-gen-go // and must be a named int32 type. -type enum = any +type enum = interface{} // EnumOf returns the protoreflect.Enum interface over e. // It returns nil if e is nil. @@ -81,7 +81,7 @@ func (Export) EnumStringOf(ed protoreflect.EnumDescriptor, n protoreflect.EnumNu // message is any message type generated by protoc-gen-go // and must be a pointer to a named struct type. -type message = any +type message = interface{} // legacyMessageWrapper wraps a v2 message as a v1 message. 
type legacyMessageWrapper struct{ m protoreflect.ProtoMessage } diff --git a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go index f29e6a8f..bff041ed 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go +++ b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go @@ -68,7 +68,7 @@ func (mi *MessageInfo) isInitExtensions(ext *map[int32]ExtensionField) error { } for _, x := range *ext { ei := getExtensionFieldInfo(x.Type()) - if ei.funcs.isInit == nil || x.isUnexpandedLazy() { + if ei.funcs.isInit == nil { continue } v := x.Value() diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go index 4bb0a7a2..e74cefdc 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go @@ -21,18 +21,26 @@ type extensionFieldInfo struct { validation validationInfo } +var legacyExtensionFieldInfoCache sync.Map // map[protoreflect.ExtensionType]*extensionFieldInfo + func getExtensionFieldInfo(xt protoreflect.ExtensionType) *extensionFieldInfo { if xi, ok := xt.(*ExtensionInfo); ok { xi.lazyInit() return xi.info } - // Ideally we'd cache the resulting *extensionFieldInfo so we don't have to - // recompute this metadata repeatedly. But without support for something like - // weak references, such a cache would pin temporary values (like dynamic - // extension types, constructed for the duration of a user request) to the - // heap forever, causing memory usage of the cache to grow unbounded. - // See discussion in https://github.com/golang/protobuf/issues/1521. - return makeExtensionFieldInfo(xt.TypeDescriptor()) + return legacyLoadExtensionFieldInfo(xt) +} + +// legacyLoadExtensionFieldInfo dynamically loads a *ExtensionInfo for xt. +func legacyLoadExtensionFieldInfo(xt protoreflect.ExtensionType) *extensionFieldInfo { + if xi, ok := legacyExtensionFieldInfoCache.Load(xt); ok { + return xi.(*extensionFieldInfo) + } + e := makeExtensionFieldInfo(xt.TypeDescriptor()) + if e, ok := legacyMessageTypeCache.LoadOrStore(xt, e); ok { + return e.(*extensionFieldInfo) + } + return e } func makeExtensionFieldInfo(xd protoreflect.ExtensionDescriptor) *extensionFieldInfo { @@ -99,28 +107,6 @@ func (f *ExtensionField) canLazy(xt protoreflect.ExtensionType) bool { return false } -// isUnexpandedLazy returns true if the ExensionField is lazy and not -// yet expanded, which means it's present and already checked for -// initialized required fields. -func (f *ExtensionField) isUnexpandedLazy() bool { - return f.lazy != nil && atomic.LoadUint32(&f.lazy.atomicOnce) == 0 -} - -// lazyBuffer retrieves the buffer for a lazy extension if it's not yet expanded. -// -// The returned buffer has to be kept over whatever operation we're planning, -// as re-retrieving it will fail after the message is lazily decoded. -func (f *ExtensionField) lazyBuffer() []byte { - // This function might be in the critical path, so check the atomic without - // taking a look first, then only take the lock if needed. 
- if !f.isUnexpandedLazy() { - return nil - } - f.lazy.mu.Lock() - defer f.lazy.mu.Unlock() - return f.lazy.b -} - func (f *ExtensionField) lazyInit() { f.lazy.mu.Lock() defer f.lazy.mu.Unlock() diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go index 78ee47e4..3fadd241 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go @@ -233,15 +233,9 @@ func sizeMessageInfo(p pointer, f *coderFieldInfo, opts marshalOptions) int { } func appendMessageInfo(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - calculatedSize := f.mi.sizePointer(p.Elem(), opts) b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, uint64(calculatedSize)) - before := len(b) - b, err := f.mi.marshalAppendPointer(b, p.Elem(), opts) - if measuredSize := len(b) - before; calculatedSize != measuredSize && err == nil { - return nil, errors.MismatchedSizeCalculation(calculatedSize, measuredSize) - } - return b, err + b = protowire.AppendVarint(b, uint64(f.mi.sizePointer(p.Elem(), opts))) + return f.mi.marshalAppendPointer(b, p.Elem(), opts) } func consumeMessageInfo(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { @@ -268,21 +262,14 @@ func isInitMessageInfo(p pointer, f *coderFieldInfo) error { return f.mi.checkInitializedPointer(p.Elem()) } -func sizeMessage(m proto.Message, tagsize int, opts marshalOptions) int { - return protowire.SizeBytes(opts.Options().Size(m)) + tagsize +func sizeMessage(m proto.Message, tagsize int, _ marshalOptions) int { + return protowire.SizeBytes(proto.Size(m)) + tagsize } func appendMessage(b []byte, m proto.Message, wiretag uint64, opts marshalOptions) ([]byte, error) { - mopts := opts.Options() - calculatedSize := mopts.Size(m) b = protowire.AppendVarint(b, wiretag) - b = protowire.AppendVarint(b, uint64(calculatedSize)) - before := len(b) - b, err := mopts.MarshalAppend(b, m) - if measuredSize := len(b) - before; calculatedSize != measuredSize && err == nil { - return nil, errors.MismatchedSizeCalculation(calculatedSize, measuredSize) - } - return b, err + b = protowire.AppendVarint(b, uint64(proto.Size(m))) + return opts.Options().MarshalAppend(b, m) } func consumeMessage(b []byte, m proto.Message, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, err error) { @@ -418,8 +405,8 @@ func consumeGroupType(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInf return f.mi.unmarshalPointer(b, p.Elem(), f.num, opts) } -func sizeGroup(m proto.Message, tagsize int, opts marshalOptions) int { - return 2*tagsize + opts.Options().Size(m) +func sizeGroup(m proto.Message, tagsize int, _ marshalOptions) int { + return 2*tagsize + proto.Size(m) } func appendGroup(b []byte, m proto.Message, wiretag uint64, opts marshalOptions) ([]byte, error) { @@ -495,14 +482,10 @@ func appendMessageSliceInfo(b []byte, p pointer, f *coderFieldInfo, opts marshal b = protowire.AppendVarint(b, f.wiretag) siz := f.mi.sizePointer(v, opts) b = protowire.AppendVarint(b, uint64(siz)) - before := len(b) b, err = f.mi.marshalAppendPointer(b, v, opts) if err != nil { return b, err } - if measuredSize := len(b) - before; siz != measuredSize { - return nil, errors.MismatchedSizeCalculation(siz, measuredSize) - } } return b, nil } @@ -537,34 +520,28 @@ func isInitMessageSliceInfo(p pointer, f *coderFieldInfo) error { return nil 
} -func sizeMessageSlice(p pointer, goType reflect.Type, tagsize int, opts marshalOptions) int { - mopts := opts.Options() +func sizeMessageSlice(p pointer, goType reflect.Type, tagsize int, _ marshalOptions) int { s := p.PointerSlice() n := 0 for _, v := range s { m := asMessage(v.AsValueOf(goType.Elem())) - n += protowire.SizeBytes(mopts.Size(m)) + tagsize + n += protowire.SizeBytes(proto.Size(m)) + tagsize } return n } func appendMessageSlice(b []byte, p pointer, wiretag uint64, goType reflect.Type, opts marshalOptions) ([]byte, error) { - mopts := opts.Options() s := p.PointerSlice() var err error for _, v := range s { m := asMessage(v.AsValueOf(goType.Elem())) b = protowire.AppendVarint(b, wiretag) - siz := mopts.Size(m) + siz := proto.Size(m) b = protowire.AppendVarint(b, uint64(siz)) - before := len(b) - b, err = mopts.MarshalAppend(b, m) + b, err = opts.Options().MarshalAppend(b, m) if err != nil { return b, err } - if measuredSize := len(b) - before; siz != measuredSize { - return nil, errors.MismatchedSizeCalculation(siz, measuredSize) - } } return b, nil } @@ -605,12 +582,11 @@ func isInitMessageSlice(p pointer, goType reflect.Type) error { // Slices of messages func sizeMessageSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) int { - mopts := opts.Options() list := listv.List() n := 0 for i, llen := 0, list.Len(); i < llen; i++ { m := list.Get(i).Message().Interface() - n += protowire.SizeBytes(mopts.Size(m)) + tagsize + n += protowire.SizeBytes(proto.Size(m)) + tagsize } return n } @@ -621,17 +597,13 @@ func appendMessageSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, for i, llen := 0, list.Len(); i < llen; i++ { m := list.Get(i).Message().Interface() b = protowire.AppendVarint(b, wiretag) - siz := mopts.Size(m) + siz := proto.Size(m) b = protowire.AppendVarint(b, uint64(siz)) - before := len(b) var err error b, err = mopts.MarshalAppend(b, m) if err != nil { return b, err } - if measuredSize := len(b) - before; siz != measuredSize { - return nil, errors.MismatchedSizeCalculation(siz, measuredSize) - } } return b, nil } @@ -679,12 +651,11 @@ var coderMessageSliceValue = valueCoderFuncs{ } func sizeGroupSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) int { - mopts := opts.Options() list := listv.List() n := 0 for i, llen := 0, list.Len(); i < llen; i++ { m := list.Get(i).Message().Interface() - n += 2*tagsize + mopts.Size(m) + n += 2*tagsize + proto.Size(m) } return n } @@ -767,13 +738,12 @@ func makeGroupSliceFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) } } -func sizeGroupSlice(p pointer, messageType reflect.Type, tagsize int, opts marshalOptions) int { - mopts := opts.Options() +func sizeGroupSlice(p pointer, messageType reflect.Type, tagsize int, _ marshalOptions) int { s := p.PointerSlice() n := 0 for _, v := range s { m := asMessage(v.AsValueOf(messageType.Elem())) - n += 2*tagsize + mopts.Size(m) + n += 2*tagsize + proto.Size(m) } return n } diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go b/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go index f55dc01e..1a509b63 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go @@ -162,20 +162,11 @@ func appendBoolSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions func consumeBoolSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.BoolSlice() 
if wtyp == protowire.BytesType { + s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } - count := 0 - for _, v := range b { - if v < 0x80 { - count++ - } - } - if count > 0 { - p.growBoolSlice(count) - } - s := *sp for len(b) > 0 { var v uint64 var n int @@ -741,20 +732,11 @@ func appendInt32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOption func consumeInt32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int32Slice() if wtyp == protowire.BytesType { + s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } - count := 0 - for _, v := range b { - if v < 0x80 { - count++ - } - } - if count > 0 { - p.growInt32Slice(count) - } - s := *sp for len(b) > 0 { var v uint64 var n int @@ -1156,20 +1138,11 @@ func appendSint32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio func consumeSint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int32Slice() if wtyp == protowire.BytesType { + s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } - count := 0 - for _, v := range b { - if v < 0x80 { - count++ - } - } - if count > 0 { - p.growInt32Slice(count) - } - s := *sp for len(b) > 0 { var v uint64 var n int @@ -1571,20 +1544,11 @@ func appendUint32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio func consumeUint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Uint32Slice() if wtyp == protowire.BytesType { + s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } - count := 0 - for _, v := range b { - if v < 0x80 { - count++ - } - } - if count > 0 { - p.growUint32Slice(count) - } - s := *sp for len(b) > 0 { var v uint64 var n int @@ -1986,20 +1950,11 @@ func appendInt64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOption func consumeInt64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int64Slice() if wtyp == protowire.BytesType { + s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } - count := 0 - for _, v := range b { - if v < 0x80 { - count++ - } - } - if count > 0 { - p.growInt64Slice(count) - } - s := *sp for len(b) > 0 { var v uint64 var n int @@ -2401,20 +2356,11 @@ func appendSint64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio func consumeSint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int64Slice() if wtyp == protowire.BytesType { + s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } - count := 0 - for _, v := range b { - if v < 0x80 { - count++ - } - } - if count > 0 { - p.growInt64Slice(count) - } - s := *sp for len(b) > 0 { var v uint64 var n int @@ -2816,20 +2762,11 @@ func appendUint64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio func consumeUint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Uint64Slice() if wtyp == protowire.BytesType { + s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } - count := 0 - for _, v := range b { - if v < 0x80 { - count++ - } - } - if count > 0 { - p.growUint64Slice(count) - } - s := *sp for 
len(b) > 0 { var v uint64 var n int @@ -3208,15 +3145,11 @@ func appendSfixed32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOpt func consumeSfixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int32Slice() if wtyp == protowire.BytesType { + s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } - count := len(b) / protowire.SizeFixed32() - if count > 0 { - p.growInt32Slice(count) - } - s := *sp for len(b) > 0 { v, n := protowire.ConsumeFixed32(b) if n < 0 { @@ -3528,15 +3461,11 @@ func appendFixed32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOpti func consumeFixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Uint32Slice() if wtyp == protowire.BytesType { + s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } - count := len(b) / protowire.SizeFixed32() - if count > 0 { - p.growUint32Slice(count) - } - s := *sp for len(b) > 0 { v, n := protowire.ConsumeFixed32(b) if n < 0 { @@ -3848,15 +3777,11 @@ func appendFloatSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOption func consumeFloatSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Float32Slice() if wtyp == protowire.BytesType { + s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } - count := len(b) / protowire.SizeFixed32() - if count > 0 { - p.growFloat32Slice(count) - } - s := *sp for len(b) > 0 { v, n := protowire.ConsumeFixed32(b) if n < 0 { @@ -4168,15 +4093,11 @@ func appendSfixed64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOpt func consumeSfixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int64Slice() if wtyp == protowire.BytesType { + s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } - count := len(b) / protowire.SizeFixed64() - if count > 0 { - p.growInt64Slice(count) - } - s := *sp for len(b) > 0 { v, n := protowire.ConsumeFixed64(b) if n < 0 { @@ -4488,15 +4409,11 @@ func appendFixed64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOpti func consumeFixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Uint64Slice() if wtyp == protowire.BytesType { + s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } - count := len(b) / protowire.SizeFixed64() - if count > 0 { - p.growUint64Slice(count) - } - s := *sp for len(b) > 0 { v, n := protowire.ConsumeFixed64(b) if n < 0 { @@ -4808,15 +4725,11 @@ func appendDoubleSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio func consumeDoubleSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Float64Slice() if wtyp == protowire.BytesType { + s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } - count := len(b) / protowire.SizeFixed64() - if count > 0 { - p.growFloat64Slice(count) - } - s := *sp for len(b) > 0 { v, n := protowire.ConsumeFixed64(b) if n < 0 { diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go index fb35f0ba..111b9d16 100644 --- 
a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go @@ -9,7 +9,6 @@ import ( "sort" "google.golang.org/protobuf/encoding/protowire" - "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/reflect/protoreflect" ) @@ -241,16 +240,11 @@ func appendMapItem(b []byte, keyrv, valrv reflect.Value, mapi *mapInfo, f *coder size += mapi.keyFuncs.size(key.Value(), mapKeyTagSize, opts) size += mapi.valFuncs.size(val, mapValTagSize, opts) b = protowire.AppendVarint(b, uint64(size)) - before := len(b) b, err := mapi.keyFuncs.marshal(b, key.Value(), mapi.keyWiretag, opts) if err != nil { return nil, err } - b, err = mapi.valFuncs.marshal(b, val, mapi.valWiretag, opts) - if measuredSize := len(b) - before; size != measuredSize && err == nil { - return nil, errors.MismatchedSizeCalculation(size, measuredSize) - } - return b, err + return mapi.valFuncs.marshal(b, val, mapi.valWiretag, opts) } else { key := mapi.conv.keyConv.PBValueOf(keyrv).MapKey() val := pointerOfValue(valrv) @@ -265,12 +259,7 @@ func appendMapItem(b []byte, keyrv, valrv reflect.Value, mapi *mapInfo, f *coder } b = protowire.AppendVarint(b, mapi.valWiretag) b = protowire.AppendVarint(b, uint64(valSize)) - before := len(b) - b, err = f.mi.marshalAppendPointer(b, val, opts) - if measuredSize := len(b) - before; valSize != measuredSize && err == nil { - return nil, errors.MismatchedSizeCalculation(valSize, measuredSize) - } - return b, err + return f.mi.marshalAppendPointer(b, val, opts) } } diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go index 7a16ec13..b7a23faf 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go @@ -26,15 +26,6 @@ func sizeMessageSet(mi *MessageInfo, p pointer, opts marshalOptions) (size int) } num, _ := protowire.DecodeTag(xi.wiretag) size += messageset.SizeField(num) - if fullyLazyExtensions(opts) { - // Don't expand the extension, instead use the buffer to calculate size - if lb := x.lazyBuffer(); lb != nil { - // We got hold of the buffer, so it's still lazy. - // Don't count the tag size in the extension buffer, it's already added. - size += protowire.SizeTag(messageset.FieldMessage) + len(lb) - xi.tagsize - continue - } - } size += xi.funcs.size(x.Value(), protowire.SizeTag(messageset.FieldMessage), opts) } @@ -94,19 +85,6 @@ func marshalMessageSetField(mi *MessageInfo, b []byte, x ExtensionField, opts ma xi := getExtensionFieldInfo(x.Type()) num, _ := protowire.DecodeTag(xi.wiretag) b = messageset.AppendFieldStart(b, num) - - if fullyLazyExtensions(opts) { - // Don't expand the extension if it's still in wire format, instead use the buffer content. - if lb := x.lazyBuffer(); lb != nil { - // The tag inside the lazy buffer is a different tag (the extension - // number), but what we need here is the tag for FieldMessage: - b = protowire.AppendVarint(b, protowire.EncodeTag(messageset.FieldMessage, protowire.BytesType)) - b = append(b, lb[xi.tagsize:]...) 
- b = messageset.AppendFieldEnd(b) - return b, nil - } - } - b, err := xi.funcs.marshal(b, x.Value(), protowire.EncodeTag(messageset.FieldMessage, protowire.BytesType), opts) if err != nil { return b, err diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go b/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go index 13077751..576dcf3a 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go @@ -197,7 +197,7 @@ func fieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) (*MessageInfo, return getMessageInfo(ft), makeMessageFieldCoder(fd, ft) case fd.Kind() == protoreflect.GroupKind: return getMessageInfo(ft), makeGroupFieldCoder(fd, ft) - case !fd.HasPresence() && fd.ContainingOneof() == nil: + case fd.Syntax() == protoreflect.Proto3 && fd.ContainingOneof() == nil: // Populated oneof fields always encode even if set to the zero value, // which normally are not encoded in proto3. switch fd.Kind() { diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert.go b/vendor/google.golang.org/protobuf/internal/impl/convert.go index e06ece55..185ef2ef 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert.go @@ -14,7 +14,7 @@ import ( // unwrapper unwraps the value to the underlying value. // This is implemented by List and Map. type unwrapper interface { - protoUnwrap() any + protoUnwrap() interface{} } // A Converter coverts to/from Go reflect.Value types and protobuf protoreflect.Value types. diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_list.go b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go index 18cb96fd..f8913651 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert_list.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go @@ -136,6 +136,6 @@ func (ls *listReflect) NewElement() protoreflect.Value { func (ls *listReflect) IsValid() bool { return !ls.v.IsNil() } -func (ls *listReflect) protoUnwrap() any { +func (ls *listReflect) protoUnwrap() interface{} { return ls.v.Interface() } diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go index 304244a6..f30b0a05 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go @@ -116,6 +116,6 @@ func (ms *mapReflect) NewValue() protoreflect.Value { func (ms *mapReflect) IsValid() bool { return !ms.v.IsNil() } -func (ms *mapReflect) protoUnwrap() any { +func (ms *mapReflect) protoUnwrap() interface{} { return ms.v.Interface() } diff --git a/vendor/google.golang.org/protobuf/internal/impl/encode.go b/vendor/google.golang.org/protobuf/internal/impl/encode.go index febd2122..845c67d6 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/encode.go +++ b/vendor/google.golang.org/protobuf/internal/impl/encode.go @@ -49,11 +49,8 @@ func (mi *MessageInfo) sizePointer(p pointer, opts marshalOptions) (size int) { return 0 } if opts.UseCachedSize() && mi.sizecacheOffset.IsValid() { - // The size cache contains the size + 1, to allow the - // zero value to be invalid, while also allowing for a - // 0 size to be cached. 
- if size := atomic.LoadInt32(p.Apply(mi.sizecacheOffset).Int32()); size > 0 { - return int(size - 1) + if size := atomic.LoadInt32(p.Apply(mi.sizecacheOffset).Int32()); size >= 0 { + return int(size) } } return mi.sizePointerSlow(p, opts) @@ -63,7 +60,7 @@ func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int if flags.ProtoLegacy && mi.isMessageSet { size = sizeMessageSet(mi, p, opts) if mi.sizecacheOffset.IsValid() { - atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size+1)) + atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size)) } return size } @@ -87,16 +84,13 @@ func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int } } if mi.sizecacheOffset.IsValid() { - if size > (math.MaxInt32 - 1) { + if size > math.MaxInt32 { // The size is too large for the int32 sizecache field. // We will need to recompute the size when encoding; // unfortunately expensive, but better than invalid output. - atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), 0) + atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), -1) } else { - // The size cache contains the size + 1, to allow the - // zero value to be invalid, while also allowing for a - // 0 size to be cached. - atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size+1)) + atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size)) } } return size @@ -155,14 +149,6 @@ func (mi *MessageInfo) marshalAppendPointer(b []byte, p pointer, opts marshalOpt return b, nil } -// fullyLazyExtensions returns true if we should attempt to keep extensions lazy over size and marshal. -func fullyLazyExtensions(opts marshalOptions) bool { - // When deterministic marshaling is requested, force an unmarshal for lazy - // extensions to produce a deterministic result, instead of passing through - // bytes lazily that may or may not match what Go Protobuf would produce. - return opts.flags&piface.MarshalDeterministic == 0 -} - func (mi *MessageInfo) sizeExtensions(ext *map[int32]ExtensionField, opts marshalOptions) (n int) { if ext == nil { return 0 @@ -172,14 +158,6 @@ func (mi *MessageInfo) sizeExtensions(ext *map[int32]ExtensionField, opts marsha if xi.funcs.size == nil { continue } - if fullyLazyExtensions(opts) { - // Don't expand the extension, instead use the buffer to calculate size - if lb := x.lazyBuffer(); lb != nil { - // We got hold of the buffer, so it's still lazy. - n += len(lb) - continue - } - } n += xi.funcs.size(x.Value(), xi.tagsize, opts) } return n @@ -198,13 +176,6 @@ func (mi *MessageInfo) appendExtensions(b []byte, ext *map[int32]ExtensionField, var err error for _, x := range *ext { xi := getExtensionFieldInfo(x.Type()) - if fullyLazyExtensions(opts) { - // Don't expand the extension if it's still in wire format, instead use the buffer content. - if lb := x.lazyBuffer(); lb != nil { - b = append(b, lb...) - continue - } - } b, err = xi.funcs.marshal(b, x.Value(), xi.wiretag, opts) } return b, err @@ -220,13 +191,6 @@ func (mi *MessageInfo) appendExtensions(b []byte, ext *map[int32]ExtensionField, for _, k := range keys { x := (*ext)[int32(k)] xi := getExtensionFieldInfo(x.Type()) - if fullyLazyExtensions(opts) { - // Don't expand the extension if it's still in wire format, instead use the buffer content. - if lb := x.lazyBuffer(); lb != nil { - b = append(b, lb...) 
- continue - } - } b, err = xi.funcs.marshal(b, x.Value(), xi.wiretag, opts) if err != nil { return b, err diff --git a/vendor/google.golang.org/protobuf/internal/impl/extension.go b/vendor/google.golang.org/protobuf/internal/impl/extension.go index e31249f6..cb25b0ba 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/extension.go @@ -53,7 +53,7 @@ type ExtensionInfo struct { // type returned by InterfaceOf may not be identical. // // Deprecated: Use InterfaceOf(xt.Zero()) instead. - ExtensionType any + ExtensionType interface{} // Field is the field number of the extension. // @@ -95,16 +95,16 @@ func (xi *ExtensionInfo) New() protoreflect.Value { func (xi *ExtensionInfo) Zero() protoreflect.Value { return xi.lazyInit().Zero() } -func (xi *ExtensionInfo) ValueOf(v any) protoreflect.Value { +func (xi *ExtensionInfo) ValueOf(v interface{}) protoreflect.Value { return xi.lazyInit().PBValueOf(reflect.ValueOf(v)) } -func (xi *ExtensionInfo) InterfaceOf(v protoreflect.Value) any { +func (xi *ExtensionInfo) InterfaceOf(v protoreflect.Value) interface{} { return xi.lazyInit().GoValueOf(v).Interface() } func (xi *ExtensionInfo) IsValidValue(v protoreflect.Value) bool { return xi.lazyInit().IsValidPB(v) } -func (xi *ExtensionInfo) IsValidInterface(v any) bool { +func (xi *ExtensionInfo) IsValidInterface(v interface{}) bool { return xi.lazyInit().IsValidGo(reflect.ValueOf(v)) } func (xi *ExtensionInfo) TypeDescriptor() protoreflect.ExtensionTypeDescriptor { diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go index 81b2b1a7..c2a803bb 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go @@ -97,7 +97,7 @@ func (e *legacyEnumWrapper) Number() protoreflect.EnumNumber { func (e *legacyEnumWrapper) ProtoReflect() protoreflect.Enum { return e } -func (e *legacyEnumWrapper) protoUnwrap() any { +func (e *legacyEnumWrapper) protoUnwrap() interface{} { v := reflect.New(e.goTyp).Elem() v.SetInt(int64(e.num)) return v.Interface() @@ -167,7 +167,6 @@ func aberrantLoadEnumDesc(t reflect.Type) protoreflect.EnumDescriptor { ed := &filedesc.Enum{L2: new(filedesc.EnumL2)} ed.L0.FullName = AberrantDeriveFullName(t) // e.g., github_com.user.repo.MyEnum ed.L0.ParentFile = filedesc.SurrogateProto3 - ed.L1.EditionFeatures = ed.L0.ParentFile.L1.EditionFeatures ed.L2.Values.List = append(ed.L2.Values.List, filedesc.EnumValue{}) // TODO: Use the presence of a UnmarshalJSON method to determine proto2? 
diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go index 6e8677ee..87b30d05 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go @@ -118,7 +118,7 @@ func (xi *ExtensionInfo) initFromLegacy() { xd.L1.Number = protoreflect.FieldNumber(xi.Field) xd.L1.Cardinality = fd.L1.Cardinality xd.L1.Kind = fd.L1.Kind - xd.L1.EditionFeatures = fd.L1.EditionFeatures + xd.L2.IsPacked = fd.L1.IsPacked xd.L2.Default = fd.L1.Default xd.L1.Extendee = Export{}.MessageDescriptorOf(xi.ExtendedType) xd.L2.Enum = ed diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go index b649f112..9ab09108 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go @@ -7,7 +7,7 @@ package impl import ( "bytes" "compress/gzip" - "io" + "io/ioutil" "sync" "google.golang.org/protobuf/internal/filedesc" @@ -51,7 +51,7 @@ func legacyLoadFileDesc(b []byte) protoreflect.FileDescriptor { if err != nil { panic(err) } - b2, err := io.ReadAll(zr) + b2, err := ioutil.ReadAll(zr) if err != nil { panic(err) } diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go index bf0b6049..61c483fa 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go @@ -204,21 +204,15 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName } } - md.L1.EditionFeatures = md.L0.ParentFile.L1.EditionFeatures // Obtain a list of oneof wrapper types. 
var oneofWrappers []reflect.Type - methods := make([]reflect.Method, 0, 2) - if m, ok := t.MethodByName("XXX_OneofFuncs"); ok { - methods = append(methods, m) - } - if m, ok := t.MethodByName("XXX_OneofWrappers"); ok { - methods = append(methods, m) - } - for _, fn := range methods { - for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { - if vs, ok := v.Interface().([]any); ok { - for _, v := range vs { - oneofWrappers = append(oneofWrappers, reflect.TypeOf(v)) + for _, method := range []string{"XXX_OneofFuncs", "XXX_OneofWrappers"} { + if fn, ok := t.MethodByName(method); ok { + for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { + if vs, ok := v.Interface().([]interface{}); ok { + for _, v := range vs { + oneofWrappers = append(oneofWrappers, reflect.TypeOf(v)) + } } } } @@ -251,7 +245,6 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName od := &md.L2.Oneofs.List[n] od.L0.FullName = md.FullName().Append(protoreflect.Name(tag)) od.L0.ParentFile = md.L0.ParentFile - od.L1.EditionFeatures = md.L1.EditionFeatures od.L0.Parent = md od.L0.Index = n @@ -262,7 +255,6 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName aberrantAppendField(md, f.Type, tag, "", "") fd := &md.L2.Fields.List[len(md.L2.Fields.List)-1] fd.L1.ContainingOneof = od - fd.L1.EditionFeatures = od.L1.EditionFeatures od.L1.Fields.List = append(od.L1.Fields.List, fd) } } @@ -310,14 +302,14 @@ func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey, fd.L0.Parent = md fd.L0.Index = n - if fd.L1.IsWeak || fd.L1.EditionFeatures.IsPacked { + if fd.L1.IsWeak || fd.L1.HasPacked { fd.L1.Options = func() protoreflect.ProtoMessage { opts := descopts.Field.ProtoReflect().New() if fd.L1.IsWeak { opts.Set(opts.Descriptor().Fields().ByName("weak"), protoreflect.ValueOfBool(true)) } - if fd.L1.EditionFeatures.IsPacked { - opts.Set(opts.Descriptor().Fields().ByName("packed"), protoreflect.ValueOfBool(fd.L1.EditionFeatures.IsPacked)) + if fd.L1.HasPacked { + opts.Set(opts.Descriptor().Fields().ByName("packed"), protoreflect.ValueOfBool(fd.L1.IsPacked)) } return opts.Interface() } @@ -347,7 +339,6 @@ func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey, md2.L0.ParentFile = md.L0.ParentFile md2.L0.Parent = md md2.L0.Index = n - md2.L1.EditionFeatures = md.L1.EditionFeatures md2.L1.IsMapEntry = true md2.L2.Options = func() protoreflect.ProtoMessage { @@ -567,6 +558,6 @@ func (m aberrantMessage) IsValid() bool { func (m aberrantMessage) ProtoMethods() *protoiface.Methods { return aberrantProtoMethods } -func (m aberrantMessage) protoUnwrap() any { +func (m aberrantMessage) protoUnwrap() interface{} { return m.v.Interface() } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go index 019399d4..4f5fb67a 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message.go @@ -35,7 +35,7 @@ type MessageInfo struct { Exporter exporter // OneofWrappers is list of pointers to oneof wrapper struct types. - OneofWrappers []any + OneofWrappers []interface{} initMu sync.Mutex // protects all unexported fields initDone uint32 @@ -47,7 +47,7 @@ type MessageInfo struct { // exporter is a function that returns a reference to the ith field of v, // where v is a pointer to a struct. 
It returns nil if it does not support // exporting the requested field (e.g., already exported). -type exporter func(v any, i int) any +type exporter func(v interface{}, i int) interface{} // getMessageInfo returns the MessageInfo for any message type that // is generated by our implementation of protoc-gen-go (for v2 and on). @@ -192,17 +192,12 @@ fieldLoop: // Derive a mapping of oneof wrappers to fields. oneofWrappers := mi.OneofWrappers - methods := make([]reflect.Method, 0, 2) - if m, ok := reflect.PtrTo(t).MethodByName("XXX_OneofFuncs"); ok { - methods = append(methods, m) - } - if m, ok := reflect.PtrTo(t).MethodByName("XXX_OneofWrappers"); ok { - methods = append(methods, m) - } - for _, fn := range methods { - for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { - if vs, ok := v.Interface().([]any); ok { - oneofWrappers = vs + for _, method := range []string{"XXX_OneofFuncs", "XXX_OneofWrappers"} { + if fn, ok := reflect.PtrTo(t).MethodByName(method); ok { + for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { + if vs, ok := v.Interface().([]interface{}); ok { + oneofWrappers = vs + } } } } @@ -256,7 +251,7 @@ func (mi *MessageInfo) Message(i int) protoreflect.MessageType { type mapEntryType struct { desc protoreflect.MessageDescriptor - valType any // zero value of enum or message type + valType interface{} // zero value of enum or message type } func (mt mapEntryType) New() protoreflect.Message { diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go index ecb4623d..d9ea010b 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go @@ -20,7 +20,7 @@ type reflectMessageInfo struct { // fieldTypes contains the zero value of an enum or message field. // For lists, it contains the element type. // For maps, it contains the entry value type. - fieldTypes map[protoreflect.FieldNumber]any + fieldTypes map[protoreflect.FieldNumber]interface{} // denseFields is a subset of fields where: // 0 < fieldDesc.Number() < len(denseFields) @@ -28,7 +28,7 @@ type reflectMessageInfo struct { denseFields []*fieldInfo // rangeInfos is a list of all fields (not belonging to a oneof) and oneofs. - rangeInfos []any // either *fieldInfo or *oneofInfo + rangeInfos []interface{} // either *fieldInfo or *oneofInfo getUnknown func(pointer) protoreflect.RawFields setUnknown func(pointer, protoreflect.RawFields) @@ -224,7 +224,7 @@ func (mi *MessageInfo) makeFieldTypes(si structInfo) { } if ft != nil { if mi.fieldTypes == nil { - mi.fieldTypes = make(map[protoreflect.FieldNumber]any) + mi.fieldTypes = make(map[protoreflect.FieldNumber]interface{}) } mi.fieldTypes[fd.Number()] = reflect.Zero(ft).Interface() } @@ -247,39 +247,39 @@ func (m *extensionMap) Range(f func(protoreflect.FieldDescriptor, protoreflect.V } } } -func (m *extensionMap) Has(xd protoreflect.ExtensionTypeDescriptor) (ok bool) { +func (m *extensionMap) Has(xt protoreflect.ExtensionType) (ok bool) { if m == nil { return false } + xd := xt.TypeDescriptor() x, ok := (*m)[int32(xd.Number())] if !ok { return false } - if x.isUnexpandedLazy() { - // Avoid calling x.Value(), which triggers a lazy unmarshal. 
- return true - } switch { case xd.IsList(): return x.Value().List().Len() > 0 case xd.IsMap(): return x.Value().Map().Len() > 0 + case xd.Message() != nil: + return x.Value().Message().IsValid() } return true } -func (m *extensionMap) Clear(xd protoreflect.ExtensionTypeDescriptor) { - delete(*m, int32(xd.Number())) +func (m *extensionMap) Clear(xt protoreflect.ExtensionType) { + delete(*m, int32(xt.TypeDescriptor().Number())) } -func (m *extensionMap) Get(xd protoreflect.ExtensionTypeDescriptor) protoreflect.Value { +func (m *extensionMap) Get(xt protoreflect.ExtensionType) protoreflect.Value { + xd := xt.TypeDescriptor() if m != nil { if x, ok := (*m)[int32(xd.Number())]; ok { return x.Value() } } - return xd.Type().Zero() + return xt.Zero() } -func (m *extensionMap) Set(xd protoreflect.ExtensionTypeDescriptor, v protoreflect.Value) { - xt := xd.Type() +func (m *extensionMap) Set(xt protoreflect.ExtensionType, v protoreflect.Value) { + xd := xt.TypeDescriptor() isValid := true switch { case !xt.IsValidValue(v): @@ -292,7 +292,7 @@ func (m *extensionMap) Set(xd protoreflect.ExtensionTypeDescriptor, v protorefle isValid = v.Message().IsValid() } if !isValid { - panic(fmt.Sprintf("%v: assigning invalid value", xd.FullName())) + panic(fmt.Sprintf("%v: assigning invalid value", xt.TypeDescriptor().FullName())) } if *m == nil { @@ -302,15 +302,16 @@ func (m *extensionMap) Set(xd protoreflect.ExtensionTypeDescriptor, v protorefle x.Set(xt, v) (*m)[int32(xd.Number())] = x } -func (m *extensionMap) Mutable(xd protoreflect.ExtensionTypeDescriptor) protoreflect.Value { +func (m *extensionMap) Mutable(xt protoreflect.ExtensionType) protoreflect.Value { + xd := xt.TypeDescriptor() if xd.Kind() != protoreflect.MessageKind && xd.Kind() != protoreflect.GroupKind && !xd.IsList() && !xd.IsMap() { panic("invalid Mutable on field with non-composite type") } if x, ok := (*m)[int32(xd.Number())]; ok { return x.Value() } - v := xd.Type().New() - m.Set(xd, v) + v := xt.New() + m.Set(xt, v) return v } @@ -393,7 +394,7 @@ var ( // MessageOf returns a reflective view over a message. The input must be a // pointer to a named Go struct. If the provided type has a ProtoReflect method, // it must be implemented by calling this method. -func (mi *MessageInfo) MessageOf(m any) protoreflect.Message { +func (mi *MessageInfo) MessageOf(m interface{}) protoreflect.Message { if reflect.TypeOf(m) != mi.GoReflectType { panic(fmt.Sprintf("type mismatch: got %T, want %v", m, mi.GoReflectType)) } @@ -421,13 +422,13 @@ func (m *messageIfaceWrapper) Reset() { func (m *messageIfaceWrapper) ProtoReflect() protoreflect.Message { return (*messageReflectWrapper)(m) } -func (m *messageIfaceWrapper) protoUnwrap() any { +func (m *messageIfaceWrapper) protoUnwrap() interface{} { return m.p.AsIfaceOf(m.mi.GoReflectType.Elem()) } // checkField verifies that the provided field descriptor is valid. // Exactly one of the returned values is populated. 
-func (mi *MessageInfo) checkField(fd protoreflect.FieldDescriptor) (*fieldInfo, protoreflect.ExtensionTypeDescriptor) { +func (mi *MessageInfo) checkField(fd protoreflect.FieldDescriptor) (*fieldInfo, protoreflect.ExtensionType) { var fi *fieldInfo if n := fd.Number(); 0 < n && int(n) < len(mi.denseFields) { fi = mi.denseFields[n] @@ -456,7 +457,7 @@ func (mi *MessageInfo) checkField(fd protoreflect.FieldDescriptor) (*fieldInfo, if !ok { panic(fmt.Sprintf("extension %v does not implement protoreflect.ExtensionTypeDescriptor", fd.FullName())) } - return nil, xtd + return nil, xtd.Type() } panic(fmt.Sprintf("field %v is invalid", fd.FullName())) } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go index 986322b1..5e736c60 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go @@ -538,6 +538,6 @@ func isZero(v reflect.Value) bool { } return true default: - panic(&reflect.ValueError{Method: "reflect.Value.IsZero", Kind: v.Kind()}) + panic(&reflect.ValueError{"reflect.Value.IsZero", v.Kind()}) } } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go index 99dc23c6..741d6e5b 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go @@ -23,13 +23,12 @@ func (m *messageState) New() protoreflect.Message { func (m *messageState) Interface() protoreflect.ProtoMessage { return m.protoUnwrap().(protoreflect.ProtoMessage) } -func (m *messageState) protoUnwrap() any { +func (m *messageState) protoUnwrap() interface{} { return m.pointer().AsIfaceOf(m.messageInfo().GoReflectType.Elem()) } func (m *messageState) ProtoMethods() *protoiface.Methods { - mi := m.messageInfo() - mi.init() - return &mi.methods + m.messageInfo().init() + return &m.messageInfo().methods } // ProtoMessageInfo is a pseudo-internal API for allowing the v1 code @@ -42,9 +41,8 @@ func (m *messageState) ProtoMessageInfo() *MessageInfo { } func (m *messageState) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { - mi := m.messageInfo() - mi.init() - for _, ri := range mi.rangeInfos { + m.messageInfo().init() + for _, ri := range m.messageInfo().rangeInfos { switch ri := ri.(type) { case *fieldInfo: if ri.has(m.pointer()) { @@ -54,86 +52,77 @@ func (m *messageState) Range(f func(protoreflect.FieldDescriptor, protoreflect.V } case *oneofInfo: if n := ri.which(m.pointer()); n > 0 { - fi := mi.fields[n] + fi := m.messageInfo().fields[n] if !f(fi.fieldDesc, fi.get(m.pointer())) { return } } } } - mi.extensionMap(m.pointer()).Range(f) + m.messageInfo().extensionMap(m.pointer()).Range(f) } func (m *messageState) Has(fd protoreflect.FieldDescriptor) bool { - mi := m.messageInfo() - mi.init() - if fi, xd := mi.checkField(fd); fi != nil { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { return fi.has(m.pointer()) } else { - return mi.extensionMap(m.pointer()).Has(xd) + return m.messageInfo().extensionMap(m.pointer()).Has(xt) } } func (m *messageState) Clear(fd protoreflect.FieldDescriptor) { - mi := m.messageInfo() - mi.init() - if fi, xd := mi.checkField(fd); fi != nil { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { 
fi.clear(m.pointer()) } else { - mi.extensionMap(m.pointer()).Clear(xd) + m.messageInfo().extensionMap(m.pointer()).Clear(xt) } } func (m *messageState) Get(fd protoreflect.FieldDescriptor) protoreflect.Value { - mi := m.messageInfo() - mi.init() - if fi, xd := mi.checkField(fd); fi != nil { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { return fi.get(m.pointer()) } else { - return mi.extensionMap(m.pointer()).Get(xd) + return m.messageInfo().extensionMap(m.pointer()).Get(xt) } } func (m *messageState) Set(fd protoreflect.FieldDescriptor, v protoreflect.Value) { - mi := m.messageInfo() - mi.init() - if fi, xd := mi.checkField(fd); fi != nil { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { fi.set(m.pointer(), v) } else { - mi.extensionMap(m.pointer()).Set(xd, v) + m.messageInfo().extensionMap(m.pointer()).Set(xt, v) } } func (m *messageState) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { - mi := m.messageInfo() - mi.init() - if fi, xd := mi.checkField(fd); fi != nil { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { return fi.mutable(m.pointer()) } else { - return mi.extensionMap(m.pointer()).Mutable(xd) + return m.messageInfo().extensionMap(m.pointer()).Mutable(xt) } } func (m *messageState) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { - mi := m.messageInfo() - mi.init() - if fi, xd := mi.checkField(fd); fi != nil { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { return fi.newField() } else { - return xd.Type().New() + return xt.New() } } func (m *messageState) WhichOneof(od protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { - mi := m.messageInfo() - mi.init() - if oi := mi.oneofs[od.Name()]; oi != nil && oi.oneofDesc == od { + m.messageInfo().init() + if oi := m.messageInfo().oneofs[od.Name()]; oi != nil && oi.oneofDesc == od { return od.Fields().ByNumber(oi.which(m.pointer())) } panic("invalid oneof descriptor " + string(od.FullName()) + " for message " + string(m.Descriptor().FullName())) } func (m *messageState) GetUnknown() protoreflect.RawFields { - mi := m.messageInfo() - mi.init() - return mi.getUnknown(m.pointer()) + m.messageInfo().init() + return m.messageInfo().getUnknown(m.pointer()) } func (m *messageState) SetUnknown(b protoreflect.RawFields) { - mi := m.messageInfo() - mi.init() - mi.setUnknown(m.pointer(), b) + m.messageInfo().init() + m.messageInfo().setUnknown(m.pointer(), b) } func (m *messageState) IsValid() bool { return !m.pointer().IsNil() @@ -154,13 +143,12 @@ func (m *messageReflectWrapper) Interface() protoreflect.ProtoMessage { } return (*messageIfaceWrapper)(m) } -func (m *messageReflectWrapper) protoUnwrap() any { +func (m *messageReflectWrapper) protoUnwrap() interface{} { return m.pointer().AsIfaceOf(m.messageInfo().GoReflectType.Elem()) } func (m *messageReflectWrapper) ProtoMethods() *protoiface.Methods { - mi := m.messageInfo() - mi.init() - return &mi.methods + m.messageInfo().init() + return &m.messageInfo().methods } // ProtoMessageInfo is a pseudo-internal API for allowing the v1 code @@ -173,9 +161,8 @@ func (m *messageReflectWrapper) ProtoMessageInfo() *MessageInfo { } func (m *messageReflectWrapper) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { - mi := m.messageInfo() - mi.init() - for _, ri := range mi.rangeInfos { + m.messageInfo().init() + for _, ri := range m.messageInfo().rangeInfos { switch ri := ri.(type) { case *fieldInfo: if 
ri.has(m.pointer()) { @@ -185,86 +172,77 @@ func (m *messageReflectWrapper) Range(f func(protoreflect.FieldDescriptor, proto } case *oneofInfo: if n := ri.which(m.pointer()); n > 0 { - fi := mi.fields[n] + fi := m.messageInfo().fields[n] if !f(fi.fieldDesc, fi.get(m.pointer())) { return } } } } - mi.extensionMap(m.pointer()).Range(f) + m.messageInfo().extensionMap(m.pointer()).Range(f) } func (m *messageReflectWrapper) Has(fd protoreflect.FieldDescriptor) bool { - mi := m.messageInfo() - mi.init() - if fi, xd := mi.checkField(fd); fi != nil { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { return fi.has(m.pointer()) } else { - return mi.extensionMap(m.pointer()).Has(xd) + return m.messageInfo().extensionMap(m.pointer()).Has(xt) } } func (m *messageReflectWrapper) Clear(fd protoreflect.FieldDescriptor) { - mi := m.messageInfo() - mi.init() - if fi, xd := mi.checkField(fd); fi != nil { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { fi.clear(m.pointer()) } else { - mi.extensionMap(m.pointer()).Clear(xd) + m.messageInfo().extensionMap(m.pointer()).Clear(xt) } } func (m *messageReflectWrapper) Get(fd protoreflect.FieldDescriptor) protoreflect.Value { - mi := m.messageInfo() - mi.init() - if fi, xd := mi.checkField(fd); fi != nil { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { return fi.get(m.pointer()) } else { - return mi.extensionMap(m.pointer()).Get(xd) + return m.messageInfo().extensionMap(m.pointer()).Get(xt) } } func (m *messageReflectWrapper) Set(fd protoreflect.FieldDescriptor, v protoreflect.Value) { - mi := m.messageInfo() - mi.init() - if fi, xd := mi.checkField(fd); fi != nil { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { fi.set(m.pointer(), v) } else { - mi.extensionMap(m.pointer()).Set(xd, v) + m.messageInfo().extensionMap(m.pointer()).Set(xt, v) } } func (m *messageReflectWrapper) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { - mi := m.messageInfo() - mi.init() - if fi, xd := mi.checkField(fd); fi != nil { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { return fi.mutable(m.pointer()) } else { - return mi.extensionMap(m.pointer()).Mutable(xd) + return m.messageInfo().extensionMap(m.pointer()).Mutable(xt) } } func (m *messageReflectWrapper) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { - mi := m.messageInfo() - mi.init() - if fi, xd := mi.checkField(fd); fi != nil { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { return fi.newField() } else { - return xd.Type().New() + return xt.New() } } func (m *messageReflectWrapper) WhichOneof(od protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { - mi := m.messageInfo() - mi.init() - if oi := mi.oneofs[od.Name()]; oi != nil && oi.oneofDesc == od { + m.messageInfo().init() + if oi := m.messageInfo().oneofs[od.Name()]; oi != nil && oi.oneofDesc == od { return od.Fields().ByNumber(oi.which(m.pointer())) } panic("invalid oneof descriptor " + string(od.FullName()) + " for message " + string(m.Descriptor().FullName())) } func (m *messageReflectWrapper) GetUnknown() protoreflect.RawFields { - mi := m.messageInfo() - mi.init() - return mi.getUnknown(m.pointer()) + m.messageInfo().init() + return m.messageInfo().getUnknown(m.pointer()) } func (m *messageReflectWrapper) SetUnknown(b protoreflect.RawFields) { - mi := m.messageInfo() - mi.init() - mi.setUnknown(m.pointer(), b) + 
m.messageInfo().init() + m.messageInfo().setUnknown(m.pointer(), b) } func (m *messageReflectWrapper) IsValid() bool { return !m.pointer().IsNil() diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go index da685e8a..4c491bdf 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go @@ -16,7 +16,7 @@ import ( const UnsafeEnabled = false // Pointer is an opaque pointer type. -type Pointer any +type Pointer interface{} // offset represents the offset to a struct field, accessible from a pointer. // The offset is the field index into a struct. @@ -62,7 +62,7 @@ func pointerOfValue(v reflect.Value) pointer { } // pointerOfIface returns the pointer portion of an interface. -func pointerOfIface(v any) pointer { +func pointerOfIface(v interface{}) pointer { return pointer{v: reflect.ValueOf(v)} } @@ -93,7 +93,7 @@ func (p pointer) AsValueOf(t reflect.Type) reflect.Value { // AsIfaceOf treats p as a pointer to an object of type t and returns the value. // It is equivalent to p.AsValueOf(t).Interface() -func (p pointer) AsIfaceOf(t reflect.Type) any { +func (p pointer) AsIfaceOf(t reflect.Type) interface{} { return p.AsValueOf(t).Interface() } @@ -159,42 +159,6 @@ func (p pointer) SetPointer(v pointer) { p.v.Elem().Set(v.v) } -func growSlice(p pointer, addCap int) { - // TODO: Once we only support Go 1.20 and newer, use reflect.Grow. - in := p.v.Elem() - out := reflect.MakeSlice(in.Type(), in.Len(), in.Len()+addCap) - reflect.Copy(out, in) - p.v.Elem().Set(out) -} - -func (p pointer) growBoolSlice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growInt32Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growUint32Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growInt64Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growUint64Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growFloat64Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growFloat32Slice(addCap int) { - growSlice(p, addCap) -} - func (Export) MessageStateOf(p Pointer) *messageState { panic("not supported") } func (ms *messageState) pointer() pointer { panic("not supported") } func (ms *messageState) messageInfo() *MessageInfo { panic("not supported") } diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go index 5f20ca5d..ee0e0573 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go @@ -50,7 +50,7 @@ func pointerOfValue(v reflect.Value) pointer { } // pointerOfIface returns the pointer portion of an interface. -func pointerOfIface(v any) pointer { +func pointerOfIface(v interface{}) pointer { type ifaceHeader struct { Type unsafe.Pointer Data unsafe.Pointer @@ -80,7 +80,7 @@ func (p pointer) AsValueOf(t reflect.Type) reflect.Value { // AsIfaceOf treats p as a pointer to an object of type t and returns the value. // It is equivalent to p.AsValueOf(t).Interface() -func (p pointer) AsIfaceOf(t reflect.Type) any { +func (p pointer) AsIfaceOf(t reflect.Type) interface{} { // TODO: Use tricky unsafe magic to directly create ifaceHeader. 
return p.AsValueOf(t).Interface() } @@ -138,46 +138,6 @@ func (p pointer) SetPointer(v pointer) { *(*unsafe.Pointer)(p.p) = (unsafe.Pointer)(v.p) } -func (p pointer) growBoolSlice(addCap int) { - sp := p.BoolSlice() - s := make([]bool, 0, addCap+len(*sp)) - s = s[:len(*sp)] - copy(s, *sp) - *sp = s -} - -func (p pointer) growInt32Slice(addCap int) { - sp := p.Int32Slice() - s := make([]int32, 0, addCap+len(*sp)) - s = s[:len(*sp)] - copy(s, *sp) - *sp = s -} - -func (p pointer) growUint32Slice(addCap int) { - p.growInt32Slice(addCap) -} - -func (p pointer) growFloat32Slice(addCap int) { - p.growInt32Slice(addCap) -} - -func (p pointer) growInt64Slice(addCap int) { - sp := p.Int64Slice() - s := make([]int64, 0, addCap+len(*sp)) - s = s[:len(*sp)] - copy(s, *sp) - *sp = s -} - -func (p pointer) growUint64Slice(addCap int) { - p.growInt64Slice(addCap) -} - -func (p pointer) growFloat64Slice(addCap int) { - p.growInt64Slice(addCap) -} - // Static check that MessageState does not exceed the size of a pointer. const _ = uint(unsafe.Sizeof(unsafe.Pointer(nil)) - unsafe.Sizeof(MessageState{})) diff --git a/vendor/google.golang.org/protobuf/internal/order/range.go b/vendor/google.golang.org/protobuf/internal/order/range.go index a1f09162..1665a68e 100644 --- a/vendor/google.golang.org/protobuf/internal/order/range.go +++ b/vendor/google.golang.org/protobuf/internal/order/range.go @@ -18,7 +18,7 @@ type messageField struct { } var messageFieldPool = sync.Pool{ - New: func() any { return new([]messageField) }, + New: func() interface{} { return new([]messageField) }, } type ( @@ -69,7 +69,7 @@ type mapEntry struct { } var mapEntryPool = sync.Pool{ - New: func() any { return new([]mapEntry) }, + New: func() interface{} { return new([]mapEntry) }, } type ( diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings.go b/vendor/google.golang.org/protobuf/internal/strs/strings.go index a6e7df24..0b74e765 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings.go @@ -17,7 +17,7 @@ import ( // EnforceUTF8 reports whether to enforce strict UTF-8 validation. func EnforceUTF8(fd protoreflect.FieldDescriptor) bool { - if flags.ProtoLegacy || fd.Syntax() == protoreflect.Editions { + if flags.ProtoLegacy { if fd, ok := fd.(interface{ EnforceUTF8() bool }); ok { return fd.EnforceUTF8() } diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go similarity index 96% rename from vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go rename to vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go index a008acd0..61a84d34 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine && !go1.21 -// +build !purego,!appengine,!go1.21 +//go:build !purego && !appengine +// +build !purego,!appengine package strs diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go deleted file mode 100644 index 60166f2b..00000000 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2018 The Go Authors. 
All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !purego && !appengine && go1.21 -// +build !purego,!appengine,go1.21 - -package strs - -import ( - "unsafe" - - "google.golang.org/protobuf/reflect/protoreflect" -) - -// UnsafeString returns an unsafe string reference of b. -// The caller must treat the input slice as immutable. -// -// WARNING: Use carefully. The returned result must not leak to the end user -// unless the input slice is provably immutable. -func UnsafeString(b []byte) string { - return unsafe.String(unsafe.SliceData(b), len(b)) -} - -// UnsafeBytes returns an unsafe bytes slice reference of s. -// The caller must treat returned slice as immutable. -// -// WARNING: Use carefully. The returned result must not leak to the end user. -func UnsafeBytes(s string) []byte { - return unsafe.Slice(unsafe.StringData(s), len(s)) -} - -// Builder builds a set of strings with shared lifetime. -// This differs from strings.Builder, which is for building a single string. -type Builder struct { - buf []byte -} - -// AppendFullName is equivalent to protoreflect.FullName.Append, -// but optimized for large batches where each name has a shared lifetime. -func (sb *Builder) AppendFullName(prefix protoreflect.FullName, name protoreflect.Name) protoreflect.FullName { - n := len(prefix) + len(".") + len(name) - if len(prefix) == 0 { - n -= len(".") - } - sb.grow(n) - sb.buf = append(sb.buf, prefix...) - sb.buf = append(sb.buf, '.') - sb.buf = append(sb.buf, name...) - return protoreflect.FullName(sb.last(n)) -} - -// MakeString is equivalent to string(b), but optimized for large batches -// with a shared lifetime. -func (sb *Builder) MakeString(b []byte) string { - sb.grow(len(b)) - sb.buf = append(sb.buf, b...) - return sb.last(len(b)) -} - -func (sb *Builder) grow(n int) { - if cap(sb.buf)-len(sb.buf) >= n { - return - } - - // Unlike strings.Builder, we do not need to copy over the contents - // of the old buffer since our builder provides no API for - // retrieving previously created strings. - sb.buf = make([]byte, 0, 2*(cap(sb.buf)+n)) -} - -func (sb *Builder) last(n int) string { - return UnsafeString(sb.buf[len(sb.buf)-n:]) -} diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index dbbf1f68..0999f29d 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -51,8 +51,8 @@ import ( // 10. Send out the CL for review and submit it. const ( Major = 1 - Minor = 34 - Patch = 2 + Minor = 31 + Patch = 0 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go index d75a6534..48d47946 100644 --- a/vendor/google.golang.org/protobuf/proto/decode.go +++ b/vendor/google.golang.org/protobuf/proto/decode.go @@ -51,8 +51,6 @@ type UnmarshalOptions struct { // Unmarshal parses the wire-format message in b and places the result in m. // The provided message must be mutable (e.g., a non-nil pointer to a message). -// -// See the [UnmarshalOptions] type if you need more control. 
func Unmarshal(b []byte, m Message) error { _, err := UnmarshalOptions{RecursionLimit: protowire.DefaultRecursionLimit}.unmarshal(b, m.ProtoReflect()) return err @@ -71,7 +69,7 @@ func (o UnmarshalOptions) Unmarshal(b []byte, m Message) error { // UnmarshalState parses a wire-format message and places the result in m. // // This method permits fine-grained control over the unmarshaler. -// Most users should use [Unmarshal] instead. +// Most users should use Unmarshal instead. func (o UnmarshalOptions) UnmarshalState(in protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) { if o.RecursionLimit == 0 { o.RecursionLimit = protowire.DefaultRecursionLimit diff --git a/vendor/google.golang.org/protobuf/proto/doc.go b/vendor/google.golang.org/protobuf/proto/doc.go index 80ed16a0..ec71e717 100644 --- a/vendor/google.golang.org/protobuf/proto/doc.go +++ b/vendor/google.golang.org/protobuf/proto/doc.go @@ -18,27 +18,27 @@ // This package contains functions to convert to and from the wire format, // an efficient binary serialization of protocol buffers. // -// - [Size] reports the size of a message in the wire format. +// • Size reports the size of a message in the wire format. // -// - [Marshal] converts a message to the wire format. -// The [MarshalOptions] type provides more control over wire marshaling. +// • Marshal converts a message to the wire format. +// The MarshalOptions type provides more control over wire marshaling. // -// - [Unmarshal] converts a message from the wire format. -// The [UnmarshalOptions] type provides more control over wire unmarshaling. +// • Unmarshal converts a message from the wire format. +// The UnmarshalOptions type provides more control over wire unmarshaling. // // # Basic message operations // -// - [Clone] makes a deep copy of a message. +// • Clone makes a deep copy of a message. // -// - [Merge] merges the content of a message into another. +// • Merge merges the content of a message into another. // -// - [Equal] compares two messages. For more control over comparisons -// and detailed reporting of differences, see package -// [google.golang.org/protobuf/testing/protocmp]. +// • Equal compares two messages. For more control over comparisons +// and detailed reporting of differences, see package +// "google.golang.org/protobuf/testing/protocmp". // -// - [Reset] clears the content of a message. +// • Reset clears the content of a message. // -// - [CheckInitialized] reports whether all required fields in a message are set. +// • CheckInitialized reports whether all required fields in a message are set. // // # Optional scalar constructors // @@ -46,9 +46,9 @@ // as pointers to a value. For example, an optional string field has the // Go type *string. // -// - [Bool], [Int32], [Int64], [Uint32], [Uint64], [Float32], [Float64], and [String] -// take a value and return a pointer to a new instance of it, -// to simplify construction of optional field values. +// • Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, and String +// take a value and return a pointer to a new instance of it, +// to simplify construction of optional field values. // // Generated enum types usually have an Enum method which performs the // same operation. @@ -57,29 +57,29 @@ // // # Extension accessors // -// - [HasExtension], [GetExtension], [SetExtension], and [ClearExtension] -// access extension field values in a protocol buffer message. +// • HasExtension, GetExtension, SetExtension, and ClearExtension +// access extension field values in a protocol buffer message. 
// // Extension fields are only supported in proto2. // // # Related packages // -// - Package [google.golang.org/protobuf/encoding/protojson] converts messages to -// and from JSON. +// • Package "google.golang.org/protobuf/encoding/protojson" converts messages to +// and from JSON. // -// - Package [google.golang.org/protobuf/encoding/prototext] converts messages to -// and from the text format. +// • Package "google.golang.org/protobuf/encoding/prototext" converts messages to +// and from the text format. // -// - Package [google.golang.org/protobuf/reflect/protoreflect] provides a -// reflection interface for protocol buffer data types. +// • Package "google.golang.org/protobuf/reflect/protoreflect" provides a +// reflection interface for protocol buffer data types. // -// - Package [google.golang.org/protobuf/testing/protocmp] provides features -// to compare protocol buffer messages with the [github.com/google/go-cmp/cmp] -// package. +// • Package "google.golang.org/protobuf/testing/protocmp" provides features +// to compare protocol buffer messages with the "github.com/google/go-cmp/cmp" +// package. // -// - Package [google.golang.org/protobuf/types/dynamicpb] provides a dynamic -// message type, suitable for working with messages where the protocol buffer -// type is only known at runtime. +// • Package "google.golang.org/protobuf/types/dynamicpb" provides a dynamic +// message type, suitable for working with messages where the protocol buffer +// type is only known at runtime. // // This module contains additional packages for more specialized use cases. // Consult the individual package documentation for details. diff --git a/vendor/google.golang.org/protobuf/proto/encode.go b/vendor/google.golang.org/protobuf/proto/encode.go index 1f847bcc..bf7f816d 100644 --- a/vendor/google.golang.org/protobuf/proto/encode.go +++ b/vendor/google.golang.org/protobuf/proto/encode.go @@ -5,17 +5,12 @@ package proto import ( - "errors" - "fmt" - "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/order" "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/runtime/protoiface" - - protoerrors "google.golang.org/protobuf/internal/errors" ) // MarshalOptions configures the marshaler. @@ -75,32 +70,7 @@ type MarshalOptions struct { UseCachedSize bool } -// flags turns the specified MarshalOptions (user-facing) into -// protoiface.MarshalInputFlags (used internally by the marshaler). -// -// See impl.marshalOptions.Options for the inverse operation. -func (o MarshalOptions) flags() protoiface.MarshalInputFlags { - var flags protoiface.MarshalInputFlags - - // Note: o.AllowPartial is always forced to true by MarshalOptions.marshal, - // which is why it is not a part of MarshalInputFlags. - - if o.Deterministic { - flags |= protoiface.MarshalDeterministic - } - - if o.UseCachedSize { - flags |= protoiface.MarshalUseCachedSize - } - - return flags -} - // Marshal returns the wire-format encoding of m. -// -// This is the most common entry point for encoding a Protobuf message. -// -// See the [MarshalOptions] type if you need more control. func Marshal(m Message) ([]byte, error) { // Treat nil message interface as an empty message; nothing to output. if m == nil { @@ -146,9 +116,6 @@ func emptyBytesForMessage(m Message) []byte { // MarshalAppend appends the wire-format encoding of m to b, // returning the result. 
-// -// This is a less common entry point than [Marshal], which is only needed if you -// need to supply your own buffers for performance reasons. func (o MarshalOptions) MarshalAppend(b []byte, m Message) ([]byte, error) { // Treat nil message interface as an empty message; nothing to append. if m == nil { @@ -162,7 +129,7 @@ func (o MarshalOptions) MarshalAppend(b []byte, m Message) ([]byte, error) { // MarshalState returns the wire-format encoding of a message. // // This method permits fine-grained control over the marshaler. -// Most users should use [Marshal] instead. +// Most users should use Marshal instead. func (o MarshalOptions) MarshalState(in protoiface.MarshalInput) (protoiface.MarshalOutput, error) { return o.marshal(in.Buf, in.Message) } @@ -178,7 +145,12 @@ func (o MarshalOptions) marshal(b []byte, m protoreflect.Message) (out protoifac in := protoiface.MarshalInput{ Message: m, Buf: b, - Flags: o.flags(), + } + if o.Deterministic { + in.Flags |= protoiface.MarshalDeterministic + } + if o.UseCachedSize { + in.Flags |= protoiface.MarshalUseCachedSize } if methods.Size != nil { sout := methods.Size(protoiface.SizeInput{ @@ -196,10 +168,6 @@ func (o MarshalOptions) marshal(b []byte, m protoreflect.Message) (out protoifac out.Buf, err = o.marshalMessageSlow(b, m) } if err != nil { - var mismatch *protoerrors.SizeMismatchError - if errors.As(err, &mismatch) { - return out, fmt.Errorf("marshaling %s: %v", string(m.Descriptor().FullName()), err) - } return out, err } if allowPartial { diff --git a/vendor/google.golang.org/protobuf/proto/extension.go b/vendor/google.golang.org/protobuf/proto/extension.go index d248f292..5f293cda 100644 --- a/vendor/google.golang.org/protobuf/proto/extension.go +++ b/vendor/google.golang.org/protobuf/proto/extension.go @@ -11,25 +11,22 @@ import ( // HasExtension reports whether an extension field is populated. // It returns false if m is invalid or if xt does not extend m. func HasExtension(m Message, xt protoreflect.ExtensionType) bool { - // Treat nil message interface or descriptor as an empty message; no populated - // fields. - if m == nil || xt == nil { + // Treat nil message interface as an empty message; no populated fields. + if m == nil { return false } // As a special-case, we reports invalid or mismatching descriptors // as always not being populated (since they aren't). - mr := m.ProtoReflect() - xd := xt.TypeDescriptor() - if mr.Descriptor() != xd.ContainingMessage() { + if xt == nil || m.ProtoReflect().Descriptor() != xt.TypeDescriptor().ContainingMessage() { return false } - return mr.Has(xd) + return m.ProtoReflect().Has(xt.TypeDescriptor()) } // ClearExtension clears an extension field such that subsequent -// [HasExtension] calls return false. +// HasExtension calls return false. // It panics if m is invalid or if xt does not extend m. func ClearExtension(m Message, xt protoreflect.ExtensionType) { m.ProtoReflect().Clear(xt.TypeDescriptor()) @@ -39,7 +36,7 @@ func ClearExtension(m Message, xt protoreflect.ExtensionType) { // If the field is unpopulated, it returns the default value for // scalars and an immutable, empty value for lists or messages. // It panics if xt does not extend m. -func GetExtension(m Message, xt protoreflect.ExtensionType) any { +func GetExtension(m Message, xt protoreflect.ExtensionType) interface{} { // Treat nil message interface as an empty message; return the default. 
if m == nil { return xt.InterfaceOf(xt.Zero()) @@ -51,7 +48,7 @@ func GetExtension(m Message, xt protoreflect.ExtensionType) any { // SetExtension stores the value of an extension field. // It panics if m is invalid, xt does not extend m, or if type of v // is invalid for the specified extension field. -func SetExtension(m Message, xt protoreflect.ExtensionType, v any) { +func SetExtension(m Message, xt protoreflect.ExtensionType, v interface{}) { xd := xt.TypeDescriptor() pv := xt.ValueOf(v) @@ -78,7 +75,7 @@ func SetExtension(m Message, xt protoreflect.ExtensionType, v any) { // It returns immediately if f returns false. // While iterating, mutating operations may only be performed // on the current extension field. -func RangeExtensions(m Message, f func(protoreflect.ExtensionType, any) bool) { +func RangeExtensions(m Message, f func(protoreflect.ExtensionType, interface{}) bool) { // Treat nil message interface as an empty message; nothing to range over. if m == nil { return diff --git a/vendor/google.golang.org/protobuf/proto/merge.go b/vendor/google.golang.org/protobuf/proto/merge.go index 3c6fe578..d761ab33 100644 --- a/vendor/google.golang.org/protobuf/proto/merge.go +++ b/vendor/google.golang.org/protobuf/proto/merge.go @@ -21,7 +21,7 @@ import ( // The unknown fields of src are appended to the unknown fields of dst. // // It is semantically equivalent to unmarshaling the encoded form of src -// into dst with the [UnmarshalOptions.Merge] option specified. +// into dst with the UnmarshalOptions.Merge option specified. func Merge(dst, src Message) { // TODO: Should nil src be treated as semantically equivalent to a // untyped, read-only, empty message? What about a nil dst? diff --git a/vendor/google.golang.org/protobuf/proto/messageset.go b/vendor/google.golang.org/protobuf/proto/messageset.go index 575d1483..312d5d45 100644 --- a/vendor/google.golang.org/protobuf/proto/messageset.go +++ b/vendor/google.golang.org/protobuf/proto/messageset.go @@ -47,16 +47,11 @@ func (o MarshalOptions) marshalMessageSet(b []byte, m protoreflect.Message) ([]b func (o MarshalOptions) marshalMessageSetField(b []byte, fd protoreflect.FieldDescriptor, value protoreflect.Value) ([]byte, error) { b = messageset.AppendFieldStart(b, fd.Number()) b = protowire.AppendTag(b, messageset.FieldMessage, protowire.BytesType) - calculatedSize := o.Size(value.Message().Interface()) - b = protowire.AppendVarint(b, uint64(calculatedSize)) - before := len(b) + b = protowire.AppendVarint(b, uint64(o.Size(value.Message().Interface()))) b, err := o.marshalMessage(b, value.Message()) if err != nil { return b, err } - if measuredSize := len(b) - before; calculatedSize != measuredSize { - return nil, errors.MismatchedSizeCalculation(calculatedSize, measuredSize) - } b = messageset.AppendFieldEnd(b) return b, nil } diff --git a/vendor/google.golang.org/protobuf/proto/proto.go b/vendor/google.golang.org/protobuf/proto/proto.go index 7543ee6b..1f0d183b 100644 --- a/vendor/google.golang.org/protobuf/proto/proto.go +++ b/vendor/google.golang.org/protobuf/proto/proto.go @@ -15,20 +15,18 @@ import ( // protobuf module that accept a Message, except where otherwise specified. // // This is the v2 interface definition for protobuf messages. -// The v1 interface definition is [github.com/golang/protobuf/proto.Message]. +// The v1 interface definition is "github.com/golang/protobuf/proto".Message. // -// - To convert a v1 message to a v2 message, -// use [google.golang.org/protobuf/protoadapt.MessageV2Of]. 
-// - To convert a v2 message to a v1 message, -// use [google.golang.org/protobuf/protoadapt.MessageV1Of]. +// To convert a v1 message to a v2 message, +// use "github.com/golang/protobuf/proto".MessageV2. +// To convert a v2 message to a v1 message, +// use "github.com/golang/protobuf/proto".MessageV1. type Message = protoreflect.ProtoMessage -// Error matches all errors produced by packages in the protobuf module -// according to [errors.Is]. +// Error matches all errors produced by packages in the protobuf module. // -// Example usage: -// -// if errors.Is(err, proto.Error) { ... } +// That is, errors.Is(err, Error) reports whether an error is produced +// by this module. var Error error func init() { diff --git a/vendor/google.golang.org/protobuf/proto/size.go b/vendor/google.golang.org/protobuf/proto/size.go index 052fb5ae..f1692b49 100644 --- a/vendor/google.golang.org/protobuf/proto/size.go +++ b/vendor/google.golang.org/protobuf/proto/size.go @@ -34,7 +34,6 @@ func (o MarshalOptions) size(m protoreflect.Message) (size int) { if methods != nil && methods.Size != nil { out := methods.Size(protoiface.SizeInput{ Message: m, - Flags: o.flags(), }) return out.Size } @@ -43,7 +42,6 @@ func (o MarshalOptions) size(m protoreflect.Message) (size int) { // This case is mainly used for legacy types with a Marshal method. out, _ := methods.Marshal(protoiface.MarshalInput{ Message: m, - Flags: o.flags(), }) return len(out.Buf) } diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go index 8fbecb4f..e4dfb120 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go @@ -3,17 +3,16 @@ // license that can be found in the LICENSE file. // Package protodesc provides functionality for converting -// FileDescriptorProto messages to/from [protoreflect.FileDescriptor] values. +// FileDescriptorProto messages to/from protoreflect.FileDescriptor values. // // The google.protobuf.FileDescriptorProto is a protobuf message that describes // the type information for a .proto file in a form that is easily serializable. -// The [protoreflect.FileDescriptor] is a more structured representation of +// The protoreflect.FileDescriptor is a more structured representation of // the FileDescriptorProto message where references and remote dependencies // can be directly followed. package protodesc import ( - "google.golang.org/protobuf/internal/editionssupport" "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/internal/filedesc" "google.golang.org/protobuf/internal/pragma" @@ -25,11 +24,11 @@ import ( "google.golang.org/protobuf/types/descriptorpb" ) -// Resolver is the resolver used by [NewFile] to resolve dependencies. +// Resolver is the resolver used by NewFile to resolve dependencies. // The enums and messages provided must belong to some parent file, // which is also registered. // -// It is implemented by [protoregistry.Files]. +// It is implemented by protoregistry.Files. type Resolver interface { FindFileByPath(string) (protoreflect.FileDescriptor, error) FindDescriptorByName(protoreflect.FullName) (protoreflect.Descriptor, error) @@ -62,19 +61,19 @@ type FileOptions struct { AllowUnresolvable bool } -// NewFile creates a new [protoreflect.FileDescriptor] from the provided -// file descriptor message. See [FileOptions.New] for more information. 
+// NewFile creates a new protoreflect.FileDescriptor from the provided +// file descriptor message. See FileOptions.New for more information. func NewFile(fd *descriptorpb.FileDescriptorProto, r Resolver) (protoreflect.FileDescriptor, error) { return FileOptions{}.New(fd, r) } -// NewFiles creates a new [protoregistry.Files] from the provided -// FileDescriptorSet message. See [FileOptions.NewFiles] for more information. +// NewFiles creates a new protoregistry.Files from the provided +// FileDescriptorSet message. See FileOptions.NewFiles for more information. func NewFiles(fd *descriptorpb.FileDescriptorSet) (*protoregistry.Files, error) { return FileOptions{}.NewFiles(fd) } -// New creates a new [protoreflect.FileDescriptor] from the provided +// New creates a new protoreflect.FileDescriptor from the provided // file descriptor message. The file must represent a valid proto file according // to protobuf semantics. The returned descriptor is a deep copy of the input. // @@ -92,19 +91,11 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot switch fd.GetSyntax() { case "proto2", "": f.L1.Syntax = protoreflect.Proto2 - f.L1.Edition = filedesc.EditionProto2 case "proto3": f.L1.Syntax = protoreflect.Proto3 - f.L1.Edition = filedesc.EditionProto3 - case "editions": - f.L1.Syntax = protoreflect.Editions - f.L1.Edition = fromEditionProto(fd.GetEdition()) default: return nil, errors.New("invalid syntax: %q", fd.GetSyntax()) } - if f.L1.Syntax == protoreflect.Editions && (fd.GetEdition() < editionssupport.Minimum || fd.GetEdition() > editionssupport.Maximum) { - return nil, errors.New("use of edition %v not yet supported by the Go Protobuf runtime", fd.GetEdition()) - } f.L1.Path = fd.GetName() if f.L1.Path == "" { return nil, errors.New("file path must be populated") @@ -117,7 +108,6 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot opts = proto.Clone(opts).(*descriptorpb.FileOptions) f.L2.Options = func() protoreflect.ProtoMessage { return opts } } - initFileDescFromFeatureSet(f, fd.GetOptions().GetFeatures()) f.L2.Imports = make(filedesc.FileImports, len(fd.GetDependency())) for _, i := range fd.GetPublicDependency() { @@ -220,10 +210,10 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot if err := validateEnumDeclarations(f.L1.Enums.List, fd.GetEnumType()); err != nil { return nil, err } - if err := validateMessageDeclarations(f, f.L1.Messages.List, fd.GetMessageType()); err != nil { + if err := validateMessageDeclarations(f.L1.Messages.List, fd.GetMessageType()); err != nil { return nil, err } - if err := validateExtensionDeclarations(f, f.L1.Extensions.List, fd.GetExtension()); err != nil { + if err := validateExtensionDeclarations(f.L1.Extensions.List, fd.GetExtension()); err != nil { return nil, err } @@ -241,7 +231,7 @@ func (is importSet) importPublic(imps protoreflect.FileImports) { } } -// NewFiles creates a new [protoregistry.Files] from the provided +// NewFiles creates a new protoregistry.Files from the provided // FileDescriptorSet message. The descriptor set must include only // valid files according to protobuf semantics. The returned descriptors // are a deep copy of the input. 
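For reference, the protodesc hunks above pin the vendored runtime to the pre-editions API: FileOptions.New accepts only "proto2"/"proto3" (or empty) syntax, and the editions feature plumbing is removed. A minimal sketch of a descriptor round-trip against that API follows; it is illustrative only and not part of the vendored diff — the choice of descriptor.proto as the round-tripped file and the error handling are assumptions, while protodesc.ToFileDescriptorProto, protodesc.NewFile, and protoregistry.GlobalFiles are the standard entry points shown in the hunks above.

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protodesc"
	"google.golang.org/protobuf/reflect/protoregistry"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	// Take a FileDescriptor that is already linked into the binary
	// (descriptor.proto itself) and lower it to its proto form.
	fd := (&descriptorpb.FileDescriptorProto{}).ProtoReflect().Descriptor().ParentFile()
	fdp := protodesc.ToFileDescriptorProto(fd)

	// Rebuild a protoreflect.FileDescriptor from the FileDescriptorProto.
	// With the vendored version above, fdp.GetSyntax() must be "proto2",
	// "proto3", or empty; an "editions" file would be rejected here.
	rebuilt, err := protodesc.NewFile(fdp, protoregistry.GlobalFiles)
	if err != nil {
		panic(err)
	}
	fmt.Println(rebuilt.Path(), rebuilt.Syntax())
}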
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go index 85617554..37efda1a 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go @@ -28,7 +28,6 @@ func (r descsByName) initEnumDeclarations(eds []*descriptorpb.EnumDescriptorProt opts = proto.Clone(opts).(*descriptorpb.EnumOptions) e.L2.Options = func() protoreflect.ProtoMessage { return opts } } - e.L1.EditionFeatures = mergeEditionFeatures(parent, ed.GetOptions().GetFeatures()) for _, s := range ed.GetReservedName() { e.L2.ReservedNames.List = append(e.L2.ReservedNames.List, protoreflect.Name(s)) } @@ -69,7 +68,6 @@ func (r descsByName) initMessagesDeclarations(mds []*descriptorpb.DescriptorProt if m.L0, err = r.makeBase(m, parent, md.GetName(), i, sb); err != nil { return nil, err } - m.L1.EditionFeatures = mergeEditionFeatures(parent, md.GetOptions().GetFeatures()) if opts := md.GetOptions(); opts != nil { opts = proto.Clone(opts).(*descriptorpb.MessageOptions) m.L2.Options = func() protoreflect.ProtoMessage { return opts } @@ -116,27 +114,6 @@ func (r descsByName) initMessagesDeclarations(mds []*descriptorpb.DescriptorProt return ms, nil } -// canBePacked returns whether the field can use packed encoding: -// https://protobuf.dev/programming-guides/encoding/#packed -func canBePacked(fd *descriptorpb.FieldDescriptorProto) bool { - if fd.GetLabel() != descriptorpb.FieldDescriptorProto_LABEL_REPEATED { - return false // not a repeated field - } - - switch protoreflect.Kind(fd.GetType()) { - case protoreflect.MessageKind, protoreflect.GroupKind: - return false // not a scalar type field - - case protoreflect.StringKind, protoreflect.BytesKind: - // string and bytes can explicitly not be declared as packed, - // see https://protobuf.dev/programming-guides/encoding/#packed - return false - - default: - return true - } -} - func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (fs []filedesc.Field, err error) { fs = make([]filedesc.Field, len(fds)) // allocate up-front to ensure stable pointers for i, fd := range fds { @@ -144,15 +121,13 @@ func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDesc if f.L0, err = r.makeBase(f, parent, fd.GetName(), i, sb); err != nil { return nil, err } - f.L1.EditionFeatures = mergeEditionFeatures(parent, fd.GetOptions().GetFeatures()) f.L1.IsProto3Optional = fd.GetProto3Optional() if opts := fd.GetOptions(); opts != nil { opts = proto.Clone(opts).(*descriptorpb.FieldOptions) f.L1.Options = func() protoreflect.ProtoMessage { return opts } f.L1.IsWeak = opts.GetWeak() - if opts.Packed != nil { - f.L1.EditionFeatures.IsPacked = opts.GetPacked() - } + f.L1.HasPacked = opts.Packed != nil + f.L1.IsPacked = opts.GetPacked() } f.L1.Number = protoreflect.FieldNumber(fd.GetNumber()) f.L1.Cardinality = protoreflect.Cardinality(fd.GetLabel()) @@ -162,14 +137,6 @@ func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDesc if fd.JsonName != nil { f.L1.StringName.InitJSON(fd.GetJsonName()) } - - if f.L1.EditionFeatures.IsLegacyRequired { - f.L1.Cardinality = protoreflect.Required - } - - if f.L1.Kind == protoreflect.MessageKind && f.L1.EditionFeatures.IsDelimitedEncoded { - f.L1.Kind = protoreflect.GroupKind - } } return fs, nil } @@ -181,7 +148,6 @@ func (r descsByName) initOneofsFromDescriptorProto(ods 
[]*descriptorpb.OneofDesc if o.L0, err = r.makeBase(o, parent, od.GetName(), i, sb); err != nil { return nil, err } - o.L1.EditionFeatures = mergeEditionFeatures(parent, od.GetOptions().GetFeatures()) if opts := od.GetOptions(); opts != nil { opts = proto.Clone(opts).(*descriptorpb.OneofOptions) o.L1.Options = func() protoreflect.ProtoMessage { return opts } @@ -198,13 +164,10 @@ func (r descsByName) initExtensionDeclarations(xds []*descriptorpb.FieldDescript if x.L0, err = r.makeBase(x, parent, xd.GetName(), i, sb); err != nil { return nil, err } - x.L1.EditionFeatures = mergeEditionFeatures(parent, xd.GetOptions().GetFeatures()) if opts := xd.GetOptions(); opts != nil { opts = proto.Clone(opts).(*descriptorpb.FieldOptions) x.L2.Options = func() protoreflect.ProtoMessage { return opts } - if opts.Packed != nil { - x.L1.EditionFeatures.IsPacked = opts.GetPacked() - } + x.L2.IsPacked = opts.GetPacked() } x.L1.Number = protoreflect.FieldNumber(xd.GetNumber()) x.L1.Cardinality = protoreflect.Cardinality(xd.GetLabel()) diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go index f3cebab2..27d7e350 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go @@ -46,11 +46,6 @@ func (r *resolver) resolveMessageDependencies(ms []filedesc.Message, mds []*desc if f.L1.Kind, f.L1.Enum, f.L1.Message, err = r.findTarget(f.Kind(), f.Parent().FullName(), partialName(fd.GetTypeName()), f.IsWeak()); err != nil { return errors.New("message field %q cannot resolve type: %v", f.FullName(), err) } - if f.L1.Kind == protoreflect.GroupKind && (f.IsMap() || f.IsMapEntry()) { - // A map field might inherit delimited encoding from a file-wide default feature. - // But maps never actually use delimited encoding. (At least for now...) 
- f.L1.Kind = protoreflect.MessageKind - } if fd.DefaultValue != nil { v, ev, err := unmarshalDefault(fd.GetDefaultValue(), f, r.allowUnresolvable) if err != nil { @@ -281,8 +276,8 @@ func unmarshalDefault(s string, fd protoreflect.FieldDescriptor, allowUnresolvab } else if err != nil { return v, ev, err } - if !fd.HasPresence() { - return v, ev, errors.New("cannot be specified with implicit field presence") + if fd.Syntax() == protoreflect.Proto3 { + return v, ev, errors.New("cannot be specified under proto3 semantics") } if fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind || fd.Cardinality() == protoreflect.Repeated { return v, ev, errors.New("cannot be specified on composite types") diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go index 6de31c2e..9af1d564 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go @@ -45,11 +45,11 @@ func validateEnumDeclarations(es []filedesc.Enum, eds []*descriptorpb.EnumDescri if allowAlias && !foundAlias { return errors.New("enum %q allows aliases, but none were found", e.FullName()) } - if !e.IsClosed() { + if e.Syntax() == protoreflect.Proto3 { if v := e.Values().Get(0); v.Number() != 0 { - return errors.New("enum %q using open semantics must have zero number for the first value", v.FullName()) + return errors.New("enum %q using proto3 semantics must have zero number for the first value", v.FullName()) } - // Verify that value names in open enums do not conflict if the + // Verify that value names in proto3 do not conflict if the // case-insensitive prefix is removed. // See protoc v3.8.0: src/google/protobuf/descriptor.cc:4991-5055 names := map[string]protoreflect.EnumValueDescriptor{} @@ -58,7 +58,7 @@ func validateEnumDeclarations(es []filedesc.Enum, eds []*descriptorpb.EnumDescri v1 := e.Values().Get(i) s := strs.EnumValueName(strs.TrimEnumPrefix(string(v1.Name()), prefix)) if v2, ok := names[s]; ok && v1.Number() != v2.Number() { - return errors.New("enum %q using open semantics has conflict: %q with %q", e.FullName(), v1.Name(), v2.Name()) + return errors.New("enum %q using proto3 semantics has conflict: %q with %q", e.FullName(), v1.Name(), v2.Name()) } names[s] = v1 } @@ -80,9 +80,7 @@ func validateEnumDeclarations(es []filedesc.Enum, eds []*descriptorpb.EnumDescri return nil } -func validateMessageDeclarations(file *filedesc.File, ms []filedesc.Message, mds []*descriptorpb.DescriptorProto) error { - // There are a few limited exceptions only for proto3 - isProto3 := file.L1.Edition == fromEditionProto(descriptorpb.Edition_EDITION_PROTO3) +func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.DescriptorProto) error { for i, md := range mds { m := &ms[i] @@ -109,13 +107,25 @@ func validateMessageDeclarations(file *filedesc.File, ms []filedesc.Message, mds if isMessageSet && !flags.ProtoLegacy { return errors.New("message %q is a MessageSet, which is a legacy proto1 feature that is no longer supported", m.FullName()) } - if isMessageSet && (isProto3 || m.Fields().Len() > 0 || m.ExtensionRanges().Len() == 0) { + if isMessageSet && (m.Syntax() != protoreflect.Proto2 || m.Fields().Len() > 0 || m.ExtensionRanges().Len() == 0) { return errors.New("message %q is an invalid proto1 MessageSet", m.FullName()) } - if isProto3 { + if m.Syntax() == protoreflect.Proto3 { if m.ExtensionRanges().Len() > 0 { 
return errors.New("message %q using proto3 semantics cannot have extension ranges", m.FullName()) } + // Verify that field names in proto3 do not conflict if lowercased + // with all underscores removed. + // See protoc v3.8.0: src/google/protobuf/descriptor.cc:5830-5847 + names := map[string]protoreflect.FieldDescriptor{} + for i := 0; i < m.Fields().Len(); i++ { + f1 := m.Fields().Get(i) + s := strings.Replace(strings.ToLower(string(f1.Name())), "_", "", -1) + if f2, ok := names[s]; ok { + return errors.New("message %q using proto3 semantics has conflict: %q with %q", m.FullName(), f1.Name(), f2.Name()) + } + names[s] = f1 + } } for j, fd := range md.GetField() { @@ -139,7 +149,7 @@ func validateMessageDeclarations(file *filedesc.File, ms []filedesc.Message, mds return errors.New("message field %q may not have extendee: %q", f.FullName(), fd.GetExtendee()) } if f.L1.IsProto3Optional { - if !isProto3 { + if f.Syntax() != protoreflect.Proto3 { return errors.New("message field %q under proto3 optional semantics must be specified in the proto3 syntax", f.FullName()) } if f.Cardinality() != protoreflect.Optional { @@ -152,29 +162,26 @@ func validateMessageDeclarations(file *filedesc.File, ms []filedesc.Message, mds if f.IsWeak() && !flags.ProtoLegacy { return errors.New("message field %q is a weak field, which is a legacy proto1 feature that is no longer supported", f.FullName()) } - if f.IsWeak() && (!f.HasPresence() || !isOptionalMessage(f) || f.ContainingOneof() != nil) { + if f.IsWeak() && (f.Syntax() != protoreflect.Proto2 || !isOptionalMessage(f) || f.ContainingOneof() != nil) { return errors.New("message field %q may only be weak for an optional message", f.FullName()) } if f.IsPacked() && !isPackable(f) { return errors.New("message field %q is not packable", f.FullName()) } - if err := checkValidGroup(file, f); err != nil { + if err := checkValidGroup(f); err != nil { return errors.New("message field %q is an invalid group: %v", f.FullName(), err) } if err := checkValidMap(f); err != nil { return errors.New("message field %q is an invalid map: %v", f.FullName(), err) } - if isProto3 { + if f.Syntax() == protoreflect.Proto3 { if f.Cardinality() == protoreflect.Required { return errors.New("message field %q using proto3 semantics cannot be required", f.FullName()) } - if f.Enum() != nil && !f.Enum().IsPlaceholder() && f.Enum().IsClosed() { - return errors.New("message field %q using proto3 semantics may only depend on open enums", f.FullName()) + if f.Enum() != nil && !f.Enum().IsPlaceholder() && f.Enum().Syntax() != protoreflect.Proto3 { + return errors.New("message field %q using proto3 semantics may only depend on a proto3 enum", f.FullName()) } } - if f.Cardinality() == protoreflect.Optional && !f.HasPresence() && f.Enum() != nil && !f.Enum().IsPlaceholder() && f.Enum().IsClosed() { - return errors.New("message field %q with implicit presence may only use open enums", f.FullName()) - } } seenSynthetic := false // synthetic oneofs for proto3 optional must come after real oneofs for j := range md.GetOneofDecl() { @@ -208,17 +215,17 @@ func validateMessageDeclarations(file *filedesc.File, ms []filedesc.Message, mds if err := validateEnumDeclarations(m.L1.Enums.List, md.GetEnumType()); err != nil { return err } - if err := validateMessageDeclarations(file, m.L1.Messages.List, md.GetNestedType()); err != nil { + if err := validateMessageDeclarations(m.L1.Messages.List, md.GetNestedType()); err != nil { return err } - if err := validateExtensionDeclarations(file, m.L1.Extensions.List, 
md.GetExtension()); err != nil { + if err := validateExtensionDeclarations(m.L1.Extensions.List, md.GetExtension()); err != nil { return err } } return nil } -func validateExtensionDeclarations(f *filedesc.File, xs []filedesc.Extension, xds []*descriptorpb.FieldDescriptorProto) error { +func validateExtensionDeclarations(xs []filedesc.Extension, xds []*descriptorpb.FieldDescriptorProto) error { for i, xd := range xds { x := &xs[i] // NOTE: Avoid using the IsValid method since extensions to MessageSet @@ -260,13 +267,13 @@ func validateExtensionDeclarations(f *filedesc.File, xs []filedesc.Extension, xd if x.IsPacked() && !isPackable(x) { return errors.New("extension field %q is not packable", x.FullName()) } - if err := checkValidGroup(f, x); err != nil { + if err := checkValidGroup(x); err != nil { return errors.New("extension field %q is an invalid group: %v", x.FullName(), err) } if md := x.Message(); md != nil && md.IsMapEntry() { return errors.New("extension field %q cannot be a map entry", x.FullName()) } - if f.L1.Edition == fromEditionProto(descriptorpb.Edition_EDITION_PROTO3) { + if x.Syntax() == protoreflect.Proto3 { switch x.ContainingMessage().FullName() { case (*descriptorpb.FileOptions)(nil).ProtoReflect().Descriptor().FullName(): case (*descriptorpb.EnumOptions)(nil).ProtoReflect().Descriptor().FullName(): @@ -302,25 +309,21 @@ func isPackable(fd protoreflect.FieldDescriptor) bool { // checkValidGroup reports whether fd is a valid group according to the same // rules that protoc imposes. -func checkValidGroup(f *filedesc.File, fd protoreflect.FieldDescriptor) error { +func checkValidGroup(fd protoreflect.FieldDescriptor) error { md := fd.Message() switch { case fd.Kind() != protoreflect.GroupKind: return nil - case f.L1.Edition == fromEditionProto(descriptorpb.Edition_EDITION_PROTO3): - return errors.New("invalid under proto3 semantics") + case fd.Syntax() != protoreflect.Proto2: + return errors.New("invalid under proto2 semantics") case md == nil || md.IsPlaceholder(): return errors.New("message must be resolvable") - } - if f.L1.Edition < fromEditionProto(descriptorpb.Edition_EDITION_2023) { - switch { - case fd.FullName().Parent() != md.FullName().Parent(): - return errors.New("message and field must be declared in the same scope") - case !unicode.IsUpper(rune(md.Name()[0])): - return errors.New("message name must start with an uppercase") - case fd.Name() != protoreflect.Name(strings.ToLower(string(md.Name()))): - return errors.New("field name must be lowercased form of the message name") - } + case fd.FullName().Parent() != md.FullName().Parent(): + return errors.New("message and field must be declared in the same scope") + case !unicode.IsUpper(rune(md.Name()[0])): + return errors.New("message name must start with an uppercase") + case fd.Name() != protoreflect.Name(strings.ToLower(string(md.Name()))): + return errors.New("field name must be lowercased form of the message name") } return nil } diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go deleted file mode 100644 index 804830ed..00000000 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package protodesc - -import ( - "fmt" - "os" - "sync" - - "google.golang.org/protobuf/internal/editiondefaults" - "google.golang.org/protobuf/internal/filedesc" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/types/descriptorpb" - gofeaturespb "google.golang.org/protobuf/types/gofeaturespb" -) - -var defaults = &descriptorpb.FeatureSetDefaults{} -var defaultsCacheMu sync.Mutex -var defaultsCache = make(map[filedesc.Edition]*descriptorpb.FeatureSet) - -func init() { - err := proto.Unmarshal(editiondefaults.Defaults, defaults) - if err != nil { - fmt.Fprintf(os.Stderr, "unmarshal editions defaults: %v\n", err) - os.Exit(1) - } -} - -func fromEditionProto(epb descriptorpb.Edition) filedesc.Edition { - return filedesc.Edition(epb) -} - -func toEditionProto(ed filedesc.Edition) descriptorpb.Edition { - switch ed { - case filedesc.EditionUnknown: - return descriptorpb.Edition_EDITION_UNKNOWN - case filedesc.EditionProto2: - return descriptorpb.Edition_EDITION_PROTO2 - case filedesc.EditionProto3: - return descriptorpb.Edition_EDITION_PROTO3 - case filedesc.Edition2023: - return descriptorpb.Edition_EDITION_2023 - default: - panic(fmt.Sprintf("unknown value for edition: %v", ed)) - } -} - -func getFeatureSetFor(ed filedesc.Edition) *descriptorpb.FeatureSet { - defaultsCacheMu.Lock() - defer defaultsCacheMu.Unlock() - if def, ok := defaultsCache[ed]; ok { - return def - } - edpb := toEditionProto(ed) - if defaults.GetMinimumEdition() > edpb || defaults.GetMaximumEdition() < edpb { - // This should never happen protodesc.(FileOptions).New would fail when - // initializing the file descriptor. - // This most likely means the embedded defaults were not updated. - fmt.Fprintf(os.Stderr, "internal error: unsupported edition %v (did you forget to update the embedded defaults (i.e. the bootstrap descriptor proto)?)\n", edpb) - os.Exit(1) - } - fsed := defaults.GetDefaults()[0] - // Using a linear search for now. - // Editions are guaranteed to be sorted and thus we could use a binary search. - // Given that there are only a handful of editions (with one more per year) - // there is not much reason to use a binary search. - for _, def := range defaults.GetDefaults() { - if def.GetEdition() <= edpb { - fsed = def - } else { - break - } - } - fs := proto.Clone(fsed.GetFixedFeatures()).(*descriptorpb.FeatureSet) - proto.Merge(fs, fsed.GetOverridableFeatures()) - defaultsCache[ed] = fs - return fs -} - -// mergeEditionFeatures merges the parent and child feature sets. This function -// should be used when initializing Go descriptors from descriptor protos which -// is why the parent is a filedesc.EditionsFeatures (Go representation) while -// the child is a descriptorproto.FeatureSet (protoc representation). -// Any feature set by the child overwrites what is set by the parent. 
-func mergeEditionFeatures(parentDesc protoreflect.Descriptor, child *descriptorpb.FeatureSet) filedesc.EditionFeatures { - var parentFS filedesc.EditionFeatures - switch p := parentDesc.(type) { - case *filedesc.File: - parentFS = p.L1.EditionFeatures - case *filedesc.Message: - parentFS = p.L1.EditionFeatures - default: - panic(fmt.Sprintf("unknown parent type %T", parentDesc)) - } - if child == nil { - return parentFS - } - if fp := child.FieldPresence; fp != nil { - parentFS.IsFieldPresence = *fp == descriptorpb.FeatureSet_LEGACY_REQUIRED || - *fp == descriptorpb.FeatureSet_EXPLICIT - parentFS.IsLegacyRequired = *fp == descriptorpb.FeatureSet_LEGACY_REQUIRED - } - if et := child.EnumType; et != nil { - parentFS.IsOpenEnum = *et == descriptorpb.FeatureSet_OPEN - } - - if rfe := child.RepeatedFieldEncoding; rfe != nil { - parentFS.IsPacked = *rfe == descriptorpb.FeatureSet_PACKED - } - - if utf8val := child.Utf8Validation; utf8val != nil { - parentFS.IsUTF8Validated = *utf8val == descriptorpb.FeatureSet_VERIFY - } - - if me := child.MessageEncoding; me != nil { - parentFS.IsDelimitedEncoded = *me == descriptorpb.FeatureSet_DELIMITED - } - - if jf := child.JsonFormat; jf != nil { - parentFS.IsJSONCompliant = *jf == descriptorpb.FeatureSet_ALLOW - } - - if goFeatures, ok := proto.GetExtension(child, gofeaturespb.E_Go).(*gofeaturespb.GoFeatures); ok && goFeatures != nil { - if luje := goFeatures.LegacyUnmarshalJsonEnum; luje != nil { - parentFS.GenerateLegacyUnmarshalJSON = *luje - } - } - - return parentFS -} - -// initFileDescFromFeatureSet initializes editions related fields in fd based -// on fs. If fs is nil it is assumed to be an empty featureset and all fields -// will be initialized with the appropriate default. fd.L1.Edition must be set -// before calling this function. -func initFileDescFromFeatureSet(fd *filedesc.File, fs *descriptorpb.FeatureSet) { - dfs := getFeatureSetFor(fd.L1.Edition) - // initialize the featureset with the defaults - fd.L1.EditionFeatures = mergeEditionFeatures(fd, dfs) - // overwrite any options explicitly specified - fd.L1.EditionFeatures = mergeEditionFeatures(fd, fs) -} diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go index a5de8d40..a7c5ceff 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go @@ -16,7 +16,7 @@ import ( "google.golang.org/protobuf/types/descriptorpb" ) -// ToFileDescriptorProto copies a [protoreflect.FileDescriptor] into a +// ToFileDescriptorProto copies a protoreflect.FileDescriptor into a // google.protobuf.FileDescriptorProto message. 
func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileDescriptorProto { p := &descriptorpb.FileDescriptorProto{ @@ -70,23 +70,13 @@ func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileD for i, exts := 0, file.Extensions(); i < exts.Len(); i++ { p.Extension = append(p.Extension, ToFieldDescriptorProto(exts.Get(i))) } - if syntax := file.Syntax(); syntax != protoreflect.Proto2 && syntax.IsValid() { + if syntax := file.Syntax(); syntax != protoreflect.Proto2 { p.Syntax = proto.String(file.Syntax().String()) } - if file.Syntax() == protoreflect.Editions { - desc := file - if fileImportDesc, ok := file.(protoreflect.FileImport); ok { - desc = fileImportDesc.FileDescriptor - } - - if editionsInterface, ok := desc.(interface{ Edition() int32 }); ok { - p.Edition = descriptorpb.Edition(editionsInterface.Edition()).Enum() - } - } return p } -// ToDescriptorProto copies a [protoreflect.MessageDescriptor] into a +// ToDescriptorProto copies a protoreflect.MessageDescriptor into a // google.protobuf.DescriptorProto message. func ToDescriptorProto(message protoreflect.MessageDescriptor) *descriptorpb.DescriptorProto { p := &descriptorpb.DescriptorProto{ @@ -129,7 +119,7 @@ func ToDescriptorProto(message protoreflect.MessageDescriptor) *descriptorpb.Des return p } -// ToFieldDescriptorProto copies a [protoreflect.FieldDescriptor] into a +// ToFieldDescriptorProto copies a protoreflect.FieldDescriptor into a // google.protobuf.FieldDescriptorProto message. func ToFieldDescriptorProto(field protoreflect.FieldDescriptor) *descriptorpb.FieldDescriptorProto { p := &descriptorpb.FieldDescriptorProto{ @@ -163,18 +153,6 @@ func ToFieldDescriptorProto(field protoreflect.FieldDescriptor) *descriptorpb.Fi if field.Syntax() == protoreflect.Proto3 && field.HasOptionalKeyword() { p.Proto3Optional = proto.Bool(true) } - if field.Syntax() == protoreflect.Editions { - // Editions have no group keyword, this type is only set so that downstream users continue - // treating this as delimited encoding. - if p.GetType() == descriptorpb.FieldDescriptorProto_TYPE_GROUP { - p.Type = descriptorpb.FieldDescriptorProto_TYPE_MESSAGE.Enum() - } - // Editions have no required keyword, this label is only set so that downstream users continue - // treating it as required. - if p.GetLabel() == descriptorpb.FieldDescriptorProto_LABEL_REQUIRED { - p.Label = descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL.Enum() - } - } if field.HasDefault() { def, err := defval.Marshal(field.Default(), field.DefaultEnumValue(), field.Kind(), defval.Descriptor) if err != nil && field.DefaultEnumValue() != nil { @@ -190,7 +168,7 @@ func ToFieldDescriptorProto(field protoreflect.FieldDescriptor) *descriptorpb.Fi return p } -// ToOneofDescriptorProto copies a [protoreflect.OneofDescriptor] into a +// ToOneofDescriptorProto copies a protoreflect.OneofDescriptor into a // google.protobuf.OneofDescriptorProto message. func ToOneofDescriptorProto(oneof protoreflect.OneofDescriptor) *descriptorpb.OneofDescriptorProto { return &descriptorpb.OneofDescriptorProto{ @@ -199,7 +177,7 @@ func ToOneofDescriptorProto(oneof protoreflect.OneofDescriptor) *descriptorpb.On } } -// ToEnumDescriptorProto copies a [protoreflect.EnumDescriptor] into a +// ToEnumDescriptorProto copies a protoreflect.EnumDescriptor into a // google.protobuf.EnumDescriptorProto message. 
func ToEnumDescriptorProto(enum protoreflect.EnumDescriptor) *descriptorpb.EnumDescriptorProto { p := &descriptorpb.EnumDescriptorProto{ @@ -222,7 +200,7 @@ func ToEnumDescriptorProto(enum protoreflect.EnumDescriptor) *descriptorpb.EnumD return p } -// ToEnumValueDescriptorProto copies a [protoreflect.EnumValueDescriptor] into a +// ToEnumValueDescriptorProto copies a protoreflect.EnumValueDescriptor into a // google.protobuf.EnumValueDescriptorProto message. func ToEnumValueDescriptorProto(value protoreflect.EnumValueDescriptor) *descriptorpb.EnumValueDescriptorProto { return &descriptorpb.EnumValueDescriptorProto{ @@ -232,7 +210,7 @@ func ToEnumValueDescriptorProto(value protoreflect.EnumValueDescriptor) *descrip } } -// ToServiceDescriptorProto copies a [protoreflect.ServiceDescriptor] into a +// ToServiceDescriptorProto copies a protoreflect.ServiceDescriptor into a // google.protobuf.ServiceDescriptorProto message. func ToServiceDescriptorProto(service protoreflect.ServiceDescriptor) *descriptorpb.ServiceDescriptorProto { p := &descriptorpb.ServiceDescriptorProto{ @@ -245,7 +223,7 @@ func ToServiceDescriptorProto(service protoreflect.ServiceDescriptor) *descripto return p } -// ToMethodDescriptorProto copies a [protoreflect.MethodDescriptor] into a +// ToMethodDescriptorProto copies a protoreflect.MethodDescriptor into a // google.protobuf.MethodDescriptorProto message. func ToMethodDescriptorProto(method protoreflect.MethodDescriptor) *descriptorpb.MethodDescriptorProto { p := &descriptorpb.MethodDescriptorProto{ diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go index c85bfaa5..55aa1492 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go @@ -10,46 +10,46 @@ // // # Protocol Buffer Descriptors // -// Protobuf descriptors (e.g., [EnumDescriptor] or [MessageDescriptor]) +// Protobuf descriptors (e.g., EnumDescriptor or MessageDescriptor) // are immutable objects that represent protobuf type information. // They are wrappers around the messages declared in descriptor.proto. // Protobuf descriptors alone lack any information regarding Go types. // -// Enums and messages generated by this module implement [Enum] and [ProtoMessage], +// Enums and messages generated by this module implement Enum and ProtoMessage, // where the Descriptor and ProtoReflect.Descriptor accessors respectively // return the protobuf descriptor for the values. // // The protobuf descriptor interfaces are not meant to be implemented by // user code since they might need to be extended in the future to support // additions to the protobuf language. -// The [google.golang.org/protobuf/reflect/protodesc] package converts between +// The "google.golang.org/protobuf/reflect/protodesc" package converts between // google.protobuf.DescriptorProto messages and protobuf descriptors. // // # Go Type Descriptors // -// A type descriptor (e.g., [EnumType] or [MessageType]) is a constructor for +// A type descriptor (e.g., EnumType or MessageType) is a constructor for // a concrete Go type that represents the associated protobuf descriptor. // There is commonly a one-to-one relationship between protobuf descriptors and // Go type descriptors, but it can potentially be a one-to-many relationship. 
// -// Enums and messages generated by this module implement [Enum] and [ProtoMessage], +// Enums and messages generated by this module implement Enum and ProtoMessage, // where the Type and ProtoReflect.Type accessors respectively // return the protobuf descriptor for the values. // -// The [google.golang.org/protobuf/types/dynamicpb] package can be used to +// The "google.golang.org/protobuf/types/dynamicpb" package can be used to // create Go type descriptors from protobuf descriptors. // // # Value Interfaces // -// The [Enum] and [Message] interfaces provide a reflective view over an +// The Enum and Message interfaces provide a reflective view over an // enum or message instance. For enums, it provides the ability to retrieve // the enum value number for any concrete enum type. For messages, it provides // the ability to access or manipulate fields of the message. // -// To convert a [google.golang.org/protobuf/proto.Message] to a [protoreflect.Message], use the +// To convert a proto.Message to a protoreflect.Message, use the // former's ProtoReflect method. Since the ProtoReflect method is new to the // v2 message interface, it may not be present on older message implementations. -// The [github.com/golang/protobuf/proto.MessageReflect] function can be used +// The "github.com/golang/protobuf/proto".MessageReflect function can be used // to obtain a reflective view on older messages. // // # Relationships @@ -71,12 +71,12 @@ // │ │ // └────────────────── Type() ───────┘ // -// • An [EnumType] describes a concrete Go enum type. +// • An EnumType describes a concrete Go enum type. // It has an EnumDescriptor and can construct an Enum instance. // -// • An [EnumDescriptor] describes an abstract protobuf enum type. +// • An EnumDescriptor describes an abstract protobuf enum type. // -// • An [Enum] is a concrete enum instance. Generated enums implement Enum. +// • An Enum is a concrete enum instance. Generated enums implement Enum. // // ┌──────────────── New() ─────────────────┐ // │ │ @@ -90,26 +90,24 @@ // │ │ // └─────────────────── Type() ─────────┘ // -// • A [MessageType] describes a concrete Go message type. -// It has a [MessageDescriptor] and can construct a [Message] instance. -// Just as how Go's [reflect.Type] is a reflective description of a Go type, -// a [MessageType] is a reflective description of a Go type for a protobuf message. +// • A MessageType describes a concrete Go message type. +// It has a MessageDescriptor and can construct a Message instance. +// Just as how Go's reflect.Type is a reflective description of a Go type, +// a MessageType is a reflective description of a Go type for a protobuf message. // -// • A [MessageDescriptor] describes an abstract protobuf message type. -// It has no understanding of Go types. In order to construct a [MessageType] -// from just a [MessageDescriptor], you can consider looking up the message type -// in the global registry using the FindMessageByName method on -// [google.golang.org/protobuf/reflect/protoregistry.GlobalTypes] -// or constructing a dynamic [MessageType] using -// [google.golang.org/protobuf/types/dynamicpb.NewMessageType]. +// • A MessageDescriptor describes an abstract protobuf message type. +// It has no understanding of Go types. In order to construct a MessageType +// from just a MessageDescriptor, you can consider looking up the message type +// in the global registry using protoregistry.GlobalTypes.FindMessageByName +// or constructing a dynamic MessageType using dynamicpb.NewMessageType. 
// -// • A [Message] is a reflective view over a concrete message instance. -// Generated messages implement [ProtoMessage], which can convert to a [Message]. -// Just as how Go's [reflect.Value] is a reflective view over a Go value, -// a [Message] is a reflective view over a concrete protobuf message instance. -// Using Go reflection as an analogy, the [ProtoMessage.ProtoReflect] method is similar to -// calling [reflect.ValueOf], and the [Message.Interface] method is similar to -// calling [reflect.Value.Interface]. +// • A Message is a reflective view over a concrete message instance. +// Generated messages implement ProtoMessage, which can convert to a Message. +// Just as how Go's reflect.Value is a reflective view over a Go value, +// a Message is a reflective view over a concrete protobuf message instance. +// Using Go reflection as an analogy, the ProtoReflect method is similar to +// calling reflect.ValueOf, and the Message.Interface method is similar to +// calling reflect.Value.Interface. // // ┌── TypeDescriptor() ──┐ ┌───── Descriptor() ─────┐ // │ V │ V @@ -121,15 +119,15 @@ // │ │ // └────── implements ────────┘ // -// • An [ExtensionType] describes a concrete Go implementation of an extension. -// It has an [ExtensionTypeDescriptor] and can convert to/from -// an abstract [Value] and a Go value. +// • An ExtensionType describes a concrete Go implementation of an extension. +// It has an ExtensionTypeDescriptor and can convert to/from +// abstract Values and Go values. // -// • An [ExtensionTypeDescriptor] is an [ExtensionDescriptor] -// which also has an [ExtensionType]. +// • An ExtensionTypeDescriptor is an ExtensionDescriptor +// which also has an ExtensionType. // -// • An [ExtensionDescriptor] describes an abstract protobuf extension field and -// may not always be an [ExtensionTypeDescriptor]. +// • An ExtensionDescriptor describes an abstract protobuf extension field and +// may not always be an ExtensionTypeDescriptor. package protoreflect import ( @@ -144,7 +142,7 @@ type doNotImplement pragma.DoNotImplement // ProtoMessage is the top-level interface that all proto messages implement. // This is declared in the protoreflect package to avoid a cyclic dependency; -// use the [google.golang.org/protobuf/proto.Message] type instead, which aliases this type. +// use the proto.Message type instead, which aliases this type. type ProtoMessage interface{ ProtoReflect() Message } // Syntax is the language version of the proto file. @@ -153,15 +151,14 @@ type Syntax syntax type syntax int8 // keep exact type opaque as the int type may change const ( - Proto2 Syntax = 2 - Proto3 Syntax = 3 - Editions Syntax = 4 + Proto2 Syntax = 2 + Proto3 Syntax = 3 ) // IsValid reports whether the syntax is valid. func (s Syntax) IsValid() bool { switch s { - case Proto2, Proto3, Editions: + case Proto2, Proto3: return true default: return false @@ -175,8 +172,6 @@ func (s Syntax) String() string { return "proto2" case Proto3: return "proto3" - case Editions: - return "editions" default: return fmt.Sprintf("", s) } @@ -441,7 +436,7 @@ type Names interface { // FullName is a qualified name that uniquely identifies a proto declaration. // A qualified name is the concatenation of the proto package along with the // fully-declared name (i.e., name of parent preceding the name of the child), -// with a '.' delimiter placed between each [Name]. +// with a '.' delimiter placed between each Name. // // This should not have any leading or trailing dots. 
type FullName string // e.g., "google.protobuf.Field.Kind" @@ -485,7 +480,7 @@ func isLetterDigit(c byte) bool { } // Name returns the short name, which is the last identifier segment. -// A single segment FullName is the [Name] itself. +// A single segment FullName is the Name itself. func (n FullName) Name() Name { if i := strings.LastIndexByte(string(n), '.'); i >= 0 { return Name(n[i+1:]) diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go index ea154eec..717b106f 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go @@ -35,7 +35,7 @@ func (p *SourcePath) appendFileDescriptorProto(b []byte) []byte { b = p.appendSingularField(b, "source_code_info", (*SourcePath).appendSourceCodeInfo) case 12: b = p.appendSingularField(b, "syntax", nil) - case 14: + case 13: b = p.appendSingularField(b, "edition", nil) } return b @@ -160,6 +160,8 @@ func (p *SourcePath) appendFileOptions(b []byte) []byte { b = p.appendSingularField(b, "java_generic_services", nil) case 18: b = p.appendSingularField(b, "py_generic_services", nil) + case 42: + b = p.appendSingularField(b, "php_generic_services", nil) case 23: b = p.appendSingularField(b, "deprecated", nil) case 31: @@ -178,8 +180,6 @@ func (p *SourcePath) appendFileOptions(b []byte) []byte { b = p.appendSingularField(b, "php_metadata_namespace", nil) case 45: b = p.appendSingularField(b, "ruby_package", nil) - case 50: - b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -240,8 +240,6 @@ func (p *SourcePath) appendMessageOptions(b []byte) []byte { b = p.appendSingularField(b, "map_entry", nil) case 11: b = p.appendSingularField(b, "deprecated_legacy_json_field_conflicts", nil) - case 12: - b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -287,8 +285,6 @@ func (p *SourcePath) appendEnumOptions(b []byte) []byte { b = p.appendSingularField(b, "deprecated", nil) case 6: b = p.appendSingularField(b, "deprecated_legacy_json_field_conflicts", nil) - case 7: - b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -334,8 +330,6 @@ func (p *SourcePath) appendServiceOptions(b []byte) []byte { return b } switch (*p)[0] { - case 34: - b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 33: b = p.appendSingularField(b, "deprecated", nil) case 999: @@ -367,41 +361,16 @@ func (p *SourcePath) appendFieldOptions(b []byte) []byte { b = p.appendSingularField(b, "debug_redact", nil) case 17: b = p.appendSingularField(b, "retention", nil) + case 18: + b = p.appendSingularField(b, "target", nil) case 19: b = p.appendRepeatedField(b, "targets", nil) - case 20: - b = p.appendRepeatedField(b, "edition_defaults", (*SourcePath).appendFieldOptions_EditionDefault) - case 21: - b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) - case 22: - b = p.appendSingularField(b, "feature_support", (*SourcePath).appendFieldOptions_FeatureSupport) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } return b } 
-func (p *SourcePath) appendFeatureSet(b []byte) []byte { - if len(*p) == 0 { - return b - } - switch (*p)[0] { - case 1: - b = p.appendSingularField(b, "field_presence", nil) - case 2: - b = p.appendSingularField(b, "enum_type", nil) - case 3: - b = p.appendSingularField(b, "repeated_field_encoding", nil) - case 4: - b = p.appendSingularField(b, "utf8_validation", nil) - case 5: - b = p.appendSingularField(b, "message_encoding", nil) - case 6: - b = p.appendSingularField(b, "json_format", nil) - } - return b -} - func (p *SourcePath) appendUninterpretedOption(b []byte) []byte { if len(*p) == 0 { return b @@ -453,8 +422,6 @@ func (p *SourcePath) appendExtensionRangeOptions(b []byte) []byte { b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) case 2: b = p.appendRepeatedField(b, "declaration", (*SourcePath).appendExtensionRangeOptions_Declaration) - case 50: - b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 3: b = p.appendSingularField(b, "verification", nil) } @@ -466,8 +433,6 @@ func (p *SourcePath) appendOneofOptions(b []byte) []byte { return b } switch (*p)[0] { - case 1: - b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -481,12 +446,6 @@ func (p *SourcePath) appendEnumValueOptions(b []byte) []byte { switch (*p)[0] { case 1: b = p.appendSingularField(b, "deprecated", nil) - case 2: - b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) - case 3: - b = p.appendSingularField(b, "debug_redact", nil) - case 4: - b = p.appendSingularField(b, "feature_support", (*SourcePath).appendFieldOptions_FeatureSupport) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -502,44 +461,12 @@ func (p *SourcePath) appendMethodOptions(b []byte) []byte { b = p.appendSingularField(b, "deprecated", nil) case 34: b = p.appendSingularField(b, "idempotency_level", nil) - case 35: - b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } return b } -func (p *SourcePath) appendFieldOptions_EditionDefault(b []byte) []byte { - if len(*p) == 0 { - return b - } - switch (*p)[0] { - case 3: - b = p.appendSingularField(b, "edition", nil) - case 2: - b = p.appendSingularField(b, "value", nil) - } - return b -} - -func (p *SourcePath) appendFieldOptions_FeatureSupport(b []byte) []byte { - if len(*p) == 0 { - return b - } - switch (*p)[0] { - case 1: - b = p.appendSingularField(b, "edition_introduced", nil) - case 2: - b = p.appendSingularField(b, "edition_deprecated", nil) - case 3: - b = p.appendSingularField(b, "deprecation_warning", nil) - case 4: - b = p.appendSingularField(b, "edition_removed", nil) - } - return b -} - func (p *SourcePath) appendUninterpretedOption_NamePart(b []byte) []byte { if len(*p) == 0 { return b @@ -564,6 +491,8 @@ func (p *SourcePath) appendExtensionRangeOptions_Declaration(b []byte) []byte { b = p.appendSingularField(b, "full_name", nil) case 3: b = p.appendSingularField(b, "type", nil) + case 4: + b = p.appendSingularField(b, "is_repeated", nil) case 5: b = p.appendSingularField(b, "reserved", nil) case 6: diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go index cd8fadba..3867470d 100644 --- 
a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go @@ -12,7 +12,7 @@ package protoreflect // exactly identical. However, it is possible for the same semantically // identical proto type to be represented by multiple type descriptors. // -// For example, suppose we have t1 and t2 which are both an [MessageDescriptor]. +// For example, suppose we have t1 and t2 which are both MessageDescriptors. // If t1 == t2, then the types are definitely equal and all accessors return // the same information. However, if t1 != t2, then it is still possible that // they still represent the same proto type (e.g., t1.FullName == t2.FullName). @@ -115,7 +115,7 @@ type Descriptor interface { // corresponds with the google.protobuf.FileDescriptorProto message. // // Top-level declarations: -// [EnumDescriptor], [MessageDescriptor], [FieldDescriptor], and/or [ServiceDescriptor]. +// EnumDescriptor, MessageDescriptor, FieldDescriptor, and/or ServiceDescriptor. type FileDescriptor interface { Descriptor // Descriptor.FullName is identical to Package @@ -180,8 +180,8 @@ type FileImport struct { // corresponds with the google.protobuf.DescriptorProto message. // // Nested declarations: -// [FieldDescriptor], [OneofDescriptor], [FieldDescriptor], [EnumDescriptor], -// and/or [MessageDescriptor]. +// FieldDescriptor, OneofDescriptor, FieldDescriptor, EnumDescriptor, +// and/or MessageDescriptor. type MessageDescriptor interface { Descriptor @@ -214,7 +214,7 @@ type MessageDescriptor interface { ExtensionRanges() FieldRanges // ExtensionRangeOptions returns the ith extension range options. // - // To avoid a dependency cycle, this method returns a proto.Message] value, + // To avoid a dependency cycle, this method returns a proto.Message value, // which always contains a google.protobuf.ExtensionRangeOptions message. // This method returns a typed nil-pointer if no options are present. // The caller must import the descriptorpb package to use this. @@ -231,9 +231,9 @@ type MessageDescriptor interface { } type isMessageDescriptor interface{ ProtoType(MessageDescriptor) } -// MessageType encapsulates a [MessageDescriptor] with a concrete Go implementation. +// MessageType encapsulates a MessageDescriptor with a concrete Go implementation. // It is recommended that implementations of this interface also implement the -// [MessageFieldTypes] interface. +// MessageFieldTypes interface. type MessageType interface { // New returns a newly allocated empty message. // It may return nil for synthetic messages representing a map entry. @@ -249,19 +249,19 @@ type MessageType interface { Descriptor() MessageDescriptor } -// MessageFieldTypes extends a [MessageType] by providing type information +// MessageFieldTypes extends a MessageType by providing type information // regarding enums and messages referenced by the message fields. type MessageFieldTypes interface { MessageType - // Enum returns the EnumType for the ith field in MessageDescriptor.Fields. + // Enum returns the EnumType for the ith field in Descriptor.Fields. // It returns nil if the ith field is not an enum kind. // It panics if out of bounds. // // Invariant: mt.Enum(i).Descriptor() == mt.Descriptor().Fields(i).Enum() Enum(i int) EnumType - // Message returns the MessageType for the ith field in MessageDescriptor.Fields. + // Message returns the MessageType for the ith field in Descriptor.Fields. // It returns nil if the ith field is not a message or group kind. 
// It panics if out of bounds. // @@ -286,8 +286,8 @@ type MessageDescriptors interface { // corresponds with the google.protobuf.FieldDescriptorProto message. // // It is used for both normal fields defined within the parent message -// (e.g., [MessageDescriptor.Fields]) and fields that extend some remote message -// (e.g., [FileDescriptor.Extensions] or [MessageDescriptor.Extensions]). +// (e.g., MessageDescriptor.Fields) and fields that extend some remote message +// (e.g., FileDescriptor.Extensions or MessageDescriptor.Extensions). type FieldDescriptor interface { Descriptor @@ -344,7 +344,7 @@ type FieldDescriptor interface { // IsMap reports whether this field represents a map, // where the value type for the associated field is a Map. // It is equivalent to checking whether Cardinality is Repeated, - // that the Kind is MessageKind, and that MessageDescriptor.IsMapEntry reports true. + // that the Kind is MessageKind, and that Message.IsMapEntry reports true. IsMap() bool // MapKey returns the field descriptor for the key in the map entry. @@ -419,7 +419,7 @@ type OneofDescriptor interface { // IsSynthetic reports whether this is a synthetic oneof created to support // proto3 optional semantics. If true, Fields contains exactly one field - // with FieldDescriptor.HasOptionalKeyword specified. + // with HasOptionalKeyword specified. IsSynthetic() bool // Fields is a list of fields belonging to this oneof. @@ -442,10 +442,10 @@ type OneofDescriptors interface { doNotImplement } -// ExtensionDescriptor is an alias of [FieldDescriptor] for documentation. +// ExtensionDescriptor is an alias of FieldDescriptor for documentation. type ExtensionDescriptor = FieldDescriptor -// ExtensionTypeDescriptor is an [ExtensionDescriptor] with an associated [ExtensionType]. +// ExtensionTypeDescriptor is an ExtensionDescriptor with an associated ExtensionType. type ExtensionTypeDescriptor interface { ExtensionDescriptor @@ -470,12 +470,12 @@ type ExtensionDescriptors interface { doNotImplement } -// ExtensionType encapsulates an [ExtensionDescriptor] with a concrete +// ExtensionType encapsulates an ExtensionDescriptor with a concrete // Go implementation. The nested field descriptor must be for a extension field. // // While a normal field is a member of the parent message that it is declared -// within (see [Descriptor.Parent]), an extension field is a member of some other -// target message (see [FieldDescriptor.ContainingMessage]) and may have no +// within (see Descriptor.Parent), an extension field is a member of some other +// target message (see ExtensionDescriptor.Extendee) and may have no // relationship with the parent. However, the full name of an extension field is // relative to the parent that it is declared within. // @@ -510,7 +510,7 @@ type ExtensionType interface { // // ValueOf is more extensive than protoreflect.ValueOf for a given field's // value as it has more type information available. - ValueOf(any) Value + ValueOf(interface{}) Value // InterfaceOf completely unwraps the Value to the underlying Go type. // InterfaceOf panics if the input is nil or does not represent the @@ -519,20 +519,20 @@ type ExtensionType interface { // // InterfaceOf is able to unwrap the Value further than Value.Interface // as it has more type information available. - InterfaceOf(Value) any + InterfaceOf(Value) interface{} // IsValidValue reports whether the Value is valid to assign to the field. IsValidValue(Value) bool // IsValidInterface reports whether the input is valid to assign to the field. 
- IsValidInterface(any) bool + IsValidInterface(interface{}) bool } // EnumDescriptor describes an enum and // corresponds with the google.protobuf.EnumDescriptorProto message. // // Nested declarations: -// [EnumValueDescriptor]. +// EnumValueDescriptor. type EnumDescriptor interface { Descriptor @@ -544,17 +544,11 @@ type EnumDescriptor interface { // ReservedRanges is a list of reserved ranges of enum numbers. ReservedRanges() EnumRanges - // IsClosed reports whether this enum uses closed semantics. - // See https://protobuf.dev/programming-guides/enum/#definitions. - // Note: the Go protobuf implementation is not spec compliant and treats - // all enums as open enums. - IsClosed() bool - isEnumDescriptor } type isEnumDescriptor interface{ ProtoType(EnumDescriptor) } -// EnumType encapsulates an [EnumDescriptor] with a concrete Go implementation. +// EnumType encapsulates an EnumDescriptor with a concrete Go implementation. type EnumType interface { // New returns an instance of this enum type with its value set to n. New(n EnumNumber) Enum @@ -616,7 +610,7 @@ type EnumValueDescriptors interface { // ServiceDescriptor describes a service and // corresponds with the google.protobuf.ServiceDescriptorProto message. // -// Nested declarations: [MethodDescriptor]. +// Nested declarations: MethodDescriptor. type ServiceDescriptor interface { Descriptor diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go index a7b0d06f..37601b78 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go @@ -27,16 +27,16 @@ type Enum interface { // Message is a reflective interface for a concrete message value, // encapsulating both type and value information for the message. // -// Accessor/mutators for individual fields are keyed by [FieldDescriptor]. +// Accessor/mutators for individual fields are keyed by FieldDescriptor. // For non-extension fields, the descriptor must exactly match the // field known by the parent message. -// For extension fields, the descriptor must implement [ExtensionTypeDescriptor], -// extend the parent message (i.e., have the same message [FullName]), and +// For extension fields, the descriptor must implement ExtensionTypeDescriptor, +// extend the parent message (i.e., have the same message FullName), and // be within the parent's extension range. // -// Each field [Value] can be a scalar or a composite type ([Message], [List], or [Map]). -// See [Value] for the Go types associated with a [FieldDescriptor]. -// Providing a [Value] that is invalid or of an incorrect type panics. +// Each field Value can be a scalar or a composite type (Message, List, or Map). +// See Value for the Go types associated with a FieldDescriptor. +// Providing a Value that is invalid or of an incorrect type panics. type Message interface { // Descriptor returns message descriptor, which contains only the protobuf // type information for the message. @@ -152,7 +152,7 @@ type Message interface { // This method may return nil. // // The returned methods type is identical to - // google.golang.org/protobuf/runtime/protoiface.Methods. + // "google.golang.org/protobuf/runtime/protoiface".Methods. // Consult the protoiface package documentation for details. ProtoMethods() *methods } @@ -175,8 +175,8 @@ func (b RawFields) IsValid() bool { } // List is a zero-indexed, ordered list. 
-// The element [Value] type is determined by [FieldDescriptor.Kind]. -// Providing a [Value] that is invalid or of an incorrect type panics. +// The element Value type is determined by FieldDescriptor.Kind. +// Providing a Value that is invalid or of an incorrect type panics. type List interface { // Len reports the number of entries in the List. // Get, Set, and Truncate panic with out of bound indexes. @@ -226,9 +226,9 @@ type List interface { } // Map is an unordered, associative map. -// The entry [MapKey] type is determined by [FieldDescriptor.MapKey].Kind. -// The entry [Value] type is determined by [FieldDescriptor.MapValue].Kind. -// Providing a [MapKey] or [Value] that is invalid or of an incorrect type panics. +// The entry MapKey type is determined by FieldDescriptor.MapKey.Kind. +// The entry Value type is determined by FieldDescriptor.MapValue.Kind. +// Providing a MapKey or Value that is invalid or of an incorrect type panics. type Map interface { // Len reports the number of elements in the map. Len() int diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go index 654599d4..59165254 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go @@ -24,19 +24,19 @@ import ( // Unlike the == operator, a NaN is equal to another NaN. // // - Enums are equal if they contain the same number. -// Since [Value] does not contain an enum descriptor, +// Since Value does not contain an enum descriptor, // enum values do not consider the type of the enum. // // - Other scalar values are equal if they contain the same value. // -// - [Message] values are equal if they belong to the same message descriptor, +// - Message values are equal if they belong to the same message descriptor, // have the same set of populated known and extension field values, // and the same set of unknown fields values. // -// - [List] values are equal if they are the same length and +// - Lists are equal if they are the same length and // each corresponding element is equal. // -// - [Map] values are equal if they have the same set of keys and +// - Maps are equal if they have the same set of keys and // the corresponding value for each key is equal. 
func (v1 Value) Equal(v2 Value) bool { return equalValue(v1, v2) diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go index 75f83a2a..7ced876f 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go @@ -32,11 +32,11 @@ const ( type value struct { pragma.DoNotCompare // 0B - typ valueType // 8B - num uint64 // 8B - str string // 16B - bin []byte // 24B - iface any // 16B + typ valueType // 8B + num uint64 // 8B + str string // 16B + bin []byte // 24B + iface interface{} // 16B } func valueOfString(v string) Value { @@ -45,7 +45,7 @@ func valueOfString(v string) Value { func valueOfBytes(v []byte) Value { return Value{typ: bytesType, bin: v} } -func valueOfIface(v any) Value { +func valueOfIface(v interface{}) Value { return Value{typ: ifaceType, iface: v} } @@ -55,6 +55,6 @@ func (v Value) getString() string { func (v Value) getBytes() []byte { return v.bin } -func (v Value) getIface() any { +func (v Value) getIface() interface{} { return v.iface } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go index 9fe83cef..08e5ef73 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go @@ -11,7 +11,7 @@ import ( // Value is a union where only one Go type may be set at a time. // The Value is used to represent all possible values a field may take. -// The following shows which Go type is used to represent each proto [Kind]: +// The following shows which Go type is used to represent each proto Kind: // // ╔════════════╤═════════════════════════════════════╗ // ║ Go type │ Protobuf kind ║ @@ -31,22 +31,22 @@ import ( // // Multiple protobuf Kinds may be represented by a single Go type if the type // can losslessly represent the information for the proto kind. For example, -// [Int64Kind], [Sint64Kind], and [Sfixed64Kind] are all represented by int64, +// Int64Kind, Sint64Kind, and Sfixed64Kind are all represented by int64, // but use different integer encoding methods. // -// The [List] or [Map] types are used if the field cardinality is repeated. -// A field is a [List] if [FieldDescriptor.IsList] reports true. -// A field is a [Map] if [FieldDescriptor.IsMap] reports true. +// The List or Map types are used if the field cardinality is repeated. +// A field is a List if FieldDescriptor.IsList reports true. +// A field is a Map if FieldDescriptor.IsMap reports true. // // Converting to/from a Value and a concrete Go value panics on type mismatch. -// For example, [ValueOf]("hello").Int() panics because this attempts to +// For example, ValueOf("hello").Int() panics because this attempts to // retrieve an int64 from a string. // -// [List], [Map], and [Message] Values are called "composite" values. +// List, Map, and Message Values are called "composite" values. // // A composite Value may alias (reference) memory at some location, // such that changes to the Value updates the that location. -// A composite value acquired with a Mutable method, such as [Message.Mutable], +// A composite value acquired with a Mutable method, such as Message.Mutable, // always references the source object. // // For example: @@ -65,12 +65,12 @@ import ( // // appending to the List here may or may not modify the message. 
// list.Append(protoreflect.ValueOfInt32(0)) // -// Some operations, such as [Message.Get], may return an "empty, read-only" +// Some operations, such as Message.Get, may return an "empty, read-only" // composite Value. Modifying an empty, read-only value panics. type Value value -// The protoreflect API uses a custom Value union type instead of any -// to keep the future open for performance optimizations. Using an any +// The protoreflect API uses a custom Value union type instead of interface{} +// to keep the future open for performance optimizations. Using an interface{} // always incurs an allocation for primitives (e.g., int64) since it needs to // be boxed on the heap (as interfaces can only contain pointers natively). // Instead, we represent the Value union as a flat struct that internally keeps @@ -85,7 +85,7 @@ type Value value // ValueOf returns a Value initialized with the concrete value stored in v. // This panics if the type does not match one of the allowed types in the // Value union. -func ValueOf(v any) Value { +func ValueOf(v interface{}) Value { switch v := v.(type) { case nil: return Value{} @@ -192,10 +192,10 @@ func (v Value) IsValid() bool { return v.typ != nilType } -// Interface returns v as an any. +// Interface returns v as an interface{}. // // Invariant: v == ValueOf(v).Interface() -func (v Value) Interface() any { +func (v Value) Interface() interface{} { switch v.typ { case nilType: return nil @@ -306,7 +306,7 @@ func (v Value) Float() float64 { } } -// String returns v as a string. Since this method implements [fmt.Stringer], +// String returns v as a string. Since this method implements fmt.Stringer, // this returns the formatted string value for any non-string type. func (v Value) String() string { switch v.typ { @@ -327,7 +327,7 @@ func (v Value) Bytes() []byte { } } -// Enum returns v as a [EnumNumber] and panics if the type is not a [EnumNumber]. +// Enum returns v as a EnumNumber and panics if the type is not a EnumNumber. func (v Value) Enum() EnumNumber { switch v.typ { case enumType: @@ -337,7 +337,7 @@ func (v Value) Enum() EnumNumber { } } -// Message returns v as a [Message] and panics if the type is not a [Message]. +// Message returns v as a Message and panics if the type is not a Message. func (v Value) Message() Message { switch vi := v.getIface().(type) { case Message: @@ -347,7 +347,7 @@ func (v Value) Message() Message { } } -// List returns v as a [List] and panics if the type is not a [List]. +// List returns v as a List and panics if the type is not a List. func (v Value) List() List { switch vi := v.getIface().(type) { case List: @@ -357,7 +357,7 @@ func (v Value) List() List { } } -// Map returns v as a [Map] and panics if the type is not a [Map]. +// Map returns v as a Map and panics if the type is not a Map. func (v Value) Map() Map { switch vi := v.getIface().(type) { case Map: @@ -367,7 +367,7 @@ func (v Value) Map() Map { } } -// MapKey returns v as a [MapKey] and panics for invalid [MapKey] types. +// MapKey returns v as a MapKey and panics for invalid MapKey types. func (v Value) MapKey() MapKey { switch v.typ { case boolType, int32Type, int64Type, uint32Type, uint64Type, stringType: @@ -378,8 +378,8 @@ func (v Value) MapKey() MapKey { } // MapKey is used to index maps, where the Go type of the MapKey must match -// the specified key [Kind] (see [MessageDescriptor.IsMapEntry]). -// The following shows what Go type is used to represent each proto [Kind]: +// the specified key Kind (see MessageDescriptor.IsMapEntry). 
+// The following shows what Go type is used to represent each proto Kind: // // ╔═════════╤═════════════════════════════════════╗ // ║ Go type │ Protobuf kind ║ @@ -392,13 +392,13 @@ func (v Value) MapKey() MapKey { // ║ string │ StringKind ║ // ╚═════════╧═════════════════════════════════════╝ // -// A MapKey is constructed and accessed through a [Value]: +// A MapKey is constructed and accessed through a Value: // // k := ValueOf("hash").MapKey() // convert string to MapKey // s := k.String() // convert MapKey to string // -// The MapKey is a strict subset of valid types used in [Value]; -// converting a [Value] to a MapKey with an invalid type panics. +// The MapKey is a strict subset of valid types used in Value; +// converting a Value to a MapKey with an invalid type panics. type MapKey value // IsValid reports whether k is populated with a value. @@ -406,8 +406,8 @@ func (k MapKey) IsValid() bool { return Value(k).IsValid() } -// Interface returns k as an any. -func (k MapKey) Interface() any { +// Interface returns k as an interface{}. +func (k MapKey) Interface() interface{} { return Value(k).Interface() } @@ -426,13 +426,13 @@ func (k MapKey) Uint() uint64 { return Value(k).Uint() } -// String returns k as a string. Since this method implements [fmt.Stringer], +// String returns k as a string. Since this method implements fmt.Stringer, // this returns the formatted string value for any non-string type. func (k MapKey) String() string { return Value(k).String() } -// Value returns k as a [Value]. +// Value returns k as a Value. func (k MapKey) Value() Value { return Value(k) } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go similarity index 93% rename from vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go rename to vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go index 7f3583ea..702ddf22 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine && !go1.21 -// +build !purego,!appengine,!go1.21 +//go:build !purego && !appengine +// +build !purego,!appengine package protoreflect @@ -45,7 +45,7 @@ var ( // typeOf returns a pointer to the Go type information. // The pointer is comparable and equal if and only if the types are identical. 
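To make the Value union and MapKey semantics documented above concrete, here is a short usage sketch. It is illustrative only and not part of the patch; the package main wrapper and the sample literals (42, "hello", "hash") are assumptions, while the calls (ValueOf, ValueOfInt64, ValueOfString, Interface, MapKey) are exactly the ones described in the comments in this hunk.

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/reflect/protoreflect"
    )

    func main() {
        // ValueOf* boxes a concrete Go value into the Value union;
        // the typed accessor must match the stored kind or it panics.
        v := protoreflect.ValueOfInt64(42)
        fmt.Println(v.Int(), v.IsValid()) // 42 true

        // Interface round-trips the union back to an empty interface,
        // as documented for Value.Interface above.
        fmt.Println(protoreflect.ValueOf("hello").Interface()) // hello

        // MapKey is a strict subset of Value and is built through a Value,
        // mirroring the ValueOf("hash").MapKey() example in the comments.
        k := protoreflect.ValueOfString("hash").MapKey()
        fmt.Println(k.String()) // hash
    }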
-func typeOf(t any) unsafe.Pointer { +func typeOf(t interface{}) unsafe.Pointer { return (*ifaceHeader)(unsafe.Pointer(&t)).Type } @@ -80,7 +80,7 @@ func valueOfBytes(v []byte) Value { p := (*sliceHeader)(unsafe.Pointer(&v)) return Value{typ: bytesType, ptr: p.Data, num: uint64(len(v))} } -func valueOfIface(v any) Value { +func valueOfIface(v interface{}) Value { p := (*ifaceHeader)(unsafe.Pointer(&v)) return Value{typ: p.Type, ptr: p.Data} } @@ -93,7 +93,7 @@ func (v Value) getBytes() (x []byte) { *(*sliceHeader)(unsafe.Pointer(&x)) = sliceHeader{Data: v.ptr, Len: int(v.num), Cap: int(v.num)} return x } -func (v Value) getIface() (x any) { +func (v Value) getIface() (x interface{}) { *(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr} return x } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go deleted file mode 100644 index f7d38699..00000000 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !purego && !appengine && go1.21 -// +build !purego,!appengine,go1.21 - -package protoreflect - -import ( - "unsafe" - - "google.golang.org/protobuf/internal/pragma" -) - -type ( - ifaceHeader struct { - _ [0]any // if interfaces have greater alignment than unsafe.Pointer, this will enforce it. - Type unsafe.Pointer - Data unsafe.Pointer - } -) - -var ( - nilType = typeOf(nil) - boolType = typeOf(*new(bool)) - int32Type = typeOf(*new(int32)) - int64Type = typeOf(*new(int64)) - uint32Type = typeOf(*new(uint32)) - uint64Type = typeOf(*new(uint64)) - float32Type = typeOf(*new(float32)) - float64Type = typeOf(*new(float64)) - stringType = typeOf(*new(string)) - bytesType = typeOf(*new([]byte)) - enumType = typeOf(*new(EnumNumber)) -) - -// typeOf returns a pointer to the Go type information. -// The pointer is comparable and equal if and only if the types are identical. -func typeOf(t any) unsafe.Pointer { - return (*ifaceHeader)(unsafe.Pointer(&t)).Type -} - -// value is a union where only one type can be represented at a time. -// The struct is 24B large on 64-bit systems and requires the minimum storage -// necessary to represent each possible type. -// -// The Go GC needs to be able to scan variables containing pointers. -// As such, pointers and non-pointers cannot be intermixed. -type value struct { - pragma.DoNotCompare // 0B - - // typ stores the type of the value as a pointer to the Go type. - typ unsafe.Pointer // 8B - - // ptr stores the data pointer for a String, Bytes, or interface value. - ptr unsafe.Pointer // 8B - - // num stores a Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, or - // Enum value as a raw uint64. - // - // It is also used to store the length of a String or Bytes value; - // the capacity is ignored. 
- num uint64 // 8B -} - -func valueOfString(v string) Value { - return Value{typ: stringType, ptr: unsafe.Pointer(unsafe.StringData(v)), num: uint64(len(v))} -} -func valueOfBytes(v []byte) Value { - return Value{typ: bytesType, ptr: unsafe.Pointer(unsafe.SliceData(v)), num: uint64(len(v))} -} -func valueOfIface(v any) Value { - p := (*ifaceHeader)(unsafe.Pointer(&v)) - return Value{typ: p.Type, ptr: p.Data} -} - -func (v Value) getString() string { - return unsafe.String((*byte)(v.ptr), v.num) -} -func (v Value) getBytes() []byte { - return unsafe.Slice((*byte)(v.ptr), v.num) -} -func (v Value) getIface() (x any) { - *(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr} - return x -} diff --git a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go index de177733..aeb55977 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go +++ b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go @@ -5,12 +5,12 @@ // Package protoregistry provides data structures to register and lookup // protobuf descriptor types. // -// The [Files] registry contains file descriptors and provides the ability +// The Files registry contains file descriptors and provides the ability // to iterate over the files or lookup a specific descriptor within the files. -// [Files] only contains protobuf descriptors and has no understanding of Go +// Files only contains protobuf descriptors and has no understanding of Go // type information that may be associated with each descriptor. // -// The [Types] registry contains descriptor types for which there is a known +// The Types registry contains descriptor types for which there is a known // Go type associated with that descriptor. It provides the ability to iterate // over the registered types or lookup a type by name. package protoregistry @@ -95,7 +95,7 @@ type Files struct { // multiple files. Only top-level declarations are registered. // Note that enum values are in the top-level since that are in the same // scope as the parent enum. - descsByName map[protoreflect.FullName]any + descsByName map[protoreflect.FullName]interface{} filesByPath map[string][]protoreflect.FileDescriptor numFiles int } @@ -117,7 +117,7 @@ func (r *Files) RegisterFile(file protoreflect.FileDescriptor) error { defer globalMutex.Unlock() } if r.descsByName == nil { - r.descsByName = map[protoreflect.FullName]any{ + r.descsByName = map[protoreflect.FullName]interface{}{ "": &packageDescriptor{}, } r.filesByPath = make(map[string][]protoreflect.FileDescriptor) @@ -218,7 +218,7 @@ func (r *Files) checkGenProtoConflict(path string) { // FindDescriptorByName looks up a descriptor by the full name. // -// This returns (nil, [NotFound]) if not found. +// This returns (nil, NotFound) if not found. func (r *Files) FindDescriptorByName(name protoreflect.FullName) (protoreflect.Descriptor, error) { if r == nil { return nil, NotFound @@ -310,7 +310,7 @@ func (s *nameSuffix) Pop() (name protoreflect.Name) { // FindFileByPath looks up a file by the path. // -// This returns (nil, [NotFound]) if not found. +// This returns (nil, NotFound) if not found. // This returns an error if multiple files have the same path. 
func (r *Files) FindFileByPath(path string) (protoreflect.FileDescriptor, error) { if r == nil { @@ -431,7 +431,7 @@ func rangeTopLevelDescriptors(fd protoreflect.FileDescriptor, f func(protoreflec // A compliant implementation must deterministically return the same type // if no error is encountered. // -// The [Types] type implements this interface. +// The Types type implements this interface. type MessageTypeResolver interface { // FindMessageByName looks up a message by its full name. // E.g., "google.protobuf.Any" @@ -451,7 +451,7 @@ type MessageTypeResolver interface { // A compliant implementation must deterministically return the same type // if no error is encountered. // -// The [Types] type implements this interface. +// The Types type implements this interface. type ExtensionTypeResolver interface { // FindExtensionByName looks up a extension field by the field's full name. // Note that this is the full name of the field as determined by @@ -485,7 +485,7 @@ type Types struct { } type ( - typesByName map[protoreflect.FullName]any + typesByName map[protoreflect.FullName]interface{} extensionsByMessage map[protoreflect.FullName]extensionsByNumber extensionsByNumber map[protoreflect.FieldNumber]protoreflect.ExtensionType ) @@ -570,7 +570,7 @@ func (r *Types) RegisterExtension(xt protoreflect.ExtensionType) error { return nil } -func (r *Types) register(kind string, desc protoreflect.Descriptor, typ any) error { +func (r *Types) register(kind string, desc protoreflect.Descriptor, typ interface{}) error { name := desc.FullName() prev := r.typesByName[name] if prev != nil { @@ -590,7 +590,7 @@ func (r *Types) register(kind string, desc protoreflect.Descriptor, typ any) err // FindEnumByName looks up an enum by its full name. // E.g., "google.protobuf.Field.Kind". // -// This returns (nil, [NotFound]) if not found. +// This returns (nil, NotFound) if not found. func (r *Types) FindEnumByName(enum protoreflect.FullName) (protoreflect.EnumType, error) { if r == nil { return nil, NotFound @@ -611,7 +611,7 @@ func (r *Types) FindEnumByName(enum protoreflect.FullName) (protoreflect.EnumTyp // FindMessageByName looks up a message by its full name, // e.g. "google.protobuf.Any". // -// This returns (nil, [NotFound]) if not found. +// This returns (nil, NotFound) if not found. func (r *Types) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) { if r == nil { return nil, NotFound @@ -632,7 +632,7 @@ func (r *Types) FindMessageByName(message protoreflect.FullName) (protoreflect.M // FindMessageByURL looks up a message by a URL identifier. // See documentation on google.protobuf.Any.type_url for the URL format. // -// This returns (nil, [NotFound]) if not found. +// This returns (nil, NotFound) if not found. func (r *Types) FindMessageByURL(url string) (protoreflect.MessageType, error) { // This function is similar to FindMessageByName but // truncates anything before and including '/' in the URL. @@ -662,7 +662,7 @@ func (r *Types) FindMessageByURL(url string) (protoreflect.MessageType, error) { // where the extension is declared and is unrelated to the full name of the // message being extended. // -// This returns (nil, [NotFound]) if not found. +// This returns (nil, NotFound) if not found. 
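The protoregistry lookups touched in this hunk (FindMessageByName, FindMessageByURL, the NotFound sentinel) are typically used against the global registries. Below is a minimal sketch, not part of the patch; it assumes the well-known Any type is linked into the binary via a blank import of the anypb package so that its generated init registers google.protobuf.Any, and the package main wrapper is for illustration only.

    package main

    import (
        "errors"
        "fmt"

        "google.golang.org/protobuf/reflect/protoregistry"
        _ "google.golang.org/protobuf/types/known/anypb" // assumed: registers google.protobuf.Any
    )

    func main() {
        mt, err := protoregistry.GlobalTypes.FindMessageByName("google.protobuf.Any")
        if errors.Is(err, protoregistry.NotFound) {
            // Lookups return the NotFound sentinel when nothing is registered.
            fmt.Println("google.protobuf.Any is not registered")
            return
        }
        if err != nil {
            panic(err)
        }
        fmt.Println(mt.Descriptor().FullName()) // google.protobuf.Any
    }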
func (r *Types) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { if r == nil { return nil, NotFound @@ -703,7 +703,7 @@ func (r *Types) FindExtensionByName(field protoreflect.FullName) (protoreflect.E // FindExtensionByNumber looks up a extension field by the field number // within some parent message, identified by full name. // -// This returns (nil, [NotFound]) if not found. +// This returns (nil, NotFound) if not found. func (r *Types) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { if r == nil { return nil, NotFound @@ -841,7 +841,7 @@ func (r *Types) RangeExtensionsByMessage(message protoreflect.FullName, f func(p } } -func typeName(t any) string { +func typeName(t interface{}) string { switch t.(type) { case protoreflect.EnumType: return "enum" @@ -854,7 +854,7 @@ func typeName(t any) string { } } -func amendErrorWithCaller(err error, prev, curr any) error { +func amendErrorWithCaller(err error, prev, curr interface{}) error { prevPkg := goPackage(prev) currPkg := goPackage(curr) if prevPkg == "" || currPkg == "" || prevPkg == currPkg { @@ -863,7 +863,7 @@ func amendErrorWithCaller(err error, prev, curr any) error { return errors.New("%s\n\tpreviously from: %q\n\tcurrently from: %q", err, prevPkg, currPkg) } -func goPackage(v any) string { +func goPackage(v interface{}) string { switch d := v.(type) { case protoreflect.EnumType: v = d.Descriptor() diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go index 9403eb07..04c00f73 100644 --- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -48,108 +48,6 @@ import ( sync "sync" ) -// The full set of known editions. -type Edition int32 - -const ( - // A placeholder for an unknown edition value. - Edition_EDITION_UNKNOWN Edition = 0 - // A placeholder edition for specifying default behaviors *before* a feature - // was first introduced. This is effectively an "infinite past". - Edition_EDITION_LEGACY Edition = 900 - // Legacy syntax "editions". These pre-date editions, but behave much like - // distinct editions. These can't be used to specify the edition of proto - // files, but feature definitions must supply proto2/proto3 defaults for - // backwards compatibility. - Edition_EDITION_PROTO2 Edition = 998 - Edition_EDITION_PROTO3 Edition = 999 - // Editions that have been released. The specific values are arbitrary and - // should not be depended on, but they will always be time-ordered for easy - // comparison. - Edition_EDITION_2023 Edition = 1000 - Edition_EDITION_2024 Edition = 1001 - // Placeholder editions for testing feature resolution. These should not be - // used or relyed on outside of tests. - Edition_EDITION_1_TEST_ONLY Edition = 1 - Edition_EDITION_2_TEST_ONLY Edition = 2 - Edition_EDITION_99997_TEST_ONLY Edition = 99997 - Edition_EDITION_99998_TEST_ONLY Edition = 99998 - Edition_EDITION_99999_TEST_ONLY Edition = 99999 - // Placeholder for specifying unbounded edition support. This should only - // ever be used by plugins that can expect to never require any changes to - // support a new edition. - Edition_EDITION_MAX Edition = 2147483647 -) - -// Enum value maps for Edition. 
-var ( - Edition_name = map[int32]string{ - 0: "EDITION_UNKNOWN", - 900: "EDITION_LEGACY", - 998: "EDITION_PROTO2", - 999: "EDITION_PROTO3", - 1000: "EDITION_2023", - 1001: "EDITION_2024", - 1: "EDITION_1_TEST_ONLY", - 2: "EDITION_2_TEST_ONLY", - 99997: "EDITION_99997_TEST_ONLY", - 99998: "EDITION_99998_TEST_ONLY", - 99999: "EDITION_99999_TEST_ONLY", - 2147483647: "EDITION_MAX", - } - Edition_value = map[string]int32{ - "EDITION_UNKNOWN": 0, - "EDITION_LEGACY": 900, - "EDITION_PROTO2": 998, - "EDITION_PROTO3": 999, - "EDITION_2023": 1000, - "EDITION_2024": 1001, - "EDITION_1_TEST_ONLY": 1, - "EDITION_2_TEST_ONLY": 2, - "EDITION_99997_TEST_ONLY": 99997, - "EDITION_99998_TEST_ONLY": 99998, - "EDITION_99999_TEST_ONLY": 99999, - "EDITION_MAX": 2147483647, - } -) - -func (x Edition) Enum() *Edition { - p := new(Edition) - *p = x - return p -} - -func (x Edition) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (Edition) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[0].Descriptor() -} - -func (Edition) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[0] -} - -func (x Edition) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Do not use. -func (x *Edition) UnmarshalJSON(b []byte) error { - num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) - if err != nil { - return err - } - *x = Edition(num) - return nil -} - -// Deprecated: Use Edition.Descriptor instead. -func (Edition) EnumDescriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{0} -} - // The verification state of the extension range. type ExtensionRangeOptions_VerificationState int32 @@ -182,11 +80,11 @@ func (x ExtensionRangeOptions_VerificationState) String() string { } func (ExtensionRangeOptions_VerificationState) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[0].Descriptor() } func (ExtensionRangeOptions_VerificationState) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[1] + return &file_google_protobuf_descriptor_proto_enumTypes[0] } func (x ExtensionRangeOptions_VerificationState) Number() protoreflect.EnumNumber { @@ -227,10 +125,9 @@ const ( FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 // Tag-delimited aggregate. - // Group type is deprecated and not supported after google.protobuf. However, Proto3 + // Group type is deprecated and not supported in proto3. However, Proto3 // implementations should still be able to parse the group wire format and - // treat group fields as unknown fields. In Editions, the group wire format - // can be enabled via the `message_encoding` feature. + // treat group fields as unknown fields. FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 // Length-delimited aggregate. // New in version 2. 
@@ -298,11 +195,11 @@ func (x FieldDescriptorProto_Type) String() string { } func (FieldDescriptorProto_Type) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor() } func (FieldDescriptorProto_Type) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[2] + return &file_google_protobuf_descriptor_proto_enumTypes[1] } func (x FieldDescriptorProto_Type) Number() protoreflect.EnumNumber { @@ -329,24 +226,21 @@ type FieldDescriptorProto_Label int32 const ( // 0 is reserved for errors FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 - FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 - // The required label is only allowed in google.protobuf. In proto3 and Editions - // it's explicitly prohibited. In Editions, the `field_presence` feature - // can be used to get this behavior. FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 + FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 ) // Enum value maps for FieldDescriptorProto_Label. var ( FieldDescriptorProto_Label_name = map[int32]string{ 1: "LABEL_OPTIONAL", - 3: "LABEL_REPEATED", 2: "LABEL_REQUIRED", + 3: "LABEL_REPEATED", } FieldDescriptorProto_Label_value = map[string]int32{ "LABEL_OPTIONAL": 1, - "LABEL_REPEATED": 3, "LABEL_REQUIRED": 2, + "LABEL_REPEATED": 3, } ) @@ -361,11 +255,11 @@ func (x FieldDescriptorProto_Label) String() string { } func (FieldDescriptorProto_Label) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor() } func (FieldDescriptorProto_Label) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[3] + return &file_google_protobuf_descriptor_proto_enumTypes[2] } func (x FieldDescriptorProto_Label) Number() protoreflect.EnumNumber { @@ -422,11 +316,11 @@ func (x FileOptions_OptimizeMode) String() string { } func (FileOptions_OptimizeMode) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor() } func (FileOptions_OptimizeMode) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[4] + return &file_google_protobuf_descriptor_proto_enumTypes[3] } func (x FileOptions_OptimizeMode) Number() protoreflect.EnumNumber { @@ -488,11 +382,11 @@ func (x FieldOptions_CType) String() string { } func (FieldOptions_CType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor() } func (FieldOptions_CType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[5] + return &file_google_protobuf_descriptor_proto_enumTypes[4] } func (x FieldOptions_CType) Number() protoreflect.EnumNumber { @@ -550,11 +444,11 @@ func (x FieldOptions_JSType) String() string { } func (FieldOptions_JSType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor() } func (FieldOptions_JSType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[6] + return 
&file_google_protobuf_descriptor_proto_enumTypes[5] } func (x FieldOptions_JSType) Number() protoreflect.EnumNumber { @@ -612,11 +506,11 @@ func (x FieldOptions_OptionRetention) String() string { } func (FieldOptions_OptionRetention) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor() } func (FieldOptions_OptionRetention) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[7] + return &file_google_protobuf_descriptor_proto_enumTypes[6] } func (x FieldOptions_OptionRetention) Number() protoreflect.EnumNumber { @@ -696,11 +590,11 @@ func (x FieldOptions_OptionTargetType) String() string { } func (FieldOptions_OptionTargetType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor() } func (FieldOptions_OptionTargetType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[8] + return &file_google_protobuf_descriptor_proto_enumTypes[7] } func (x FieldOptions_OptionTargetType) Number() protoreflect.EnumNumber { @@ -758,11 +652,11 @@ func (x MethodOptions_IdempotencyLevel) String() string { } func (MethodOptions_IdempotencyLevel) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[9].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor() } func (MethodOptions_IdempotencyLevel) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[9] + return &file_google_protobuf_descriptor_proto_enumTypes[8] } func (x MethodOptions_IdempotencyLevel) Number() protoreflect.EnumNumber { @@ -784,363 +678,6 @@ func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{17, 0} } -type FeatureSet_FieldPresence int32 - -const ( - FeatureSet_FIELD_PRESENCE_UNKNOWN FeatureSet_FieldPresence = 0 - FeatureSet_EXPLICIT FeatureSet_FieldPresence = 1 - FeatureSet_IMPLICIT FeatureSet_FieldPresence = 2 - FeatureSet_LEGACY_REQUIRED FeatureSet_FieldPresence = 3 -) - -// Enum value maps for FeatureSet_FieldPresence. -var ( - FeatureSet_FieldPresence_name = map[int32]string{ - 0: "FIELD_PRESENCE_UNKNOWN", - 1: "EXPLICIT", - 2: "IMPLICIT", - 3: "LEGACY_REQUIRED", - } - FeatureSet_FieldPresence_value = map[string]int32{ - "FIELD_PRESENCE_UNKNOWN": 0, - "EXPLICIT": 1, - "IMPLICIT": 2, - "LEGACY_REQUIRED": 3, - } -) - -func (x FeatureSet_FieldPresence) Enum() *FeatureSet_FieldPresence { - p := new(FeatureSet_FieldPresence) - *p = x - return p -} - -func (x FeatureSet_FieldPresence) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (FeatureSet_FieldPresence) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[10].Descriptor() -} - -func (FeatureSet_FieldPresence) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[10] -} - -func (x FeatureSet_FieldPresence) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Do not use. 
-func (x *FeatureSet_FieldPresence) UnmarshalJSON(b []byte) error { - num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) - if err != nil { - return err - } - *x = FeatureSet_FieldPresence(num) - return nil -} - -// Deprecated: Use FeatureSet_FieldPresence.Descriptor instead. -func (FeatureSet_FieldPresence) EnumDescriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0} -} - -type FeatureSet_EnumType int32 - -const ( - FeatureSet_ENUM_TYPE_UNKNOWN FeatureSet_EnumType = 0 - FeatureSet_OPEN FeatureSet_EnumType = 1 - FeatureSet_CLOSED FeatureSet_EnumType = 2 -) - -// Enum value maps for FeatureSet_EnumType. -var ( - FeatureSet_EnumType_name = map[int32]string{ - 0: "ENUM_TYPE_UNKNOWN", - 1: "OPEN", - 2: "CLOSED", - } - FeatureSet_EnumType_value = map[string]int32{ - "ENUM_TYPE_UNKNOWN": 0, - "OPEN": 1, - "CLOSED": 2, - } -) - -func (x FeatureSet_EnumType) Enum() *FeatureSet_EnumType { - p := new(FeatureSet_EnumType) - *p = x - return p -} - -func (x FeatureSet_EnumType) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (FeatureSet_EnumType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[11].Descriptor() -} - -func (FeatureSet_EnumType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[11] -} - -func (x FeatureSet_EnumType) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Do not use. -func (x *FeatureSet_EnumType) UnmarshalJSON(b []byte) error { - num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) - if err != nil { - return err - } - *x = FeatureSet_EnumType(num) - return nil -} - -// Deprecated: Use FeatureSet_EnumType.Descriptor instead. -func (FeatureSet_EnumType) EnumDescriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 1} -} - -type FeatureSet_RepeatedFieldEncoding int32 - -const ( - FeatureSet_REPEATED_FIELD_ENCODING_UNKNOWN FeatureSet_RepeatedFieldEncoding = 0 - FeatureSet_PACKED FeatureSet_RepeatedFieldEncoding = 1 - FeatureSet_EXPANDED FeatureSet_RepeatedFieldEncoding = 2 -) - -// Enum value maps for FeatureSet_RepeatedFieldEncoding. -var ( - FeatureSet_RepeatedFieldEncoding_name = map[int32]string{ - 0: "REPEATED_FIELD_ENCODING_UNKNOWN", - 1: "PACKED", - 2: "EXPANDED", - } - FeatureSet_RepeatedFieldEncoding_value = map[string]int32{ - "REPEATED_FIELD_ENCODING_UNKNOWN": 0, - "PACKED": 1, - "EXPANDED": 2, - } -) - -func (x FeatureSet_RepeatedFieldEncoding) Enum() *FeatureSet_RepeatedFieldEncoding { - p := new(FeatureSet_RepeatedFieldEncoding) - *p = x - return p -} - -func (x FeatureSet_RepeatedFieldEncoding) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (FeatureSet_RepeatedFieldEncoding) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[12].Descriptor() -} - -func (FeatureSet_RepeatedFieldEncoding) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[12] -} - -func (x FeatureSet_RepeatedFieldEncoding) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Do not use. 
-func (x *FeatureSet_RepeatedFieldEncoding) UnmarshalJSON(b []byte) error { - num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) - if err != nil { - return err - } - *x = FeatureSet_RepeatedFieldEncoding(num) - return nil -} - -// Deprecated: Use FeatureSet_RepeatedFieldEncoding.Descriptor instead. -func (FeatureSet_RepeatedFieldEncoding) EnumDescriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 2} -} - -type FeatureSet_Utf8Validation int32 - -const ( - FeatureSet_UTF8_VALIDATION_UNKNOWN FeatureSet_Utf8Validation = 0 - FeatureSet_VERIFY FeatureSet_Utf8Validation = 2 - FeatureSet_NONE FeatureSet_Utf8Validation = 3 -) - -// Enum value maps for FeatureSet_Utf8Validation. -var ( - FeatureSet_Utf8Validation_name = map[int32]string{ - 0: "UTF8_VALIDATION_UNKNOWN", - 2: "VERIFY", - 3: "NONE", - } - FeatureSet_Utf8Validation_value = map[string]int32{ - "UTF8_VALIDATION_UNKNOWN": 0, - "VERIFY": 2, - "NONE": 3, - } -) - -func (x FeatureSet_Utf8Validation) Enum() *FeatureSet_Utf8Validation { - p := new(FeatureSet_Utf8Validation) - *p = x - return p -} - -func (x FeatureSet_Utf8Validation) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (FeatureSet_Utf8Validation) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[13].Descriptor() -} - -func (FeatureSet_Utf8Validation) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[13] -} - -func (x FeatureSet_Utf8Validation) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Do not use. -func (x *FeatureSet_Utf8Validation) UnmarshalJSON(b []byte) error { - num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) - if err != nil { - return err - } - *x = FeatureSet_Utf8Validation(num) - return nil -} - -// Deprecated: Use FeatureSet_Utf8Validation.Descriptor instead. -func (FeatureSet_Utf8Validation) EnumDescriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 3} -} - -type FeatureSet_MessageEncoding int32 - -const ( - FeatureSet_MESSAGE_ENCODING_UNKNOWN FeatureSet_MessageEncoding = 0 - FeatureSet_LENGTH_PREFIXED FeatureSet_MessageEncoding = 1 - FeatureSet_DELIMITED FeatureSet_MessageEncoding = 2 -) - -// Enum value maps for FeatureSet_MessageEncoding. -var ( - FeatureSet_MessageEncoding_name = map[int32]string{ - 0: "MESSAGE_ENCODING_UNKNOWN", - 1: "LENGTH_PREFIXED", - 2: "DELIMITED", - } - FeatureSet_MessageEncoding_value = map[string]int32{ - "MESSAGE_ENCODING_UNKNOWN": 0, - "LENGTH_PREFIXED": 1, - "DELIMITED": 2, - } -) - -func (x FeatureSet_MessageEncoding) Enum() *FeatureSet_MessageEncoding { - p := new(FeatureSet_MessageEncoding) - *p = x - return p -} - -func (x FeatureSet_MessageEncoding) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (FeatureSet_MessageEncoding) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[14].Descriptor() -} - -func (FeatureSet_MessageEncoding) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[14] -} - -func (x FeatureSet_MessageEncoding) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Do not use. 
-func (x *FeatureSet_MessageEncoding) UnmarshalJSON(b []byte) error { - num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) - if err != nil { - return err - } - *x = FeatureSet_MessageEncoding(num) - return nil -} - -// Deprecated: Use FeatureSet_MessageEncoding.Descriptor instead. -func (FeatureSet_MessageEncoding) EnumDescriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 4} -} - -type FeatureSet_JsonFormat int32 - -const ( - FeatureSet_JSON_FORMAT_UNKNOWN FeatureSet_JsonFormat = 0 - FeatureSet_ALLOW FeatureSet_JsonFormat = 1 - FeatureSet_LEGACY_BEST_EFFORT FeatureSet_JsonFormat = 2 -) - -// Enum value maps for FeatureSet_JsonFormat. -var ( - FeatureSet_JsonFormat_name = map[int32]string{ - 0: "JSON_FORMAT_UNKNOWN", - 1: "ALLOW", - 2: "LEGACY_BEST_EFFORT", - } - FeatureSet_JsonFormat_value = map[string]int32{ - "JSON_FORMAT_UNKNOWN": 0, - "ALLOW": 1, - "LEGACY_BEST_EFFORT": 2, - } -) - -func (x FeatureSet_JsonFormat) Enum() *FeatureSet_JsonFormat { - p := new(FeatureSet_JsonFormat) - *p = x - return p -} - -func (x FeatureSet_JsonFormat) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (FeatureSet_JsonFormat) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[15].Descriptor() -} - -func (FeatureSet_JsonFormat) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[15] -} - -func (x FeatureSet_JsonFormat) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Do not use. -func (x *FeatureSet_JsonFormat) UnmarshalJSON(b []byte) error { - num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) - if err != nil { - return err - } - *x = FeatureSet_JsonFormat(num) - return nil -} - -// Deprecated: Use FeatureSet_JsonFormat.Descriptor instead. -func (FeatureSet_JsonFormat) EnumDescriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 5} -} - // Represents the identified object's effect on the element in the original // .proto file. type GeneratedCodeInfo_Annotation_Semantic int32 @@ -1179,11 +716,11 @@ func (x GeneratedCodeInfo_Annotation_Semantic) String() string { } func (GeneratedCodeInfo_Annotation_Semantic) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[16].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[9].Descriptor() } func (GeneratedCodeInfo_Annotation_Semantic) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[16] + return &file_google_protobuf_descriptor_proto_enumTypes[9] } func (x GeneratedCodeInfo_Annotation_Semantic) Number() protoreflect.EnumNumber { @@ -1202,7 +739,7 @@ func (x *GeneratedCodeInfo_Annotation_Semantic) UnmarshalJSON(b []byte) error { // Deprecated: Use GeneratedCodeInfo_Annotation_Semantic.Descriptor instead. func (GeneratedCodeInfo_Annotation_Semantic) EnumDescriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{22, 0, 0} + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20, 0, 0} } // The protocol compiler can output a FileDescriptorSet containing the .proto @@ -1285,8 +822,8 @@ type FileDescriptorProto struct { // // If `edition` is present, this value must be "editions". Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` - // The edition of the proto file. 
- Edition *Edition `protobuf:"varint,14,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` + // The edition of the proto file, which is an opaque string. + Edition *string `protobuf:"bytes,13,opt,name=edition" json:"edition,omitempty"` } func (x *FileDescriptorProto) Reset() { @@ -1405,11 +942,11 @@ func (x *FileDescriptorProto) GetSyntax() string { return "" } -func (x *FileDescriptorProto) GetEdition() Edition { +func (x *FileDescriptorProto) GetEdition() string { if x != nil && x.Edition != nil { return *x.Edition } - return Edition_EDITION_UNKNOWN + return "" } // Describes a message type. @@ -1542,14 +1079,13 @@ type ExtensionRangeOptions struct { // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - // For external users: DO NOT USE. We are in the process of open sourcing - // extension declaration and executing internal cleanups before it can be - // used externally. + // go/protobuf-stripping-extension-declarations + // Like Metadata, but we use a repeated field to hold all extension + // declarations. This should avoid the size increases of transforming a large + // extension range into small ranges in generated binaries. Declaration []*ExtensionRangeOptions_Declaration `protobuf:"bytes,2,rep,name=declaration" json:"declaration,omitempty"` - // Any features defined in the specific edition. - Features *FeatureSet `protobuf:"bytes,50,opt,name=features" json:"features,omitempty"` // The verification state of the range. - // TODO: flip the default to DECLARATION once all empty ranges + // TODO(b/278783756): flip the default to DECLARATION once all empty ranges // are marked as UNVERIFIED. Verification *ExtensionRangeOptions_VerificationState `protobuf:"varint,3,opt,name=verification,enum=google.protobuf.ExtensionRangeOptions_VerificationState,def=1" json:"verification,omitempty"` } @@ -1605,13 +1141,6 @@ func (x *ExtensionRangeOptions) GetDeclaration() []*ExtensionRangeOptions_Declar return nil } -func (x *ExtensionRangeOptions) GetFeatures() *FeatureSet { - if x != nil { - return x.Features - } - return nil -} - func (x *ExtensionRangeOptions) GetVerification() ExtensionRangeOptions_VerificationState { if x != nil && x.Verification != nil { return *x.Verification @@ -1657,12 +1186,12 @@ type FieldDescriptorProto struct { // If true, this is a proto3 "optional". When a proto3 field is optional, it // tracks presence regardless of field type. // - // When proto3_optional is true, this field must belong to a oneof to signal - // to old proto3 clients that presence is tracked for this field. This oneof - // is known as a "synthetic" oneof, and this field must be its sole member - // (each proto3 optional field gets its own synthetic oneof). Synthetic oneofs - // exist in the descriptor only, and do not generate any API. Synthetic oneofs - // must be ordered after all "real" oneofs. + // When proto3_optional is true, this field must be belong to a oneof to + // signal to old proto3 clients that presence is tracked for this field. This + // oneof is known as a "synthetic" oneof, and this field must be its sole + // member (each proto3 optional field gets its own synthetic oneof). Synthetic + // oneofs exist in the descriptor only, and do not generate any API. Synthetic + // oneofs must be ordered after all "real" oneofs. 
// // For message fields, proto3_optional doesn't create any semantic change, // since non-repeated message fields always track presence. However it still @@ -2182,16 +1711,12 @@ type FileOptions struct { // // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` - // A proto2 file can set this to true to opt in to UTF-8 checking for Java, - // which will throw an exception if invalid UTF-8 is parsed from the wire or - // assigned to a string field. - // - // TODO: clarify exactly what kinds of field types this option - // applies to, and update these docs accordingly. - // - // Proto3 files already perform these checks. Setting the option explicitly to - // false has no effect: it cannot be used to opt proto3 files out of UTF-8 - // checks. + // If set true, then the Java2 code generator will generate code that + // throws an exception whenever an attempt is made to assign a non-UTF-8 + // byte sequence to a string field. + // Message reflection will do the same. + // However, an extension field still accepts non-UTF-8 byte sequences. + // This option has no effect on when used with the lite runtime. JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"` OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"` // Sets the Go package where structs generated from this .proto will be @@ -2213,6 +1738,7 @@ type FileOptions struct { CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"` JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"` PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"` + PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"` // Is this file deprecated? // Depending on the target platform, this can emit Deprecated annotations // for everything in the file, or it will be completely ignored; in the very @@ -2246,8 +1772,6 @@ type FileOptions struct { // is empty. When this option is not set, the package name will be used for // determining the ruby package. RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"` - // Any features defined in the specific edition. - Features *FeatureSet `protobuf:"bytes,50,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. // See the documentation for the "Options" section above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` @@ -2261,6 +1785,7 @@ const ( Default_FileOptions_CcGenericServices = bool(false) Default_FileOptions_JavaGenericServices = bool(false) Default_FileOptions_PyGenericServices = bool(false) + Default_FileOptions_PhpGenericServices = bool(false) Default_FileOptions_Deprecated = bool(false) Default_FileOptions_CcEnableArenas = bool(true) ) @@ -2368,6 +1893,13 @@ func (x *FileOptions) GetPyGenericServices() bool { return Default_FileOptions_PyGenericServices } +func (x *FileOptions) GetPhpGenericServices() bool { + if x != nil && x.PhpGenericServices != nil { + return *x.PhpGenericServices + } + return Default_FileOptions_PhpGenericServices +} + func (x *FileOptions) GetDeprecated() bool { if x != nil && x.Deprecated != nil { return *x.Deprecated @@ -2431,13 +1963,6 @@ func (x *FileOptions) GetRubyPackage() string { return "" } -func (x *FileOptions) GetFeatures() *FeatureSet { - if x != nil { - return x.Features - } - return nil -} - func (x *FileOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2481,6 +2006,10 @@ type MessageOptions struct { // for the message, or it will be completely ignored; in the very least, // this is a formalization for deprecating messages. Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. + // // Whether the message is an automatically generated map entry type for the // maps field. // @@ -2501,10 +2030,6 @@ type MessageOptions struct { // use a native map in the target language to hold the keys and values. // The reflection APIs in such implementations still need to work as // if the field is a repeated message field. - // - // NOTE: Do not set the option in .proto files. Always use the maps syntax - // instead. The option should only be implicitly set by the proto compiler - // parser. MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` // Enable the legacy handling of JSON field name conflicts. This lowercases // and strips underscored from the fields before comparison in proto3 only. @@ -2514,13 +2039,11 @@ type MessageOptions struct { // This should only be used as a temporary measure against broken builds due // to the change in behavior for JSON field name conflicts. // - // TODO This is legacy behavior we plan to remove once downstream + // TODO(b/261750190) This is legacy behavior we plan to remove once downstream // teams have had time to migrate. // // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,11,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"` - // Any features defined in the specific edition. - Features *FeatureSet `protobuf:"bytes,12,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -2600,13 +2123,6 @@ func (x *MessageOptions) GetDeprecatedLegacyJsonFieldConflicts() bool { return false } -func (x *MessageOptions) GetFeatures() *FeatureSet { - if x != nil { - return x.Features - } - return nil -} - func (x *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2631,9 +2147,7 @@ type FieldOptions struct { // a more efficient representation on the wire. Rather than repeatedly // writing the tag and type for each element, the entire array is encoded as // a single length-delimited blob. In proto3, only explicit setting it to - // false will avoid using packed encoding. This option is prohibited in - // Editions, but the `repeated_field_encoding` feature can be used to control - // the behavior. + // false will avoid using packed encoding. Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` // The jstype option determines the JavaScript type used for values of the // field. The option is permitted only for 64 bit integral and fixed types @@ -2664,11 +2178,19 @@ type FieldOptions struct { // call from multiple threads concurrently, while non-const methods continue // to require exclusive access. // - // Note that lazy message fields are still eagerly verified to check - // ill-formed wireformat or missing required fields. Calling IsInitialized() - // on the outer message would fail if the inner message has missing required - // fields. Failed verification would result in parsing failure (except when - // uninitialized messages are acceptable). + // Note that implementations may choose not to check required fields within + // a lazy sub-message. That is, calling IsInitialized() on the outer message + // may return true even if the inner message has missing required fields. + // This is necessary because otherwise the inner message would have to be + // parsed in order to perform the check, defeating the purpose of lazy + // parsing. An implementation which chooses not to check required fields + // must be consistent about it. That is, for any particular sub-message, the + // implementation must either *always* check its required fields, or *never* + // check its required fields, regardless of whether or not the message has + // been parsed. + // + // As of May 2022, lazy verifies the contents of the byte stream during + // parsing. An invalid byte stream will cause the overall parsing to fail. Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` // unverified_lazy does no correctness checks on the byte stream. This should // only be used where lazy with verification is prohibitive for performance @@ -2683,13 +2205,11 @@ type FieldOptions struct { Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` // Indicate that the field value should not be printed out when using debug // formats, e.g. when the field contains sensitive credentials. 
- DebugRedact *bool `protobuf:"varint,16,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"` - Retention *FieldOptions_OptionRetention `protobuf:"varint,17,opt,name=retention,enum=google.protobuf.FieldOptions_OptionRetention" json:"retention,omitempty"` - Targets []FieldOptions_OptionTargetType `protobuf:"varint,19,rep,name=targets,enum=google.protobuf.FieldOptions_OptionTargetType" json:"targets,omitempty"` - EditionDefaults []*FieldOptions_EditionDefault `protobuf:"bytes,20,rep,name=edition_defaults,json=editionDefaults" json:"edition_defaults,omitempty"` - // Any features defined in the specific edition. - Features *FeatureSet `protobuf:"bytes,21,opt,name=features" json:"features,omitempty"` - FeatureSupport *FieldOptions_FeatureSupport `protobuf:"bytes,22,opt,name=feature_support,json=featureSupport" json:"feature_support,omitempty"` + DebugRedact *bool `protobuf:"varint,16,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"` + Retention *FieldOptions_OptionRetention `protobuf:"varint,17,opt,name=retention,enum=google.protobuf.FieldOptions_OptionRetention" json:"retention,omitempty"` + // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. + Target *FieldOptions_OptionTargetType `protobuf:"varint,18,opt,name=target,enum=google.protobuf.FieldOptions_OptionTargetType" json:"target,omitempty"` + Targets []FieldOptions_OptionTargetType `protobuf:"varint,19,rep,name=targets,enum=google.protobuf.FieldOptions_OptionTargetType" json:"targets,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -2800,30 +2320,17 @@ func (x *FieldOptions) GetRetention() FieldOptions_OptionRetention { return FieldOptions_RETENTION_UNKNOWN } -func (x *FieldOptions) GetTargets() []FieldOptions_OptionTargetType { - if x != nil { - return x.Targets - } - return nil -} - -func (x *FieldOptions) GetEditionDefaults() []*FieldOptions_EditionDefault { - if x != nil { - return x.EditionDefaults - } - return nil -} - -func (x *FieldOptions) GetFeatures() *FeatureSet { - if x != nil { - return x.Features +// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. +func (x *FieldOptions) GetTarget() FieldOptions_OptionTargetType { + if x != nil && x.Target != nil { + return *x.Target } - return nil + return FieldOptions_TARGET_TYPE_UNKNOWN } -func (x *FieldOptions) GetFeatureSupport() *FieldOptions_FeatureSupport { +func (x *FieldOptions) GetTargets() []FieldOptions_OptionTargetType { if x != nil { - return x.FeatureSupport + return x.Targets } return nil } @@ -2841,8 +2348,6 @@ type OneofOptions struct { unknownFields protoimpl.UnknownFields extensionFields protoimpl.ExtensionFields - // Any features defined in the specific edition. - Features *FeatureSet `protobuf:"bytes,1,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -2879,13 +2384,6 @@ func (*OneofOptions) Descriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{13} } -func (x *OneofOptions) GetFeatures() *FeatureSet { - if x != nil { - return x.Features - } - return nil -} - func (x *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2911,13 +2409,11 @@ type EnumOptions struct { // and strips underscored from the fields before comparison in proto3 only. // The new behavior takes `json_name` into account and applies to proto2 as // well. - // TODO Remove this legacy behavior once downstream teams have + // TODO(b/261750190) Remove this legacy behavior once downstream teams have // had time to migrate. // // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,6,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"` - // Any features defined in the specific edition. - Features *FeatureSet `protobuf:"bytes,7,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -2981,13 +2477,6 @@ func (x *EnumOptions) GetDeprecatedLegacyJsonFieldConflicts() bool { return false } -func (x *EnumOptions) GetFeatures() *FeatureSet { - if x != nil { - return x.Features - } - return nil -} - func (x *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -3006,22 +2495,13 @@ type EnumValueOptions struct { // for the enum value, or it will be completely ignored; in the very least, // this is a formalization for deprecating enum values. Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // Any features defined in the specific edition. - Features *FeatureSet `protobuf:"bytes,2,opt,name=features" json:"features,omitempty"` - // Indicate that fields annotated with this enum value should not be printed - // out when using debug formats, e.g. when the field contains sensitive - // credentials. - DebugRedact *bool `protobuf:"varint,3,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"` - // Information about the support window of a feature value. - FeatureSupport *FieldOptions_FeatureSupport `protobuf:"bytes,4,opt,name=feature_support,json=featureSupport" json:"feature_support,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } // Default values for EnumValueOptions fields. 
const ( - Default_EnumValueOptions_Deprecated = bool(false) - Default_EnumValueOptions_DebugRedact = bool(false) + Default_EnumValueOptions_Deprecated = bool(false) ) func (x *EnumValueOptions) Reset() { @@ -3063,27 +2543,6 @@ func (x *EnumValueOptions) GetDeprecated() bool { return Default_EnumValueOptions_Deprecated } -func (x *EnumValueOptions) GetFeatures() *FeatureSet { - if x != nil { - return x.Features - } - return nil -} - -func (x *EnumValueOptions) GetDebugRedact() bool { - if x != nil && x.DebugRedact != nil { - return *x.DebugRedact - } - return Default_EnumValueOptions_DebugRedact -} - -func (x *EnumValueOptions) GetFeatureSupport() *FieldOptions_FeatureSupport { - if x != nil { - return x.FeatureSupport - } - return nil -} - func (x *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -3097,8 +2556,6 @@ type ServiceOptions struct { unknownFields protoimpl.UnknownFields extensionFields protoimpl.ExtensionFields - // Any features defined in the specific edition. - Features *FeatureSet `protobuf:"bytes,34,opt,name=features" json:"features,omitempty"` // Is this service deprecated? // Depending on the target platform, this can emit Deprecated annotations // for the service, or it will be completely ignored; in the very least, @@ -3145,13 +2602,6 @@ func (*ServiceOptions) Descriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{16} } -func (x *ServiceOptions) GetFeatures() *FeatureSet { - if x != nil { - return x.Features - } - return nil -} - func (x *ServiceOptions) GetDeprecated() bool { if x != nil && x.Deprecated != nil { return *x.Deprecated @@ -3178,8 +2628,6 @@ type MethodOptions struct { // this is a formalization for deprecating methods. Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` - // Any features defined in the specific edition. - Features *FeatureSet `protobuf:"bytes,35,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -3236,13 +2684,6 @@ func (x *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel { return Default_MethodOptions_IdempotencyLevel } -func (x *MethodOptions) GetFeatures() *FeatureSet { - if x != nil { - return x.Features - } - return nil -} - func (x *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -3353,171 +2794,6 @@ func (x *UninterpretedOption) GetAggregateValue() string { return "" } -// TODO Enums in C++ gencode (and potentially other languages) are -// not well scoped. This means that each of the feature enums below can clash -// with each other. The short names we've chosen maximize call-site -// readability, but leave us very open to this scenario. A future feature will -// be designed and implemented to handle this, hopefully before we ever hit a -// conflict here. 
-type FeatureSet struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - extensionFields protoimpl.ExtensionFields - - FieldPresence *FeatureSet_FieldPresence `protobuf:"varint,1,opt,name=field_presence,json=fieldPresence,enum=google.protobuf.FeatureSet_FieldPresence" json:"field_presence,omitempty"` - EnumType *FeatureSet_EnumType `protobuf:"varint,2,opt,name=enum_type,json=enumType,enum=google.protobuf.FeatureSet_EnumType" json:"enum_type,omitempty"` - RepeatedFieldEncoding *FeatureSet_RepeatedFieldEncoding `protobuf:"varint,3,opt,name=repeated_field_encoding,json=repeatedFieldEncoding,enum=google.protobuf.FeatureSet_RepeatedFieldEncoding" json:"repeated_field_encoding,omitempty"` - Utf8Validation *FeatureSet_Utf8Validation `protobuf:"varint,4,opt,name=utf8_validation,json=utf8Validation,enum=google.protobuf.FeatureSet_Utf8Validation" json:"utf8_validation,omitempty"` - MessageEncoding *FeatureSet_MessageEncoding `protobuf:"varint,5,opt,name=message_encoding,json=messageEncoding,enum=google.protobuf.FeatureSet_MessageEncoding" json:"message_encoding,omitempty"` - JsonFormat *FeatureSet_JsonFormat `protobuf:"varint,6,opt,name=json_format,json=jsonFormat,enum=google.protobuf.FeatureSet_JsonFormat" json:"json_format,omitempty"` -} - -func (x *FeatureSet) Reset() { - *x = FeatureSet{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *FeatureSet) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*FeatureSet) ProtoMessage() {} - -func (x *FeatureSet) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[19] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use FeatureSet.ProtoReflect.Descriptor instead. -func (*FeatureSet) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19} -} - -func (x *FeatureSet) GetFieldPresence() FeatureSet_FieldPresence { - if x != nil && x.FieldPresence != nil { - return *x.FieldPresence - } - return FeatureSet_FIELD_PRESENCE_UNKNOWN -} - -func (x *FeatureSet) GetEnumType() FeatureSet_EnumType { - if x != nil && x.EnumType != nil { - return *x.EnumType - } - return FeatureSet_ENUM_TYPE_UNKNOWN -} - -func (x *FeatureSet) GetRepeatedFieldEncoding() FeatureSet_RepeatedFieldEncoding { - if x != nil && x.RepeatedFieldEncoding != nil { - return *x.RepeatedFieldEncoding - } - return FeatureSet_REPEATED_FIELD_ENCODING_UNKNOWN -} - -func (x *FeatureSet) GetUtf8Validation() FeatureSet_Utf8Validation { - if x != nil && x.Utf8Validation != nil { - return *x.Utf8Validation - } - return FeatureSet_UTF8_VALIDATION_UNKNOWN -} - -func (x *FeatureSet) GetMessageEncoding() FeatureSet_MessageEncoding { - if x != nil && x.MessageEncoding != nil { - return *x.MessageEncoding - } - return FeatureSet_MESSAGE_ENCODING_UNKNOWN -} - -func (x *FeatureSet) GetJsonFormat() FeatureSet_JsonFormat { - if x != nil && x.JsonFormat != nil { - return *x.JsonFormat - } - return FeatureSet_JSON_FORMAT_UNKNOWN -} - -// A compiled specification for the defaults of a set of features. These -// messages are generated from FeatureSet extensions and can be used to seed -// feature resolution. 
The resolution with this object becomes a simple search -// for the closest matching edition, followed by proto merges. -type FeatureSetDefaults struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Defaults []*FeatureSetDefaults_FeatureSetEditionDefault `protobuf:"bytes,1,rep,name=defaults" json:"defaults,omitempty"` - // The minimum supported edition (inclusive) when this was constructed. - // Editions before this will not have defaults. - MinimumEdition *Edition `protobuf:"varint,4,opt,name=minimum_edition,json=minimumEdition,enum=google.protobuf.Edition" json:"minimum_edition,omitempty"` - // The maximum known edition (inclusive) when this was constructed. Editions - // after this will not have reliable defaults. - MaximumEdition *Edition `protobuf:"varint,5,opt,name=maximum_edition,json=maximumEdition,enum=google.protobuf.Edition" json:"maximum_edition,omitempty"` -} - -func (x *FeatureSetDefaults) Reset() { - *x = FeatureSetDefaults{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *FeatureSetDefaults) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*FeatureSetDefaults) ProtoMessage() {} - -func (x *FeatureSetDefaults) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[20] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use FeatureSetDefaults.ProtoReflect.Descriptor instead. -func (*FeatureSetDefaults) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20} -} - -func (x *FeatureSetDefaults) GetDefaults() []*FeatureSetDefaults_FeatureSetEditionDefault { - if x != nil { - return x.Defaults - } - return nil -} - -func (x *FeatureSetDefaults) GetMinimumEdition() Edition { - if x != nil && x.MinimumEdition != nil { - return *x.MinimumEdition - } - return Edition_EDITION_UNKNOWN -} - -func (x *FeatureSetDefaults) GetMaximumEdition() Edition { - if x != nil && x.MaximumEdition != nil { - return *x.MaximumEdition - } - return Edition_EDITION_UNKNOWN -} - // Encapsulates information about the original source file from which a // FileDescriptorProto was generated. type SourceCodeInfo struct { @@ -3579,7 +2855,7 @@ type SourceCodeInfo struct { func (x *SourceCodeInfo) Reset() { *x = SourceCodeInfo{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[21] + mi := &file_google_protobuf_descriptor_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3592,7 +2868,7 @@ func (x *SourceCodeInfo) String() string { func (*SourceCodeInfo) ProtoMessage() {} func (x *SourceCodeInfo) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[21] + mi := &file_google_protobuf_descriptor_proto_msgTypes[19] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3605,7 +2881,7 @@ func (x *SourceCodeInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use SourceCodeInfo.ProtoReflect.Descriptor instead. 
func (*SourceCodeInfo) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{21} + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19} } func (x *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { @@ -3631,7 +2907,7 @@ type GeneratedCodeInfo struct { func (x *GeneratedCodeInfo) Reset() { *x = GeneratedCodeInfo{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[22] + mi := &file_google_protobuf_descriptor_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3644,7 +2920,7 @@ func (x *GeneratedCodeInfo) String() string { func (*GeneratedCodeInfo) ProtoMessage() {} func (x *GeneratedCodeInfo) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[22] + mi := &file_google_protobuf_descriptor_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3657,7 +2933,7 @@ func (x *GeneratedCodeInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use GeneratedCodeInfo.ProtoReflect.Descriptor instead. func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{22} + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20} } func (x *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { @@ -3680,7 +2956,7 @@ type DescriptorProto_ExtensionRange struct { func (x *DescriptorProto_ExtensionRange) Reset() { *x = DescriptorProto_ExtensionRange{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + mi := &file_google_protobuf_descriptor_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3693,7 +2969,7 @@ func (x *DescriptorProto_ExtensionRange) String() string { func (*DescriptorProto_ExtensionRange) ProtoMessage() {} func (x *DescriptorProto_ExtensionRange) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + mi := &file_google_protobuf_descriptor_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3745,7 +3021,7 @@ type DescriptorProto_ReservedRange struct { func (x *DescriptorProto_ReservedRange) Reset() { *x = DescriptorProto_ReservedRange{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[24] + mi := &file_google_protobuf_descriptor_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3758,7 +3034,7 @@ func (x *DescriptorProto_ReservedRange) String() string { func (*DescriptorProto_ReservedRange) ProtoMessage() {} func (x *DescriptorProto_ReservedRange) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[24] + mi := &file_google_protobuf_descriptor_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3802,6 +3078,10 @@ type ExtensionRangeOptions_Declaration struct { // Metadata.type, Declaration.type must have a leading dot for messages // and enums. Type *string `protobuf:"bytes,3,opt,name=type" json:"type,omitempty"` + // Deprecated. Please use "repeated". + // + // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. 
+ IsRepeated *bool `protobuf:"varint,4,opt,name=is_repeated,json=isRepeated" json:"is_repeated,omitempty"` // If true, indicates that the number is reserved in the extension range, // and any extension field with the number will fail to compile. Set this // when a declared extension field is deleted. @@ -3814,7 +3094,7 @@ type ExtensionRangeOptions_Declaration struct { func (x *ExtensionRangeOptions_Declaration) Reset() { *x = ExtensionRangeOptions_Declaration{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[25] + mi := &file_google_protobuf_descriptor_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3827,7 +3107,7 @@ func (x *ExtensionRangeOptions_Declaration) String() string { func (*ExtensionRangeOptions_Declaration) ProtoMessage() {} func (x *ExtensionRangeOptions_Declaration) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[25] + mi := &file_google_protobuf_descriptor_proto_msgTypes[23] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3864,6 +3144,14 @@ func (x *ExtensionRangeOptions_Declaration) GetType() string { return "" } +// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. +func (x *ExtensionRangeOptions_Declaration) GetIsRepeated() bool { + if x != nil && x.IsRepeated != nil { + return *x.IsRepeated + } + return false +} + func (x *ExtensionRangeOptions_Declaration) GetReserved() bool { if x != nil && x.Reserved != nil { return *x.Reserved @@ -3896,7 +3184,7 @@ type EnumDescriptorProto_EnumReservedRange struct { func (x *EnumDescriptorProto_EnumReservedRange) Reset() { *x = EnumDescriptorProto_EnumReservedRange{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[26] + mi := &file_google_protobuf_descriptor_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3909,7 +3197,7 @@ func (x *EnumDescriptorProto_EnumReservedRange) String() string { func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} func (x *EnumDescriptorProto_EnumReservedRange) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[26] + mi := &file_google_protobuf_descriptor_proto_msgTypes[24] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3939,143 +3227,6 @@ func (x *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { return 0 } -type FieldOptions_EditionDefault struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` - Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` // Textproto value. 
-} - -func (x *FieldOptions_EditionDefault) Reset() { - *x = FieldOptions_EditionDefault{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[27] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *FieldOptions_EditionDefault) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*FieldOptions_EditionDefault) ProtoMessage() {} - -func (x *FieldOptions_EditionDefault) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[27] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use FieldOptions_EditionDefault.ProtoReflect.Descriptor instead. -func (*FieldOptions_EditionDefault) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 0} -} - -func (x *FieldOptions_EditionDefault) GetEdition() Edition { - if x != nil && x.Edition != nil { - return *x.Edition - } - return Edition_EDITION_UNKNOWN -} - -func (x *FieldOptions_EditionDefault) GetValue() string { - if x != nil && x.Value != nil { - return *x.Value - } - return "" -} - -// Information about the support window of a feature. -type FieldOptions_FeatureSupport struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The edition that this feature was first available in. In editions - // earlier than this one, the default assigned to EDITION_LEGACY will be - // used, and proto files will not be able to override it. - EditionIntroduced *Edition `protobuf:"varint,1,opt,name=edition_introduced,json=editionIntroduced,enum=google.protobuf.Edition" json:"edition_introduced,omitempty"` - // The edition this feature becomes deprecated in. Using this after this - // edition may trigger warnings. - EditionDeprecated *Edition `protobuf:"varint,2,opt,name=edition_deprecated,json=editionDeprecated,enum=google.protobuf.Edition" json:"edition_deprecated,omitempty"` - // The deprecation warning text if this feature is used after the edition it - // was marked deprecated in. - DeprecationWarning *string `protobuf:"bytes,3,opt,name=deprecation_warning,json=deprecationWarning" json:"deprecation_warning,omitempty"` - // The edition this feature is no longer available in. In editions after - // this one, the last default assigned will be used, and proto files will - // not be able to override it. 
- EditionRemoved *Edition `protobuf:"varint,4,opt,name=edition_removed,json=editionRemoved,enum=google.protobuf.Edition" json:"edition_removed,omitempty"` -} - -func (x *FieldOptions_FeatureSupport) Reset() { - *x = FieldOptions_FeatureSupport{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[28] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *FieldOptions_FeatureSupport) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*FieldOptions_FeatureSupport) ProtoMessage() {} - -func (x *FieldOptions_FeatureSupport) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[28] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use FieldOptions_FeatureSupport.ProtoReflect.Descriptor instead. -func (*FieldOptions_FeatureSupport) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 1} -} - -func (x *FieldOptions_FeatureSupport) GetEditionIntroduced() Edition { - if x != nil && x.EditionIntroduced != nil { - return *x.EditionIntroduced - } - return Edition_EDITION_UNKNOWN -} - -func (x *FieldOptions_FeatureSupport) GetEditionDeprecated() Edition { - if x != nil && x.EditionDeprecated != nil { - return *x.EditionDeprecated - } - return Edition_EDITION_UNKNOWN -} - -func (x *FieldOptions_FeatureSupport) GetDeprecationWarning() string { - if x != nil && x.DeprecationWarning != nil { - return *x.DeprecationWarning - } - return "" -} - -func (x *FieldOptions_FeatureSupport) GetEditionRemoved() Edition { - if x != nil && x.EditionRemoved != nil { - return *x.EditionRemoved - } - return Edition_EDITION_UNKNOWN -} - // The name of the uninterpreted option. Each string represents a segment in // a dot-separated name. is_extension is true iff a segment represents an // extension (denoted with parentheses in options specs in .proto files). @@ -4093,7 +3244,7 @@ type UninterpretedOption_NamePart struct { func (x *UninterpretedOption_NamePart) Reset() { *x = UninterpretedOption_NamePart{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[29] + mi := &file_google_protobuf_descriptor_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4106,7 +3257,7 @@ func (x *UninterpretedOption_NamePart) String() string { func (*UninterpretedOption_NamePart) ProtoMessage() {} func (x *UninterpretedOption_NamePart) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[29] + mi := &file_google_protobuf_descriptor_proto_msgTypes[25] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4136,75 +3287,6 @@ func (x *UninterpretedOption_NamePart) GetIsExtension() bool { return false } -// A map from every known edition with a unique set of defaults to its -// defaults. Not all editions may be contained here. For a given edition, -// the defaults at the closest matching edition ordered at or before it should -// be used. This field must be in strict ascending order by edition. 
-type FeatureSetDefaults_FeatureSetEditionDefault struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` - // Defaults of features that can be overridden in this edition. - OverridableFeatures *FeatureSet `protobuf:"bytes,4,opt,name=overridable_features,json=overridableFeatures" json:"overridable_features,omitempty"` - // Defaults of features that can't be overridden in this edition. - FixedFeatures *FeatureSet `protobuf:"bytes,5,opt,name=fixed_features,json=fixedFeatures" json:"fixed_features,omitempty"` -} - -func (x *FeatureSetDefaults_FeatureSetEditionDefault) Reset() { - *x = FeatureSetDefaults_FeatureSetEditionDefault{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[30] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *FeatureSetDefaults_FeatureSetEditionDefault) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*FeatureSetDefaults_FeatureSetEditionDefault) ProtoMessage() {} - -func (x *FeatureSetDefaults_FeatureSetEditionDefault) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[30] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use FeatureSetDefaults_FeatureSetEditionDefault.ProtoReflect.Descriptor instead. -func (*FeatureSetDefaults_FeatureSetEditionDefault) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20, 0} -} - -func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetEdition() Edition { - if x != nil && x.Edition != nil { - return *x.Edition - } - return Edition_EDITION_UNKNOWN -} - -func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetOverridableFeatures() *FeatureSet { - if x != nil { - return x.OverridableFeatures - } - return nil -} - -func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetFixedFeatures() *FeatureSet { - if x != nil { - return x.FixedFeatures - } - return nil -} - type SourceCodeInfo_Location struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -4214,7 +3296,7 @@ type SourceCodeInfo_Location struct { // location. // // Each element is a field number or an index. They form a path from - // the root FileDescriptorProto to the place where the definition appears. + // the root FileDescriptorProto to the place where the definition occurs. 
// For example, this path: // // [ 4, 3, 2, 7, 1 ] @@ -4306,7 +3388,7 @@ type SourceCodeInfo_Location struct { func (x *SourceCodeInfo_Location) Reset() { *x = SourceCodeInfo_Location{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[31] + mi := &file_google_protobuf_descriptor_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4319,7 +3401,7 @@ func (x *SourceCodeInfo_Location) String() string { func (*SourceCodeInfo_Location) ProtoMessage() {} func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[31] + mi := &file_google_protobuf_descriptor_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4332,7 +3414,7 @@ func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message { // Deprecated: Use SourceCodeInfo_Location.ProtoReflect.Descriptor instead. func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{21, 0} + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0} } func (x *SourceCodeInfo_Location) GetPath() []int32 { @@ -4393,7 +3475,7 @@ type GeneratedCodeInfo_Annotation struct { func (x *GeneratedCodeInfo_Annotation) Reset() { *x = GeneratedCodeInfo_Annotation{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[32] + mi := &file_google_protobuf_descriptor_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4406,7 +3488,7 @@ func (x *GeneratedCodeInfo_Annotation) String() string { func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[32] + mi := &file_google_protobuf_descriptor_proto_msgTypes[27] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4419,7 +3501,7 @@ func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message { // Deprecated: Use GeneratedCodeInfo_Annotation.ProtoReflect.Descriptor instead. 
func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{22, 0} + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20, 0} } func (x *GeneratedCodeInfo_Annotation) GetPath() []int32 { @@ -4468,7 +3550,7 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x04, 0x66, 0x69, - 0x6c, 0x65, 0x22, 0x98, 0x05, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x6c, 0x65, 0x22, 0xfe, 0x04, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, @@ -4506,250 +3588,250 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x0c, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x06, - 0x0a, 0x0f, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, - 0x6c, 0x64, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, - 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, - 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, + 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x06, 0x0a, 0x0f, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 
0x05, 0x66, + 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, + 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, - 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, - 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, - 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, - 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, + 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x64, 0x65, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, + 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 
0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, + 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, + 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, - 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, - 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, - 0x5f, 0x64, 0x65, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, - 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x52, 0x09, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, - 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, - 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, - 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, - 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, - 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, - 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, - 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x1a, 0x37, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, - 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0xcc, 0x04, 0x0a, 0x15, 0x45, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 
0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a, - 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, - 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x61, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0x88, 0x01, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x63, - 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x73, 0x12, 0x6d, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, - 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, - 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x3a, 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x42, 0x03, 0x88, - 0x01, 0x02, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x1a, 0x94, 0x01, 0x0a, 0x0b, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c, - 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, - 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, - 0x64, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x34, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, - 0x44, 0x45, 0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, - 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08, - 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 
0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, - 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, - 0x12, 0x3e, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, - 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, - 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, - 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, - 0x0a, 0x0b, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, - 0x1b, 0x0a, 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, - 0x02, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, - 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 
0x0a, 0x0b, - 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, - 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, - 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, - 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, - 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, - 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, - 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, - 0x10, 0x0f, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, - 0x44, 0x36, 0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, - 0x4e, 0x54, 0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, - 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, - 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, - 0x41, 0x4c, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, - 0x50, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, - 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x22, 0x63, 0x0a, 0x14, - 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, - 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x22, 0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, - 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, - 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x74, 0x6f, 
0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, - 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, - 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, - 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, - 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, - 0x12, 0x3b, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, - 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, + 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x37, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, + 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, + 0xad, 0x04, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, + 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 
0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, + 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a, 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0x88, 0x01, + 0x02, 0x52, 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x68, + 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x56, 0x65, 0x72, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x3a, 0x0a, + 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xb3, 0x01, 0x0a, 0x0b, 0x44, 0x65, 0x63, + 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, + 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x12, 0x23, 0x0a, 0x0b, 0x69, 0x73, 0x5f, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0a, 0x69, 0x73, 0x52, 0x65, + 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x22, 0x34, + 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x45, 0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49, + 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, + 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, + 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, + 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 
0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, + 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70, + 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, + 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, + 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, + 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, + 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, + 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, + 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, + 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, + 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, + 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, + 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, + 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, + 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, + 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, + 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, + 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, + 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, + 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, + 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, + 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 
0x5f, + 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, + 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, + 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, + 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, + 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c, + 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x12, + 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, 0x45, 0x41, 0x54, 0x45, + 0x44, 0x10, 0x03, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, + 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75, + 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, + 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, + 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d, + 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, + 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, + 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, + 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, + 0x01, 0x20, 0x01, 
0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, + 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83, + 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74, + 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, + 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, - 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, - 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, + 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, + 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 
0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, - 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, - 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, - 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, - 0x30, 0x0a, 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, - 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x52, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, - 0x67, 0x12, 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, - 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, - 0x73, 0x65, 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, - 0x69, 0x6e, 0x67, 0x22, 0xad, 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, - 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, - 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, - 0x75, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, - 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, - 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, - 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, - 0x76, 0x61, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, - 0x44, 0x0a, 0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, - 0x5f, 0x65, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, - 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, - 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, - 0x64, 0x48, 0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, - 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, - 0x1b, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, - 0x76, 0x61, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, - 0x38, 0x12, 0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 
0x65, 0x5f, 0x66, 0x6f, - 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, - 0x64, 0x65, 0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, - 0x69, 0x7a, 0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, - 0x6b, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, - 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, - 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, - 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, - 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, - 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, - 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, + 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x22, 0x91, 0x09, 0x0a, 0x0b, 0x46, + 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, + 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, + 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, + 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76, + 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, + 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, + 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, + 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, 0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61, + 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, + 
0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45, + 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, 0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16, + 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63, + 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, + 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12, 0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69, + 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, + 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, + 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, + 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13, + 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, + 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, + 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47, + 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35, + 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, + 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x14, 0x70, 0x68, 0x70, 0x5f, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x2a, 0x20, + 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x12, 0x70, 0x68, 0x70, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, @@ -4774,130 +3856,88 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x70, 0x68, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x62, 0x79, - 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 
0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, - 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, - 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, - 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, - 0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, - 0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, - 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, - 0x02, 0x4a, 0x04, 0x08, 0x2a, 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x52, 0x14, 0x70, - 0x68, 0x70, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x73, 0x22, 0xf4, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, - 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, - 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, - 0x72, 0x6d, 0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, - 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, - 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, - 0x61, 0x6c, 0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, - 0x6f, 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, - 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, - 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, - 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, - 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, - 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, - 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, - 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, - 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, - 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 
0x6e, 0x69, 0x6e, 0x74, + 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, - 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, - 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0x9d, 0x0d, 0x0a, 0x0c, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, - 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, - 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, - 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, - 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, - 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, - 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, - 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, - 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, - 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, - 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, - 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, - 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, - 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, - 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, - 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, - 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, - 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, - 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, - 0x69, 
0x6f, 0x6e, 0x12, 0x48, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, - 0x20, 0x03, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, - 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a, - 0x10, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, - 0x73, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, - 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, - 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, - 0x55, 0x0a, 0x0f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, - 0x72, 0x74, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, + 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, + 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, + 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, + 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x22, 0xbb, + 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, + 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, + 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x5f, 0x64, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, + 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x25, 0x0a, + 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, + 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, 0x6e, 0x74, 0x72, + 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, + 0x6c, 0x65, 0x67, 0x61, 0x63, 
0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, + 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, + 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, + 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, + 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0x85, 0x09, 0x0a, + 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, + 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, + 0x65, 0x3a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, + 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, - 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, - 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, - 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, - 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, + 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, + 0x65, 0x12, 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, + 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, + 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, + 0x0f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, + 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, + 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 
0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, + 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, + 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, + 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, + 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, + 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, + 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, + 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4a, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, + 0x12, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, + 0x74, 0x54, 0x79, 0x70, 0x65, 0x42, 0x02, 0x18, 0x01, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, + 0x74, 0x12, 0x48, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, 0x20, 0x03, + 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, + 0x70, 0x65, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, + 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x1a, 0x5a, 0x0a, 0x0e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, - 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, - 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x96, 0x02, 0x0a, - 0x0e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, - 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x72, 0x6f, - 0x64, 0x75, 0x63, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, - 0x74, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x12, 0x65, 
0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, - 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, - 0x64, 0x12, 0x2f, 0x0a, 0x13, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x77, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, - 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x72, 0x6e, 0x69, - 0x6e, 0x67, 0x12, 0x41, 0x0a, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, - 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, + 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45, 0x43, 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, @@ -4927,18 +3967,14 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x10, 0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, - 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f, - 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, + 0x08, 0x04, 0x10, 0x05, 0x22, 0x73, 0x0a, 0x0c, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, - 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e, + 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x98, 0x02, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 
0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, @@ -4949,284 +3985,130 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, - 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, - 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, - 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0xd8, 0x02, - 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, - 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, - 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, - 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x55, 0x0a, 0x0f, - 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, - 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, - 0x6f, 0x72, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 
0x74, - 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, - 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, - 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, - 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75, - 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, - 0x22, 0x99, 0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, - 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, - 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, 0x69, 0x64, 0x65, - 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x22, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, - 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, - 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, 0x64, 0x65, 0x6d, - 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x37, 0x0a, 0x08, - 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, - 0x50, 0x0a, 0x10, 
0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, - 0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, - 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, - 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, - 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, - 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, - 0x13, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, - 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, - 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, - 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, - 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, - 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, - 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, - 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, - 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, - 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, - 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, - 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, - 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, - 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, - 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x0a, 0x0a, 0x0a, 0x46, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x91, 0x01, 0x0a, 0x0e, 0x66, 0x69, 0x65, - 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 
0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x3f, 0x88, 0x01, - 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, - 0x49, 0x43, 0x49, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49, 0x4d, 0x50, 0x4c, - 0x49, 0x43, 0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, - 0x49, 0x43, 0x49, 0x54, 0x18, 0xe8, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0d, 0x66, - 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x6c, 0x0a, 0x09, - 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, + 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, + 0x08, 0x05, 0x10, 0x06, 0x22, 0x9e, 0x01, 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, + 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, + 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, + 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x75, - 0x6d, 0x54, 0x79, 0x70, 0x65, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, - 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, - 0x09, 0x12, 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, - 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x98, 0x01, 0x0a, 0x17, 0x72, - 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x65, 0x6e, - 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, - 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, - 0x2d, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, - 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x50, - 0x41, 0x43, 0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x15, - 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, - 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x7e, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x76, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 
0x01, 0x28, 0x0e, 0x32, 0x2a, + 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, + 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9c, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, + 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, + 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, + 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, + 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, 0x74, 0x66, 0x38, - 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, - 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x18, 0xe6, - 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, 0xe7, 0x07, 0xb2, - 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0e, 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7e, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x26, 0x88, 0x01, - 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, 0x45, 0x4e, 0x47, - 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xb2, 0x01, - 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, - 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x82, 0x01, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, - 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, - 0x6d, 0x61, 0x74, 0x42, 0x39, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x06, 0x98, 0x01, - 0x01, 0xa2, 0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, - 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, - 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0a, - 0x6a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, 0x0d, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x46, - 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, 0x5f, 0x55, 0x4e, - 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, - 0x43, 0x49, 0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 
0x49, - 0x54, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x52, 0x45, - 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, 0x6e, 0x75, 0x6d, - 0x54, 0x79, 0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4f, - 0x50, 0x45, 0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, - 0x02, 0x22, 0x56, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45, - 0x50, 0x45, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x45, 0x4e, 0x43, - 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, - 0x0a, 0x0a, 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x45, - 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x49, 0x0a, 0x0e, 0x55, 0x74, 0x66, - 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, - 0x54, 0x46, 0x38, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, - 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x56, 0x45, 0x52, 0x49, - 0x46, 0x59, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x03, 0x22, 0x04, - 0x08, 0x01, 0x10, 0x01, 0x22, 0x53, 0x0a, 0x0f, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, - 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, - 0x47, 0x45, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, - 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, - 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, - 0x4c, 0x49, 0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, 0x02, 0x22, 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, - 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x17, 0x0a, 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, - 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, - 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, - 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, - 0x54, 0x10, 0x02, 0x2a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0x8b, 0x4e, 0x2a, 0x06, 0x08, 0x8b, 0x4e, - 0x10, 0x90, 0x4e, 0x2a, 0x06, 0x08, 0x90, 0x4e, 0x10, 0x91, 0x4e, 0x4a, 0x06, 0x08, 0xe7, 0x07, - 0x10, 0xe8, 0x07, 0x22, 0xef, 0x03, 0x0a, 0x12, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, - 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x08, 0x64, 0x65, - 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, - 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x08, 0x64, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, - 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, + 0x2e, 0x55, 0x6e, 
0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, + 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, + 0x80, 0x80, 0x80, 0x02, 0x22, 0xe0, 0x02, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, + 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, + 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, + 0x11, 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, + 0x65, 0x6c, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, + 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, + 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, + 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, + 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, + 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, + 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x50, 0x0a, 0x10, 0x49, 0x64, + 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x17, + 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, + 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4e, 0x4f, 0x5f, 0x53, 0x49, + 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, + 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, + 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, 0x13, 0x55, 0x6e, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, - 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, - 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, - 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xf8, 0x01, 0x0a, 0x18, 0x46, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 
0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, 0x14, 0x6f, - 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x13, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61, - 0x62, 0x6c, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x42, 0x0a, 0x0e, 0x66, - 0x69, 0x78, 0x65, 0x64, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, - 0x52, 0x0d, 0x66, 0x69, 0x78, 0x65, 0x64, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x4a, - 0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x08, 0x66, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, - 0x01, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, - 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, - 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, - 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, - 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, - 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, - 0x6e, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, - 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, - 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, - 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, - 0xd0, 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, - 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 
0x2e, 0x47, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, - 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, - 0x62, 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, - 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, - 0x03, 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, - 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, - 0x6e, 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, - 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, - 0x10, 0x02, 0x2a, 0xa7, 0x02, 0x0a, 0x07, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, - 0x0a, 0x0f, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, - 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4c, - 0x45, 0x47, 0x41, 0x43, 0x59, 0x10, 0x84, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, - 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, 0x13, 0x0a, - 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x10, - 0xe7, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, - 0x32, 0x33, 0x10, 0xe8, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, - 0x5f, 0x32, 0x30, 0x32, 0x34, 0x10, 0xe9, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, - 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, - 0x01, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x5f, 0x54, - 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, - 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, 0x53, 0x54, - 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, - 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, - 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, - 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, - 0x4e, 0x4c, 0x59, 0x10, 0x9f, 0x8d, 0x06, 0x12, 0x13, 0x0a, 0x0b, 0x45, 0x44, 0x49, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x10, 0xff, 0xff, 0xff, 0xff, 0x07, 0x42, 0x7e, 0x0a, 
0x13, - 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, - 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, + 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, + 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, + 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e, + 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, + 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75, + 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, + 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, + 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, + 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, + 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, + 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72, + 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, + 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, + 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, + 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 
0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, + 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, + 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, + 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, + 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, + 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, + 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, + 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, + 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, + 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, + 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd0, + 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, + 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, + 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, + 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, + 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, + 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, + 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, 0x73, + 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, + 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, + 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, + 0x02, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 
0x01, 0x5a, 0x2d, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, + 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, } var ( @@ -5241,143 +4123,103 @@ func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte { return file_google_protobuf_descriptor_proto_rawDescData } -var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 17) -var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 33) -var file_google_protobuf_descriptor_proto_goTypes = []any{ - (Edition)(0), // 0: google.protobuf.Edition - (ExtensionRangeOptions_VerificationState)(0), // 1: google.protobuf.ExtensionRangeOptions.VerificationState - (FieldDescriptorProto_Type)(0), // 2: google.protobuf.FieldDescriptorProto.Type - (FieldDescriptorProto_Label)(0), // 3: google.protobuf.FieldDescriptorProto.Label - (FileOptions_OptimizeMode)(0), // 4: google.protobuf.FileOptions.OptimizeMode - (FieldOptions_CType)(0), // 5: google.protobuf.FieldOptions.CType - (FieldOptions_JSType)(0), // 6: google.protobuf.FieldOptions.JSType - (FieldOptions_OptionRetention)(0), // 7: google.protobuf.FieldOptions.OptionRetention - (FieldOptions_OptionTargetType)(0), // 8: google.protobuf.FieldOptions.OptionTargetType - (MethodOptions_IdempotencyLevel)(0), // 9: google.protobuf.MethodOptions.IdempotencyLevel - (FeatureSet_FieldPresence)(0), // 10: google.protobuf.FeatureSet.FieldPresence - (FeatureSet_EnumType)(0), // 11: google.protobuf.FeatureSet.EnumType - (FeatureSet_RepeatedFieldEncoding)(0), // 12: google.protobuf.FeatureSet.RepeatedFieldEncoding - (FeatureSet_Utf8Validation)(0), // 13: google.protobuf.FeatureSet.Utf8Validation - (FeatureSet_MessageEncoding)(0), // 14: google.protobuf.FeatureSet.MessageEncoding - (FeatureSet_JsonFormat)(0), // 15: google.protobuf.FeatureSet.JsonFormat - (GeneratedCodeInfo_Annotation_Semantic)(0), // 16: google.protobuf.GeneratedCodeInfo.Annotation.Semantic - (*FileDescriptorSet)(nil), // 17: google.protobuf.FileDescriptorSet - (*FileDescriptorProto)(nil), // 18: google.protobuf.FileDescriptorProto - (*DescriptorProto)(nil), // 19: google.protobuf.DescriptorProto - (*ExtensionRangeOptions)(nil), // 20: google.protobuf.ExtensionRangeOptions - (*FieldDescriptorProto)(nil), // 21: google.protobuf.FieldDescriptorProto - (*OneofDescriptorProto)(nil), // 22: google.protobuf.OneofDescriptorProto - (*EnumDescriptorProto)(nil), // 23: google.protobuf.EnumDescriptorProto - (*EnumValueDescriptorProto)(nil), // 24: google.protobuf.EnumValueDescriptorProto - (*ServiceDescriptorProto)(nil), // 25: google.protobuf.ServiceDescriptorProto - (*MethodDescriptorProto)(nil), // 26: google.protobuf.MethodDescriptorProto - (*FileOptions)(nil), // 27: google.protobuf.FileOptions - (*MessageOptions)(nil), // 28: google.protobuf.MessageOptions - (*FieldOptions)(nil), // 29: google.protobuf.FieldOptions - (*OneofOptions)(nil), // 30: google.protobuf.OneofOptions - (*EnumOptions)(nil), // 31: google.protobuf.EnumOptions - (*EnumValueOptions)(nil), // 32: google.protobuf.EnumValueOptions - (*ServiceOptions)(nil), // 33: google.protobuf.ServiceOptions - (*MethodOptions)(nil), // 34: 
google.protobuf.MethodOptions - (*UninterpretedOption)(nil), // 35: google.protobuf.UninterpretedOption - (*FeatureSet)(nil), // 36: google.protobuf.FeatureSet - (*FeatureSetDefaults)(nil), // 37: google.protobuf.FeatureSetDefaults - (*SourceCodeInfo)(nil), // 38: google.protobuf.SourceCodeInfo - (*GeneratedCodeInfo)(nil), // 39: google.protobuf.GeneratedCodeInfo - (*DescriptorProto_ExtensionRange)(nil), // 40: google.protobuf.DescriptorProto.ExtensionRange - (*DescriptorProto_ReservedRange)(nil), // 41: google.protobuf.DescriptorProto.ReservedRange - (*ExtensionRangeOptions_Declaration)(nil), // 42: google.protobuf.ExtensionRangeOptions.Declaration - (*EnumDescriptorProto_EnumReservedRange)(nil), // 43: google.protobuf.EnumDescriptorProto.EnumReservedRange - (*FieldOptions_EditionDefault)(nil), // 44: google.protobuf.FieldOptions.EditionDefault - (*FieldOptions_FeatureSupport)(nil), // 45: google.protobuf.FieldOptions.FeatureSupport - (*UninterpretedOption_NamePart)(nil), // 46: google.protobuf.UninterpretedOption.NamePart - (*FeatureSetDefaults_FeatureSetEditionDefault)(nil), // 47: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault - (*SourceCodeInfo_Location)(nil), // 48: google.protobuf.SourceCodeInfo.Location - (*GeneratedCodeInfo_Annotation)(nil), // 49: google.protobuf.GeneratedCodeInfo.Annotation +var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 10) +var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 28) +var file_google_protobuf_descriptor_proto_goTypes = []interface{}{ + (ExtensionRangeOptions_VerificationState)(0), // 0: google.protobuf.ExtensionRangeOptions.VerificationState + (FieldDescriptorProto_Type)(0), // 1: google.protobuf.FieldDescriptorProto.Type + (FieldDescriptorProto_Label)(0), // 2: google.protobuf.FieldDescriptorProto.Label + (FileOptions_OptimizeMode)(0), // 3: google.protobuf.FileOptions.OptimizeMode + (FieldOptions_CType)(0), // 4: google.protobuf.FieldOptions.CType + (FieldOptions_JSType)(0), // 5: google.protobuf.FieldOptions.JSType + (FieldOptions_OptionRetention)(0), // 6: google.protobuf.FieldOptions.OptionRetention + (FieldOptions_OptionTargetType)(0), // 7: google.protobuf.FieldOptions.OptionTargetType + (MethodOptions_IdempotencyLevel)(0), // 8: google.protobuf.MethodOptions.IdempotencyLevel + (GeneratedCodeInfo_Annotation_Semantic)(0), // 9: google.protobuf.GeneratedCodeInfo.Annotation.Semantic + (*FileDescriptorSet)(nil), // 10: google.protobuf.FileDescriptorSet + (*FileDescriptorProto)(nil), // 11: google.protobuf.FileDescriptorProto + (*DescriptorProto)(nil), // 12: google.protobuf.DescriptorProto + (*ExtensionRangeOptions)(nil), // 13: google.protobuf.ExtensionRangeOptions + (*FieldDescriptorProto)(nil), // 14: google.protobuf.FieldDescriptorProto + (*OneofDescriptorProto)(nil), // 15: google.protobuf.OneofDescriptorProto + (*EnumDescriptorProto)(nil), // 16: google.protobuf.EnumDescriptorProto + (*EnumValueDescriptorProto)(nil), // 17: google.protobuf.EnumValueDescriptorProto + (*ServiceDescriptorProto)(nil), // 18: google.protobuf.ServiceDescriptorProto + (*MethodDescriptorProto)(nil), // 19: google.protobuf.MethodDescriptorProto + (*FileOptions)(nil), // 20: google.protobuf.FileOptions + (*MessageOptions)(nil), // 21: google.protobuf.MessageOptions + (*FieldOptions)(nil), // 22: google.protobuf.FieldOptions + (*OneofOptions)(nil), // 23: google.protobuf.OneofOptions + (*EnumOptions)(nil), // 24: google.protobuf.EnumOptions + (*EnumValueOptions)(nil), // 25: 
google.protobuf.EnumValueOptions + (*ServiceOptions)(nil), // 26: google.protobuf.ServiceOptions + (*MethodOptions)(nil), // 27: google.protobuf.MethodOptions + (*UninterpretedOption)(nil), // 28: google.protobuf.UninterpretedOption + (*SourceCodeInfo)(nil), // 29: google.protobuf.SourceCodeInfo + (*GeneratedCodeInfo)(nil), // 30: google.protobuf.GeneratedCodeInfo + (*DescriptorProto_ExtensionRange)(nil), // 31: google.protobuf.DescriptorProto.ExtensionRange + (*DescriptorProto_ReservedRange)(nil), // 32: google.protobuf.DescriptorProto.ReservedRange + (*ExtensionRangeOptions_Declaration)(nil), // 33: google.protobuf.ExtensionRangeOptions.Declaration + (*EnumDescriptorProto_EnumReservedRange)(nil), // 34: google.protobuf.EnumDescriptorProto.EnumReservedRange + (*UninterpretedOption_NamePart)(nil), // 35: google.protobuf.UninterpretedOption.NamePart + (*SourceCodeInfo_Location)(nil), // 36: google.protobuf.SourceCodeInfo.Location + (*GeneratedCodeInfo_Annotation)(nil), // 37: google.protobuf.GeneratedCodeInfo.Annotation } var file_google_protobuf_descriptor_proto_depIdxs = []int32{ - 18, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto - 19, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto - 23, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto - 25, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto - 21, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto - 27, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions - 38, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo - 0, // 7: google.protobuf.FileDescriptorProto.edition:type_name -> google.protobuf.Edition - 21, // 8: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto - 21, // 9: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto - 19, // 10: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto - 23, // 11: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto - 40, // 12: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange - 22, // 13: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto - 28, // 14: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions - 41, // 15: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange - 35, // 16: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 42, // 17: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration - 36, // 18: google.protobuf.ExtensionRangeOptions.features:type_name -> google.protobuf.FeatureSet - 1, // 19: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState - 3, // 20: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label - 2, // 21: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type - 29, // 22: google.protobuf.FieldDescriptorProto.options:type_name -> 
google.protobuf.FieldOptions - 30, // 23: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions - 24, // 24: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto - 31, // 25: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions - 43, // 26: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange - 32, // 27: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions - 26, // 28: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto - 33, // 29: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions - 34, // 30: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions - 4, // 31: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode - 36, // 32: google.protobuf.FileOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 33: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 34: google.protobuf.MessageOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 35: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 5, // 36: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType - 6, // 37: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType - 7, // 38: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention - 8, // 39: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType - 44, // 40: google.protobuf.FieldOptions.edition_defaults:type_name -> google.protobuf.FieldOptions.EditionDefault - 36, // 41: google.protobuf.FieldOptions.features:type_name -> google.protobuf.FeatureSet - 45, // 42: google.protobuf.FieldOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport - 35, // 43: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 44: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 45: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 46: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 47: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 48: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet - 45, // 49: google.protobuf.EnumValueOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport - 35, // 50: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 51: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 52: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 9, // 53: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel - 36, // 54: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 55: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 46, // 56: 
google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart - 10, // 57: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence - 11, // 58: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType - 12, // 59: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding - 13, // 60: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation - 14, // 61: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding - 15, // 62: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat - 47, // 63: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault - 0, // 64: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition - 0, // 65: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition - 48, // 66: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location - 49, // 67: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation - 20, // 68: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions - 0, // 69: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition - 0, // 70: google.protobuf.FieldOptions.FeatureSupport.edition_introduced:type_name -> google.protobuf.Edition - 0, // 71: google.protobuf.FieldOptions.FeatureSupport.edition_deprecated:type_name -> google.protobuf.Edition - 0, // 72: google.protobuf.FieldOptions.FeatureSupport.edition_removed:type_name -> google.protobuf.Edition - 0, // 73: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition - 36, // 74: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features:type_name -> google.protobuf.FeatureSet - 36, // 75: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features:type_name -> google.protobuf.FeatureSet - 16, // 76: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic - 77, // [77:77] is the sub-list for method output_type - 77, // [77:77] is the sub-list for method input_type - 77, // [77:77] is the sub-list for extension type_name - 77, // [77:77] is the sub-list for extension extendee - 0, // [0:77] is the sub-list for field type_name + 11, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto + 12, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto + 16, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 18, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto + 14, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 20, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions + 29, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo + 14, // 7: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto + 14, // 8: google.protobuf.DescriptorProto.extension:type_name -> 
google.protobuf.FieldDescriptorProto + 12, // 9: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto + 16, // 10: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 31, // 11: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange + 15, // 12: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto + 21, // 13: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions + 32, // 14: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange + 28, // 15: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 33, // 16: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration + 0, // 17: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState + 2, // 18: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label + 1, // 19: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type + 22, // 20: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions + 23, // 21: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions + 17, // 22: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto + 24, // 23: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions + 34, // 24: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange + 25, // 25: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions + 19, // 26: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto + 26, // 27: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions + 27, // 28: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions + 3, // 29: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode + 28, // 30: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 28, // 31: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 4, // 32: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType + 5, // 33: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType + 6, // 34: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention + 7, // 35: google.protobuf.FieldOptions.target:type_name -> google.protobuf.FieldOptions.OptionTargetType + 7, // 36: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType + 28, // 37: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 28, // 38: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 28, // 39: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 28, // 40: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 28, // 
41: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 8, // 42: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel + 28, // 43: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 35, // 44: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart + 36, // 45: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location + 37, // 46: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation + 13, // 47: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions + 9, // 48: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic + 49, // [49:49] is the sub-list for method output_type + 49, // [49:49] is the sub-list for method input_type + 49, // [49:49] is the sub-list for extension type_name + 49, // [49:49] is the sub-list for extension extendee + 0, // [0:49] is the sub-list for field type_name } func init() { file_google_protobuf_descriptor_proto_init() } @@ -5386,7 +4228,7 @@ func file_google_protobuf_descriptor_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_descriptor_proto_msgTypes[0].Exporter = func(v any, i int) any { + file_google_protobuf_descriptor_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*FileDescriptorSet); i { case 0: return &v.state @@ -5398,7 +4240,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[1].Exporter = func(v any, i int) any { + file_google_protobuf_descriptor_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*FileDescriptorProto); i { case 0: return &v.state @@ -5410,7 +4252,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[2].Exporter = func(v any, i int) any { + file_google_protobuf_descriptor_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*DescriptorProto); i { case 0: return &v.state @@ -5422,7 +4264,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[3].Exporter = func(v any, i int) any { + file_google_protobuf_descriptor_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ExtensionRangeOptions); i { case 0: return &v.state @@ -5436,7 +4278,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[4].Exporter = func(v any, i int) any { + file_google_protobuf_descriptor_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*FieldDescriptorProto); i { case 0: return &v.state @@ -5448,7 +4290,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[5].Exporter = func(v any, i int) any { + file_google_protobuf_descriptor_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*OneofDescriptorProto); i { case 0: return &v.state @@ -5460,7 +4302,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[6].Exporter = func(v any, i int) any { + 
file_google_protobuf_descriptor_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*EnumDescriptorProto); i { case 0: return &v.state @@ -5472,7 +4314,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[7].Exporter = func(v any, i int) any { + file_google_protobuf_descriptor_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*EnumValueDescriptorProto); i { case 0: return &v.state @@ -5484,7 +4326,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[8].Exporter = func(v any, i int) any { + file_google_protobuf_descriptor_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ServiceDescriptorProto); i { case 0: return &v.state @@ -5496,7 +4338,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[9].Exporter = func(v any, i int) any { + file_google_protobuf_descriptor_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MethodDescriptorProto); i { case 0: return &v.state @@ -5508,7 +4350,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[10].Exporter = func(v any, i int) any { + file_google_protobuf_descriptor_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*FileOptions); i { case 0: return &v.state @@ -5522,7 +4364,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[11].Exporter = func(v any, i int) any { + file_google_protobuf_descriptor_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MessageOptions); i { case 0: return &v.state @@ -5536,7 +4378,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[12].Exporter = func(v any, i int) any { + file_google_protobuf_descriptor_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*FieldOptions); i { case 0: return &v.state @@ -5550,7 +4392,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[13].Exporter = func(v any, i int) any { + file_google_protobuf_descriptor_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*OneofOptions); i { case 0: return &v.state @@ -5564,7 +4406,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[14].Exporter = func(v any, i int) any { + file_google_protobuf_descriptor_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*EnumOptions); i { case 0: return &v.state @@ -5578,7 +4420,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[15].Exporter = func(v any, i int) any { + file_google_protobuf_descriptor_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*EnumValueOptions); i { case 0: return &v.state @@ -5592,7 +4434,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[16].Exporter = func(v any, i int) any { + file_google_protobuf_descriptor_proto_msgTypes[16].Exporter = func(v interface{}, i int) 
interface{} { switch v := v.(*ServiceOptions); i { case 0: return &v.state @@ -5606,7 +4448,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[17].Exporter = func(v any, i int) any { + file_google_protobuf_descriptor_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MethodOptions); i { case 0: return &v.state @@ -5620,7 +4462,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[18].Exporter = func(v any, i int) any { + file_google_protobuf_descriptor_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*UninterpretedOption); i { case 0: return &v.state @@ -5632,33 +4474,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v any, i int) any { - switch v := v.(*FeatureSet); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v any, i int) any { - switch v := v.(*FeatureSetDefaults); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v any, i int) any { + file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*SourceCodeInfo); i { case 0: return &v.state @@ -5670,7 +4486,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v any, i int) any { + file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GeneratedCodeInfo); i { case 0: return &v.state @@ -5682,7 +4498,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v any, i int) any { + file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*DescriptorProto_ExtensionRange); i { case 0: return &v.state @@ -5694,7 +4510,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v any, i int) any { + file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*DescriptorProto_ReservedRange); i { case 0: return &v.state @@ -5706,7 +4522,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v any, i int) any { + file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ExtensionRangeOptions_Declaration); i { case 0: return &v.state @@ -5718,7 +4534,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v any, i int) any { + file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*EnumDescriptorProto_EnumReservedRange); i { case 0: return &v.state @@ -5730,31 +4546,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - 
file_google_protobuf_descriptor_proto_msgTypes[27].Exporter = func(v any, i int) any { - switch v := v.(*FieldOptions_EditionDefault); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[28].Exporter = func(v any, i int) any { - switch v := v.(*FieldOptions_FeatureSupport); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[29].Exporter = func(v any, i int) any { + file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*UninterpretedOption_NamePart); i { case 0: return &v.state @@ -5766,19 +4558,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[30].Exporter = func(v any, i int) any { - switch v := v.(*FeatureSetDefaults_FeatureSetEditionDefault); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[31].Exporter = func(v any, i int) any { + file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*SourceCodeInfo_Location); i { case 0: return &v.state @@ -5790,7 +4570,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[32].Exporter = func(v any, i int) any { + file_google_protobuf_descriptor_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GeneratedCodeInfo_Annotation); i { case 0: return &v.state @@ -5808,8 +4588,8 @@ func file_google_protobuf_descriptor_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_protobuf_descriptor_proto_rawDesc, - NumEnums: 17, - NumMessages: 33, + NumEnums: 10, + NumMessages: 28, NumExtensions: 0, NumServices: 0, }, diff --git a/vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go b/vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go index 1ba1dfa5..f77ef0de 100644 --- a/vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go +++ b/vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go @@ -49,13 +49,12 @@ type extensionType struct { // A Message is a dynamically constructed protocol buffer message. // -// Message implements the [google.golang.org/protobuf/proto.Message] interface, -// and may be used with all standard proto package functions -// such as Marshal, Unmarshal, and so forth. +// Message implements the proto.Message interface, and may be used with all +// standard proto package functions such as Marshal, Unmarshal, and so forth. // -// Message also implements the [protoreflect.Message] interface. -// See the [protoreflect] package documentation for that interface for how to -// get and set fields and otherwise interact with the contents of a Message. +// Message also implements the protoreflect.Message interface. See the protoreflect +// package documentation for that interface for how to get and set fields and +// otherwise interact with the contents of a Message. // // Reflection API functions which construct messages, such as NewField, // return new dynamic messages of the appropriate type. 
Functions which take @@ -88,7 +87,7 @@ func NewMessage(desc protoreflect.MessageDescriptor) *Message { // ProtoMessage implements the legacy message interface. func (m *Message) ProtoMessage() {} -// ProtoReflect implements the [protoreflect.ProtoMessage] interface. +// ProtoReflect implements the protoreflect.ProtoMessage interface. func (m *Message) ProtoReflect() protoreflect.Message { return m } @@ -116,25 +115,25 @@ func (m *Message) Type() protoreflect.MessageType { } // New returns a newly allocated empty message with the same descriptor. -// See [protoreflect.Message] for details. +// See protoreflect.Message for details. func (m *Message) New() protoreflect.Message { return m.Type().New() } // Interface returns the message. -// See [protoreflect.Message] for details. +// See protoreflect.Message for details. func (m *Message) Interface() protoreflect.ProtoMessage { return m } -// ProtoMethods is an internal detail of the [protoreflect.Message] interface. +// ProtoMethods is an internal detail of the protoreflect.Message interface. // Users should never call this directly. func (m *Message) ProtoMethods() *protoiface.Methods { return nil } // Range visits every populated field in undefined order. -// See [protoreflect.Message] for details. +// See protoreflect.Message for details. func (m *Message) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { for num, v := range m.known { fd := m.ext[num] @@ -151,7 +150,7 @@ func (m *Message) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) } // Has reports whether a field is populated. -// See [protoreflect.Message] for details. +// See protoreflect.Message for details. func (m *Message) Has(fd protoreflect.FieldDescriptor) bool { m.checkField(fd) if fd.IsExtension() && m.ext[fd.Number()] != fd { @@ -165,7 +164,7 @@ func (m *Message) Has(fd protoreflect.FieldDescriptor) bool { } // Clear clears a field. -// See [protoreflect.Message] for details. +// See protoreflect.Message for details. func (m *Message) Clear(fd protoreflect.FieldDescriptor) { m.checkField(fd) num := fd.Number() @@ -174,7 +173,7 @@ func (m *Message) Clear(fd protoreflect.FieldDescriptor) { } // Get returns the value of a field. -// See [protoreflect.Message] for details. +// See protoreflect.Message for details. func (m *Message) Get(fd protoreflect.FieldDescriptor) protoreflect.Value { m.checkField(fd) num := fd.Number() @@ -213,7 +212,7 @@ func (m *Message) Get(fd protoreflect.FieldDescriptor) protoreflect.Value { } // Mutable returns a mutable reference to a repeated, map, or message field. -// See [protoreflect.Message] for details. +// See protoreflect.Message for details. func (m *Message) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { m.checkField(fd) if !fd.IsMap() && !fd.IsList() && fd.Message() == nil { @@ -242,7 +241,7 @@ func (m *Message) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { } // Set stores a value in a field. -// See [protoreflect.Message] for details. +// See protoreflect.Message for details. func (m *Message) Set(fd protoreflect.FieldDescriptor, v protoreflect.Value) { m.checkField(fd) if m.known == nil { @@ -285,7 +284,7 @@ func (m *Message) clearOtherOneofFields(fd protoreflect.FieldDescriptor) { } // NewField returns a new value for assignable to the field of a given descriptor. -// See [protoreflect.Message] for details. +// See protoreflect.Message for details. 
func (m *Message) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { m.checkField(fd) switch { @@ -294,7 +293,7 @@ func (m *Message) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { case fd.IsMap(): return protoreflect.ValueOfMap(&dynamicMap{ desc: fd, - mapv: make(map[any]protoreflect.Value), + mapv: make(map[interface{}]protoreflect.Value), }) case fd.IsList(): return protoreflect.ValueOfList(&dynamicList{desc: fd}) @@ -306,7 +305,7 @@ func (m *Message) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { } // WhichOneof reports which field in a oneof is populated, returning nil if none are populated. -// See [protoreflect.Message] for details. +// See protoreflect.Message for details. func (m *Message) WhichOneof(od protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { for i := 0; i < od.Fields().Len(); i++ { fd := od.Fields().Get(i) @@ -318,13 +317,13 @@ func (m *Message) WhichOneof(od protoreflect.OneofDescriptor) protoreflect.Field } // GetUnknown returns the raw unknown fields. -// See [protoreflect.Message] for details. +// See protoreflect.Message for details. func (m *Message) GetUnknown() protoreflect.RawFields { return m.unknown } // SetUnknown sets the raw unknown fields. -// See [protoreflect.Message] for details. +// See protoreflect.Message for details. func (m *Message) SetUnknown(r protoreflect.RawFields) { if m.known == nil { panic(errors.New("%v: modification of read-only message", m.typ.desc.FullName())) @@ -333,7 +332,7 @@ func (m *Message) SetUnknown(r protoreflect.RawFields) { } // IsValid reports whether the message is valid. -// See [protoreflect.Message] for details. +// See protoreflect.Message for details. func (m *Message) IsValid() bool { return m.known != nil } @@ -450,7 +449,7 @@ func (x *dynamicList) IsValid() bool { type dynamicMap struct { desc protoreflect.FieldDescriptor - mapv map[any]protoreflect.Value + mapv map[interface{}]protoreflect.Value } func (x *dynamicMap) Get(k protoreflect.MapKey) protoreflect.Value { return x.mapv[k.Interface()] } @@ -499,7 +498,7 @@ func isSet(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { return v.List().Len() > 0 case fd.ContainingOneof() != nil: return true - case !fd.HasPresence() && !fd.IsExtension(): + case fd.Syntax() == protoreflect.Proto3 && !fd.IsExtension(): switch fd.Kind() { case protoreflect.BoolKind: return v.Bool() @@ -634,11 +633,11 @@ func newListEntry(fd protoreflect.FieldDescriptor) protoreflect.Value { // // The InterfaceOf and ValueOf methods of the extension type are defined as: // -// func (xt extensionType) ValueOf(iv any) protoreflect.Value { +// func (xt extensionType) ValueOf(iv interface{}) protoreflect.Value { // return protoreflect.ValueOf(iv) // } // -// func (xt extensionType) InterfaceOf(v protoreflect.Value) any { +// func (xt extensionType) InterfaceOf(v protoreflect.Value) interface{} { // return v.Interface() // } // @@ -658,7 +657,7 @@ func (xt extensionType) New() protoreflect.Value { case xt.desc.IsMap(): return protoreflect.ValueOfMap(&dynamicMap{ desc: xt.desc, - mapv: make(map[any]protoreflect.Value), + mapv: make(map[interface{}]protoreflect.Value), }) case xt.desc.IsList(): return protoreflect.ValueOfList(&dynamicList{desc: xt.desc}) @@ -686,18 +685,18 @@ func (xt extensionType) TypeDescriptor() protoreflect.ExtensionTypeDescriptor { return xt.desc } -func (xt extensionType) ValueOf(iv any) protoreflect.Value { +func (xt extensionType) ValueOf(iv interface{}) protoreflect.Value { v := protoreflect.ValueOf(iv) 
typecheck(xt.desc, v) return v } -func (xt extensionType) InterfaceOf(v protoreflect.Value) any { +func (xt extensionType) InterfaceOf(v protoreflect.Value) interface{} { typecheck(xt.desc, v) return v.Interface() } -func (xt extensionType) IsValidInterface(iv any) bool { +func (xt extensionType) IsValidInterface(iv interface{}) bool { return typeIsValid(xt.desc, protoreflect.ValueOf(iv)) == nil } diff --git a/vendor/google.golang.org/protobuf/types/dynamicpb/types.go b/vendor/google.golang.org/protobuf/types/dynamicpb/types.go index c432817b..5a8010f1 100644 --- a/vendor/google.golang.org/protobuf/types/dynamicpb/types.go +++ b/vendor/google.golang.org/protobuf/types/dynamicpb/types.go @@ -23,20 +23,13 @@ type extField struct { // A Types is a collection of dynamically constructed descriptors. // Its methods are safe for concurrent use. // -// Types implements [protoregistry.MessageTypeResolver] and [protoregistry.ExtensionTypeResolver]. -// A Types may be used as a [google.golang.org/protobuf/proto.UnmarshalOptions.Resolver]. +// Types implements protoregistry.MessageTypeResolver and protoregistry.ExtensionTypeResolver. +// A Types may be used as a proto.UnmarshalOptions.Resolver. type Types struct { - // atomicExtFiles is used with sync/atomic and hence must be the first word - // of the struct to guarantee 64-bit alignment. - // - // TODO(stapelberg): once we only support Go 1.19 and newer, switch this - // field to be of type atomic.Uint64 to guarantee alignment on - // stack-allocated values, too. - atomicExtFiles uint64 - extMu sync.Mutex - files *protoregistry.Files + extMu sync.Mutex + atomicExtFiles uint64 extensionsByMessage map[extField]protoreflect.ExtensionDescriptor } @@ -52,7 +45,7 @@ func NewTypes(f *protoregistry.Files) *Types { // FindEnumByName looks up an enum by its full name; // e.g., "google.protobuf.Field.Kind". // -// This returns (nil, [protoregistry.NotFound]) if not found. +// This returns (nil, protoregistry.NotFound) if not found. func (t *Types) FindEnumByName(name protoreflect.FullName) (protoreflect.EnumType, error) { d, err := t.files.FindDescriptorByName(name) if err != nil { @@ -70,7 +63,7 @@ func (t *Types) FindEnumByName(name protoreflect.FullName) (protoreflect.EnumTyp // where the extension is declared and is unrelated to the full name of the // message being extended. // -// This returns (nil, [protoregistry.NotFound]) if not found. +// This returns (nil, protoregistry.NotFound) if not found. func (t *Types) FindExtensionByName(name protoreflect.FullName) (protoreflect.ExtensionType, error) { d, err := t.files.FindDescriptorByName(name) if err != nil { @@ -86,7 +79,7 @@ func (t *Types) FindExtensionByName(name protoreflect.FullName) (protoreflect.Ex // FindExtensionByNumber looks up an extension field by the field number // within some parent message, identified by full name. // -// This returns (nil, [protoregistry.NotFound]) if not found. +// This returns (nil, protoregistry.NotFound) if not found. func (t *Types) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { // Construct the extension number map lazily, since not every user will need it. // Update the map if new files are added to the registry. @@ -103,7 +96,7 @@ func (t *Types) FindExtensionByNumber(message protoreflect.FullName, field proto // FindMessageByName looks up a message by its full name; // e.g. "google.protobuf.Any". // -// This returns (nil, [protoregistry.NotFound]) if not found. 
+// This returns (nil, protoregistry.NotFound) if not found. func (t *Types) FindMessageByName(name protoreflect.FullName) (protoreflect.MessageType, error) { d, err := t.files.FindDescriptorByName(name) if err != nil { @@ -119,7 +112,7 @@ func (t *Types) FindMessageByName(name protoreflect.FullName) (protoreflect.Mess // FindMessageByURL looks up a message by a URL identifier. // See documentation on google.protobuf.Any.type_url for the URL format. // -// This returns (nil, [protoregistry.NotFound]) if not found. +// This returns (nil, protoregistry.NotFound) if not found. func (t *Types) FindMessageByURL(url string) (protoreflect.MessageType, error) { // This function is similar to FindMessageByName but // truncates anything before and including '/' in the URL. diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go deleted file mode 100644 index a2ca940c..00000000 --- a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go +++ /dev/null @@ -1,181 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2023 Google Inc. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file or at -// https://developers.google.com/open-source/licenses/bsd - -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/go_features.proto - -package gofeaturespb - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - descriptorpb "google.golang.org/protobuf/types/descriptorpb" - reflect "reflect" - sync "sync" -) - -type GoFeatures struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Whether or not to generate the deprecated UnmarshalJSON method for enums. - LegacyUnmarshalJsonEnum *bool `protobuf:"varint,1,opt,name=legacy_unmarshal_json_enum,json=legacyUnmarshalJsonEnum" json:"legacy_unmarshal_json_enum,omitempty"` -} - -func (x *GoFeatures) Reset() { - *x = GoFeatures{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_go_features_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GoFeatures) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GoFeatures) ProtoMessage() {} - -func (x *GoFeatures) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_go_features_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GoFeatures.ProtoReflect.Descriptor instead. -func (*GoFeatures) Descriptor() ([]byte, []int) { - return file_google_protobuf_go_features_proto_rawDescGZIP(), []int{0} -} - -func (x *GoFeatures) GetLegacyUnmarshalJsonEnum() bool { - if x != nil && x.LegacyUnmarshalJsonEnum != nil { - return *x.LegacyUnmarshalJsonEnum - } - return false -} - -var file_google_protobuf_go_features_proto_extTypes = []protoimpl.ExtensionInfo{ - { - ExtendedType: (*descriptorpb.FeatureSet)(nil), - ExtensionType: (*GoFeatures)(nil), - Field: 1002, - Name: "pb.go", - Tag: "bytes,1002,opt,name=go", - Filename: "google/protobuf/go_features.proto", - }, -} - -// Extension fields to descriptorpb.FeatureSet. 
-var ( - // optional pb.GoFeatures go = 1002; - E_Go = &file_google_protobuf_go_features_proto_extTypes[0] -) - -var File_google_protobuf_go_features_proto protoreflect.FileDescriptor - -var file_google_protobuf_go_features_proto_rawDesc = []byte{ - 0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x67, 0x6f, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xcd, 0x01, 0x0a, 0x0a, 0x47, 0x6f, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0xbe, 0x01, 0x0a, 0x1a, 0x6c, 0x65, 0x67, - 0x61, 0x63, 0x79, 0x5f, 0x75, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, 0x5f, 0x6a, 0x73, - 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x42, 0x80, 0x01, - 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x74, 0x72, - 0x75, 0x65, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x18, - 0xe7, 0x07, 0xb2, 0x01, 0x5b, 0x08, 0xe8, 0x07, 0x10, 0xe8, 0x07, 0x1a, 0x53, 0x54, 0x68, 0x65, - 0x20, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x20, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, - 0x6c, 0x4a, 0x53, 0x4f, 0x4e, 0x20, 0x41, 0x50, 0x49, 0x20, 0x69, 0x73, 0x20, 0x64, 0x65, 0x70, - 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x77, 0x69, 0x6c, 0x6c, - 0x20, 0x62, 0x65, 0x20, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x20, 0x61, - 0x20, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65, 0x20, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, - 0x52, 0x17, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, - 0x6c, 0x4a, 0x73, 0x6f, 0x6e, 0x45, 0x6e, 0x75, 0x6d, 0x3a, 0x3c, 0x0a, 0x02, 0x67, 0x6f, 0x12, - 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18, 0xea, 0x07, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x73, 0x52, 0x02, 0x67, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x67, 0x6f, 0x66, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x70, 0x62, -} - -var ( - file_google_protobuf_go_features_proto_rawDescOnce sync.Once - file_google_protobuf_go_features_proto_rawDescData = file_google_protobuf_go_features_proto_rawDesc -) - -func file_google_protobuf_go_features_proto_rawDescGZIP() []byte { - file_google_protobuf_go_features_proto_rawDescOnce.Do(func() { - file_google_protobuf_go_features_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_go_features_proto_rawDescData) - }) - return file_google_protobuf_go_features_proto_rawDescData -} - -var file_google_protobuf_go_features_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_go_features_proto_goTypes = []any{ - (*GoFeatures)(nil), // 0: pb.GoFeatures - (*descriptorpb.FeatureSet)(nil), // 1: google.protobuf.FeatureSet -} -var file_google_protobuf_go_features_proto_depIdxs = []int32{ - 1, // 0: pb.go:extendee -> google.protobuf.FeatureSet - 0, 
// 1: pb.go:type_name -> pb.GoFeatures - 2, // [2:2] is the sub-list for method output_type - 2, // [2:2] is the sub-list for method input_type - 1, // [1:2] is the sub-list for extension type_name - 0, // [0:1] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_google_protobuf_go_features_proto_init() } -func file_google_protobuf_go_features_proto_init() { - if File_google_protobuf_go_features_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_go_features_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*GoFeatures); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_go_features_proto_rawDesc, - NumEnums: 0, - NumMessages: 1, - NumExtensions: 1, - NumServices: 0, - }, - GoTypes: file_google_protobuf_go_features_proto_goTypes, - DependencyIndexes: file_google_protobuf_go_features_proto_depIdxs, - MessageInfos: file_google_protobuf_go_features_proto_msgTypes, - ExtensionInfos: file_google_protobuf_go_features_proto_extTypes, - }.Build() - File_google_protobuf_go_features_proto = out.File - file_google_protobuf_go_features_proto_rawDesc = nil - file_google_protobuf_go_features_proto_goTypes = nil - file_google_protobuf_go_features_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go index 7172b43d..580b232f 100644 --- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go @@ -237,8 +237,7 @@ type Any struct { // // Note: this functionality is not currently available in the official // protobuf release, and it is not used for type URLs beginning with - // type.googleapis.com. As of May 2023, there are no widely used type server - // implementations and no plans to implement one. + // type.googleapis.com. // // Schemes other than `http`, `https` (or the empty scheme) might be // used with implementation specific semantics. 
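(Illustrative aside — not part of the vendored files or of this patch.) The dynamicpb and anypb pieces touched above are the kind of machinery dynamic gRPC dial testing typically leans on: a dynamicpb.Types built from a descriptor registry can act as the Resolver that maps an Any's type URL back to a (dynamically constructed) message, exactly as the Types doc comment earlier in this diff describes. A minimal sketch under those assumptions — fdset is assumed to come from elsewhere (for example a server-reflection dump), and the package name is invented for the example:

package anyresolve // hypothetical helper package, for illustration only

import (
	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protodesc"
	"google.golang.org/protobuf/types/descriptorpb"
	"google.golang.org/protobuf/types/dynamicpb"
	"google.golang.org/protobuf/types/known/anypb"
)

// UnpackAny resolves anyMsg against the descriptors in fdset and renders the
// payload as JSON. The payload's message type is built at runtime by
// dynamicpb, so it does not need to be compiled into the binary.
func UnpackAny(fdset *descriptorpb.FileDescriptorSet, anyMsg *anypb.Any) (string, error) {
	files, err := protodesc.NewFiles(fdset) // registry of file descriptors
	if err != nil {
		return "", err
	}
	types := dynamicpb.NewTypes(files) // implements protoregistry.MessageTypeResolver

	// UnmarshalNew looks up the type URL through the resolver and returns a
	// dynamic message of the resolved type.
	msg, err := anypb.UnmarshalNew(anyMsg, proto.UnmarshalOptions{Resolver: types})
	if err != nil {
		return "", err
	}

	out, err := protojson.MarshalOptions{Resolver: types}.Marshal(msg)
	if err != nil {
		return "", err
	}
	return string(out), nil
}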
@@ -445,7 +444,7 @@ func file_google_protobuf_any_proto_rawDescGZIP() []byte { } var file_google_protobuf_any_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_any_proto_goTypes = []any{ +var file_google_protobuf_any_proto_goTypes = []interface{}{ (*Any)(nil), // 0: google.protobuf.Any } var file_google_protobuf_any_proto_depIdxs = []int32{ @@ -462,7 +461,7 @@ func file_google_protobuf_any_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_any_proto_msgTypes[0].Exporter = func(v any, i int) any { + file_google_protobuf_any_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Any); i { case 0: return &v.state diff --git a/vendor/google.golang.org/protobuf/types/known/apipb/api.pb.go b/vendor/google.golang.org/protobuf/types/known/apipb/api.pb.go index 4f2fe89e..335be6eb 100644 --- a/vendor/google.golang.org/protobuf/types/known/apipb/api.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/apipb/api.pb.go @@ -488,7 +488,7 @@ func file_google_protobuf_api_proto_rawDescGZIP() []byte { } var file_google_protobuf_api_proto_msgTypes = make([]protoimpl.MessageInfo, 3) -var file_google_protobuf_api_proto_goTypes = []any{ +var file_google_protobuf_api_proto_goTypes = []interface{}{ (*Api)(nil), // 0: google.protobuf.Api (*Method)(nil), // 1: google.protobuf.Method (*Mixin)(nil), // 2: google.protobuf.Mixin @@ -517,7 +517,7 @@ func file_google_protobuf_api_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_api_proto_msgTypes[0].Exporter = func(v any, i int) any { + file_google_protobuf_api_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Api); i { case 0: return &v.state @@ -529,7 +529,7 @@ func file_google_protobuf_api_proto_init() { return nil } } - file_google_protobuf_api_proto_msgTypes[1].Exporter = func(v any, i int) any { + file_google_protobuf_api_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Method); i { case 0: return &v.state @@ -541,7 +541,7 @@ func file_google_protobuf_api_proto_init() { return nil } } - file_google_protobuf_api_proto_msgTypes[2].Exporter = func(v any, i int) any { + file_google_protobuf_api_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Mixin); i { case 0: return &v.state diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go index 1b71bcd9..df709a8d 100644 --- a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go @@ -323,7 +323,7 @@ func file_google_protobuf_duration_proto_rawDescGZIP() []byte { } var file_google_protobuf_duration_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_duration_proto_goTypes = []any{ +var file_google_protobuf_duration_proto_goTypes = []interface{}{ (*Duration)(nil), // 0: google.protobuf.Duration } var file_google_protobuf_duration_proto_depIdxs = []int32{ @@ -340,7 +340,7 @@ func file_google_protobuf_duration_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_duration_proto_msgTypes[0].Exporter = func(v any, i int) any { + file_google_protobuf_duration_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Duration); i { case 0: return &v.state diff --git a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go 
b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go index d87b4fb8..9a7277ba 100644 --- a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go @@ -115,7 +115,7 @@ func file_google_protobuf_empty_proto_rawDescGZIP() []byte { } var file_google_protobuf_empty_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_empty_proto_goTypes = []any{ +var file_google_protobuf_empty_proto_goTypes = []interface{}{ (*Empty)(nil), // 0: google.protobuf.Empty } var file_google_protobuf_empty_proto_depIdxs = []int32{ @@ -132,7 +132,7 @@ func file_google_protobuf_empty_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_empty_proto_msgTypes[0].Exporter = func(v any, i int) any { + file_google_protobuf_empty_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Empty); i { case 0: return &v.state diff --git a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go index ac1e91bb..e8789cb3 100644 --- a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go @@ -537,7 +537,7 @@ func file_google_protobuf_field_mask_proto_rawDescGZIP() []byte { } var file_google_protobuf_field_mask_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_field_mask_proto_goTypes = []any{ +var file_google_protobuf_field_mask_proto_goTypes = []interface{}{ (*FieldMask)(nil), // 0: google.protobuf.FieldMask } var file_google_protobuf_field_mask_proto_depIdxs = []int32{ @@ -554,7 +554,7 @@ func file_google_protobuf_field_mask_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_field_mask_proto_msgTypes[0].Exporter = func(v any, i int) any { + file_google_protobuf_field_mask_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*FieldMask); i { case 0: return &v.state diff --git a/vendor/google.golang.org/protobuf/types/known/sourcecontextpb/source_context.pb.go b/vendor/google.golang.org/protobuf/types/known/sourcecontextpb/source_context.pb.go index fa185780..0980d5ae 100644 --- a/vendor/google.golang.org/protobuf/types/known/sourcecontextpb/source_context.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/sourcecontextpb/source_context.pb.go @@ -125,7 +125,7 @@ func file_google_protobuf_source_context_proto_rawDescGZIP() []byte { } var file_google_protobuf_source_context_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_source_context_proto_goTypes = []any{ +var file_google_protobuf_source_context_proto_goTypes = []interface{}{ (*SourceContext)(nil), // 0: google.protobuf.SourceContext } var file_google_protobuf_source_context_proto_depIdxs = []int32{ @@ -142,7 +142,7 @@ func file_google_protobuf_source_context_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_source_context_proto_msgTypes[0].Exporter = func(v any, i int) any { + file_google_protobuf_source_context_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*SourceContext); i { case 0: return &v.state diff --git a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go index d45361cb..d2bac8b8 100644 --- 
a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go @@ -49,11 +49,11 @@ // The standard Go "encoding/json" package has functionality to serialize // arbitrary types to a large degree. The Value.AsInterface, Struct.AsMap, and // ListValue.AsSlice methods can convert the protobuf message representation into -// a form represented by any, map[string]any, and []any. +// a form represented by interface{}, map[string]interface{}, and []interface{}. // This form can be used with other packages that operate on such data structures // and also directly with the standard json package. // -// In order to convert the any, map[string]any, and []any +// In order to convert the interface{}, map[string]interface{}, and []interface{} // forms back as Value, Struct, and ListValue messages, use the NewStruct, // NewList, and NewValue constructor functions. // @@ -88,28 +88,28 @@ // // To construct a Value message representing the above JSON object: // -// m, err := structpb.NewValue(map[string]any{ +// m, err := structpb.NewValue(map[string]interface{}{ // "firstName": "John", // "lastName": "Smith", // "isAlive": true, // "age": 27, -// "address": map[string]any{ +// "address": map[string]interface{}{ // "streetAddress": "21 2nd Street", // "city": "New York", // "state": "NY", // "postalCode": "10021-3100", // }, -// "phoneNumbers": []any{ -// map[string]any{ +// "phoneNumbers": []interface{}{ +// map[string]interface{}{ // "type": "home", // "number": "212 555-1234", // }, -// map[string]any{ +// map[string]interface{}{ // "type": "office", // "number": "646 555-4567", // }, // }, -// "children": []any{}, +// "children": []interface{}{}, // "spouse": nil, // }) // if err != nil { @@ -197,7 +197,7 @@ type Struct struct { // NewStruct constructs a Struct from a general-purpose Go map. // The map keys must be valid UTF-8. // The map values are converted using NewValue. -func NewStruct(v map[string]any) (*Struct, error) { +func NewStruct(v map[string]interface{}) (*Struct, error) { x := &Struct{Fields: make(map[string]*Value, len(v))} for k, v := range v { if !utf8.ValidString(k) { @@ -214,9 +214,9 @@ func NewStruct(v map[string]any) (*Struct, error) { // AsMap converts x to a general-purpose Go map. // The map values are converted by calling Value.AsInterface. -func (x *Struct) AsMap() map[string]any { +func (x *Struct) AsMap() map[string]interface{} { f := x.GetFields() - vs := make(map[string]any, len(f)) + vs := make(map[string]interface{}, len(f)) for k, v := range f { vs[k] = v.AsInterface() } @@ -306,13 +306,13 @@ type Value struct { // ║ float32, float64 │ stored as NumberValue ║ // ║ string │ stored as StringValue; must be valid UTF-8 ║ // ║ []byte │ stored as StringValue; base64-encoded ║ -// ║ map[string]any │ stored as StructValue ║ -// ║ []any │ stored as ListValue ║ +// ║ map[string]interface{} │ stored as StructValue ║ +// ║ []interface{} │ stored as ListValue ║ // ╚════════════════════════╧════════════════════════════════════════════╝ // // When converting an int64 or uint64 to a NumberValue, numeric precision loss // is possible since they are stored as a float64. 
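(Illustrative aside, not part of the vendored file.) The conversion table documented above reads more concretely as a short round trip under the interface{}-based signatures this patch reverts to; the package name and values below are made up for the example:

package structdemo // hypothetical, for illustration only

import (
	"fmt"

	"google.golang.org/protobuf/types/known/structpb"
)

// Example round-trips a small value through the documented conversions.
func Example() error {
	v, err := structpb.NewValue(map[string]interface{}{
		"name": "dialtesting",         // stored as StringValue
		"port": 9530,                  // stored as NumberValue (held as float64)
		"tags": []interface{}{"grpc"}, // stored as ListValue
		"note": nil,                   // stored as NullValue
	})
	if err != nil {
		return err
	}
	// AsInterface converts back to the map[string]interface{} / []interface{}
	// forms; integers above 2^53 would lose precision here, per the note above.
	m := v.AsInterface().(map[string]interface{})
	fmt.Println(m["name"], m["port"]) // dialtesting 9530
	return nil
}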
-func NewValue(v any) (*Value, error) { +func NewValue(v interface{}) (*Value, error) { switch v := v.(type) { case nil: return NewNullValue(), nil @@ -342,13 +342,13 @@ func NewValue(v any) (*Value, error) { case []byte: s := base64.StdEncoding.EncodeToString(v) return NewStringValue(s), nil - case map[string]any: + case map[string]interface{}: v2, err := NewStruct(v) if err != nil { return nil, err } return NewStructValue(v2), nil - case []any: + case []interface{}: v2, err := NewList(v) if err != nil { return nil, err @@ -396,7 +396,7 @@ func NewListValue(v *ListValue) *Value { // // Floating-point values (i.e., "NaN", "Infinity", and "-Infinity") are // converted as strings to remain compatible with MarshalJSON. -func (x *Value) AsInterface() any { +func (x *Value) AsInterface() interface{} { switch v := x.GetKind().(type) { case *Value_NumberValue: if v != nil { @@ -580,7 +580,7 @@ type ListValue struct { // NewList constructs a ListValue from a general-purpose Go slice. // The slice elements are converted using NewValue. -func NewList(v []any) (*ListValue, error) { +func NewList(v []interface{}) (*ListValue, error) { x := &ListValue{Values: make([]*Value, len(v))} for i, v := range v { var err error @@ -594,9 +594,9 @@ func NewList(v []any) (*ListValue, error) { // AsSlice converts x to a general-purpose Go slice. // The slice elements are converted by calling Value.AsInterface. -func (x *ListValue) AsSlice() []any { +func (x *ListValue) AsSlice() []interface{} { vals := x.GetValues() - vs := make([]any, len(vals)) + vs := make([]interface{}, len(vals)) for i, v := range vals { vs[i] = v.AsInterface() } @@ -716,7 +716,7 @@ func file_google_protobuf_struct_proto_rawDescGZIP() []byte { var file_google_protobuf_struct_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_google_protobuf_struct_proto_msgTypes = make([]protoimpl.MessageInfo, 4) -var file_google_protobuf_struct_proto_goTypes = []any{ +var file_google_protobuf_struct_proto_goTypes = []interface{}{ (NullValue)(0), // 0: google.protobuf.NullValue (*Struct)(nil), // 1: google.protobuf.Struct (*Value)(nil), // 2: google.protobuf.Value @@ -743,7 +743,7 @@ func file_google_protobuf_struct_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_struct_proto_msgTypes[0].Exporter = func(v any, i int) any { + file_google_protobuf_struct_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Struct); i { case 0: return &v.state @@ -755,7 +755,7 @@ func file_google_protobuf_struct_proto_init() { return nil } } - file_google_protobuf_struct_proto_msgTypes[1].Exporter = func(v any, i int) any { + file_google_protobuf_struct_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Value); i { case 0: return &v.state @@ -767,7 +767,7 @@ func file_google_protobuf_struct_proto_init() { return nil } } - file_google_protobuf_struct_proto_msgTypes[2].Exporter = func(v any, i int) any { + file_google_protobuf_struct_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ListValue); i { case 0: return &v.state @@ -780,7 +780,7 @@ func file_google_protobuf_struct_proto_init() { } } } - file_google_protobuf_struct_proto_msgTypes[1].OneofWrappers = []any{ + file_google_protobuf_struct_proto_msgTypes[1].OneofWrappers = []interface{}{ (*Value_NullValue)(nil), (*Value_NumberValue)(nil), (*Value_StringValue)(nil), diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go 
b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go index 83a5a645..81511a33 100644 --- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go @@ -332,7 +332,7 @@ func file_google_protobuf_timestamp_proto_rawDescGZIP() []byte { } var file_google_protobuf_timestamp_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_timestamp_proto_goTypes = []any{ +var file_google_protobuf_timestamp_proto_goTypes = []interface{}{ (*Timestamp)(nil), // 0: google.protobuf.Timestamp } var file_google_protobuf_timestamp_proto_depIdxs = []int32{ @@ -349,7 +349,7 @@ func file_google_protobuf_timestamp_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_timestamp_proto_msgTypes[0].Exporter = func(v any, i int) any { + file_google_protobuf_timestamp_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Timestamp); i { case 0: return &v.state diff --git a/vendor/google.golang.org/protobuf/types/known/typepb/type.pb.go b/vendor/google.golang.org/protobuf/types/known/typepb/type.pb.go index 52887fd5..4cb8d0a5 100644 --- a/vendor/google.golang.org/protobuf/types/known/typepb/type.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/typepb/type.pb.go @@ -868,7 +868,7 @@ func file_google_protobuf_type_proto_rawDescGZIP() []byte { var file_google_protobuf_type_proto_enumTypes = make([]protoimpl.EnumInfo, 3) var file_google_protobuf_type_proto_msgTypes = make([]protoimpl.MessageInfo, 5) -var file_google_protobuf_type_proto_goTypes = []any{ +var file_google_protobuf_type_proto_goTypes = []interface{}{ (Syntax)(0), // 0: google.protobuf.Syntax (Field_Kind)(0), // 1: google.protobuf.Field.Kind (Field_Cardinality)(0), // 2: google.protobuf.Field.Cardinality @@ -907,7 +907,7 @@ func file_google_protobuf_type_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_type_proto_msgTypes[0].Exporter = func(v any, i int) any { + file_google_protobuf_type_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Type); i { case 0: return &v.state @@ -919,7 +919,7 @@ func file_google_protobuf_type_proto_init() { return nil } } - file_google_protobuf_type_proto_msgTypes[1].Exporter = func(v any, i int) any { + file_google_protobuf_type_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Field); i { case 0: return &v.state @@ -931,7 +931,7 @@ func file_google_protobuf_type_proto_init() { return nil } } - file_google_protobuf_type_proto_msgTypes[2].Exporter = func(v any, i int) any { + file_google_protobuf_type_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Enum); i { case 0: return &v.state @@ -943,7 +943,7 @@ func file_google_protobuf_type_proto_init() { return nil } } - file_google_protobuf_type_proto_msgTypes[3].Exporter = func(v any, i int) any { + file_google_protobuf_type_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*EnumValue); i { case 0: return &v.state @@ -955,7 +955,7 @@ func file_google_protobuf_type_proto_init() { return nil } } - file_google_protobuf_type_proto_msgTypes[4].Exporter = func(v any, i int) any { + file_google_protobuf_type_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Option); i { case 0: return &v.state diff --git a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go 
b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go index e473f826..762a8713 100644 --- a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go @@ -605,7 +605,7 @@ func file_google_protobuf_wrappers_proto_rawDescGZIP() []byte { } var file_google_protobuf_wrappers_proto_msgTypes = make([]protoimpl.MessageInfo, 9) -var file_google_protobuf_wrappers_proto_goTypes = []any{ +var file_google_protobuf_wrappers_proto_goTypes = []interface{}{ (*DoubleValue)(nil), // 0: google.protobuf.DoubleValue (*FloatValue)(nil), // 1: google.protobuf.FloatValue (*Int64Value)(nil), // 2: google.protobuf.Int64Value @@ -630,7 +630,7 @@ func file_google_protobuf_wrappers_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_wrappers_proto_msgTypes[0].Exporter = func(v any, i int) any { + file_google_protobuf_wrappers_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*DoubleValue); i { case 0: return &v.state @@ -642,7 +642,7 @@ func file_google_protobuf_wrappers_proto_init() { return nil } } - file_google_protobuf_wrappers_proto_msgTypes[1].Exporter = func(v any, i int) any { + file_google_protobuf_wrappers_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*FloatValue); i { case 0: return &v.state @@ -654,7 +654,7 @@ func file_google_protobuf_wrappers_proto_init() { return nil } } - file_google_protobuf_wrappers_proto_msgTypes[2].Exporter = func(v any, i int) any { + file_google_protobuf_wrappers_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Int64Value); i { case 0: return &v.state @@ -666,7 +666,7 @@ func file_google_protobuf_wrappers_proto_init() { return nil } } - file_google_protobuf_wrappers_proto_msgTypes[3].Exporter = func(v any, i int) any { + file_google_protobuf_wrappers_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*UInt64Value); i { case 0: return &v.state @@ -678,7 +678,7 @@ func file_google_protobuf_wrappers_proto_init() { return nil } } - file_google_protobuf_wrappers_proto_msgTypes[4].Exporter = func(v any, i int) any { + file_google_protobuf_wrappers_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Int32Value); i { case 0: return &v.state @@ -690,7 +690,7 @@ func file_google_protobuf_wrappers_proto_init() { return nil } } - file_google_protobuf_wrappers_proto_msgTypes[5].Exporter = func(v any, i int) any { + file_google_protobuf_wrappers_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*UInt32Value); i { case 0: return &v.state @@ -702,7 +702,7 @@ func file_google_protobuf_wrappers_proto_init() { return nil } } - file_google_protobuf_wrappers_proto_msgTypes[6].Exporter = func(v any, i int) any { + file_google_protobuf_wrappers_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*BoolValue); i { case 0: return &v.state @@ -714,7 +714,7 @@ func file_google_protobuf_wrappers_proto_init() { return nil } } - file_google_protobuf_wrappers_proto_msgTypes[7].Exporter = func(v any, i int) any { + file_google_protobuf_wrappers_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*StringValue); i { case 0: return &v.state @@ -726,7 +726,7 @@ func file_google_protobuf_wrappers_proto_init() { return nil } } - file_google_protobuf_wrappers_proto_msgTypes[8].Exporter = func(v any, i int) any { 
+ file_google_protobuf_wrappers_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*BytesValue); i { case 0: return &v.state diff --git a/vendor/google.golang.org/protobuf/types/pluginpb/plugin.pb.go b/vendor/google.golang.org/protobuf/types/pluginpb/plugin.pb.go index 9066bcc7..d0bb96a9 100644 --- a/vendor/google.golang.org/protobuf/types/pluginpb/plugin.pb.go +++ b/vendor/google.golang.org/protobuf/types/pluginpb/plugin.pb.go @@ -1,9 +1,32 @@ // Protocol Buffers - Google's data interchange format // Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ // -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file or at -// https://developers.google.com/open-source/licenses/bsd +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // Author: kenton@google.com (Kenton Varda) // @@ -35,9 +58,8 @@ import ( type CodeGeneratorResponse_Feature int32 const ( - CodeGeneratorResponse_FEATURE_NONE CodeGeneratorResponse_Feature = 0 - CodeGeneratorResponse_FEATURE_PROTO3_OPTIONAL CodeGeneratorResponse_Feature = 1 - CodeGeneratorResponse_FEATURE_SUPPORTS_EDITIONS CodeGeneratorResponse_Feature = 2 + CodeGeneratorResponse_FEATURE_NONE CodeGeneratorResponse_Feature = 0 + CodeGeneratorResponse_FEATURE_PROTO3_OPTIONAL CodeGeneratorResponse_Feature = 1 ) // Enum value maps for CodeGeneratorResponse_Feature. @@ -45,12 +67,10 @@ var ( CodeGeneratorResponse_Feature_name = map[int32]string{ 0: "FEATURE_NONE", 1: "FEATURE_PROTO3_OPTIONAL", - 2: "FEATURE_SUPPORTS_EDITIONS", } CodeGeneratorResponse_Feature_value = map[string]int32{ - "FEATURE_NONE": 0, - "FEATURE_PROTO3_OPTIONAL": 1, - "FEATURE_SUPPORTS_EDITIONS": 2, + "FEATURE_NONE": 0, + "FEATURE_PROTO3_OPTIONAL": 1, } ) @@ -181,11 +201,6 @@ type CodeGeneratorRequest struct { // they import. The files will appear in topological order, so each file // appears before any file that imports it. 
// - // Note: the files listed in files_to_generate will include runtime-retention - // options only, but all other files will include source-retention options. - // The source_file_descriptors field below is available in case you need - // source-retention options for files_to_generate. - // // protoc guarantees that all proto_files will be written after // the fields above, even though this is not technically guaranteed by the // protobuf wire format. This theoretically could allow a plugin to stream @@ -197,10 +212,6 @@ type CodeGeneratorRequest struct { // Type names of fields and extensions in the FileDescriptorProto are always // fully qualified. ProtoFile []*descriptorpb.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file,json=protoFile" json:"proto_file,omitempty"` - // File descriptors with all options, including source-retention options. - // These descriptors are only provided for the files listed in - // files_to_generate. - SourceFileDescriptors []*descriptorpb.FileDescriptorProto `protobuf:"bytes,17,rep,name=source_file_descriptors,json=sourceFileDescriptors" json:"source_file_descriptors,omitempty"` // The version number of protocol compiler. CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion" json:"compiler_version,omitempty"` } @@ -258,13 +269,6 @@ func (x *CodeGeneratorRequest) GetProtoFile() []*descriptorpb.FileDescriptorProt return nil } -func (x *CodeGeneratorRequest) GetSourceFileDescriptors() []*descriptorpb.FileDescriptorProto { - if x != nil { - return x.SourceFileDescriptors - } - return nil -} - func (x *CodeGeneratorRequest) GetCompilerVersion() *Version { if x != nil { return x.CompilerVersion @@ -289,18 +293,8 @@ type CodeGeneratorResponse struct { Error *string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` // A bitmask of supported features that the code generator supports. // This is a bitwise "or" of values from the Feature enum. - SupportedFeatures *uint64 `protobuf:"varint,2,opt,name=supported_features,json=supportedFeatures" json:"supported_features,omitempty"` - // The minimum edition this plugin supports. This will be treated as an - // Edition enum, but we want to allow unknown values. It should be specified - // according the edition enum value, *not* the edition number. Only takes - // effect for plugins that have FEATURE_SUPPORTS_EDITIONS set. - MinimumEdition *int32 `protobuf:"varint,3,opt,name=minimum_edition,json=minimumEdition" json:"minimum_edition,omitempty"` - // The maximum edition this plugin supports. This will be treated as an - // Edition enum, but we want to allow unknown values. It should be specified - // according the edition enum value, *not* the edition number. Only takes - // effect for plugins that have FEATURE_SUPPORTS_EDITIONS set. 
- MaximumEdition *int32 `protobuf:"varint,4,opt,name=maximum_edition,json=maximumEdition" json:"maximum_edition,omitempty"` - File []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"` + SupportedFeatures *uint64 `protobuf:"varint,2,opt,name=supported_features,json=supportedFeatures" json:"supported_features,omitempty"` + File []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"` } func (x *CodeGeneratorResponse) Reset() { @@ -349,20 +343,6 @@ func (x *CodeGeneratorResponse) GetSupportedFeatures() uint64 { return 0 } -func (x *CodeGeneratorResponse) GetMinimumEdition() int32 { - if x != nil && x.MinimumEdition != nil { - return *x.MinimumEdition - } - return 0 -} - -func (x *CodeGeneratorResponse) GetMaximumEdition() int32 { - if x != nil && x.MaximumEdition != nil { - return *x.MaximumEdition - } - return 0 -} - func (x *CodeGeneratorResponse) GetFile() []*CodeGeneratorResponse_File { if x != nil { return x.File @@ -513,7 +493,7 @@ var file_google_protobuf_compiler_plugin_proto_rawDesc = []byte{ 0x01, 0x28, 0x05, 0x52, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x06, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x22, 0xcf, 0x02, 0x0a, 0x14, 0x43, 0x6f, 0x64, + 0x52, 0x06, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x22, 0xf1, 0x01, 0x0a, 0x14, 0x43, 0x6f, 0x64, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x66, 0x69, 0x6c, @@ -523,29 +503,18 @@ var file_google_protobuf_compiler_plugin_proto_rawDesc = []byte{ 0x74, 0x6f, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x5c, - 0x0a, 0x17, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x64, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x11, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x15, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, - 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x4c, 0x0a, 0x10, - 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, - 0x72, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x69, - 0x6c, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x85, 0x04, 0x0a, 0x15, 0x43, - 0x6f, 0x64, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 
0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x2d, 0x0a, 0x12, 0x73, 0x75, - 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, - 0x64, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, - 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x0e, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0e, 0x6d, 0x61, 0x78, - 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x48, 0x0a, 0x04, 0x66, + 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x4c, + 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x69, + 0x6c, 0x65, 0x72, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x63, 0x6f, 0x6d, + 0x70, 0x69, 0x6c, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x94, 0x03, 0x0a, + 0x15, 0x43, 0x6f, 0x64, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x2d, 0x0a, 0x12, + 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, + 0x74, 0x65, 0x64, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x48, 0x0a, 0x04, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x64, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, @@ -561,20 +530,18 @@ var file_google_protobuf_compiler_plugin_proto_rawDesc = []byte{ 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x11, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, - 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x57, 0x0a, 0x07, 0x46, 0x65, 0x61, + 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x38, 0x0a, 0x07, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x10, 0x0a, 0x0c, 0x46, 0x45, 0x41, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x1b, 0x0a, 0x17, 0x46, 0x45, 0x41, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, - 0x4c, 0x10, 0x01, 0x12, 0x1d, 0x0a, 0x19, 0x46, 0x45, 0x41, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x53, - 0x55, 0x50, 0x50, 0x4f, 0x52, 0x54, 0x53, 0x5f, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x53, - 0x10, 0x02, 0x42, 0x72, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, - 0x65, 0x72, 0x42, 0x0c, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, - 0x5a, 0x29, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, - 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, - 0x65, 0x73, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x70, 0x62, 0xaa, 0x02, 0x18, 0x47, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x43, 0x6f, - 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x72, + 0x4c, 0x10, 0x01, 0x42, 0x72, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x69, + 0x6c, 0x65, 0x72, 0x42, 0x0c, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x73, 0x5a, 0x29, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, + 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x70, 0x62, 0xaa, 0x02, 0x18, 0x47, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x43, + 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x72, } var ( @@ -591,7 +558,7 @@ func file_google_protobuf_compiler_plugin_proto_rawDescGZIP() []byte { var file_google_protobuf_compiler_plugin_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_google_protobuf_compiler_plugin_proto_msgTypes = make([]protoimpl.MessageInfo, 4) -var file_google_protobuf_compiler_plugin_proto_goTypes = []any{ +var file_google_protobuf_compiler_plugin_proto_goTypes = []interface{}{ (CodeGeneratorResponse_Feature)(0), // 0: google.protobuf.compiler.CodeGeneratorResponse.Feature (*Version)(nil), // 1: google.protobuf.compiler.Version (*CodeGeneratorRequest)(nil), // 2: google.protobuf.compiler.CodeGeneratorRequest @@ -602,15 +569,14 @@ var file_google_protobuf_compiler_plugin_proto_goTypes = []any{ } var file_google_protobuf_compiler_plugin_proto_depIdxs = []int32{ 5, // 0: google.protobuf.compiler.CodeGeneratorRequest.proto_file:type_name -> google.protobuf.FileDescriptorProto - 5, // 1: google.protobuf.compiler.CodeGeneratorRequest.source_file_descriptors:type_name -> google.protobuf.FileDescriptorProto - 1, // 2: google.protobuf.compiler.CodeGeneratorRequest.compiler_version:type_name -> google.protobuf.compiler.Version - 4, // 3: google.protobuf.compiler.CodeGeneratorResponse.file:type_name -> google.protobuf.compiler.CodeGeneratorResponse.File - 6, // 4: google.protobuf.compiler.CodeGeneratorResponse.File.generated_code_info:type_name -> google.protobuf.GeneratedCodeInfo - 5, // [5:5] is the sub-list for method output_type - 5, // [5:5] is the sub-list for method input_type - 5, // [5:5] is the sub-list for extension type_name - 5, // [5:5] is the sub-list for extension extendee - 0, // [0:5] is the sub-list for field type_name + 1, // 1: google.protobuf.compiler.CodeGeneratorRequest.compiler_version:type_name -> google.protobuf.compiler.Version + 4, // 2: google.protobuf.compiler.CodeGeneratorResponse.file:type_name -> google.protobuf.compiler.CodeGeneratorResponse.File + 6, // 3: google.protobuf.compiler.CodeGeneratorResponse.File.generated_code_info:type_name -> google.protobuf.GeneratedCodeInfo + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 4, // [4:4] is the sub-list 
for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name } func init() { file_google_protobuf_compiler_plugin_proto_init() } @@ -619,7 +585,7 @@ func file_google_protobuf_compiler_plugin_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_compiler_plugin_proto_msgTypes[0].Exporter = func(v any, i int) any { + file_google_protobuf_compiler_plugin_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Version); i { case 0: return &v.state @@ -631,7 +597,7 @@ func file_google_protobuf_compiler_plugin_proto_init() { return nil } } - file_google_protobuf_compiler_plugin_proto_msgTypes[1].Exporter = func(v any, i int) any { + file_google_protobuf_compiler_plugin_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*CodeGeneratorRequest); i { case 0: return &v.state @@ -643,7 +609,7 @@ func file_google_protobuf_compiler_plugin_proto_init() { return nil } } - file_google_protobuf_compiler_plugin_proto_msgTypes[2].Exporter = func(v any, i int) any { + file_google_protobuf_compiler_plugin_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*CodeGeneratorResponse); i { case 0: return &v.state @@ -655,7 +621,7 @@ func file_google_protobuf_compiler_plugin_proto_init() { return nil } } - file_google_protobuf_compiler_plugin_proto_msgTypes[3].Exporter = func(v any, i int) any { + file_google_protobuf_compiler_plugin_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*CodeGeneratorResponse_File); i { case 0: return &v.state diff --git a/vendor/modules.txt b/vendor/modules.txt index 34735391..a49806d6 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -73,14 +73,11 @@ github.com/beorn7/perks/quantile ## explicit; go 1.21 github.com/brianvoe/gofakeit/v6 github.com/brianvoe/gofakeit/v6/data -# github.com/bufbuild/protocompile v0.14.1 -## explicit; go 1.21 +# github.com/bufbuild/protocompile v0.4.0 +## explicit; go 1.18 github.com/bufbuild/protocompile github.com/bufbuild/protocompile/ast github.com/bufbuild/protocompile/internal -github.com/bufbuild/protocompile/internal/editions -github.com/bufbuild/protocompile/internal/featuresext -github.com/bufbuild/protocompile/internal/messageset github.com/bufbuild/protocompile/linker github.com/bufbuild/protocompile/options github.com/bufbuild/protocompile/parser @@ -137,9 +134,6 @@ github.com/gin-gonic/gin/binding github.com/gin-gonic/gin/internal/bytesconv github.com/gin-gonic/gin/internal/json github.com/gin-gonic/gin/render -# github.com/go-ping/ping v1.1.0 -## explicit; go 1.14 -github.com/go-ping/ping # github.com/go-pkgz/expirable-cache v0.0.3 ## explicit; go 1.14 github.com/go-pkgz/expirable-cache @@ -186,8 +180,8 @@ github.com/gogo/protobuf/types # github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da ## explicit github.com/golang/groupcache/lru -# github.com/golang/protobuf v1.5.4 -## explicit; go 1.17 +# github.com/golang/protobuf v1.5.3 +## explicit; go 1.9 github.com/golang/protobuf/jsonpb github.com/golang/protobuf/proto github.com/golang/protobuf/ptypes @@ -197,7 +191,7 @@ github.com/golang/protobuf/ptypes/timestamp # github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8 ## explicit; go 1.19 github.com/google/pprof/profile -# github.com/google/uuid v1.4.0 +# github.com/google/uuid v1.3.0 ## explicit github.com/google/uuid # github.com/gorilla/websocket v1.5.0 @@ -223,8 +217,8 @@ 
github.com/influxdata/line-protocol/v2/lineprotocol # github.com/itchyny/timefmt-go v0.1.5 ## explicit; go 1.17 github.com/itchyny/timefmt-go -# github.com/jhump/protoreflect v1.16.0 -## explicit; go 1.19 +# github.com/jhump/protoreflect v1.15.1 +## explicit; go 1.18 github.com/jhump/protoreflect/codec github.com/jhump/protoreflect/desc github.com/jhump/protoreflect/desc/internal @@ -234,6 +228,7 @@ github.com/jhump/protoreflect/desc/sourceinfo github.com/jhump/protoreflect/dynamic github.com/jhump/protoreflect/dynamic/grpcdynamic github.com/jhump/protoreflect/grpcreflect +github.com/jhump/protoreflect/grpcreflect/internal/grpc_reflection_v1 github.com/jhump/protoreflect/internal github.com/jhump/protoreflect/internal/codec # github.com/json-iterator/go v1.1.12 @@ -314,8 +309,8 @@ github.com/prometheus/common/model github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# github.com/prometheus/prometheus v0.46.0 -## explicit; go 1.19 +# github.com/prometheus/prometheus v0.39.1 +## explicit; go 1.18 github.com/prometheus/prometheus/util/strutil # github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec ## explicit; go 1.12 @@ -342,12 +337,6 @@ github.com/tidwall/match # github.com/tidwall/pretty v1.2.0 ## explicit; go 1.16 github.com/tidwall/pretty -# github.com/tidwall/tinylru v1.1.0 -## explicit; go 1.14 -github.com/tidwall/tinylru -# github.com/tidwall/wal v1.1.7 -## explicit; go 1.13 -github.com/tidwall/wal # github.com/tinylib/msgp v1.1.6 ## explicit; go 1.14 github.com/tinylib/msgp/msgp @@ -391,8 +380,8 @@ go.uber.org/zap/zapcore # golang.org/x/arch v0.0.0-20210923205945-b76863e36670 ## explicit; go 1.17 golang.org/x/arch/x86/x86asm -# golang.org/x/crypto v0.23.0 -## explicit; go 1.18 +# golang.org/x/crypto v0.14.0 +## explicit; go 1.17 golang.org/x/crypto/sha3 # golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 ## explicit; go 1.20 @@ -401,8 +390,8 @@ golang.org/x/exp/slices # golang.org/x/mod v0.13.0 ## explicit; go 1.18 golang.org/x/mod/semver -# golang.org/x/net v0.25.0 -## explicit; go 1.18 +# golang.org/x/net v0.16.0 +## explicit; go 1.17 golang.org/x/net/bpf golang.org/x/net/html golang.org/x/net/html/atom @@ -419,19 +408,18 @@ golang.org/x/net/internal/timeseries golang.org/x/net/ipv4 golang.org/x/net/ipv6 golang.org/x/net/trace -# golang.org/x/sync v0.8.0 -## explicit; go 1.18 -golang.org/x/sync/errgroup +# golang.org/x/sync v0.4.0 +## explicit; go 1.17 golang.org/x/sync/semaphore -# golang.org/x/sys v0.20.0 -## explicit; go 1.18 +# golang.org/x/sys v0.13.0 +## explicit; go 1.17 golang.org/x/sys/cpu golang.org/x/sys/execabs golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry -# golang.org/x/text v0.15.0 -## explicit; go 1.18 +# golang.org/x/text v0.13.0 +## explicit; go 1.17 golang.org/x/text/encoding golang.org/x/text/encoding/charmap golang.org/x/text/encoding/htmlindex @@ -478,11 +466,11 @@ golang.org/x/tools/internal/typesinternal ## explicit; go 1.11 golang.org/x/xerrors golang.org/x/xerrors/internal -# google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17 +# google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006 ## explicit; go 1.19 google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.61.0 -## explicit; go 1.19 +# google.golang.org/grpc v1.51.0 +## explicit; go 1.17 google.golang.org/grpc google.golang.org/grpc/attributes google.golang.org/grpc/backoff @@ -499,7 +487,6 @@ google.golang.org/grpc/credentials/insecure 
google.golang.org/grpc/encoding google.golang.org/grpc/encoding/proto google.golang.org/grpc/grpclog -google.golang.org/grpc/health google.golang.org/grpc/health/grpc_health_v1 google.golang.org/grpc/internal google.golang.org/grpc/internal/backoff @@ -514,12 +501,10 @@ google.golang.org/grpc/internal/grpclog google.golang.org/grpc/internal/grpcrand google.golang.org/grpc/internal/grpcsync google.golang.org/grpc/internal/grpcutil -google.golang.org/grpc/internal/idle google.golang.org/grpc/internal/metadata google.golang.org/grpc/internal/pretty google.golang.org/grpc/internal/resolver google.golang.org/grpc/internal/resolver/dns -google.golang.org/grpc/internal/resolver/dns/internal google.golang.org/grpc/internal/resolver/passthrough google.golang.org/grpc/internal/resolver/unix google.golang.org/grpc/internal/serviceconfig @@ -531,24 +516,20 @@ google.golang.org/grpc/keepalive google.golang.org/grpc/metadata google.golang.org/grpc/peer google.golang.org/grpc/reflection -google.golang.org/grpc/reflection/grpc_reflection_v1 google.golang.org/grpc/reflection/grpc_reflection_v1alpha google.golang.org/grpc/resolver -google.golang.org/grpc/resolver/dns google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap -# google.golang.org/protobuf v1.34.2 -## explicit; go 1.20 +# google.golang.org/protobuf v1.31.0 +## explicit; go 1.11 google.golang.org/protobuf/encoding/protojson google.golang.org/protobuf/encoding/prototext google.golang.org/protobuf/encoding/protowire google.golang.org/protobuf/internal/descfmt google.golang.org/protobuf/internal/descopts google.golang.org/protobuf/internal/detrand -google.golang.org/protobuf/internal/editiondefaults -google.golang.org/protobuf/internal/editionssupport google.golang.org/protobuf/internal/encoding/defval google.golang.org/protobuf/internal/encoding/json google.golang.org/protobuf/internal/encoding/messageset @@ -573,7 +554,6 @@ google.golang.org/protobuf/runtime/protoiface google.golang.org/protobuf/runtime/protoimpl google.golang.org/protobuf/types/descriptorpb google.golang.org/protobuf/types/dynamicpb -google.golang.org/protobuf/types/gofeaturespb google.golang.org/protobuf/types/known/anypb google.golang.org/protobuf/types/known/apipb google.golang.org/protobuf/types/known/durationpb From 7ad38e3d6808fadb3129a7ad207b53b54325472d Mon Sep 17 00:00:00 2001 From: chenxing Date: Wed, 12 Nov 2025 15:49:24 +0800 Subject: [PATCH 08/10] feat: dialtesting grpc --- dialtesting/grpc.go | 1 - 1 file changed, 1 deletion(-) diff --git a/dialtesting/grpc.go b/dialtesting/grpc.go index 8274e18d..ab269944 100644 --- a/dialtesting/grpc.go +++ b/dialtesting/grpc.go @@ -503,7 +503,6 @@ func (t *GRPCTask) run() error { t.reqCost = time.Since(start) }() - // Check configuration opt := t.AdvanceOptions if opt == nil || opt.RequestOptions == nil { t.reqError = "request options required" From df7dafad89b1ecb66c64a2bb3600774896d4f044 Mon Sep 17 00:00:00 2001 From: chenxing Date: Thu, 13 Nov 2025 13:37:18 +0800 Subject: [PATCH 09/10] feat: dialtesting grpc --- dialtesting/grpc.go | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/dialtesting/grpc.go b/dialtesting/grpc.go index ab269944..d7d3def6 100644 --- a/dialtesting/grpc.go +++ b/dialtesting/grpc.go @@ -498,11 +498,6 @@ func (t *GRPCTask) getJSONRequest() string { } func (t *GRPCTask) run() error { - start := time.Now() - defer func() { - t.reqCost = time.Since(start) - }() - opt := t.AdvanceOptions if opt == nil || 
opt.RequestOptions == nil { t.reqError = "request options required" @@ -554,8 +549,10 @@ func (t *GRPCTask) run() error { } // Execute RPC call + rpcStart := time.Now() stub := grpcdynamic.NewStub(conn) resp, err := stub.InvokeRpc(ctx, method, msg) + t.reqCost = time.Since(rpcStart) if err != nil { t.reqError = err.Error() return nil From 9d140734b30edd8391079698bd49f91eaa24b7d4 Mon Sep 17 00:00:00 2001 From: chenxing Date: Thu, 20 Nov 2025 10:54:57 +0800 Subject: [PATCH 10/10] feat: dialtesting grpc --- dialtesting/grpc.go | 37 ++++++++++++++++++++++++++++++------- 1 file changed, 30 insertions(+), 7 deletions(-) diff --git a/dialtesting/grpc.go b/dialtesting/grpc.go index d7d3def6..f3bcb1f2 100644 --- a/dialtesting/grpc.go +++ b/dialtesting/grpc.go @@ -103,8 +103,10 @@ type GRPCTask struct { timeout time.Duration postScriptResult *ScriptResult - rawTask *GRPCTask - methodDescriptor *pdesc.MethodDescriptor // cached method descriptor for ProtoFiles discovery + rawTask *GRPCTask + healthMethodDescriptor *pdesc.MethodDescriptor // cached method descriptor for HealthCheck discovery + protoFilesMethodDescriptor *pdesc.MethodDescriptor // cached method descriptor for ProtoFiles discovery + reflectionMethodDescriptor *pdesc.MethodDescriptor // cached method descriptor for Reflection discovery } func (t *GRPCTask) initTask() { @@ -182,11 +184,17 @@ func (t *GRPCTask) init() error { // Cache method descriptor if using ProtoFiles discovery if reqOpt.ProtoFiles != nil && len(reqOpt.ProtoFiles.ProtoFiles) > 0 { - methodDesc, err := t.findMethodAmongProtofiles() + _, err := t.findMethodAmongProtofiles() if err != nil { return fmt.Errorf("find method descriptor failed: %w", err) } - t.methodDescriptor = methodDesc + } + + if reqOpt.HealthCheck != nil { + _, err := t.findHealthCheckMethod() + if err != nil { + return fmt.Errorf("find health check method failed: %w", err) + } } return nil @@ -262,6 +270,10 @@ func (t *GRPCTask) findMethod(ctx context.Context, conn *grpc.ClientConn) (*pdes } func (t *GRPCTask) findHealthCheckMethod() (*pdesc.MethodDescriptor, error) { + if t.healthMethodDescriptor != nil { + return t.healthMethodDescriptor, nil + } + healthFD := grpc_health_v1.File_grpc_health_v1_health_proto if healthFD == nil { return nil, fmt.Errorf("health check file descriptor not available") @@ -281,11 +293,16 @@ func (t *GRPCTask) findHealthCheckMethod() (*pdesc.MethodDescriptor, error) { if md == nil { return nil, fmt.Errorf("health check method %s not found", HealthCheckMethodName) } + t.healthMethodDescriptor = md return md, nil } func (t *GRPCTask) findMethodByReflection(ctx context.Context, conn *grpc.ClientConn) (*pdesc.MethodDescriptor, error) { + if t.reflectionMethodDescriptor != nil { + return t.reflectionMethodDescriptor, nil + } + opt := t.AdvanceOptions if opt == nil || opt.RequestOptions == nil || opt.RequestOptions.Reflection == nil { return nil, fmt.Errorf("reflection discovery not configured") @@ -321,13 +338,14 @@ func (t *GRPCTask) findMethodByReflection(ctx context.Context, conn *grpc.Client if md == nil { return nil, fmt.Errorf("method %s not found in service %s", methodName, serviceName) } + t.reflectionMethodDescriptor = md return md, nil } func (t *GRPCTask) findMethodAmongProtofiles() (*pdesc.MethodDescriptor, error) { // Return cached method descriptor if available - if t.methodDescriptor != nil { - return t.methodDescriptor, nil + if t.protoFilesMethodDescriptor != nil { + return t.protoFilesMethodDescriptor, nil } opt := t.AdvanceOptions @@ -371,6 +389,7 @@ func (t 
*GRPCTask) findMethodAmongProtofiles() (*pdesc.MethodDescriptor, error) for _, fd := range desc { if sd := fd.FindService(service); sd != nil { if md := sd.FindMethodByName(method); md != nil { + t.protoFilesMethodDescriptor = md return md, nil } } @@ -942,7 +961,7 @@ func (t *GRPCTask) renderProtoFiles(protoFiles *GRPCProtoFilesDiscovery, fm temp } // if full method is changed, clear the cached method descriptor if t.AdvanceOptions.RequestOptions.ProtoFiles.FullMethod != fullMethod { - t.methodDescriptor = nil + t.protoFilesMethodDescriptor = nil } t.AdvanceOptions.RequestOptions.ProtoFiles.FullMethod = fullMethod } @@ -968,6 +987,10 @@ func (t *GRPCTask) renderReflection(reflection *GRPCReflectionDiscovery, fm temp if err != nil { return fmt.Errorf("render reflection full method failed: %w", err) } + // if full method is changed, clear the cached method descriptor + if t.AdvanceOptions.RequestOptions.Reflection.FullMethod != fullMethod { + t.reflectionMethodDescriptor = nil + } t.AdvanceOptions.RequestOptions.Reflection.FullMethod = fullMethod }
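The final patch in this series (PATCH 10/10) replaces the single cached method descriptor with one cache field per discovery mode (health check, proto files, reflection) and nil-s the matching field in renderProtoFiles/renderReflection whenever the rendered full method changes, so a re-rendered template never reuses a stale descriptor. Below is a minimal sketch of that cache-and-invalidate pattern, written outside the patch for illustration only; methodCache, get, and the stand-in resolver are hypothetical names and are not part of dialtesting/grpc.go.

    package main

    import (
    	"errors"
    	"fmt"

    	pdesc "github.com/jhump/protoreflect/desc"
    )

    // methodCache captures the idea behind the three cached descriptor fields:
    // remember the resolved descriptor together with the full method it was
    // resolved for, and re-resolve only when that full method changes.
    type methodCache struct {
    	fullMethod string
    	md         *pdesc.MethodDescriptor
    }

    func (c *methodCache) get(fullMethod string, resolve func(string) (*pdesc.MethodDescriptor, error)) (*pdesc.MethodDescriptor, error) {
    	if c.md != nil && c.fullMethod == fullMethod {
    		return c.md, nil // cache hit: skip proto parsing / reflection round trips
    	}
    	md, err := resolve(fullMethod) // cache miss or invalidated entry
    	if err != nil {
    		return nil, err
    	}
    	c.fullMethod, c.md = fullMethod, md
    	return md, nil
    }

    func main() {
    	cache := &methodCache{}
    	// Stand-in resolver; the real task resolves descriptors from proto files,
    	// server reflection, or the bundled grpc.health.v1 file descriptor.
    	resolve := func(fm string) (*pdesc.MethodDescriptor, error) {
    		return nil, errors.New("descriptor resolution is not wired up in this sketch")
    	}
    	if _, err := cache.get("grpc.health.v1.Health/Check", resolve); err != nil {
    		fmt.Println("resolve failed:", err)
    	}
    }

In the patch itself the same effect is achieved lazily: resolution is attempted in init() or on the next run(), and the per-mode descriptor field is cleared only when the templated FullMethod differs from the previously rendered value.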